diff --git "a/4469.jsonl" "b/4469.jsonl" new file mode 100644--- /dev/null +++ "b/4469.jsonl" @@ -0,0 +1,1669 @@ +{"seq_id":"39353441727","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport thermocepstrum as tc\n\ndef k_fstar(nnjen, interval=range(1, 20, 2), corrfactors=np.arange(1, 3), plot = False):\n jjjen = {}\n for cor in corrfactors:\n TSKIP_LIST = np.array([nnjen.Nyquist_f_THz / j for j in interval], dtype=int)\n jjjen[cor] = tc.heatcurrent.fstar_analysis(nnjen, TSKIP_LIST, Kmin_corrfactor=cor, plot=False)\n if plot == True:\n plot_k(jjjen, nnjen, TSKIP_LIST, corrfactors, 'kappa vs fstar')\n return jjjen, TSKIP_LIST\n\n\ndef plot_k(jjjen, nnjen, TSKIP_LIST, corrs=np.arange(1,3), title=None):\n f, ax = plt.subplots(1, figsize=(8.0, 6.0), constrained_layout=True)\n ls = 12\n #\n ax.tick_params(axis='x', labelsize=ls)\n ax.tick_params(axis='y', labelsize=ls)\n kappa_Kmin = {}\n kappa_Kmin_err = {}\n Pstar_Kmin = {}\n FSTAR_LIST = nnjen.Nyquist_f_THz / TSKIP_LIST\n\n for cor in corrs:\n kappa_Kmin[cor] = np.array([j.kappa_Kmin for j in jjjen[cor]])\n kappa_Kmin_err[cor] = np.array([j.kappa_Kmin_std for j in jjjen[cor]])\n Pstar_Kmin[cor] = np.array([j.dct.aic_Kmin + 1 for j in jjjen[cor]])\n f1 = ax.plot(FSTAR_LIST, kappa_Kmin[cor], '-o', label='c={}'.format(cor))\n ax.fill_between(FSTAR_LIST, kappa_Kmin[cor] - kappa_Kmin_err[cor], kappa_Kmin[cor] + kappa_Kmin_err[cor],\n alpha=0.4)\n if title is not None:\n ax.set_title(title, fontsize=ls)\n ax.set_xlabel('F* (THz)', fontsize=ls)\n ax.set_ylabel(r'$\\kappa$ (W/m/K)', fontsize=ls)\n ax.legend(loc='lower right', fontsize=ls, ncol=len(corrs))\n return\n\n\ndef block_analysis(jen, temp, tmax, dt, vol, fstar, corrs=np.arange(1, 3), u='metal_vis'):\n mean = np.zeros(np.size(corrs))\n std = np.zeros(np.size(corrs))\n mean_std = np.zeros(np.size(corrs))\n\n i = 0\n vis = {}\n vis_std = {}\n for cor in corrs:\n\n Nstep = int(np.rint(tmax / (dt * 1e-3)))\n maxrows = np.size(jen, 0)\n Ncurrs = maxrows // Nstep\n vis[cor] = []\n vis_std[cor] = []\n t = []\n for ij in range(Ncurrs):\n init = Nstep * ij\n end = Nstep * (ij + 1) if Nstep * (ij + 1) < jen.shape[0] else jen.shape[0]\n\n tmean = np.mean(temp[init:end])\n t.append(tmean)\n\n jj = tc.HeatCurrent(j=jen[init:end], DT_FS=dt, TEMPERATURE=tmean, units=u, VOLUME=vol, PSD_FILTER_W=0.3)\n rj = jj.resample_current(fstar_THz=fstar, plot=False, PSD_FILTER_W=0.10)\n rj.cepstral_analysis(Kmin_corrfactor=cor)\n\n vis[cor].append(rj.kappa_Kmin * 100)\n vis_std[cor].append(rj.kappa_Kmin_std * 100)\n mean[i] = np.mean(vis[cor])\n std[i] = np.std(vis[cor])\n mean_std[i] = np.mean(vis_std[cor])\n i += 1\n return vis, vis_std\ndef block_analysis_pstar(jen, temp, tmax, dt, vol, fstar, corrs=np.arange(1, 3), u='metal_vis'):\n mean = np.zeros(np.size(corrs))\n std = np.zeros(np.size(corrs))\n mean_std = np.zeros(np.size(corrs))\n\n i = 0\n vis = {}\n vis_std = {}\n pstar = {}\n for cor in corrs:\n\n Nstep = int(np.rint(tmax / (dt * 1e-3)))\n maxrows = np.size(jen, 0)\n Ncurrs = maxrows // Nstep\n vis[cor] = np.zeros(Ncurrs)\n vis_std[cor] = np.zeros(Ncurrs)\n pstar[cor] = np.zeros(Ncurrs)\n t = []\n for ij in range(Ncurrs):\n init = Nstep * ij\n end = Nstep * (ij + 1) if Nstep * (ij + 1) < jen.shape[0] else jen.shape[0]\n\n tmean = np.mean(temp[init:end])\n t.append(tmean)\n\n jj = tc.HeatCurrent(j=jen[init:end], DT_FS=dt, TEMPERATURE=tmean, units=u, VOLUME=vol, PSD_FILTER_W=0.3)\n rj = jj.resample_current(fstar_THz=fstar, plot=False, PSD_FILTER_W=0.10)\n rj.cepstral_analysis(Kmin_corrfactor=cor)\n\n vis[cor][ij] = rj.kappa_Kmin * 100\n vis_std[cor][ij] = rj.kappa_Kmin_std * 100\n pstar[cor][ij] = rj.dct.aic_Kmin + 1\n mean[i] = np.mean(vis[cor])\n std[i] = np.std(vis[cor])\n mean_std[i] = np.mean(vis_std[cor])\n i += 1\n return vis, vis_std, pstar\n","repo_name":"cesaremalosso/scriptini","sub_path":"mymodules/cepstral_tools.py","file_name":"cepstral_tools.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36744647535","text":"import sys\nsys.dont_write_bytecode = True\nsys.excepthook = sys.__excepthook__\nimport os\nimport re\nimport token\nimport tokenize\n\nfrom .errors import TestSpecError\n\n\n# include directive aliases; the final keyword name is always 'include'\nINCLUDE_KEYWORDS = [ 'include', 'insert directive file' ]\n\n\nclass ScriptSpec:\n\n def __init__(self, lineno, keyword, attrs, attr_names, value):\n \"\"\n self.keyword = keyword\n self.attrs = attrs\n self.attr_names = attr_names # retains order, duplicates possible\n self.value = value\n self.lineno = lineno # a string; line_num or include_filename:line_num\n\n\nclass ScriptReader:\n\n def __init__(self, filename, nested_depth=0):\n \"\"\n self.filename = filename\n self.nested_depth = nested_depth\n\n self.speclineL = [] # list of [line number, raw spec string]\n self.specL = [] # list of ScriptSpec objects\n self.shbang = None # None or a string\n\n self.readfile()\n\n def basename(self):\n \"\"\"\n Returns the base name of the source file without the extension.\n \"\"\"\n return os.path.splitext( os.path.basename( self.filename ) )[0]\n\n def getSpecList(self):\n \"\"\"\n Returns a list of ScriptSpec objects whose order is the same as in\n the source script.\n \"\"\"\n return self.specL\n\n vvtpat = re.compile( '[ \\t]*#[ \\t]*VVT[ \\t]*:' )\n\n def readfile(self):\n \"\"\n lines = read_directive_lines( self.filename )\n\n self.spec = None\n for line,lineno in lines:\n info = self._get_file_line_info( lineno )\n if lineno == 1 and line[:2] == '#!':\n self.shbang = line[2:].strip()\n else:\n self.parse_line( line, info )\n\n if self.spec is not None:\n self.speclineL.append( self.spec )\n\n self.process_specs()\n\n def parse_line(self, line, info):\n \"\"\n if line:\n char0 = line[0]\n\n if char0 == '#':\n m = ScriptReader.vvtpat.match( line )\n if m is not None:\n self.parse_spec( line[m.end():], info )\n\n elif self.spec is not None:\n # an empty line stops any continuation\n self.speclineL.append( self.spec )\n self.spec = None\n\n def parse_spec(self, line, info):\n \"\"\"\n Parse the contents of the line after a #VVT: marker.\n \"\"\"\n line = line.strip()\n if line:\n if line[0] == ':':\n # continuation of previous spec\n if self.spec is None:\n raise TestSpecError( \"A #VVT:: continuation was found\" + \\\n \" but there is nothing to continue, line \" + info )\n elif len(line) > 1:\n self.spec[1] += ' ' + line[1:]\n elif self.spec is None:\n # no existing spec and new spec found\n self.spec = [ info, line ]\n else:\n # spec exists and new spec found\n self.speclineL.append( self.spec )\n self.spec = [ info, line ]\n elif self.spec is not None:\n # an empty line stops any continuation\n self.speclineL.append( self.spec )\n self.spec = None\n\n # the following pattern should match the first paren enclosed stuff,\n # but parens within double quotes are ignored\n # 1. this would match as few chars within parens\n # [(].*?[)]\n # 2. this would match as few chars within parens unless there is a\n # double quote in the parens\n # [(][^\"]*?[)]\n # 3. this would match as few chars within double quotes\n # [\"].*?[\"]\n # 4. this would match as few chars within double quotes possible\n # chars on either side (but as few of them as well)\n # .*?[\"].*?[\"].*?\n # 5. this will match either number 2 or number 4 above as a regex group\n # ([^\"]*?|.*?[\"].*?[\"].*?)\n # 6. this adds back the parens on the outside\n # [(]([^\"]*?|.*?[\"].*?[\"].*?)[)]\n ATTRPAT = re.compile( '[(]([^\"]*?|.*?[\"].*?[\"].*?)[)]' )\n\n # this pattern matches everything up to the first ':' or '=' or paren\n DEFPAT = re.compile( '.*?[:=(]' )\n\n def process_specs(self):\n \"\"\"\n Turns the list of string specifications into keywords with attributes\n and content.\n \"\"\"\n kpat = ScriptReader.DEFPAT\n\n for info,line in self.speclineL:\n key = None\n val = None\n attrs = None\n attr_names = None\n m = kpat.match( line )\n if m:\n key = line[:m.end()-1].strip()\n rest = line[m.end()-1:]\n\n attrs,attr_names,val = check_parse_attributes_section( rest, info )\n\n else:\n key = line.strip()\n\n if not key:\n raise TestSpecError(\n 'missing or invalid specification keyword, line ' + info )\n\n if key in INCLUDE_KEYWORDS:\n # an alias is replaced with the primary name\n key = INCLUDE_KEYWORDS[0]\n # replace 'val' with the specs list from the included file\n val = self._parse_insert_file( info, val )\n\n specobj = ScriptSpec( info, key, attrs, attr_names, val )\n self.specL.append( specobj )\n\n def _parse_insert_file(self, info, filename):\n \"\"\n if filename is None or not filename.strip():\n raise TestSpecError( 'missing include file name, line ' + info )\n\n if not os.path.isabs( filename ):\n d = os.path.dirname( os.path.abspath( self.filename ) )\n filename = os.path.normpath( os.path.join( d, filename ) )\n\n try:\n inclreader = ScriptReader( filename, self.nested_depth+1 )\n except TestSpecError:\n raise\n except Exception:\n raise TestSpecError( 'at line ' + info + ' the include '\n 'failed: ' + str( sys.exc_info()[1] ) )\n\n return inclreader.getSpecList()\n\n def _get_file_line_info(self, lineno):\n \"\"\n if self.nested_depth == 0:\n return str(lineno)\n else:\n return os.path.basename(self.filename)+':'+str(lineno)\n\n\ndef read_directive_lines( filename ):\n \"\"\n lines = []\n\n skipnl = False\n with open( filename, 'rt' ) as fp:\n for tok_type,tok,beg,end,line in tokenize.generate_tokens( fp.readline ):\n\n if tok_type == tokenize.COMMENT:\n lines.append( (tok.strip(),end[0]) )\n skipnl = True\n\n else:\n if tok_type == tokenize.NL:\n if not skipnl:\n lines.append( ('',end[0]) )\n elif tok_type == token.STRING:\n lines.append( ('',end[0]) )\n elif tok_type == token.NEWLINE:\n pass\n else:\n break\n skipnl = False\n\n return lines\n\n\ndef split_attr_match( matchobj, origstr ):\n \"\"\n attrs = origstr[:matchobj.end()]\n attrs = attrs.lstrip('(').rstrip(')').strip()\n\n therest = origstr[matchobj.end():].strip()\n\n return attrs, therest\n\n\ndef parse_attr_string( attrstr, info ):\n \"\"\n D = {}\n L = []\n for s in attrstr.split(','):\n s = s.strip().strip('\"').strip()\n i = s.find( '=' )\n if i == 0:\n raise TestSpecError( \\\n 'invalid attribute specification, line ' + info )\n elif i > 0:\n n = s[:i].strip()\n v = s[i+1:].strip().strip('\"')\n D[n] = v\n L.append(n)\n elif s:\n D[s] = ''\n L.append(s)\n\n return D,L\n\n\ndef check_parse_attributes_section( a_string, info ):\n \"\"\n attrD = None\n nameL = None\n tail = None\n\n attrs = None\n a_string = a_string.strip()\n\n if a_string and a_string[0] == '(':\n\n m = ScriptReader.ATTRPAT.match( a_string )\n if m:\n attrs,rest = split_attr_match( m, a_string )\n\n if rest:\n if rest[0] in ':=':\n tail = rest[1:].strip()\n elif rest[0] == '#':\n tail = ''\n else:\n raise TestSpecError( 'extra text following attributes, ' + \\\n 'line '+info )\n else:\n tail = ''\n else:\n raise TestSpecError( \\\n 'malformed attribute specification, line ' + info )\n\n elif a_string and a_string[0] in ':=':\n tail = a_string[1:].strip()\n else:\n tail = a_string.strip()\n\n if attrs is not None:\n attrD,nameL = parse_attr_string( attrs, info )\n\n return attrD, nameL, tail\n","repo_name":"sandialabs/vvtest","sub_path":"libvvtest/ScriptReader.py","file_name":"ScriptReader.py","file_ext":"py","file_size_in_byte":8871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34823737364","text":"dataset_type = 'CocoDataset'\ndata_root = '/usr/videodate/dataset/coco/'\nbase_lr = 0.32\nwarmup_iters = 2000\n\nmodel = dict(\n type='GFL',\n pretrained='torchvision://resnet50',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=True,\n dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),\n stage_with_dcn=(False, True, True, True),\n style='pytorch'),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n start_level=1,\n add_extra_convs=True,\n extra_convs_on_inputs=False, # use P5\n num_outs=5,\n relu_before_extra_convs=True),\n bbox_head=dict(\n type='VFNetHead',\n num_classes=4,\n in_channels=256,\n stacked_convs=3,\n feat_channels=256,\n strides=[8, 16, 32, 64, 128],\n center_sampling=False,\n dcn_on_last_conv=True,\n use_atss=True,\n use_vfl=True,\n loss_cls=dict(\n type='VarifocalLoss',\n use_sigmoid=True,\n alpha=0.75,\n gamma=2.0,\n iou_weighted=True,\n loss_weight=1.0),\n loss_bbox=dict(type='CIoULoss', loss_weight=1.5),\n loss_bbox_refine=dict(type='CIoULoss', loss_weight=2.0)))\n\n# training and testing settings\ntrain_cfg = dict(\n assigner=dict(type='ATSSAssigner', topk=9),\n allowed_border=-1,\n pos_weight=-1,\n debug=False)\ntest_cfg = dict(\n nms_pre=1000,\n min_bbox_size=0,\n score_thr=0.05,\n nms=dict(type='nms', iou_threshold=0.6),\n max_per_img=100)\n\n\ndata = dict(\n samples_per_gpu=4,\n workers_per_gpu=2,\n train=dict(\n type='CocoDataset',\n ann_file=\n '/usr/videodate/dataset/coco/annotations/coco_half_person_80_train.json',\n img_prefix='/usr/videodate/dataset/coco/train2017/',\n classes=['person', 'bottle', 'chair', 'potted plant'],\n pipeline=[\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 480), (1333, 960)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='PhotoMetricDistortion', brightness_delta=48),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='Normalize',\n mean=[127.5, 127.5, 127.5],\n std=[128, 128, 128],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n ]),\n val=dict(\n type='CocoDataset',\n ann_file=\n '/usr/videodate/dataset/coco/annotations/coco_half_person_80_val.json',\n img_prefix='/usr/videodate/dataset/coco/val2017/',\n classes=['person', 'bottle', 'chair', 'potted plant'],\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[127.5, 127.5, 127.5],\n std=[128, 128, 128],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]),\n test=dict(\n type='CocoDataset',\n ann_file=\n '/usr/videodate/dataset/coco/annotations/coco_half_person_80_val.json',\n img_prefix='/usr/videodate/dataset/coco/val2017/',\n classes=['person', 'bottle', 'chair', 'potted plant'],\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[127.5, 127.5, 127.5],\n std=[128, 128, 128],\n to_rgb=True),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nevaluation = dict(interval=1, metric='bbox', classwise=True)\n# optimizer = dict(type='AdamW', lr=0.001)\n# optimizer_config = dict(grad_clip=None)\n# lr_config = dict(\n# policy='step',\n# warmup='linear',\n# warmup_iters=2000,\n# warmup_ratio=0.01,\n# step=[90, 110, 115])\n\noptimizer = dict(type='SGD',\n lr=0.01,\n momentum=0.9,\n weight_decay=0.0001,\n paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))\noptimizer_config = dict(grad_clip=None)\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=500,\n warmup_ratio=0.001,\n step=[16, 22])\ntotal_epochs = 24\n\ncheckpoint_config = dict(interval=1)\nlog_config = dict(\n interval=20,\n hooks=[dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')])\n\n# custom_hooks = [dict(type=\"EMAHook\", momentum=0.1, interval=2, warm_up=warmup_iters, resume_from=None, priority='HIGHEST')]\n\ndevice_ids = range(0, 2)\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\n# work_dir = 'work_dirs/paa_atss_OSACSP_pafpn_private_SGD_lr0.32_cosine_ema'\nwork_dir = 'work_dirs/vfnet_CSPOSA_yefpn_private_head_3cls/'\nload_from = None\nresume_from = None\n# resume_from = None\nworkflow = [('train', 1)]\ngpu_ids = range(0, 2)\n","repo_name":"HAOCHENYE/yehc_mmdet","sub_path":"configs/vfnet/vfnet_resnet50.py","file_name":"vfnet_resnet50.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70863955601","text":"from matplotlib.style import use\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport torch\nimport numpy as np\nfrom transformers import AutoTokenizer\nfrom tqdm import tqdm\nimport os\nimport pandas as pd\nimport re\nfrom datetime import datetime\n\n\nclass TamilDataset(Dataset):\n def __init__(self, dataset, target, tokenizer=None, device='cpu', use_cache=False, tokenizer_kwargs={}):\n self.dataset = dataset\n self.use_cache=use_cache\n self.target = target\n self.device = device\n print('self.use_cache', self.use_cache)\n if(not use_cache):\n self.tokenizer = tokenizer\n self.tokenizer_kwargs = tokenizer_kwargs\n self.tokenizer_kwargs.setdefault('max_length', 512)\n self.tokenizer_kwargs.setdefault('truncation', True)\n self.tokenizer_kwargs.setdefault('padding', 'max_length')\n print('init done')\n\n def __len__(self):\n return len(self.dataset)\n\n\n def __getitem__(self, idx):\n if(not self.use_cache):\n print('before batch')\n batch = self.tokenizer(self.dataset[idx], return_tensors='pt', **self.tokenizer_kwargs)\n print('after batch')\n return {'data': batch['input_ids'].to(self.device), 'target': torch.tensor(np.array(self.target[idx], dtype=np.float32)).to(self.device)}\n else:\n print({'data': self.dataset[idx], 'target': self.target[idx]})\n return {'data': self.dataset[idx].to(self.device), 'target': self.target[idx].to(self.device)}\n\n\ndef encode(tokenizer, dataset, target, device='cpu', tokenizer_kwargs={}):\n tokenizer_kwargs.setdefault('max_length', 512)\n tokenizer_kwargs.setdefault('truncation', True)\n tokenizer_kwargs.setdefault('padding', 'max_length')\n batch = tokenizer(dataset, return_tensors='pt', **tokenizer_kwargs)\n return {'data': batch['input_ids'].to(device), 'target': torch.tensor(np.array(target, dtype=np.float32)).to(device)}\n\ndef corrupt_dataset(data):\n x = np.random.randint(2, size=1)[0]\n l = len(data) \n s = int(len(data)/10)\n new_data = data\n if(x):\n label = 1\n places = [0] + list(np.random.randint(1, l-1, size=s))\n places.sort()\n\n ### logic to be optimized\n for i in range(len(places)-1):\n new_data += data[places[i]:places[i+1]-1]\n # new_data = data[:places[0]-1] + data[places[0] : places[1]-1] + data[places[1]:]\n else:\n places = [-1, -1]\n label = 0\n return {'data' : new_data, 'label' : label, 'places' : places }\n\n\ndef ReadDatasetFiles(root_path, use_cache=False, cache_dir = './cache/dump/', test=False):\n if(use_cache):\n try:\n print(cache_dir)\n print(os.listdir(cache_dir))\n filename = cache_dir + sorted(os.listdir(cache_dir))[-1]\n print('reading from file', filename)\n pd_dataset = pd.read_pickle(filename)\n print('finished reading file from cache')\n print(pd_dataset.head())\n data = list(pd_dataset['data'])\n labels = list(pd_dataset['target'])\n dataset = TamilDataset(data, labels, use_cache=use_cache)\n return dataset\n except:\n use_cache = False\n print(\"cannot use cache\")\n\n text_file_names = os.listdir(root_path)\n dataset = []\n\n for file_name in tqdm(text_file_names):\n with open(root_path + file_name, 'r', encoding=\"utf8\") as f:\n dataset.append([f.read()])\n if(test):\n return dataset[:100]\n \n return dataset\n\ndef ProcessDataset(dataset, test=False):\n dataset_processed = []\n for tdata in tqdm(dataset):\n tdata = re.split('|\\n', tdata[0])[1:]\n dataset_processed.append(list(filter(None, [tdata_.strip('\\n') for tdata_ in tdata])))\n\n dataset_combined = []\n for data in dataset_processed:\n dataset_combined += data\n dataset_combined = dataset_combined[:100]\n corrupted_dataset = list(map(corrupt_dataset, tqdm(dataset_combined)))\n return corrupted_dataset\n\ndef TokenizeAllData(dataset, tokenizer, device='cpu', tokenizer_kwargs = {}):\n dataset = [encode(tokenizer, data['data'], data['label'], device=device, tokenizer_kwargs=tokenizer_kwargs) for data in tqdm(dataset)]\n df = pd.DataFrame(dataset)\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%Y_%m_%d_%H_%M_%S\")\n print('writing to file ', cache_dir + date_time + '.pkl')\n df.to_pickle(cache_dir + date_time + '.pkl')\n print('finished writing...')\n exit(0)\n\n\n\ndef TamilDataLoader(root_path, tokenizer_name=\"monsoon-nlp/tamillion\", batch_size=1, device='cpu', write_cache=False, use_cache=False, cache_dir = './cache/dump/', test=False, tokenizer_kwargs = {}):\n\n print('use_cache', use_cache)\n dataset = ReadDatasetFiles(root_path, tokenizer_name, batch_size, test=test)\n if(use_cache):\n train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, test=test)\n print('returning dataloader')\n return train_dataloader\n\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n tokenizer.pad_token = 0 \n processed_dataset = ProcessDataset(dataset, test)\n\n if(write_cache):\n TokenizeAllData(processed_dataset, tokenizer, device, tokenizer_kwargs)\n\n # del processed_dataset['places']\n df = pd.DataFrame(processed_dataset)\n data = list(df['data']) \n labels = list(df['label'])\n dataset = TamilDataset(data, labels, tokenizer, device, tokenizer_kwargs = tokenizer_kwargs)\n train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n return train_dataloader\n\n\nif __name__ == '__main__':\n GPT2CNN_kwargs = {'max_length' : 1024,}\n ElectraCNN_kwargs = {'max_length' : 512}\n tokenizer_name = 'abinayam/gpt-2-tamil'\n tokenizer_kwargs = GPT2CNN_kwargs\n root_path = './T_Dataset/train/train/'\n\n\n test=True\n write_cache = False\n cache_dir = './cache/tokenizers/' + tokenizer_name + '/'\n if(write_cache and not os.path.exists(cache_dir)):\n print(\"can not use cache because \", cache_dir, \"does not exists\")\n os.makedirs(cache_dir)\n print('creating', cache_dir) \n\n train_dataloader = TamilDataLoader(root_path, tokenizer_name=tokenizer_name, batch_size = 2, device='cpu', write_cache= write_cache, cache_dir = cache_dir, test=test, tokenizer_kwargs=tokenizer_kwargs)\n batch = next(iter(train_dataloader))\n print(batch)\n\n","repo_name":"knitts-team/Context-Comprehension-Enhancement-Tamil","sub_path":"T_DataLoader/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28829868436","text":"import turtle as tr\n\n\nscreen=tr.Screen()\nscreen.bgcolor(\"black\")\n\n\ndef create_outer_circle(avenger):\n avenger.setposition(80,12)\n avenger.pendown()\n avenger.pencolor(\"white\")\n avenger.pensize(2)\n avenger.fillcolor(\"#581fe9\")\n avenger.begin_fill()\n avenger.circle(152)\n avenger.end_fill()\n avenger.penup()\n\n\ndef create_inner_circle(avenger):\n avenger.pensize(2)\n avenger.pencolor(\"black\")\n avenger.fillcolor(\"black\")\n avenger.setposition(80,29)\n avenger.pendown()\n avenger.begin_fill()\n avenger.circle(135)\n avenger.end_fill()\n avenger.penup()\n\n\ndef create_A(avenger):\n avenger.goto(0,0)\n avenger.pendown()\n avenger.pensize(3)\n avenger.pencolor(\"white\")\n avenger.fillcolor(\"#581fe9\")\n avenger.begin_fill()\n avenger.forward(25)\n avenger.right(-60)\n avenger.forward(70)\n avenger.right(60)\n avenger.forward(50)\n avenger.right(90)\n avenger.forward(30)\n avenger.right(-90)\n avenger.forward(70)\n avenger.right(-90)\n avenger.forward(290)\n avenger.right(-90)\n avenger.forward(75)\n avenger.right(-60)\n avenger.forward(370)\n avenger.goto(0,0)\n avenger.end_fill()\n avenger.penup()\n\n\ndef create_gap(avenger):\n avenger.pendown()\n avenger.fillcolor(\"black\")\n avenger.pencolor(\"white\")\n avenger.begin_fill()\n avenger.penup()\n avenger.goto(71,88)\n avenger.pendown()\n avenger.right(240)\n avenger.forward(38) #1\n avenger.right(-90)\n avenger.forward(90) #2\n avenger.goto(71,88)\n avenger.end_fill()\n avenger.penup()\n\n\ndef arrow(avenger):\n avenger.pensize(3)\n avenger.goto(110,32)\n avenger.pendown()\n avenger.right(60)\n avenger.forward(80)\n avenger.goto(110,112)\n avenger.penup()\n\nif __name__ == '__main__':\n avenger=tr.Turtle()\n avenger.color(\"red\")\n # avenger.speed(20)\n avenger.hideturtle()\n avenger.penup()\n create_outer_circle(avenger)\n create_inner_circle(avenger)\n create_A(avenger)\n create_gap(avenger)\n arrow(avenger)\n\nscreen.mainloop()","repo_name":"Nitin-Pilkhwal/Turtle-designs","sub_path":"Avengers_logo.py","file_name":"Avengers_logo.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"40729142786","text":"# -*- coding: utf-8 -*-\n# author:SHAN\n# datetime:2021/10/14 20:41\n\nfrom processer import get_entities\nfrom collections import Counter\n\n\nclass Metric(object):\n def __init__(self, id2ent):\n self.id2ent = id2ent\n self.reset()\n\n def reset(self):\n self.origins = []\n self.founds = []\n self.rights = []\n\n\n def update(self, labels, tags, flag='crf'):\n '''\n labels_paths: [[],[],[],....]\n pred_paths: [[],[],[],.....]\n\n :param label_paths:\n :param pred_paths:\n :return:\n Example:\n >>> labels_paths = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n >>> pred_paths = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n '''\n for label, tag in zip(labels, tags):\n if flag == 'crf':\n label_entities = get_entities(label, self.id2ent)\n tag_entities = get_entities(tag, self.id2ent)\n else:\n label_entities = labels\n tag_entities = tags\n self.origins.extend(label_entities)\n self.founds.extend(tag_entities)\n self.rights.extend([tag_entity for tag_entity in tag_entities if tag_entity in label_entities])\n\n def compute(self, origin, found, right):\n recall = 0 if origin == 0 else (right / origin)\n precision = 0 if found == 0 else (right / found)\n f1 = 0. if recall + precision == 0 else (2 * precision * recall) / (precision + recall)\n return recall, precision, f1\n\n def result(self):\n class_info = {}\n origin_counter = Counter([x[0] for x in self.origins])\n found_counter = Counter([x[0] for x in self.founds])\n right_counter = Counter([x[0] for x in self.rights])\n for type_, count in origin_counter.items(): # 对每一种不同的标签分别做运算\n origin = count\n found = found_counter.get(type_, 0) # 返回标签为 type_ 的个数\n right = right_counter.get(type_, 0)\n recall, precision, f1 = self.compute(origin, found, right)\n class_info[type_] = {\"precision\": round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)} # 四舍五入\n\n origin = len(self.origins)\n found = len(self.founds)\n right = len(self.rights)\n recall, precision, f1 = self.compute(origin, found, right)\n\n # print('Precision: {}\\nRecall: {}\\nF1: {}\\n'.format(precision, recall, f1))\n # 第一个返回值为所有标签的 metric,第二个返回值记录了不同标签分别的 metric\n return {'precision': precision, 'recall': recall, 'f1': f1}, class_info\n","repo_name":"FightingFrogg/medical_ner","sub_path":"utils/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71193250963","text":"from collections import deque\n\n#Find all compliment (X-number) for tree1 and search for them in tree2\nclass Solution:\n def iterateTree(self, root, arr, x, isTree1):\n if root == None:\n return\n if isTree1:\n arr.add(x - root.data)\n else:\n arr.add(root.data)\n if root.data >= x:\n self.iterateTree(root.left, arr, x, isTree1)\n else:\n self.iterateTree(root.left, arr, x, isTree1)\n self.iterateTree(root.right, arr, x, isTree1)\n\n def countPairs(self, root1, root2, x):\n tree1 = set()\n self.iterateTree(root1, tree1, x, True)\n tree2 = set()\n self.iterateTree(root2, tree2, x, False)\n pairs = tree1.intersection(tree2)\n return len(pairs)\n\n# Tree Node\nclass Node:\n def __init__(self, val):\n self.right = None\n self.data = val\n self.left = None\n\n# Function to Build Tree\ndef buildTree(s):\n # Corner Case\n if len(s) == 0 or s[0] == \"N\":\n return None\n\n # Creating list of strings from input\n # string after spliting by space\n ip = list(map(str, s.split()))\n\n # Create the root of the tree\n root = Node(int(ip[0]))\n size = 0\n q = deque()\n\n # Push the root to the queue\n q.append(root)\n size = size + 1\n\n # Starting from the second element\n i = 1\n while size > 0 and i < len(ip):\n # Get and remove the front of the queue\n currNode = q[0]\n q.popleft()\n size = size - 1\n\n # Get the current node's value from the string\n currVal = ip[i]\n\n # If the left child is not null\n if currVal != \"N\":\n # Create the left child for the current node\n currNode.left = Node(int(currVal))\n\n # Push it to the queue\n q.append(currNode.left)\n size = size + 1\n # For the right child\n i = i + 1\n if i >= len(ip):\n break\n currVal = ip[i]\n\n # If the right child is not null\n if currVal != \"N\":\n # Create the right child for the current node\n currNode.right = Node(int(currVal))\n\n # Push it to the queue\n q.append(currNode.right)\n size = size + 1\n i = i + 1\n return root\n\nif __name__ == \"__main__\":\n s1 = '5 3 7 2 4 6 8'\n s2 = '10 6 15 3 8 11 18'\n root1 = buildTree(s1)\n root2 = buildTree(s2)\n x = 16\n ob = Solution()\n print(ob.countPairs(root1, root2, x))\n","repo_name":"JoyalPeter/GeekForGeeksPOTD","sub_path":"December 2023/Dec 3/MyApproach.py","file_name":"MyApproach.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17837707990","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport tkinter as tk\ndef say_hi():\n print(\"演示下,以后设计\")\nroot = tk.Tk()\nroot.geometry('800x600')\nframe1 = tk.Frame(root,bg='blue',bd=2,height = 200, width=600)\nframe2 = tk.Frame(root,bd=2,width=550,bg='#ff3399',height = 200)\nroot.title(\"tkinter frame\")\nlabel = tk.Label(frame2, text=\"发送信息\", justify=tk.LEFT)\nL2 = tk.Label(frame1,text='接\\n收\\n区',width=2, justify=tk.LEFT, font=(\"宋体\", 12, \"bold\"))\nL2 .pack(padx=2,pady=40,side=tk.LEFT,anchor=tk.N) # 添加接收区文字标签\nv = '收到新信息:\\n'# 添加接收区的文本框\ntxt1 = tk.Text(frame1, height = 10,yscrollcommand=1)\ntxt1.pack()\ntxt2 = tk.Text(frame2, height = 10,yscrollcommand=1)\ntxt2.pack()\nlabel.pack(side=tk.LEFT)\nhi_there = tk.Button(frame2, text=\"发送\", command=say_hi)\nhi_there.pack()\nframe1.pack_propagate(0)\nframe1.pack(padx=1, pady=1)\nframe2.pack_propagate(0)\nframe2.pack(padx=10, pady=10)\nroot.mainloop()\n","repo_name":"marginlove/python","sub_path":"daima/10.9.py","file_name":"10.9.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2025272052","text":"import os\nimport time\nfrom dataclasses import fields\nfrom typing import Dict, Type\n\nimport gradio as gr\nimport json\nimport torch\nfrom gradio import Accordion, Tab\n\nfrom swift.llm import SftArguments\nfrom swift.ui.base import BaseUI\nfrom swift.ui.llm_train.advanced import Advanced\nfrom swift.ui.llm_train.dataset import Dataset\nfrom swift.ui.llm_train.hyper import Hyper\nfrom swift.ui.llm_train.lora import LoRA\nfrom swift.ui.llm_train.model import Model\nfrom swift.ui.llm_train.quantization import Quantization\nfrom swift.ui.llm_train.runtime import Runtime\nfrom swift.ui.llm_train.save import Save\nfrom swift.ui.llm_train.self_cog import SelfCog\nfrom swift.utils import get_logger\n\nlogger = get_logger()\n\n\nclass LLMTrain(BaseUI):\n\n group = 'llm_train'\n\n sub_ui = [\n Model,\n Dataset,\n Runtime,\n Save,\n LoRA,\n Hyper,\n Quantization,\n SelfCog,\n Advanced,\n ]\n\n locale_dict: Dict[str, Dict] = {\n 'llm_train': {\n 'label': {\n 'zh': 'LLM训练',\n 'en': 'LLM Training',\n }\n },\n 'submit_alert': {\n 'value': {\n 'zh':\n '任务已开始,请查看tensorboard或日志记录,关闭本页面不影响训练过程',\n 'en':\n 'Task started, please check the tensorboard or log file, '\n 'closing this page does not affect training'\n }\n },\n 'submit': {\n 'value': {\n 'zh': '🚀 开始训练',\n 'en': '🚀 Begin'\n }\n },\n 'dry_run': {\n 'label': {\n 'zh': '仅生成运行命令',\n 'en': 'Dry-run'\n },\n 'info': {\n 'zh': '仅生成运行命令,开发者自行运行',\n 'en': 'Generate run command only, for manually running'\n }\n },\n 'gpu_id': {\n 'label': {\n 'zh': '选择可用GPU',\n 'en': 'Choose GPU'\n },\n 'info': {\n 'zh': '选择训练使用的GPU号,如CUDA不可用只能选择CPU',\n 'en': 'Select GPU to train'\n }\n },\n 'gpu_memory_fraction': {\n 'label': {\n 'zh': 'GPU显存限制',\n 'en': 'GPU memory fraction'\n },\n 'info': {\n 'zh':\n '设置使用显存的比例,一般用于显存测试',\n 'en':\n 'Set the memory fraction ratio of GPU, usually used in memory test'\n }\n },\n 'sft_type': {\n 'label': {\n 'zh': '训练方式',\n 'en': 'Train type'\n },\n 'info': {\n 'zh': '选择训练的方式',\n 'en': 'Select the training type'\n }\n },\n 'seed': {\n 'label': {\n 'zh': '随机数种子',\n 'en': 'Seed'\n },\n 'info': {\n 'zh': '选择随机数种子',\n 'en': 'Select a random seed'\n }\n },\n 'dtype': {\n 'label': {\n 'zh': '训练精度',\n 'en': 'Training Precision'\n },\n 'info': {\n 'zh': '选择训练精度',\n 'en': 'Select the training precision'\n }\n },\n 'use_ddp': {\n 'label': {\n 'zh': '使用DDP',\n 'en': 'Use DDP'\n },\n 'info': {\n 'zh': '是否使用数据并行训练',\n 'en': 'Use Distributed Data Parallel to train'\n }\n },\n 'neftune_alpha': {\n 'label': {\n 'zh': 'neftune_alpha',\n 'en': 'neftune_alpha'\n },\n 'info': {\n 'zh': '使用neftune提升训练效果',\n 'en': 'Use neftune to improve performance'\n }\n }\n }\n\n choice_dict = {}\n default_dict = {}\n for f in fields(SftArguments):\n if 'choices' in f.metadata:\n choice_dict[f.name] = f.metadata['choices']\n default_dict[f.name] = getattr(SftArguments, f.name)\n\n @classmethod\n def do_build_ui(cls, base_tab: Type['BaseUI']):\n with gr.TabItem(elem_id='llm_train', label=''):\n gpu_count = 0\n default_device = 'cpu'\n if torch.cuda.is_available():\n gpu_count = torch.cuda.device_count()\n default_device = '0'\n with gr.Blocks():\n Model.build_ui(base_tab)\n Dataset.build_ui(base_tab)\n Runtime.build_ui(base_tab)\n with gr.Row():\n gr.Dropdown(elem_id='sft_type', scale=4)\n gr.Textbox(elem_id='seed', scale=4)\n gr.Dropdown(elem_id='dtype', scale=4)\n gr.Checkbox(elem_id='use_ddp', value=False, scale=4)\n gr.Slider(\n elem_id='neftune_alpha',\n minimum=0.0,\n maximum=1.0,\n step=0.05,\n scale=4)\n with gr.Row():\n gr.Dropdown(\n elem_id='gpu_id',\n multiselect=True,\n choices=[str(i) for i in range(gpu_count)] + ['cpu'],\n value=default_device,\n scale=8)\n gr.Textbox(\n elem_id='gpu_memory_fraction', value='1.0', scale=4)\n gr.Checkbox(elem_id='dry_run', value=False, scale=4)\n submit = gr.Button(\n elem_id='submit', scale=4, variant='primary')\n\n Save.build_ui(base_tab)\n LoRA.build_ui(base_tab)\n Hyper.build_ui(base_tab)\n Quantization.build_ui(base_tab)\n SelfCog.build_ui(base_tab)\n Advanced.build_ui(base_tab)\n submit.click(\n cls.train, [\n value for value in cls.elements().values()\n if not isinstance(value, (Tab, Accordion))\n ], [\n cls.element('running_cmd'),\n cls.element('logging_dir'),\n cls.element('runtime_tab')\n ],\n show_progress=True)\n\n @classmethod\n def train(cls, *args):\n ignore_elements = ('model_type', 'logging_dir', 'more_params')\n sft_args = fields(SftArguments)\n sft_args = {\n arg.name: getattr(SftArguments, arg.name)\n for arg in sft_args\n }\n kwargs = {}\n kwargs_is_list = {}\n other_kwargs = {}\n more_params = {}\n keys = [\n key for key, value in cls.elements().items()\n if not isinstance(value, (Tab, Accordion))\n ]\n for key, value in zip(keys, args):\n compare_value = sft_args.get(key)\n compare_value_arg = str(compare_value) if not isinstance(\n compare_value, (list, dict)) else compare_value\n compare_value_ui = str(value) if not isinstance(\n value, (list, dict)) else value\n if key not in ignore_elements and key in sft_args and compare_value_ui != compare_value_arg and value:\n kwargs[key] = value if not isinstance(\n value, list) else ' '.join(value)\n kwargs_is_list[key] = isinstance(value, list) or getattr(\n cls.element(key), 'is_list', False)\n else:\n other_kwargs[key] = value\n if key == 'more_params' and value:\n more_params = json.loads(value)\n\n kwargs.update(more_params)\n sft_args = SftArguments(**kwargs)\n params = ''\n\n for e in kwargs:\n if kwargs_is_list[e]:\n params += f'--{e} {kwargs[e]} '\n else:\n params += f'--{e} \"{kwargs[e]}\" '\n params += '--add_output_dir_suffix False '\n for key, param in more_params.items():\n params += f'--{key} \"{param}\" '\n ddp_param = ''\n devices = other_kwargs['gpu_id']\n devices = [d for d in devices if d]\n if other_kwargs['use_ddp']:\n ddp_param = f'NPROC_PER_NODE={len(devices)}'\n assert (len(devices) == 1 or 'cpu' not in devices)\n gpus = ','.join(devices)\n cuda_param = ''\n if gpus != 'cpu':\n cuda_param = f'CUDA_VISIBLE_DEVICES={gpus}'\n\n log_file = os.path.join(sft_args.logging_dir, 'run.log')\n run_command = f'{cuda_param} {ddp_param} nohup swift sft {params} > {log_file} 2>&1 &'\n logger.info(f'Run training: {run_command}')\n if not other_kwargs['dry_run']:\n os.makedirs(sft_args.logging_dir, exist_ok=True)\n os.system(run_command)\n time.sleep(1) # to make sure the log file has been created.\n gr.Info(cls.locale('submit_alert', cls.lang)['value'])\n return run_command, sft_args.logging_dir, gr.update(visible=True)\n","repo_name":"tastelikefeet/swift","sub_path":"swift/ui/llm_train/llm_train.py","file_name":"llm_train.py","file_ext":"py","file_size_in_byte":9327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"31118439906","text":"from fastapi import FastAPI, Path, Query, File, UploadFile, Form\nfrom typing import List\n\nfrom starlette.responses import HTMLResponse\nfrom pydantic import BaseModel\n\nfrom vn.vn import VisualNarrator\n\n\nvn_app = FastAPI()\nvn = VisualNarrator()\n\n\nclass UserStoryFile(BaseModel):\n file_name: str\n system_name: str\n success: str\n output: dict\n\n\n@vn_app.get('/')\ndef root():\n content = '''VisualNarrator API entry point\n \n \n
\n

Visual Narrator

\n

User story file:
\n

\n

System name:
\n

\n

Optional outputs to include:
\n Prolog
\n JSON
\n HTML Report

\n \n
\n \n '''\n return HTMLResponse(content=content)\n\n\ndef __stories_from_file(lines: List[str]):\n return [str(line).lstrip('b\\'').rstrip('\\\\n\\'') for line in lines]\n\n\ndef __mine(file_name: str,\n stories: List[str],\n systemname: str,\n prolog: bool = False,\n json: bool = False,\n report: bool = False):\n success = False\n\n # Read file contents\n if stories:\n success = True\n\n # Pass settings\n vn.prolog = prolog\n vn.json = json\n\n # Run VN\n res = vn.run(file_name,\n systemname,\n stories=stories,\n write_local=False)\n\n # Fill output\n output = {}\n output['ontology'] = res['output_ontobj']\n\n if prolog:\n output['prolog'] = res['output_prologobj']\n if json:\n output['json'] = res['output_json']\n if report:\n output['report'] = res['report']\n\n return {\"file_name\": file_name,\n \"system_name\": systemname,\n \"success\": success,\n \"output\": output}\n\n\n@vn_app.post(\"/mine/\", response_model=UserStoryFile)\nasync def mine_user_stories(file: UploadFile = File(...),\n systemname: str = Query('System', description='Name of system', min_length=1),\n prolog: bool = Query(False, description='Return Prolog'),\n json: bool = Query(False, description='Return JSON'),\n report: bool = Query(False, description='Return HTML report')):\n stories = __stories_from_file(file.file.readlines())\n return __mine(file_name=file.filename,\n systemname=systemname,\n stories=stories,\n prolog=prolog,\n json=json,\n report=report)\n\n\n@vn_app.post(\"/mineform/\", response_model=UserStoryFile)\nasync def mine_user_stories_form(file: UploadFile = File(...),\n systemname: str = Form('System', min_length=1),\n prolog: bool = Form(False),\n json: bool = File(False),\n report: bool = File(False)):\n stories = __stories_from_file(file.file.readlines())\n return __mine(file_name=file.filename,\n systemname=systemname,\n stories=stories,\n prolog=prolog,\n json=json,\n report=report)\n","repo_name":"MarcelRobeer/VisualNarrator","sub_path":"vn/ui/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"30090873261","text":"# -*- coding: utf-8 -*-\n\nimport cv2.cv as cv\n\ncapture = cv.CaptureFromCAM(0)\ncv.NamedWindow(\"test\",1)\ncv.NamedWindow(\"test2\",1)\n\nwhile True:\n img = cv.QueryFrame(capture)\n \n imgHSV = cv.CreateImage(cv.GetSize(img), 8, 3)\n cv.CvtColor(img, imgHSV, cv.CV_BGR2HSV)\n \n imgBG = cv.CreateImage(cv.GetSize(img), 8, 1)\n cv.InRangeS(imgHSV,(165,100,150),(179,255,255),imgBG)\n #imgBG1 = cv.CreateImage(cv.GetSize(img), 8, 1)\n #cv.InRangeS(imgHSV,(0,100,150),(10,255,255),imgBG1)\n #imgBG2 = cv.CreateImage(cv.GetSize(img), 8, 1)\n #cv.InRangeS(imgHSV,(160,150,150),(179,255,255),imgBG2)\n \n #cv.Copy(imgBG1, imgBG2)\n \n moments = cv.Moments(cv.GetMat(imgBG))\n \n m10 = cv.GetSpatialMoment(moments, 1, 0)\n m01 = cv.GetSpatialMoment(moments, 0, 1)\n m00 = cv.GetCentralMoment(moments, 0, 0)\n \n if(m00 > 0): \n posX = m10/m00\n posY = m01/m00\n \n #print posX, posY\n \n cv.Circle(img, (int(posX), int(posY)), 5, (255,0,0), 5, cv.CV_AA)\n \n cv.ShowImage(\"test\", img)\n cv.ShowImage(\"test2\", imgBG)\n \n if(cv.WaitKey(20)!= -1):\n break","repo_name":"apocalyp0sys/OpenCV-pekagame","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17793051375","text":"# import sys\n# sys.path.insert(0, '/home/ec2-user/SageMaker/Accessbank CTR/src')\nfrom features.preprocess import identify_columns\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.feature_extraction import FeatureHasher\nfrom sklearn.model_selection import train_test_split\nfrom utils.utils import numerical_attribute,categorical_attribute,hash_features\n\n\ndef pipeline(hash_size):\n \n \"\"\"\n \n Function contains the pipeline methods to be used.\n It is broken down into numerical, categorical and hash pipeline\n \n \"\"\"\n num_pipeline = Pipeline(steps= [('imputer', SimpleImputer(strategy='mean')), ('std_scaler', MinMaxScaler())])\n cat_pipeline = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='Missing')),\n ('one_hot_encoding', OneHotEncoder(handle_unknown = \"ignore\", sparse = False))])\n hash_pipeline = Pipeline([('imputer', SimpleImputer(strategy='constant', fill_value='Missing')),\n ('hasher', FeatureHasher(n_features=hash_size, input_type='string'))])\n \n return num_pipeline,cat_pipeline,hash_pipeline\n\ndef train_test(data,hash_size,test_size):\n identify_columns(data,high_dim=hash_size, verbose=True)\n y = data['event_type']\n X = data.drop(['event_type'], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X,y,stratify = y,test_size=test_size)\n return X_train, X_test, y_train, y_test\n\ndef fit_transform(data, hash_size, test_size):\n \n \"\"\"\n \n Function that builds the pipeline and returns the \n pipeline object and the data to be used for modeling\n \n Args:\n hash_bucket size\n \n Returns:\n pipeline object\n data to be used for training after being transformed by the pipeline\n \n \"\"\"\n\n num_pipeline,cat_pipeline,hash_pipeline = pipeline(hash_size)\n full_pipeline = ColumnTransformer(\n transformers=[\n ('num', num_pipeline, numerical_attribute),\n ('cat', cat_pipeline, categorical_attribute),\n ('hash', hash_pipeline, hash_features)\n ])\n X_train, X_test, y_train, y_test = train_test(data,hash_size,test_size)\n \n full_pipeline.fit(X_train)\n \n X_train = full_pipeline.transform(X_train)\n X_test = full_pipeline.transform(X_test)\n \n print(X_train.shape)\n return X_train, X_test, y_train, y_test, full_pipeline","repo_name":"Sensei-akin/Customer-acquisition","sub_path":"Catboost-local/src/train/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14527832539","text":"import os\nimport requests\nimport datetime as dt\nimport logging\nimport telegram\nimport sqlite3\nimport re\nfrom telegram.ext import MessageHandler, Updater\n\nfrom dotenv import load_dotenv\nfrom wind_direct import wind\n\nload_dotenv()\nTORR = 133.3223684\nCHAT_ID = os.getenv('TELEGRAM_CHAT_ID')\nTOKEN = os.getenv('WEATHER_TOKEN')\nTELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')\nNN = '56.20, 44.00'\nWEATHER_URL_4_DAYS = 'https://api.openweathermap.org/data/2.5/forecast?q=' \\\n '{}&units={}&appid={}'\n\nWEATHER_URL = f'https://wttr.in/{NN}'\nUNITS = {'format': 2,\n 'M': '',\n 'Q': '',\n 'lang': 'ru'}\nbot = telegram.Bot(TELEGRAM_TOKEN)\n\n\ndef what_weather(city):\n response = requests.get(WEATHER_URL, params=UNITS)\n if response.status_code == 200:\n return f'Погода в Н.Новогороде: {response.text.strip()}'\n else:\n return '<ошибка на сервере>'\n\n\ndef weather_send(update, context):\n chat = update.effective_chat\n context.bot.send_message(chat_id=chat.id,\n text=what_weather(NN))\n\n\ndef weather(update, context):\n keyword = ' '.join(context.args)\n hours = ''.join(re.findall(r'\\d+', keyword))\n word = ' '.join(keyword.replace(hours, '').split())\n if hours == '':\n hours = 21\n conn = sqlite3.connect(\"weather.sqlite\", check_same_thread=False)\n cursor = conn.cursor()\n chat = update.effective_chat\n city_name = word\n units = 'metric'\n r4 = requests.get(WEATHER_URL_4_DAYS.format(\n city_name, units, TOKEN)).json()\n if requests.get(WEATHER_URL_4_DAYS.format(\n city_name, units, TOKEN)).json()['cod'] == '404':\n r4 = requests.get(WEATHER_URL_4_DAYS.format(\n 'Moscow', units, TOKEN)).json()\n counts1 = int(hours) // 3\n text1 = f\"Погода в н.п. - {r4['city']['name']} на {counts1 * 3} часов:\"\n bot.send_message(chat_id=chat.id, text=text1)\n r4 = r4['list']\n counts = 0\n for resp in r4:\n if counts == counts1:\n break\n counts += 1\n timestamp = int(resp['dt'])\n value = dt.datetime.fromtimestamp(timestamp)\n sql = \"SELECT icon FROM weather_id WHERE id=?\"\n des = (str(resp['weather'][0]['id']),)\n logging.debug(des)\n cursor.execute(sql, des)\n sql1 = \"SELECT icon FROM weather_icons WHERE day_icon=?\"\n q1 = cursor.fetchall()[0][0]\n logging.debug(q1)\n cursor.execute(sql1, (q1,))\n q2 = cursor.fetchall()[0][0]\n bot.send_message(\n chat_id=chat.id,\n text=(f\"🕗 {value.strftime('%Y-%m-%d %H:%M')} \"\n f\"⛅{resp['clouds']['all']}\"\n f\"🌡{resp['main']['temp']}°С \"\n f\"💧{resp['main']['humidity']}% \"\n f\"P{round(float(resp['main']['pressure']) * 100 / TORR)} \"\n f\"👀{round(resp['visibility'] / 1000)} км \"\n f\"{q2} \"\n f\"🌬{round(resp['wind']['speed'], 1)}\"\n f\"{wind(int(resp['wind']['deg']))} м/с\"))\n conn.close()\n","repo_name":"AlexKrup7/weatherbot","sub_path":"weather_bot.py","file_name":"weather_bot.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20458037149","text":"import json\nfrom Card import Card\nimport random\n\n\nclass Deck:\n\n \"\"\" This class applies sall the functionality of a sueca deck\"\"\"\n\n def __init__(self):\n self.cards = []\n with open('/usr/SuecaPY/src/deck.json') as json_file:\n data = json.load(json_file)\n buffer = data['buffer']\n for p in data['deck']:\n suit = p['suit']\n value = p['number']\n self.cards.append(Card(suit, value, buffer))\n\n def shuffleDeck(self, times=1):\n for n in range(times):\n x = [i for i in self.cards]\n random.shuffle(x)\n self.cards = (x)\n\n def cutDeck(self, percentage=0):\n # Always from top to bottom\n cardsToMove = round(len(self.cards) * percentage)\n sliceObj = slice(cardsToMove)\n self.cards = [\n *self.cards[cardsToMove:len(self.cards)], *self.cards[sliceObj]]\n print('control')\n","repo_name":"nunes-pedro/sueca","sub_path":"src/Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72436282320","text":"from socket import *\r\n\r\nserverPort = 12000\r\nserverHost = \"127.0.0.1\"\r\n\r\nclientSocket = socket(AF_INET, SOCK_STREAM)\r\nclientSocket.connect((serverHost, serverPort))\r\n\r\nwhile True:\r\n command = input('Enter command (Random/Add/Subtract/Exit): ')\r\n if command.lower() == \"exit\":\r\n break\r\n arg1 = input('Enter first number: ')\r\n arg2 = input('Enter second number: ')\r\n\r\n message = f\"{command};{arg1};{arg2}\"\r\n clientSocket.send(message.encode())\r\n response = clientSocket.recv(1024)\r\n print('Server response:', response.decode())\r\n\r\nclientSocket.close()","repo_name":"Zaenj/TCP4","sub_path":"Opgave4/Client4.py","file_name":"Client4.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15737208205","text":"class hash_table:\n def __init__(self):\n self.table = [None] * 127\n\n # Hash function\n def Hash_func(self, value):\n key = 0\n for i in range(0, len(value)):\n key += ord(value[i])\n return key % 127\n\n def Insert(self, value):\n hash = self.Hash_func(value)\n if self.table[hash] is None:\n self.table[hash] = value\n\n def Search(self, value):\n hash = self.Hash_func(value);\n if self.table[hash] is None:\n return None\n else:\n print(\"Se encontro el elemento en\")\n return hex(id(self.table[hash]))\n\n def Remove(self, value):\n hash = self.Hash_func(value);\n if self.table[hash] is None:\n print(\"No se encontro el elemento\", value)\n else:\n print(\"Element with value\", value, \"deleted\")\n self.table[hash] is None;\n\n\nH = hash_table()\nH.Insert(\"Alo\")\nH.Insert(\"Bou\")\nH.Insert(\"Col\")\n\nprint(H.Search(\"Bou\"))\n","repo_name":"spaingmzdaeg/BusquedaHash","sub_path":"Hash.py","file_name":"Hash.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"401074264","text":"from typing import Tuple, Iterable\nfrom math import sqrt\n\nfrom OpenGL.GL import *\n\nfrom .shaders import Program\n\nVec3 = Tuple[float, float, float]\n\n\ndef normalize(vec: Vec3) -> Vec3:\n x, y, z = vec\n magnitude = sqrt(x * x + y * y + z * z)\n return x / magnitude, y / magnitude, z / magnitude\n\n\nclass DirectionalLight:\n def __init__(self, color: Vec3, brightness: float, direction: Vec3):\n self.color: Vec3 = tuple(x * brightness for x in color)\n self.direction: Vec3 = normalize(direction)\n\n def push_uniform(self, shader: Program, var: str) -> None:\n glUniform3f(shader.uniforms[var + \".color\"], *self.color)\n glUniform3f(shader.uniforms[var + \".direction\"], *self.direction)\n\n @staticmethod\n def push_uniform_array(shader: Program, array: str, lights: Iterable[\"DirectionalLight\"]) -> None:\n for i, light in enumerate(lights):\n light.push_uniform(shader, f\"{array}[{i}]\")\n","repo_name":"poletaevvlad/CubePlayer","sub_path":"cubeplayer/renderer/engine/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6262966247","text":"#!/usr/bin/env python3\n\n\"\"\"Search for code fragments that dynamically allocate structures containing\npointers.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom shrike import php7\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\n 'inputfiles', nargs='+',\n help=(\n 'A list of pickle files mapping fragments to their summaries, '\n 'or the result of a previous run of the script to dump'))\nparser.add_argument(\n '-o', '--output',\n help=\"The output file to which results will be logged\")\nparser.add_argument(\n '-p', '--php',\n help=\"The PHP binary to use\")\nparser.add_argument(\n '-j', '--jobs', type=int, default=os.cpu_count(),\n help=\"The number of concurrent jobs to run\")\nparser.add_argument(\n '-d', '--dump', action='store_true', default=False,\n help=\"If provided, then dump the pointer info from a previous run\")\nparser.add_argument(\n '--pointer-offset', type=int, default=None,\n help=(\n \"Dump full pointer records for sequences which have a pointer\"\n \"at this offset\"))\nparser.add_argument(\n '-f', '--fragment-id', type=int, default=0,\n help=\"The ID of the fragment on which to dump more details\")\nparser.add_argument(\n '--debug', action='store_true', default=False,\n help=\"Enable debug mode (verbose logging)\")\nargs = parser.parse_args()\n\nif args.debug:\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n logging.basicConfig(level=logging.DEBUG)\n\nif not args.dump and (not args.php or not args.output):\n logger.error(\"You must specify a PHP binary and output directory\")\n parser.print_help()\n sys.exit(-1)\n\nif args.dump:\n pointer_offset = args.pointer_offset\n pointer_data = php7.load_from_files(args.inputfiles)\n s = reversed(sorted(pointer_data.items(), key=lambda t: len(t[1])))\n fid = 1\n for fragment, ptr_records in s:\n if not args.fragment_id:\n logger.info(\"FID: {}, Pointer Count: {} <= {}\".format(\n fid, len(ptr_records), fragment))\n if pointer_offset is None:\n continue\n\n found = False\n for record in ptr_records:\n if record.offset_in_container == pointer_offset:\n found = True\n\n if found:\n for record in ptr_records:\n print((\n \"\\tSize of allocation: {}, Offset of pointer: {}, \"\n \"Pointer: 0x{:x}\").format(\n record.allocation_size,\n record.offset_in_container,\n record.pointer))\n elif args.fragment_id == fid:\n logger.info(\"FID: {}, Pointer Count: {} <= {}\".format(\n fid, len(ptr_records), fragment))\n for record in ptr_records:\n print(\n (\"\\tSize of allocation: {}, Offset of pointer: {}, \"\n \"Pointer: 0x{:x}\").format(\n record.allocation_size,\n record.offset_in_container,\n record.pointer))\n fid += 1\n logger.info(\"{} fragments allocate pointers\".format(fid - 1))\n sys.exit(0)\n\nlogger.info(\"Utilising {} cores\".format(args.jobs))\nlogger.info(\"Analysing the PHP binary at {}\".format(args.php))\n\nfragment_data = php7.load_from_files(args.inputfiles)\nfragments = fragment_data.keys()\nlogger.info(\"Loaded {} fragments\".format(len(fragments)))\nresult, err_fatal, err_os, err_sec, err_no_pointers = \\\n php7.pointer_search(fragments, args.jobs, args.php)\n\ns = reversed(sorted(result.items(), key=lambda t: len(t[1])))\nfor fragment, ptr_count in s:\n logger.debug(\"{} <= {}\".format(ptr_count, fragment))\n\nlogger.info(\"{} fatal errors\".format(err_fatal))\nlogger.info(\"{} os errors\".format(err_os))\nlogger.info(\"{} security errors\".format(err_sec))\nlogger.info(\"{} fragments did not allocate pointers\".format(err_no_pointers))\nlogger.info(\"{} fragments allocated pointers\".format(len(result)))\n\nlogger.info(\"Saving results to {}\".format(args.output))\nphp7.dump_to_file(result, args.output)\n","repo_name":"SeanHeelan/HeapLayout","sub_path":"Shrike/shrike/pointer_search.py","file_name":"pointer_search.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"3"} +{"seq_id":"3527015589","text":"import time\nfrom pyost.iost import IOST\nfrom pyost.account import Account\nfrom pyost.algorithm import Ed25519\nfrom pyost.signature import KeyPair\nfrom base58 import b58decode\n\nif __name__ == '__main__':\n iost = IOST('localhost:30002')\n\n admin_seckey = b58decode(b'1rANSfcRzr4HkhbUFZ7L1Zp69JZZHiDDq5v7dNSbbEqeU4jxy3fszV4HGiaLQEyqVpS1dKT9g7zCVRxBVzuiUzB')\n admin_kp = KeyPair(Ed25519, admin_seckey)\n admin = Account('producer00001')\n admin.add_key_pair(admin_kp, 'active')\n admin.add_key_pair(admin_kp, 'owner')\n\n account_seckey = b58decode(\n b'4vZ8qw2MaGLVXsbW7TcyTDcEqrefAS34vuM1eJf7YrBL9Fpnq3LgRyDjnUfv7kjvPfsA5tQGnou3Bv2bYNXyorK1')\n account_kp = KeyPair(Ed25519, account_seckey)\n account = Account('testacc1')\n account.add_key_pair(account_kp, 'active')\n account.add_key_pair(account_kp, 'owner')\n\n # Create token\n token_sym = 't' + str(int(time.time() * 1000000))[-4:]\n tx = iost.create_call_tx('token.iost', 'create', token_sym, admin.name, 21000000,\n {\"fullName\": \"bit coin\", \"decimal\": 9})\n admin.sign_publish(tx)\n print('creating token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n ob_admin = iost.get_balance(admin.name, token_sym)\n ob0 = iost.get_balance(account.name, token_sym)\n\n # Issue token\n tx = iost.create_call_tx('token.iost', 'issue', token_sym, account.name, '99.1')\n admin.sign_publish(tx)\n print('issuing token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n nb_admin = iost.get_balance(admin.name, token_sym)\n nb0 = iost.get_balance(account.name, token_sym)\n assert nb_admin == ob_admin\n assert nb0 == ob0 + 99.1\n\n # Transfer token\n ob_admin = iost.get_balance(admin.name, token_sym)\n ob0 = iost.get_balance(account.name, token_sym)\n\n tx = iost.create_transfer_tx(token_sym, account.name, admin.name, 55.000000001)\n account.sign_publish(tx)\n print('transferring token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n nb_admin = iost.get_balance(admin.name, token_sym)\n nb0 = iost.get_balance(account.name, token_sym)\n assert nb_admin == ob_admin + 55.000000001\n assert nb0 == ob0 - 55.000000001\n\n # Transfer freeze\n ob_admin = iost.get_token_balance(admin.name, token_sym)\n ob0 = iost.get_token_balance(account.name, token_sym)\n\n tx = iost.create_call_tx('token.iost', 'transferFreeze',\n token_sym, admin.name, account.name, '5',\n int((time.time() + 5000) * 1e6), '')\n admin.sign_publish(tx)\n print('transfer-freezing token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n nb_admin = iost.get_token_balance(admin.name, token_sym)\n nb0 = iost.get_token_balance(account.name, token_sym)\n assert nb_admin.balance == ob_admin.balance - 5\n assert nb0.balance == ob0.balance\n assert nb0.frozen_balances[0].amount == 5\n\n # Balance of\n ob_admin = iost.get_token_balance(admin.name, token_sym)\n ob0 = iost.get_token_balance(account.name, token_sym)\n\n tx = iost.create_call_tx('token.iost', 'balanceOf',\n token_sym, account.name)\n admin.sign_publish(tx)\n print('querying balance of token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n nb_admin = iost.get_token_balance(admin.name, token_sym)\n nb0 = iost.get_token_balance(account.name, token_sym)\n assert nb_admin.balance == ob_admin.balance\n assert nb0.balance == ob0.balance + 5\n assert len(nb0.frozen_balances) == 0\n\n # Token supply\n tx = iost.create_call_tx('token.iost', 'supply', token_sym)\n account.sign_publish(tx)\n print('querying supply of token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n assert txr.returns[0] == '[\"99.1\"]'\n\n # Token destroy\n ob0 = iost.get_token_balance(account.name, token_sym)\n\n tx = iost.create_call_tx('token.iost', 'destroy',\n token_sym, account.name, str(ob0.balance))\n account.sign_publish(tx)\n print('destroying token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n\n nb0 = iost.get_token_balance(account.name, token_sym)\n assert nb0.balance == 0\n\n # Token total supply\n tx = iost.create_call_tx('token.iost', 'totalSupply', token_sym)\n account.sign_publish(tx)\n print('querying total supply of token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n assert txr.returns[0] == '[\"21000000\"]'\n\n # Token supply\n tx = iost.create_call_tx('token.iost', 'supply', token_sym)\n account.sign_publish(tx)\n print('querying supply of token...')\n txr = iost.send_and_wait_tx(tx)\n print(txr)\n assert txr.returns[0] == '[\"50.000000001\"]'\n","repo_name":"iost-official/pyost","sub_path":"examples/token_test.py","file_name":"token_test.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"} +{"seq_id":"12742145902","text":"from PIL import Image\nfrom datetime import datetime\n\nfrom django.forms import ModelForm\nfrom django.contrib.auth.models import User\nfrom django import forms\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom hashtags.models import HashTag\nfrom social_parsing.models import Network\nfrom user_account.models import SimpleUsers\n\n\nclass HashtagAddingForm(ModelForm):\n\n def clean_user(self):\n data_tag = self.cleaned_data['user']\n\n if not User.objects.filter(id=data_tag.id).exists():\n raise ValidationError(_('Such user does not exist.'))\n\n return data_tag\n\n def clean_tag(self):\n data_tag = self.cleaned_data['tag']\n\n if data_tag is None or len(data_tag) > 40:\n raise ValidationError(_('Invalid line length - line is too long or has nothing in it.'))\n try:\n check_tag = HashTag.objects.get(user=self.user, tag=data_tag)\n except (ObjectDoesNotExist):\n check_tag = list()\n\n if not check_tag:\n return data_tag\n else:\n raise ValidationError(_('Tag already exists.'))\n\n def clean_networks(self):\n data_networks = self.cleaned_data['networks']\n\n for network in data_networks:\n if not Network.objects.filter(guid=network).exists():\n raise ValidationError(_('Such social network is not registred. Please, contact administrator.'))\n\n return data_networks\n\n class Meta:\n model = HashTag\n fields = ['tag', 'networks', 'user']\n widgets = {'user': forms.HiddenInput()}\n labels = {'tag': _('Tag text'), 'network_id': _('Choose social network '), }\n help_texts = {'tag': _('Enter a text for the tag.'),\n 'networks': _('Choose social network, where you want to control tags.'), }\n\n def __init__(self, *args, **kwargs):\n try:\n self.user = args[0].get('user')\n except (IndexError):\n pass\n super(HashtagAddingForm, self).__init__(*args, **kwargs)\n self.fields['networks'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,\n choices=[(network.guid, network) for network in Network.objects.all()], )\n self.fields['networks'].required = False\n\n\nclass HashtagEditingForm(ModelForm):\n def clean_tag(self):\n data_tag = self.cleaned_data['tag']\n\n if data_tag is None or len(data_tag) > 40:\n raise ValidationError(_('Invalid line length - line is too long or has nothing in it.'))\n\n return data_tag\n\n def clean_network(self):\n data_networks = self.cleaned_data['networks']\n\n for network in data_networks:\n if not Network.objects.filter(guid=network).exists():\n raise ValidationError(_('Such social network is not registred. Please, contact administrator.'))\n\n return data_networks\n\n class Meta:\n model = HashTag\n fields = ['tag', 'networks']\n labels = {'tag': _('Tag text'), 'network_id': _('Choose social networks'), }\n help_texts = {'tag': _('Enter a text for the tag.'),\n 'network': _('Choose social networks, where you want to control tags.'), }\n\n def __init__(self, *args, **kwargs):\n super(HashtagEditingForm, self).__init__(*args, **kwargs)\n self.fields['networks'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,\n choices=[(network.guid, network) for network in Network.objects.all()], )\n self.fields['networks'].required = False\n\n\nclass UserEditForm(ModelForm):\n def clean_avatar(self):\n image = self.cleaned_data['avatar']\n\n if image:\n img = Image.open(image)\n w, h = img.size\n\n max_width = 380\n max_height = 500\n if w > max_width or h > max_height:\n raise ValidationError(\n _('Please use an image that is smaller or equal to '\n '{} x {} pixels.'.format(max_width, max_height)))\n\n main = img.format\n if not main.lower() in ['jpeg', 'pjpeg', 'png', 'jpg']:\n raise ValidationError(_('Please use a JPEG or PNG image.'))\n\n return image\n\n def clean_bio(self):\n bio = self.cleaned_data['bio']\n\n if bio and len(bio) > 400:\n raise ValidationError(_('Too many symbols. Please make your bio shorter.'))\n\n return bio\n\n def clean_company(self):\n company = self.cleaned_data['company']\n\n if company and len(company) > 50:\n raise ValidationError(_('Too many symbols. Please make name of your company shorter.'))\n\n return company\n\n def clean_birthdate(self):\n birth_date = get_date_from_parameter(self.cleaned_data['birth_date'])\n\n if type(birth_date) != datetime:\n raise ValidationError(_('Invalid data for birthday date.'))\n\n return birth_date\n\n class Meta:\n model = SimpleUsers\n exclude = ['user']\n fields = ['avatar', 'bio', 'country_name', 'company', 'birth_date']\n labels = {'avatar': _('Image'),\n 'bio': ('Some words about you'),\n 'country_name': ('Country you live in'),\n 'company': ('Your company'),\n 'birth_date': ('Your birthday'), }\n help_texts = {'avatar': _('Choose an image: JPG, JPEG or PNG images are allowed.\\\n Maximum size: 380x500 pixels. '), }\n\n def __init__(self, *args, **kwargs):\n super(UserEditForm, self).__init__(*args, **kwargs)\n self.fields['avatar'].widget = forms.FileInput()\n self.fields['birth_date'].widget.format = '%m/%d/%Y'\n self.fields['birth_date'].input_formats = ['%m/%d/%Y']\n\n\ndef get_date_from_parameter(raw_data_value):\n month, day, year = raw_data_value.split('/')\n return datetime(int(year), int(month), int(day))\n","repo_name":"goldstar0415/Colts_scraping","sub_path":"s_net_parsing/user_account/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"7221889464","text":"## @file ArgParser.py\n## @brief Small tool to parse the command line arguments for FQTool \n\nimport argparse\n\n## @brief This function creates a parser for command line arguments\n# Sets up a parser that accepts the following flags: -i, -l, -q, -f, -a, -v, -h\n# @return An already configured instance of the ArgumentParser class\ndef create_parser():\n parser = argparse.ArgumentParser(prog = 'fqtool', \n description = 'FASTQ parser. Quickly get the reads you need.',\n epilog = 'That\\'s all! Reach us at github.com/mistrello96/FQTool',\n add_help = False)\n parser.add_argument('-i', '--input-filenames', type = str, metavar = 'filename', dest = 'filenames', \n nargs = '+', help = 'Input file name(s). Usually in the form *.fastq, *.q', required = True)\n parser.add_argument('-l', '--length', type = int, metavar = 'length', dest = 'length', \n help = 'Minimum length of the reads to be extracted.', required = True)\n parser.add_argument('-q', '--probability-of-correctness', type = float, metavar = 'quality', \n dest = 'quality', required = True,\n help = 'Minimum probabilty of correctness of the reads to be extracted. Ranges between 0 and 1. You can also write the Phread Quality Value directly (e.g. 35)')\n parser.add_argument('-f', '--ascii-conversion-function', type = str, metavar = 'function', \n dest = 'function', help = 'Function to be used to switch bewteen ASCII and Phred Value.' + \n 'Choose between: S = Sanger, X = Solexa, I = Illumina 1.3+, J = Illumina 1.5+, L = Illumina 1.8+. Default = L', \n choices = ['S', 'X', 'I', 'J', 'L'], default = 'L')\n parser.add_argument('-a', '--accuracy', type = float, metavar = 'accuracy', dest = 'accuracy', \n help = 'This value is the %% of bases that must have at least quality q. If this condition is not satisfied, the read will be ignored',\n default = 0)\n parser.add_argument('-v', '--version', action = 'version', help = 'Shows the program version and exits', version = '%(prog)s 1.4')\n parser.add_argument('-h', '--help', action = 'help', help = 'List of the flags you can use with FQTool')\n \n return parser","repo_name":"LolloneS/FQTool","sub_path":"src/ArgParser.py","file_name":"ArgParser.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71609797523","text":"import pygame\r\nimport random\r\nimport time\r\n\r\n\r\nclass wall:\r\n def __init__(self, X, Y, H, W):\r\n self.HeightBreak = random.randrange(0, 600)\r\n self.x = X\r\n self.y = Y\r\n self.HEIGHT = H\r\n self.WIDTH = W\r\n\r\n def setX(self, X):\r\n self.x = X\r\n\r\n","repo_name":"thma4828/misc-code","sub_path":"wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5639214513","text":"import re\n\nfrom super_hero.api.client import ApiClient\n\n\ndef get_ids_of_hero_woman() -> dict:\n # парсим страницу с героями\n html_page = ApiClient().get_ids_of_heroes().text\n heroes = html_page[html_page.find('Chracter Name'):(html_page.find('701'))]\n result = re.findall('<.*\\w>(\\w.*)', heroes)\n\n # вытаскиваем имена персонажей, с \"woman\" в имени\n id_and_woman_hero = {}\n for i in range(0, len(result), 2):\n if 'woman' in result[i + 1].lower():\n id_and_woman_hero.update({int(result[i]): result[i + 1]})\n\n return id_and_woman_hero\n\n\ndef who_stronged(heroes) -> str:\n if heroes[0][\"power\"] > heroes[1][\"power\"]:\n who_winner = heroes[0][\"name\"]\n elif heroes[0][\"power\"] < heroes[1][\"power\"]:\n who_winner = heroes[1][\"name\"]\n else:\n who_winner = \"Силы равны\"\n return who_winner\n","repo_name":"TheWildBunchPog/qa-route-256","sub_path":"Homework3/super_hero/helpers/base_helpers.py","file_name":"base_helpers.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72833841040","text":"\"\"\"\nDay 5 initial solution\nBenjamin Wheeler\n\"\"\"\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass Seat:\n bot: int\n top: int\n\n\ndef get_row_col(seat: str) -> (int, int):\n max_row, max_col = 127, 7 # Constants\n r = Seat(0, max_row)\n c = Seat(0, max_col)\n\n for command in seat:\n r_dist = r.top - r.bot + 1\n c_dist = c.top - c.bot + 1\n if command == 'F':\n # Take lower half.\n r.top -= r_dist // 2\n\n elif command == 'B':\n # Take upper half.\n r.bot += r_dist // 2\n\n elif command == 'R':\n # Take upper half.\n c.bot += c_dist // 2\n\n elif command == 'L':\n # Take lower half.\n c.top -= c_dist // 2\n\n return r.bot, c.top\n\n\ndef part1() -> int:\n with open('day5.input', 'r') as f:\n seats = f.read().splitlines()\n\n seat_num = []\n for seat in seats:\n r, c = get_row_col(seat)\n seat_num.append(r * 8 + c)\n\n return max(seat_num)\n\n\ndef part2() -> int:\n with open('day5.input', 'r') as f:\n seats = f.read().splitlines()\n\n # Get all pairs of seats.\n occupied_seats = set()\n for seat in seats:\n r, c = get_row_col(seat)\n occupied_seats.add(8 * r + c)\n\n # Get all seats not in this set of seats.\n all_seats: set = {8 * r + c for c in range(8) for r in range(128)}\n\n # Get IDs of each unoccupied seat.\n unoccupied = all_seats - occupied_seats\n\n # Search for a seat with no empty neighbors.\n temp = set(unoccupied)\n for seat in temp:\n if seat + 1 not in temp and seat - 1 not in temp:\n return seat\n\n\nif __name__ == '__main__':\n print(f'Running day 5...')\n answer = part1()\n print('Part 1:', answer)\n\n answer = part2()\n print('Part 2:', answer)\n\n print('Done.')\n\n","repo_name":"benjamin051000/adventofcode","sub_path":"2020/day05/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36412389403","text":"import os\nfrom collections import OrderedDict\n\nimport msgpack\nimport rx\nfrom rx import disposable\n\n\nclass ImageLabelIndex(object):\n def __init__(self, name, image_offset, label_offset):\n self.name = name\n self.label_offset = label_offset\n self.image_offset = image_offset\n\n def __repr__(self) -> str:\n return f\"name={self.name}, label_offset={self.label_offset}, image_offset={self.image_offset}\"\n\n\nclass FileLoader(object):\n \"\"\"\n Read data without cache the whole file\n \"\"\"\n\n def __init__(self):\n super(FileLoader, self).__init__()\n\n def load_directory(self, path) -> (rx.Observable, rx.Observable):\n \"\"\"\n Locate and load the *.bin files. Parse the file and create the index to labels and images rather than loading\n them directly to memory\n :param path: path to directory\n :return: 0: observable reporting the progress based on file bytes. Will complete when finished.\n \"\"\"\n\n def subscribe(observer, scheduler=None):\n subs = disposable.CompositeDisposable()\n image_storage = os.path.join(path, \"images.bin\")\n label_storage = os.path.join(path, \"images.bin\")\n\n if not os.path.exists(image_storage):\n raise FileNotFoundError(f\"Image storage is not present in {path}\")\n\n has_label = os.path.exists(label_storage)\n\n try:\n with open(image_storage, \"rb\") as f_image:\n if has_label:\n with open(label_storage, \"rb\") as f_label:\n subs.add(self._index_image(f_image, f_label).subscribe(observer))\n else:\n subs.add(self._index_image(f_image, None).subscribe(observer))\n\n except FileNotFoundError:\n raise FileNotFoundError(f\"Image storage is not present in {path}\")\n except Exception:\n raise\n\n return subs\n\n return rx.create(subscribe)\n\n def get_data(self, index=None) -> object:\n pass\n\n def _index_image(self, f_image, f_label) -> rx.Observable:\n def subscribe(observer: rx.typing.Observer, scheduler=None):\n image_unpacker = msgpack.Unpacker(f_image)\n label_unpacker = msgpack.Unpacker(f_label)\n stop = False\n\n try:\n image_mapping = OrderedDict()\n label_mapping = OrderedDict()\n offset = 0\n for v in image_unpacker:\n if stop:\n raise InterruptedError(\"abort image indexing\")\n image_mapping[v[b\"name\"]] = offset\n offset = image_unpacker.tell()\n\n offset = 0\n for v in label_unpacker:\n if stop:\n raise InterruptedError(\"abort label indexing\")\n label_mapping[v[b\"name\"]] = offset\n offset = label_unpacker.tell()\n\n for name, img_offset in image_mapping.items():\n if stop:\n raise InterruptedError(\"abort assembling index\")\n label_offset = label_mapping[name] if name in label_mapping else None\n observer.on_next(ImageLabelIndex(name, img_offset, label_offset))\n\n except InterruptedError:\n # disposed\n raise\n\n observer.on_completed()\n\n def dispose():\n stop = True\n\n return disposable.Disposable(dispose)\n\n return rx.create(subscribe)\n","repo_name":"nncrystals/CommandCenter","sub_path":"storage_viewer/file_loader.py","file_name":"file_loader.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32701875962","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, m, k, x = map(int, input().split()) # 도시 개수, 도로 개수, 거리 정보, 출발 도시 번호\ngraph = [[] for _ in range(n + 1)]\ncheck = [0] * (n + 1)\n\nfor _ in range(m):\n A, B = map(int, input().split())\n graph[A].append(B)\n\n\ndef bfs(graph, start, check):\n check[start] = 1\n queue = deque([[start, 0]])\n result = []\n\n while queue:\n now_node, count = queue.popleft()\n for next_node in graph[now_node]:\n if check[next_node] == 1:\n continue\n\n check[next_node] = 1\n next_count = count + 1\n\n queue.append([next_node, next_count])\n\n if next_count == k:\n result.append(next_node)\n\n elif next_count > k:\n return result\n\n return result\n\n\nresult = bfs(graph, x, check)\nif len(result) == 0:\n print(-1)\nelse:\n # 오���차순 출력\n result.sort()\n for node in result:\n print(node)","repo_name":"kaori-killer/baekjoon-summer-challenge","sub_path":"CHAPTER_09_최단 거리/22-08-02/특정 거리의 도시 찾기.py","file_name":"특정 거리의 도시 찾기.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41814724476","text":"import numpy as np\n\n\ndef normalEquation(X, y):\n\treturn np.dot(np.dot(np.linalg.inv(np.dot(np.transpose(X),X)), np.transpose(X)), y)\n\n\ndata = np.loadtxt('ex1data2.txt', dtype=np.float32, delimiter=',')\nX = data[:, 0:2]\ny = data[:, 2]\nm = len(y)\ny = y.reshape((m, 1))\nX = np.column_stack((np.ones([m, 1]), X))\n\ntheta = normalEquation(X, y)\n","repo_name":"Hydrabmol/coursera","sub_path":"machine_learning/assignments/week_02/python/normalEquation.py","file_name":"normalEquation.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16733699873","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom online_shopping.settings import STRIPE_PUBLIC_KEY, STRIPE_SECRET_KEY\nfrom .models import Customer, Product, OrderPlaced, Cart, checkoutform, Invoice, InvoiceItems\nfrom math import ceil\nfrom django.http import HttpResponse, JsonResponse\nfrom .forms import CustomerRegistraionForm, CustomerProfileForm\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.mail import EmailMessage\nfrom .models import Contact\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\nfrom django.core.mail import send_mail\nfrom io import BytesIO\nfrom django.template.loader import get_template\nimport xhtml2pdf.pisa as pisa\nimport stripe\n\nstripe.api_key = STRIPE_SECRET_KEY\n\ndef home(request):\n products = Product.objects.all()\n allProds = []\n catProds = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catProds}\n for cat in cats:\n prod = Product.objects.filter(category=cat)\n n = len(products)\n nSlides = n // 4 + ceil((n / 4) + (n // 4))\n allProds.append([prod, range(1, nSlides), nSlides])\n params = {'allProds': allProds}\n return render(request, 'app/home.html', params)\n\nclass ProductView(View):\n def get(self, request):\n totalitem=0\n topwears = Product.objects.filter(category='TW')\n bottomwear = Product.objects.filter(category='BW')\n mobliles = Product.objects.filter(category='M')\n laptops = Product.objects.filter(category='L')\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n return render(request, 'app/home.html', {'topwears': topwears, 'bottomwear': bottomwear, 'mobliles': mobliles, 'laptops':laptops, 'totalitem':totalitem})\n\n@login_required\ndef orders(request):\n op = OrderPlaced.objects.filter(user=request.user)\n product_id = request.GET.get('prod_id')\n print(product_id)\n # product = Product.objects.get(id=product_id)\n status = request.POST.get('Cancel')\n order = OrderPlaced.objects.filter(status='Cancel')\n order.delete()\n print(request.POST)\n return render(request, 'app/orders.html', {'order_placed': op})\n\nclass OrderView(View):\n def get(self, request, pk):\n op = OrderPlaced.objects.filter(user=request.user)\n print(request.POST)\n product = Product.objects.get(pk=pk)\n prod_id = Product.objects.get(id=product.id)\n print(product)\n OrderPlaced(product=product)\n status = request.POST.get('Cancel')\n order = OrderPlaced.objects.filter(status='Cancel', product=product)\n order.delete()\n return render(request, 'app/orders.html', {'order_placed': op})\n\n def post(self, request, pk):\n op = OrderPlaced.objects.filter(user=request.user)\n print(request.POST)\n product = Product.objects.get(pk=pk)\n prod_id = Product.objects.get(id=product.id)\n print(product)\n OrderPlaced(product=product)\n status = request.POST.get('Cancel')\n order = OrderPlaced.objects.filter(status='Cancel' not in status, product=product)\n OrderPlaced(status='Cancel')\n return render(request, 'app/orders.html', {'order_placed': op})\n\n@login_required\ndef cart(request):\n user = request.user\n product_id = request.GET.get('prod_id')\n product = Product.objects.get(id=product_id)\n Cart(user=user, product=product).save()\n return redirect('/cart')\n\n@login_required\ndef show_cart(request):\n if request.user.is_authenticated:\n totalitem=0\n user = request.user\n cart= Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70.0\n total_amount = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == user]\n if cart_product:\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n totalamount = amount + shipping_amount\n return render(request, 'app/add_to_cart.html', {'carts': cart, 'totalamount': totalamount, 'amount': amount, 'totalitem':totalitem})\n else:\n return render(request, 'app/emptycart.html')\n\nclass ProductDetailView(View):\n def get(self, request, pk):\n product = Product.objects.get(pk=pk)\n item_already_in_cart = False\n totalitem = 0\n if request.user.is_authenticated:\n item_already_in_cart = Cart.objects.filter(Q(product=product.id) & Q(user=request.user)).exists()\n return render(request, 'app/product_details.html', {'product': product, 'item_already_in_cart': item_already_in_cart, 'totalitem': totalitem})\n\n# @login_required\ndef buynow(request):\n return render(request, 'app/buy_now.html')\n\ndef about(request):\n return render(request, 'app/about.html')\n\ndef plus_cart(request):\n if request.method == 'GET':\n prod_id = request.GET['prod_id']\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n c.quantity += 1\n c.save()\n amount = 0.0\n shipping_amount = 70.0\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n # totalamount = amount + shipping_amount\n data = {\n 'quantity': c.quantity,\n 'amount': amount,\n 'totalamount': amount + shipping_amount\n }\n return JsonResponse(data)\n\ndef minus_cart(request):\n if request.method == 'GET':\n prod_id = request.GET['prod_id']\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n c.quantity -= 1\n c.save()\n amount = 0.0\n shipping_amount = 70.0\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n # totalamount = amount + shipping_amount\n data = {\n 'quantity': c.quantity,\n 'amount': amount,\n 'totalamount': amount + shipping_amount\n }\n return JsonResponse(data)\n\ndef remove_cart(request):\n if request.method == 'GET':\n prod_id = request.GET['prod_id']\n c = Cart.objects.get(Q(product=prod_id) & Q(user=request.user))\n c.delete()\n amount = 0.0\n shipping_amount = 70.0\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n data = {\n 'amount': amount,\n 'totalamount': amount + shipping_amount\n }\n return JsonResponse(data)\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n person = request.POST['person']\n context = request.POST['context']\n print(name, email, phone, person, context)\n contact = Contact(name=name, email=email, phone=phone, person=person, context=context)\n contact.save()\n return render(request, 'app/contact.html')\n\ndef tracker(request):\n return render(request, 'app/tracker.html')\n\n@login_required\ndef checkout(request):\n user = request.user\n # prod_id = request.id\n add = Customer.objects.filter(user=user)\n # product = Product.objects.filter(id=id)\n cart_items = Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70.0\n totalamount = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == request.user]\n if cart_product:\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n totalamount = amount + shipping_amount\n return render(request, 'app/checkout.html', {'add': add, 'totalamount': totalamount, 'cart_items': cart_items})\n\nclass CheckoutView(View):\n def get(self, request, pk):\n user = request.user\n product = Product.objects.get(pk=pk)\n add = Customer.objects.filter(user=user)\n prod_items = Product.objects.filter(Q(id=product.id))\n print(prod_items)\n amount = 0.0\n shipping_amount = 70.0\n totalamount = 0.0\n prod = [p for p in Product.objects.all() if p.id == product.id]\n print(prod)\n if prod:\n for p in prod:\n tempamount = (1 * p.discounted_price)\n amount += tempamount\n totalamount = amount + shipping_amount\n return render(request, 'app/checkout.html', {'add': add, 'product': product, 'totalamount': totalamount, 'prod_items': prod_items})\n\n@login_required\ndef payment_done(request, **kwargs):\n try:\n YOUR_DOMAIN = \"http://127.0.0.1:8000\"\n user = request.user\n custid = request.GET.get('custid')\n customer = Customer.objects.get(id=custid)\n cart = Cart.objects.filter(user=user)\n # product = Product.objects.filter(user=user)\n for c in cart:\n OrderPlaced(user=user, customer=customer, product=c.product, quantity=c.quantity).save()\n c.delete()\n return redirect(\"stripe\")\n\n except Exception:\n return HttpResponse(\"Please provide address\")\n\nclass PaymentdoneView(View):\n def get(self, request, pk):\n user = request.user\n custid = request.GET.get('custid')\n customer = Customer.objects.get(id=custid)\n product = Product.objects.get(pk=pk)\n prod_items = Product.objects.filter(Q(id=product.id))\n print(\"rr\")\n for c in prod_items:\n print(c)\n OrderPlaced(user=user, customer=customer, product=c.product, quantity=c.quantity).save()\n c.delete()\n return redirect(\"ordersdata\")\n\ndef basic(request):\n return render(request, 'app/basic.html')\n\ndef mobiles(request, data=None):\n if data==None:\n mobiles = Product.objects.filter(category='Mobiles')\n elif data=='Realme' or data=='POCO':\n mobiles = Product.objects.filter(category='Mobiles').filter(brand=data)\n elif data=='Below':\n mobiles = Product.objects.filter(category='Mobiles').filter(discounted_price__lt=10000)\n elif data=='Above':\n mobiles = Product.objects.filter(category='Mobiles').filter(discounted_price__gt=10000)\n return render(request, 'app/mobiles.html', {'mobiles': mobiles})\n\ndef laptops(request, data=None):\n if data==None:\n laptops = Product.objects.filter(category='Laptop')\n elif data=='DELL' or data=='ASUS':\n laptops = Product.objects.filter(category='Laptop').filter(brand=data)\n elif data=='Below':\n laptops = Product.objects.filter(category='Laptop').filter(discounted_price__lt=50000)\n elif data=='Above':\n laptops = Product.objects.filter(category='Laptop').filter(discounted_price__gt=50000)\n return render(request, 'app/laptops.html', {'laptops': laptops})\n\nclass CustomerRegistrationView(View):\n def get(self, request):\n form = CustomerRegistraionForm()\n return render(request, 'app/register.html', {'form': form})\n\n def post(self, request):\n form = CustomerRegistraionForm(request.POST)\n if form.is_valid():\n messages.success(request, 'Congratulations!! Registered Successfully')\n form.save()\n return render(request, 'app/register.html', {'form': form})\n\ndef login(request):\n return render(request, 'app/login.html')\n\ndef password_reset(request):\n return render(request, 'app/password_reset.html')\n\n@method_decorator(login_required, name='dispatch')\nclass ProfileView(View):\n def get(self, request):\n form = CustomerProfileForm()\n return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary'})\n\n def post(self, request):\n form = CustomerProfileForm(request.POST)\n if form.is_valid():\n usr = request.user\n name = form.cleaned_data['name']\n locality = form.cleaned_data['locality']\n city = form.cleaned_data['city']\n state = form.cleaned_data['state']\n zipcode = form.cleaned_data['zipcode']\n reg = Customer(user=usr, name=name, locality=locality, city=city, state=state, zipcode=zipcode)\n reg.save()\n messages.success(request, 'Profile Updated Successfully')\n return render(request, 'app/profile.html', {'form': form, 'active': 'btn-primary'})\n\n@login_required\ndef edit_profile(request):\n # form = CustomerProfileForm(request.POST)\n try:\n if request.method == 'POST':\n # request.user.customer.usr = request.user\n request.user.customer.name = request.POST.get('name', '')\n request.user.customer.locality = request.POST.get('locality', '')\n request.user.customer.city = request.POST.get('city', '')\n request.user.customer.state = request.POST.get('state', '')\n request.user.customer.zipcode = request.POST.get('zipcode', '')\n request.user.customer.save()\n messages.success(request, 'Profile Updated Successfully')\n return redirect('checkout')\n return render(request, 'app/editprofile.html')\n except Exception:\n # return redirect(\"profile\")\n return redirect(\"profile\")\n\n\n@login_required\ndef address(request):\n add = Customer.objects.filter(user=request.user)\n return render(request, 'app/address.html', {'add': add, 'active': 'btn-primary'})\n\ndef searchMatch(query, item):\n if query in item.description.lower() or query in item.title.lower() or query in item.category.lower():\n return True\n else:\n return False\n\ndef search(request):\n query = request.GET.get('search')\n products = Product.objects.all()\n allProds = []\n catProds = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catProds}\n for cat in cats:\n prodtemp = Product.objects.filter(category=cat)\n prod = [item for item in prodtemp if searchMatch(query, item)]\n n = len(products)\n nSlides = n // 4 + ceil((n / 4) + (n // 4))\n if len(prod) != 0:\n allProds.append([prod, range(1, nSlides), nSlides])\n params = {'allProds': allProds}\n if len(allProds) == 0 or len(query) < 4:\n params = {'msg': 'Please Make Sure To Enter Relevant Search Qukwargsery'}\n return render(request, 'app/search.html', params)\n\n@login_required\ndef check_out_form(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n pmethod = request.POST['pmethod']\n context = request.POST['context']\n print(name, email, phone, pmethod, context)\n checkoutt = checkoutform(name=name, email=email, phone=phone, pmethod=pmethod, context=context)\n if pmethod == 'stripe':\n return redirect('stripe')\n checkoutt.save()\n return render(request, 'app/checkout_form.html')\n\n# def stripe(request):\n# return render(request, 'app/stripe.html')\n\nclass ProductLandingPageView(TemplateView):\n template_name = 'app/landing.html'\n\n def get_context_data(self, **kwargs):\n # product = Product.objects.get(title=\"Test Product\")\n context = super(ProductLandingPageView, self).get_context_data(**kwargs)\n context.update({\n # \"product\": product,\n \"STRIPE_PUBLIC_KEY\": STRIPE_PUBLIC_KEY\n })\n return context\n\ndef charge(request):\n if request.method == 'POST':\n charge = stripe.Charge.create(\n amount='{totalamount}',\n currency='INR',\n description='A Django Charge',\n source=request.POST['stripeToken']\n )\n return render(request, 'charge.html')\n\ndef topwear(request):\n return render(request, 'app/topwear.html')\n\n\nclass CreateCheckoutSessionView(View):\n def post(self, request, pk, *args, **kwargs):\n BASE_URL = \"http://127.0.0.1:8000\"\n product = Product.objects.get(pk=pk)\n prod_items = Product.objects.filter(Q(id=product.id))\n checkout_session = stripe.checkout.Session.create(\n payment_method_types=['card'],\n line_items=[\n {\n 'price': product.stripe_price_id,\n 'quantity': 1,\n },\n ],\n metadata = {\n \"product_id\":product.id\n },\n mode='payment',\n success_url=BASE_URL + '/success/',\n cancel_url=BASE_URL + '/cancel/',\n )\n return redirect(checkout_session.url)\n\nendpoint_secret = 'whsec_a02f2df803ffa596f42eed01d433d199d66144766f636cd3bf1fe207a14448b4'\n@csrf_exempt\ndef stripe_webhook(request):\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n print(sig_header)\n event = None\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, settings.STRIPE_WEBHOOK_SECRET\n )\n except ValueError as e:\n return HttpResponse(status=400)\n except stripe.error.SignatureVerificationError as e:\n return HttpResponse(status=400)\n\n if event['type'] == 'checkout.session.completed':\n session = event['data']['object']\n\n customer_email = session[\"customer_details\"][\"email\"]\n product_id = session[\"metadata\"][\"product_id\"]\n product = Product.objects.get(title=\"Realme C21Y 32 GB\")\n\n send_mail(\n subject=\"Here is your product\",\n message=\"Thanks for your purchase.\",\n recipient_list=[customer_email],\n from_email=\"roopesh.rai@plutustec.com\",\n )\n\n # Passed signature verification\n return HttpResponse(status=200)\n\n# def fulfill_order(session):\n# TODO: fill me in\n# print(\"Fulfilling order\")\n\n\nclass SuccessView(TemplateView):\n template_name = \"app/success.html\"\n\nclass CancelView(TemplateView):\n template_name = \"app/cancel.html\"\n\nclass HomePageView(TemplateView):\n template_name = \"app/stripe.html\"\n def get_context_data(self, *args, **kwargs):\n user = self.request.user\n # prod_id = request.id\n add = Customer.objects.filter(user=user)\n cart_items = Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70.0\n totalamount = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == self.request.user]\n if cart_product:\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount += tempamount\n totalamount = amount + shipping_amount\n # return render(self.request, 'app/checkout.html', {'add': add, 'totalamount': totalamount, 'cart_items': cart_items})\n product = Product.objects.get(title=\"Realme C21Y 32 GB\")\n context = super(HomePageView, self).get_context_data(**kwargs)\n context.update({\n \"product\": product,\n })\n return context\n\nclass StripeIntentView(View):\n def post(self, request, *args, **kwargs):\n try:\n product = Product.objects.get(title=\"Realme C21Y 32 GB\")\n intent = stripe.PaymentIntent.create(\n amount=product.discounted_price,\n currency='INR',\n automatic_payment_methods={\n 'enabled': True,\n },\n )\n return JsonResponse({\n 'clientSecret': intent['client_secret']\n })\n except Exception as e:\n return JsonResponse({'error': str(e)})\n\ndef render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n if not pdf.err:\n return result.getvalue()\n return None\n\n\nclass PaymentSuccess(View):\n def get(self, request):\n return HttpResponse({'msg', 'Your Payment has been succeed'})\n\n\nclass PaymentCancel(View):\n def get(self, request):\n return HttpResponse({'msg', 'Your Payment has cancel'})\n\n\nclass ShowInvoice(View):\n def post(self, request, *args, **kwargs):\n template = get_template('stripe.html')\n data = request.data\n order_id = data['order_id']\n Order = OrderPlaced.objects.get(id=order_id)\n # payment = Payment.objects.get(order_id=Order)\n user = Order.user\n orderitems = OrderPlaced.objects.filter(order=Order)\n pdf = render_to_pdf('stripe.html', {'order_items': orderitems, 'user': user, 'order': Order})\n return HttpResponse(pdf, content_type='application/pdf')\n\n\nclass DownloadInvoice(View):\n def post(self, request):\n template = get_template('stripe.html')\n data = request.data\n order_id = data['order_id']\n Order = OrderPlaced.objects.get(id=order_id)\n # payment = Payment.objects.get(order_id=Order)\n user = Order.user\n invoice = Invoice(user=user, order_id=order_id)\n invoice.save()\n orderitems = OrderPlaced.objects.filter(order=Order)\n for i in orderitems:\n InvoiceItems.objects.create(invoice=invoice, product=i.product, product_pricee=i.price)\n\n pdf = render_to_pdf('stripe.html', {'invoice': invoice, 'order_items': orderitems, 'user': user, 'order': Order})\n\n if pdf:\n response = HttpResponse(pdf, content_type='application/pdf')\n filename = \"Invoice_%s.pdf\" % (data['order_id'])\n content = \"inline; filename = '%s'\" % (filename)\n content = \"attachment; filename = '%s'\" % (filename)\n response['Content-Disposition'] = content\n return response\n return HttpResponse(\"not found\")\n\n\nclass ShareInvoice(View):\n def post(self, request):\n template = get_template('stripe.html')\n data = request.data\n order_id = data['order_id']\n Order = OrderPlaced.objects.get(id=order_id)\n # payment = Payment.objects.get(order_id=Order)\n user = Order.user\n invoice = Invoice(user=user, order_id=order_id)\n invoice.save()\n orderitems = OrderPlaced.objects.filter(order=Order)\n for i in orderitems:\n InvoiceItems.objects.create(invoice=invoice, product=i.product, product_pricee=i.price)\n\n pdf = render_to_pdf('stripe.html', {'invoice': invoice, 'order_items': orderitems, 'user': user, 'order': Order})\n\n if pdf:\n filename = \"Invoice.pdf\"\n content = \"attachment; filename = '%s'\" % (filename)\n mail_subject = \"Recent Order Details\"\n email = EmailMessage(mail_subject, 'this is a message', settings.EMAIL_HOST_USER, [user.email])\n email.attach('new.pdf', pdf, \"application/pdf\")\n email.send()\n return HttpResponse({'msg': 'Invoice generated!'})","repo_name":"roopesh-rai/online_shopping","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39926995450","text":"#!/usr/bin/python3\n\"\"\" Creates an empty class Square that defines a square \"\"\"\n\n\nclass Square:\n \"\"\"\n Initialise with size with value checks.\n\n Arguments:\n __size: size of the square.\n\n Return: Nothing.\n \"\"\"\n\n def __init__(self, __size=0, __position=(0, 0)):\n \"\"\"Initialises the attribute size\"\"\"\n self.__size = __size\n self.__position = __position\n\n def area(self):\n \"\"\"Returns the current square area.\"\"\"\n return (self.__size * self.__size)\n\n @property\n def size(self):\n return (self.__size)\n\n @size.setter\n def size(self, value):\n if isinstance(value, int) != 1:\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n @property\n def position(self):\n return (self.__position)\n\n @position.setter\n def position(self, value):\n if not isinstance(value, tuple) or len(value) != 2:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n x, y = value\n if not isinstance(x, int) or not isinstance(y, int) or x < 0 or y < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n self.__position = value\n\n def my_print(self):\n \"\"\"Prints in STDOUT the square with the character #\"\"\"\n if self.__size == 0:\n print()\n else:\n for _ in range(self.__position[1]):\n print()\n for _ in range(self.__size):\n print(\" \" * self.__position[0] + \"#\" * self.__size)\n","repo_name":"neintendo/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17250986708","text":"import nmt.utils.misc_utils as utils\nimport argparse\nimport codecs\nimport os\nimport shutil\nimport re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import cuda\nimport nmt\nimport random\nfrom data import Vocab, Data_Loader, ListsToTensor\nfrom torch.autograd import Variable\nimport sys\n\n\nuse_cuda = True\n\nclass GAN(nn.Module):\n def __init__(self, generator, discriminator, critic):\n super(GAN, self).__init__()\n self.generator = generator\n self.discriminator = discriminator\n self.critic = critic\n\n def save_checkpoint(self, epoch, opt, filename):\n torch.save({'generator_dict': self.generator.state_dict(),\n 'discriminator_dict': self.discriminator.state_dict(),\n 'critic_dict': self.critic.state_dict(),\n 'opt': opt,\n 'epoch': epoch,\n },\n filename)\n\n def load_checkpoint(self, filename):\n ckpt = torch.load(filename)\n self.generator.load_state_dict(ckpt['generator_dict'])\n self.discriminator.load_state_dict(ckpt['discriminator_dict'])\n self.critic.load_state_dict(ckpt['critic_dict'])\n epoch = ckpt['epoch']\n return epoch\n\ndef sample(model, src, ref_src, ref_tgt, src_lengths, ref_src_lengths, ref_tgt_lengths, max_len, show_sample = False):\n model_type = model.__class__.__name__\n if model_type ==\"refNMTModel\":\n context, enc_states, context_keys, context_mask, src_context, src_mask = model.encode(src, ref_src, ref_tgt, src_lengths, ref_src_lengths, ref_tgt_lengths)\n if model_type == \"responseGenerator\":\n context, enc_states, context_mask, dist, src_context, src_mask = model.encode(src, ref_tgt, src_lengths, ref_tgt_lengths)\n dec_states = model.init_decoder_state(enc_states, context)\n\n vocab = model.fields['tgt'].vocab\n EOS_idx = vocab.stoi[vocab.EOS]\n PAD_idx = vocab.stoi[vocab.PAD]\n EOT_idx = vocab.stoi[vocab.EOT]\n batch_size = src.size(1)\n\n notyet = torch.ByteTensor(batch_size).fill_(1)\n inp = Variable(torch.LongTensor(batch_size).fill_(EOS_idx))\n\n pad_mask = torch.LongTensor([PAD_idx])\n if use_cuda:\n notyet = notyet.cuda()\n inp = inp.cuda()\n pad_mask = pad_mask.cuda()\n\n result = [inp]\n eps = 1e-12\n log_prob= []\n\n while notyet.any() and len(result)<= max_len:\n inp = inp.unsqueeze(0)\n if model_type ==\"refNMTModel\":\n dec_out, dec_states, attn = model.decode(inp, context_keys, context, dec_states, context_mask, src_context, src_mask)\n if model_type ==\"responseGenerator\":\n dec_out, dec_states, attn = model.decode(inp, context, dec_states, None, context_mask, src_context, src_mask)\n dec_out = dec_out.squeeze(0)\n cur_log_prob = model.generator(dec_out)\n cur_log_prob.data.index_fill_(1, pad_mask, -float('inf'))\n word_prob = torch.exp(cur_log_prob + eps)\n #inp = torch.multinomial(word_prob, 1).squeeze(-1)\n _, inp = torch.max(cur_log_prob, -1)\n cur_log_prob = torch.gather(cur_log_prob, -1, inp.view(-1, 1)).squeeze(-1)\n cur_log_prob.data.masked_fill_(1-notyet, 0.)\n log_prob.append(cur_log_prob)\n inp.data.masked_fill_( 1-notyet, PAD_idx) # batch_size \n result.append(inp)\n\n endding = torch.eq(inp, EOT_idx)\n notyet.masked_fill_(endding.data, 0)\n\n result = torch.stack(result, 0)\n log_prob = torch.stack(log_prob, 0)\n\n x = result.t().data.tolist()\n new_x = []\n for t in x:\n new_t = []\n for tt in t:\n if tt != PAD_idx:\n new_t.append(tt)\n new_x.append(new_t)\n x = new_x\n\n if show_sample:\n for t in x:\n print (' '.join([vocab.itos[tt] for tt in t]))\n return ListsToTensor(x, tgt = False), log_prob\n\ndef report_func(opt, global_step, epoch, batch, num_batches,\n start_time, lr, report_stats):\n \"\"\"\n This is the user-defined batch-level traing progress\n report function.\n Args:\n epoch(int): current epoch count.\n batch(int): current batch count.\n num_batches(int): total number of batches.\n start_time(float): last report time.\n lr(float): current learning rate.\n report_stats(Statistics): old Statistics instance.\n Returns:\n report_stats(Statistics): updated Statistics instance.\n \"\"\"\n if batch % opt.steps_per_stats == -1 % opt.steps_per_stats:\n report_stats.print_out(epoch, batch+1, num_batches, start_time)\n report_stats = nmt.Statistics()\n\n return report_stats\n\ndef build_or_load_model(args, model_opt, fields):\n if args.model_type == \"ref\":\n generator, discriminator, critic = nmt.model_helper.create_GAN_model(model_opt, fields)\n model = GAN(generator, discriminator, critic)\n if args.start_point is None:\n generator.load_checkpoint(\"init_point\")\n discriminator.base_model.load_checkpoint('init_point')\n critic.base_model.load_checkpoint('init_point')\n else:\n model.load_checkpoint(args.start_point)\n\n latest_ckpt = nmt.misc_utils.latest_checkpoint(model_opt.out_dir)\n start_epoch_at = 0\n if model_opt.start_epoch_at is not None:\n ckpt = 'checkpoint_epoch%d.pkl'%(model_opt.start_epoch_at)\n ckpt = os.path.join(model_opt.out_dir,ckpt)\n else:\n ckpt = latest_ckpt\n\n if ckpt:\n print('Loding model from %s...'%(ckpt))\n start_epoch_at = model.load_checkpoint(ckpt)\n else:\n print('Building model...')\n print(model)\n\n return model, start_epoch_at\n\ndef build_optims_and_lr_schedulers(model, opt):\n optimG = nmt.Optim(opt.optim_method,\n opt.learning_rate,\n opt.max_grad_norm,\n opt.learning_rate_decay,\n opt.weight_decay,\n opt.start_decay_at)\n\n optimG.set_parameters(model.generator.parameters())\n\n lr_lambda = lambda epoch: opt.learning_rate_decay ** epoch\n schedulerG = torch.optim.lr_scheduler.LambdaLR(optimizer=optimG.optimizer, lr_lambda=[lr_lambda])\n optimD = nmt.Optim(opt.optim_method,\n opt.learning_rate_D,\n opt.max_grad_norm,\n opt.learning_rate_decay,\n opt.weight_decay,\n opt.start_decay_at)\n optimD.set_parameters( [ x for x in model.discriminator.parameters() ] + [ y for y in model.critic.parameters()] )\n schedulerD = torch.optim.lr_scheduler.LambdaLR(optimizer=optimD.optimizer, lr_lambda=[lr_lambda])\n return optimG, schedulerG, optimD, schedulerD\n\ndef check_save_model_path(args, opt):\n if not os.path.exists(opt.out_dir):\n os.makedirs(opt.out_dir)\n print('saving config file to %s ...'%(opt.out_dir))\n shutil.copy(args.config, os.path.join(opt.out_dir,'config.yml'))\n\ndef save_per_epoch(model, epoch, opt):\n f = open(os.path.join(opt.out_dir,'checkpoint'),'w')\n f.write('latest_checkpoint:checkpoint_epoch%d.pkl'%(epoch))\n f.close()\n model.save_checkpoint(epoch, opt, os.path.join(opt.out_dir,\"checkpoint_epoch%d.pkl\"%(epoch)))\n\ndef pretrain_discriminators(opt, model, train_iter, valid_iter, fields, optim, lr_scheduler, start_epoch_at):\n for step_epoch in range(start_epoch_at+1, opt.num_train_epochs):\n for batch in train_iter:\n model.zero_grad()\n src_inputs, src_lengths = batch.src\n tgt_inputs = batch.tgt[0]\n ref_src_inputs, ref_src_lengths = batch.ref_src\n ref_tgt_inputs, ref_tgt_lengths = batch.ref_tgt\n (fake_tgt_inputs, _), fake_log_prob = sample(model.generator, src_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths, opt.max_sample_len)\n real_output = model.discriminator(src_inputs, tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths)\n fake_output = model.discriminator(src_inputs, fake_tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths)\n real_output = real_output[1:]\n fake_output = fake_output[1:]\n\n target = torch.ones_like(real_output)\n loss_real = F.binary_cross_entropy_with_logits(real_output, target, torch.ne(tgt_inputs[1:], 0).float(), size_average = False)\n target = torch.zeros_like(fake_output)\n loss_fake = F.binary_cross_entropy_with_logits(fake_output, target, torch.ne(fake_tgt_inputs[1:], 0).float(), size_average = False)\n\n loss = (loss_real + loss_fake)/ (2 * batch.batch_size)\n loss.backward()\n optim.step()\n save_per_epoch(model, step_epoch, opt)\n sys.stdout.flush()\n\ndef G_turn(model, batch, optim, opt):\n model.zero_grad()\n advantages, log_probs, mask = D_turn(model, batch, None, opt, forG = True)\n loss = -(advantages * log_probs) * mask.float()\n loss = torch.sum(loss)/ batch.batch_size\n loss.backward()\n optim.step()\n\ndef D_turn(model, batch, optim, opt, forG = False, show_sample = False):\n if not forG:\n model.zero_grad()\n src_inputs, src_lengths = batch.src\n tgt_inputs = batch.tgt[0]\n ref_src_inputs, ref_src_lengths = batch.ref_src\n ref_tgt_inputs, ref_tgt_lengths = batch.ref_tgt\n\n if show_sample:\n sample(model.generator, src_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths, opt.max_sample_len, show_sample = True)\n return\n (fake_tgt_inputs, _), fake_log_prob = sample(model.generator, src_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths, opt.max_sample_len)\n\n real_output = model.discriminator(src_inputs, tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths)\n fake_output = model.discriminator(src_inputs, fake_tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths)\n real_output = real_output[1:]\n fake_output = fake_output[1:]\n\n target = torch.ones_like(real_output)\n loss_real = F.binary_cross_entropy_with_logits(real_output, target, torch.ne(tgt_inputs[1:], 0).float(), size_average = False)\n target = torch.zeros_like(fake_output)\n fake_tgt_mask = torch.ne(fake_tgt_inputs[1:], 0)\n loss_fake = F.binary_cross_entropy_with_logits(fake_output, target, fake_tgt_mask.float(), size_average = False)\n\n loss = (loss_real + loss_fake)/ (2 * batch.batch_size)\n eps = 1e-12\n\n estimated_rewards = model.critic(src_inputs, fake_tgt_inputs, ref_src_inputs, ref_tgt_inputs, src_lengths, ref_src_lengths, ref_tgt_lengths)\n estimated_rewards = estimated_rewards[:-1]\n\n rewards = torch.log(F.sigmoid(fake_output) + eps)\n rewards.data.masked_fill_(1 - fake_tgt_mask.data, 0.)\n split_rewards = torch.split(rewards, 1, dim = 0)\n\n sum_rewards = []\n cur = 0.\n for r in split_rewards[::-1]:\n cur = cur * opt.gamma + r\n sum_rewards.append(cur)\n sum_rewards = torch.cat(sum_rewards[::-1], 0)\n\n if forG:\n return (sum_rewards - estimated_rewards).detach(), fake_log_prob, fake_tgt_mask\n critic_loss = (sum_rewards - estimated_rewards)**2\n critic_loss.data.masked_fill_(1 - fake_tgt_mask.data, 0.)\n critic_loss = torch.sum(critic_loss)/ batch.batch_size\n loss = loss + critic_loss\n loss.backward()\n optim.step()\n\ndef train_model(opt, model, train_iter, valid_iter, fields, optimG, lr_schedulerG, optimD, lr_schedulerD, start_epoch_at):\n num_train_epochs = opt.num_train_epochs\n num_updates = 0\n print('start training...')\n valid_loss = nmt.NMTLossCompute(model.generator.generator,fields['tgt'].vocab)\n if use_cuda:\n valid_loss = valid_loss.cuda()\n shard_size = opt.train_shard_size\n trainer = nmt.Trainer(opt, model.generator, train_iter, valid_iter, valid_loss, valid_loss, optimG, lr_schedulerG, shard_size, train_loss_b = None)\n\n for step_epoch in range(start_epoch_at+1, num_train_epochs):\n for batch in train_iter:\n if num_updates % (opt.D_turns+1) == -1 % (opt.D_turns+1):\n G_turn(model, batch, optimG, opt)\n else:\n D_turn(model, batch, optimD, opt)\n if num_updates % (opt.show_sample_every) == -1 %(opt.show_sample_every):\n D_turn(model, batch, optimD, opt, show_sample = True)\n num_updates += 1\n sys.stdout.flush()\n valid_stats = trainer.validate()\n print('Validation perplexity: %g' % valid_stats.ppl())\n sys.stdout.flush()\n if step_epoch >= opt.start_decay_at:\n lr_schedulerD.step()\n lr_schedulerG.step()\n save_per_epoch(model, step_epoch, opt)\n model.train()\n\nclass vocab_wrapper(object):\n def __init__(self, vocab):\n self.vocab = vocab\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-config\", type=str)\n parser.add_argument(\"-nmt_dir\", type=str)\n parser.add_argument(\"-model_type\", type=str)\n parser.add_argument('-gpuid', default=[0], nargs='+', type=int)\n parser.add_argument(\"-valid_file\", type=str)\n parser.add_argument(\"-train_file\", type=str)\n parser.add_argument(\"-train_score\", type=str, default= None)\n parser.add_argument(\"-src_vocab\", type = str)\n parser.add_argument(\"-tgt_vocab\", type = str)\n parser.add_argument(\"-start_point\", type = str, default = None)\n\n args = parser.parse_args()\n opt = utils.load_hparams(args.config)\n\n if opt.random_seed > 0:\n random.seed(opt.random_seed)\n torch.manual_seed(opt.random_seed)\n\n fields = dict()\n vocab_src = Vocab(args.src_vocab, noST = True)\n vocab_tgt = Vocab(args.tgt_vocab)\n fields['src'] = vocab_wrapper(vocab_src)\n fields['tgt'] = vocab_wrapper(vocab_tgt)\n\n train = Data_Loader(args.train_file, opt.train_batch_size, score = args.train_score, mask_end = (args.model_type == \"ev\"))\n valid = Data_Loader(args.valid_file, opt.train_batch_size, mask_end = (args.model_type == \"ev\"))\n\n # Build model.\n\n model, start_epoch_at = build_or_load_model(args, opt, fields)\n check_save_model_path(args, opt)\n\n optimG, schedulerG, optimD, schedulerD = build_optims_and_lr_schedulers(model, opt)\n\n if use_cuda:\n model = model.cuda()\n\n # Do training.\n #pretrain_discriminators(opt, model, train, valid, fields, optimD, schedulerD, start_epoch_at)\n train_model(opt, model, train, valid, fields, optimG, schedulerG, optimD, schedulerD, start_epoch_at)\n print(\"DONE\")\n x = 0\n while True:\n x = (x +1)%5\nif __name__ == '__main__':\n main()\n","repo_name":"jcyk/Skeleton-to-Response","sub_path":"maskGAN.py","file_name":"maskGAN.py","file_ext":"py","file_size_in_byte":14673,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"} +{"seq_id":"33495350643","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass PDLNet(nn.Module):\n # [ B * N * (3+z) ] -> # [ B * N * 3 ] \n def __init__(self, size_z, num_point):\n super(PDLNet, self).__init__()\n size_kernel = 1\n size_pad = 0\n\n self.size_z = size_z\n self.num_point = num_point\n self.conv1 = torch.nn.Conv1d(3 + self.size_z, 128, size_kernel, padding=size_pad)\n self.conv2 = torch.nn.Conv1d(128, 32, size_kernel, padding=size_pad)\n self.conv3 = torch.nn.Conv1d(32, 3, size_kernel, padding=size_pad)\n \n self.conv4 = torch.nn.Conv1d(3 + self.size_z, 128, size_kernel, padding=size_pad)\n self.conv5 = torch.nn.Conv1d(128, 32, size_kernel, padding=size_pad)\n self.conv6 = torch.nn.Conv1d(32, 3, size_kernel, padding=size_pad)\n \n self.ln0 = nn.LayerNorm((self.size_z , num_point))\n self.ln1 = nn.LayerNorm((128, num_point))\n self.ln2 = nn.LayerNorm((32, num_point))\n self.ln3 = nn.LayerNorm((3, num_point))\n self.ln4 = nn.LayerNorm((128 , num_point))\n self.ln5 = nn.LayerNorm((32, num_point))\n self.ln6 = nn.LayerNorm((3, num_point))\n\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.0)\n\n def forward(self, x_z, x, z):\n z = self.ln0(z)\n x = torch.cat([z, x], 1)\n x = self.dropout(F.relu(self.ln1(self.conv1(x))))\n x = self.dropout(F.relu(self.ln2(self.conv2(x))))\n x = self.dropout(F.relu(self.ln3(self.conv3(x))))\n \n x = torch.cat([z, x], 1)\n x = self.dropout(F.relu(self.ln4(self.conv4(x))))\n x = self.dropout(F.relu(self.ln5(self.conv5(x))))\n x1 = self.dropout((self.ln6(self.conv6(x))))\n return x1\n\n\n\n","repo_name":"WordBearerYI/Unsupervised-Deep-Shape-Descriptor-with-Point-Distribution-Learning","sub_path":"model/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"72881544402","text":"import os\r\nfrom PyQt5 import QtWidgets, QtGui, QtCore\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\nfrom games import Games\r\n\r\nGAMES = Games()\r\n\r\n\r\nclass Label(QtWidgets.QLabel):\r\n def __init__(self, *args, **kwargs):\r\n QtWidgets.QLabel.__init__(self, *args, **kwargs)\r\n\r\n def enterEvent(self, event):\r\n self.setStyleSheet('text-decoration: underline;')\r\n\r\n def leaveEvent(self, event):\r\n self.setStyleSheet('text-decoration: none;')\r\n\r\n\r\nclass Button(QtWidgets.QPushButton):\r\n def __init__(self, *args, **kwargs):\r\n QtWidgets.QPushButton.__init__(self, *args, **kwargs)\r\n\r\n self.lb_pressed = lambda: print(1)\r\n self.rb_pressed = lambda: print(1)\r\n\r\n def mousePressEvent(self, event):\r\n if event.button() == QtCore.Qt.LeftButton:\r\n self.lb_pressed()\r\n elif event.button() == QtCore.Qt.RightButton:\r\n self.rb_pressed()\r\n\r\n\r\nclass Scene2(QtWidgets.QWidget):\r\n def __init__(self, *args, **kwargs):\r\n QtWidgets.QWidget.__init__(self, *args, **kwargs)\r\n self.warn_text = None\r\n self.progressbar = None\r\n self.last_game_label = None\r\n self.last_game = None\r\n self.games_list = None\r\n self.games_content = None\r\n self.games_area = None\r\n self.games_widgets = []\r\n self.game_state = {}\r\n self.lenghts = (sum(1 for _ in os.walk(drv)) for drv in (chr(i) + \":\\\\\" for i in\r\n range(ord(\"A\"),\r\n ord(\"Z\") + 1)))\r\n self.lenght = None\r\n\r\n def setup_ui(self):\r\n self.resize(1093, 680)\r\n self.setFixedSize(1093, 680)\r\n\r\n self.setStyleSheet(\"\"\"background-color: \r\n qlineargradient(spread:pad, x1:0, y1:0.512, x2:0.985, y2:0.511, stop:0 rgba(255, 153, 0, 255),\r\n stop:1 rgba(255, 159, 255, 255));\"\"\")\r\n\r\n self.progressbar = QtWidgets.QProgressBar(self)\r\n self.progressbar.setValue(0)\r\n self.progressbar.setGeometry(QtCore.QRect(10, 650, 1073, 20))\r\n\r\n self.warn_text = QtWidgets.QLabel(self)\r\n self.warn_text.setGeometry(QtCore.QRect(10, 10, 1093, 40))\r\n self.warn_text.setText('Используйте ЛКМ чтобы запустить игру или ПКМ чтобы удалить игру')\r\n self.warn_text.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter)\r\n self.warn_text.setWordWrap(True)\r\n self.warn_text.setStyleSheet('background: transparent;'\r\n 'font: 12pt \"Minecraft Rus\";')\r\n\r\n self.games_area = QtWidgets.QScrollArea(self)\r\n self.games_area.setGeometry(QtCore.QRect(20, 50, 1061, 621))\r\n self.games_area.setStyleSheet(\"background: transparent;\"\r\n \"border: none;\")\r\n self.games_area.setWidgetResizable(True)\r\n self.games_area.setObjectName(\"games_area\")\r\n self.games_content = QtWidgets.QWidget()\r\n self.games_content.setGeometry(QtCore.QRect(0, 0, 1059, 619))\r\n self.games_content.setObjectName(\"games_content\")\r\n self.games_content.setStyleSheet(\"border: none;\\nbackground: transparent;\\n\"\r\n \"font: 63 9pt \\\"Cascadia Code SemiBold\\\";\")\r\n\r\n self.games_area.verticalScrollBar().setStyleSheet(\"QScrollBar\"\r\n \"{\"\r\n \"border: none;\"\r\n \"background: transparent;\"\r\n \"}\"\r\n \"QScrollBar::handle\"\r\n \"{\"\r\n \"background: #868687;\"\r\n \"border-radius: 10px;\"\r\n \"border: none;\"\r\n \"}\"\r\n \"QScrollBar::handle::pressed\"\r\n \"{\"\r\n \"background: white;\"\r\n \"}\"\r\n \"\"\"\r\nQScrollBar::handle:vertical {\r\nborder-radius: 10px;\r\nborder: none;\r\nbackground: #5b5b5b;\r\n} QScrollBar::handle:vertical::pressed {\r\nbackground: lightgray;\r\n}\r\n\r\nQScrollBar::add-line:vertical {\r\nheight: 0px;\r\n}\r\n\r\nQScrollBar::sub-line:vertical {\r\nheight: 0px;\r\n}\r\n\r\nQScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\nheight: 0px;\r\nbackground: none;\r\nborder-radius: 14px;\r\n}\"\"\")\r\n\r\n self.games_area.setWidget(self.games_content)\r\n\r\n self.games_area.setWidgetResizable(True)\r\n\r\n self.games_area.raise_()\r\n\r\n self.show()\r\n\r\n self.main()\r\n\r\n def make_lambda(self, game):\r\n def setup(*args):\r\n self.open_game(game)\r\n\r\n return setup\r\n\r\n def make_lambda_delete(self, game):\r\n def setup():\r\n self.delete_game(game)\r\n\r\n return setup\r\n\r\n def main(self):\r\n [(i[0].hide(), i[1].hide()) for i in self.games_widgets]\r\n self.games_widgets = []\r\n self.games_list = GAMES.reload().games\r\n\r\n z = 1\r\n x, y = 20, 10\r\n\r\n ww = round((len(self.games_list) / 5) * 350) + 350\r\n\r\n self.games_content.setFixedHeight(ww)\r\n\r\n for i in enumerate(GAMES.games):\r\n setattr(self, 'game%d' % i[0], Button(self.games_content))\r\n self.last_game = getattr(self, 'game%d' % i[0])\r\n self.last_game.lb_pressed = self.make_lambda(i[1])\r\n self.last_game.rb_pressed = self.make_lambda_delete(i[1])\r\n self.last_game.setGeometry(QtCore.QRect(x, y, 175, 240))\r\n self.last_game.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n\r\n setattr(self, 'game%d_label' % i[0], Label(self.games_content))\r\n self.last_game_label = getattr(self, 'game%d_label' % i[0])\r\n self.last_game_label.setGeometry(QtCore.QRect(x, y + 240, 175, 50))\r\n\r\n x += 205\r\n if z % 5 == 0:\r\n x = 20\r\n y += 298\r\n\r\n self.last_game_label.setText(i[1].name)\r\n self.last_game_label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignHCenter)\r\n self.last_game_label.setWordWrap(True)\r\n self.last_game_label.mousePressEvent = self.make_lambda(i[1])\r\n\r\n url = i[1].img\r\n\r\n self.last_game.setStyleSheet(\"QPushButton {\\n\"\r\n \"border: 1px solid black;\\n\"\r\n \"background-color: rgba(255,255,255,0);\\n\"\r\n f\"border-image: url({url}) 0 0 0 0 stretch stretch;\"\r\n \"border-radius: 12px\"\r\n \"} QPushButton:hover {\\n\"\r\n \";\"\r\n \"margin-top: 5px;\"\r\n \"}\")\r\n\r\n self.last_game.show()\r\n self.last_game_label.show()\r\n\r\n self.games_widgets.append([self.last_game, self.last_game_label])\r\n\r\n z += 1\r\n\r\n def handle(self, i, maximum):\r\n self.progressbar.setMaximum(maximum)\r\n self.progressbar.setValue(i)\r\n QtWidgets.QApplication.processEvents()\r\n\r\n def find_exe(self, game):\r\n for drv in (chr(i) + \":\\\\\" for i in range(ord(\"A\"), ord(\"Z\") + 1)):\r\n self.handle(50, 100)\r\n i = 0\r\n for root, dirs, files in os.walk(drv):\r\n i += 1\r\n if True in [game.name.lower() in i.lower() for i in files] and '.torrent' not in str(\r\n files) and 'AppData' not in root and 'Documents' not in root and 'Документы' not in root:\r\n return [root, files]\r\n self.handle(i, self.lenght)\r\n return False\r\n\r\n def delete_game(self, game):\r\n game.remove()\r\n self.main()\r\n\r\n def open_game(self, game):\r\n if self.game_state:\r\n return\r\n if not game.exe:\r\n self.game_state = True\r\n if self.lenght is None:\r\n self.lenght = sum(self.lenghts)\r\n exe = self.find_exe(game)\r\n self.game_state = False\r\n self.handle(0, 100)\r\n if exe:\r\n for i in exe[1]:\r\n if game.name in i and 'деинстал' not in i.lower() and 'unin' not in i.lower():\r\n path = exe[0] + '\\\\' + i\r\n game.exe = path.replace('/', '\\\\')\r\n elif 'деинст' in i.lower() or 'unins' in i.lower():\r\n path = exe[0] + '\\\\' + i\r\n game.uninstall_exe = path.replace('/', '\\\\')\r\n game.update_game()\r\n else:\r\n return self.error_message('Вы не установили игру!')\r\n try:\r\n os.startfile(game.exe)\r\n except Exception as e:\r\n print(e)\r\n return self.error_message('Не удалось найти игру!')\r\n\r\n def info_message(self, text):\r\n return QMessageBox.about(self, \"INFO\", text)\r\n\r\n def warn_message(self, text):\r\n return QMessageBox.warning(self, \"WARN\", text)\r\n\r\n def error_message(self, text):\r\n return QMessageBox.warning(self, \"ERROR\", text)\r\n","repo_name":"lrdcxdes/XGames","sub_path":"scene2.py","file_name":"scene2.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"43887571223","text":"from flask import Blueprint, render_template, abort, request, send_file\nfrom flask import jsonify\n\nfrom lwpcms.mongo import db\nfrom bson.objectid import ObjectId\nimport pymongo\n\nfrom lwpcms.api.files import file_thumbnail, make_tarfile, is_image\nfrom lwpcms.api.themes import get_themes\n\nimport os\n\n\nbp = Blueprint(\n __name__, __name__,\n template_folder='templates',\n url_prefix='/api'\n)\n\n@bp.route('/delete_file/', methods=['POST', 'GET'])\ndef delete_file(id):\n file = db.collections.find_one({\"_id\": ObjectId(id)})\n file_path = os.path.dirname(os.path.realpath(__file__))\\\n +'/../../static/upload/{}'.format(file[\"filename\"])\n\n os.remove(file_path)\n \n if is_image(file_path):\n for size in [64, 32, 128]:\n os.remove(\n os.path.dirname(os.path.realpath(__file__))\\\n +'/../../static/upload/{}'.format(\n file_thumbnail(file[\"filename\"], size)\n )\n )\n\n db.collections.delete_many({\"_id\": ObjectId(id)})\n return 'ok', 200\n\n\n@bp.route('/delete_post/', methods=['POST', 'GET'])\ndef delete_post(id):\n db.collections.delete_many({\"_id\": ObjectId(id)})\n return 'ok', 200\n\n\n@bp.route('/query_files/', defaults={'page': 0, 'limit': 100})\n@bp.route('/query_files///', methods=['POST', 'GET'])\ndef query_files(query, page, limit):\n\n page = int(page)\n limit = int(limit)\n\n if query != '*':\n obj = db.collections.find(\n {\n \"structure\": \"#File\",\n \"filename\": {\"$regex\": u\"[a-zA-Z]*{}[a-zA-Z]*\".format(query)}\n }\n ).sort('created', pymongo.DESCENDING)\n if page != -1 and limit != -1:\n obj.skip(page * limit).limit(limit)\n\n files = list(\n obj\n )\n else:\n obj = db.collections.find(\n {\n \"structure\": \"#File\"\n }\n ).sort('created', pymongo.DESCENDING)\n if page != -1 and limit != -1:\n obj.skip(page * limit).limit(limit)\n\n files = list(\n obj\n )\n\n return jsonify(\n {\n 'meta':{\n 'length': len(files)\n },\n 'files':[\n {\n 'id': str(file[\"_id\"]),\n 'filename': file[\"filename\"],\n }\n for file in files]\n } \n )\n\n\n@bp.route('/remove_attachment//', methods=['POST', 'GET'])\ndef remove_attachment(post_id, attach_id):\n db.collections.update_one(\n {\n '_id': ObjectId(post_id)\n },\n {\n '$pull': {\n 'attachments': {\n '_id': ObjectId(attach_id)\n }\n }\n }\n )\n return jsonify({\n 'status': 200\n }), 200\n\n\n@bp.route('/themes', methods=['POST', 'GET'])\ndef themes():\n all_themes = get_themes()\n\n for theme in all_themes:\n theme['url'] = request.url_root + 'api/themes/download/{}'.format(theme['name'])\n\n print(request.url_root)\n return jsonify({'themes': all_themes})\n\n\n@bp.route('/themes/download/', methods=['POST', 'GET'])\ndef themes_download(theme_name):\n theme = get_themes(theme_name)\n tarname = 'lwpcms/themes/tar/{}.tar.gz'.format(theme['name'])\n\n if not os.path.exists('lwpcms/themes/tar'):\n os.mkdir('lwpcms/themes/tar')\n\n if not os.path.exists(tarname):\n make_tarfile(tarname, 'lwpcms/' + theme['path'])\n\n return send_file('themes/tar/' + theme['name'] + '.tar.gz')\n","repo_name":"sebbekarlsson/LWPCMS","sub_path":"lwpcms/views/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31465915273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 28 13:31:47 2017\nUpdated on Mon Oct 4 15:14 2017\n@author: Kerk Phillips\n\"\"\"\nimport numpy as np\n\ndef AKsolve(Xguess, funcname, fparams, ccrit, damp, maxiter, shrinkon, \\\n shrink, expandon, expand, disttype, display):\n '''\n This function performs the Auerbach-Kotlikoff contraction mapping on a \n function.\n \n The inputs are:\n Xguess: An initial guess for the fixed point. Can be a scalar or\n matrix.\n funcname: Ahe name of the python function. It must take Xvalue as an\n argument with the same dimensions as Xguess, with fparams as \n parameters and return a new value for X, Xnew. \n fparams: A list of parameters used by funcname\n ccrit: The value for distance between Xvalue and Xnew that indicates\n convergence to the fixed point\n damp: The weight put on Xnew relative to Xvalue when moving to the\n next iteration; Xvalue = damp*Xnew + (1-damp)*Xvalue.\n maxiter: The maximum number of iterations allowed\n shrinkon: If true, the value of damp is scaled down when the distance\n between values of X in an iteration increases.\n shrink: The factor by which damp shrinks.\n expandon: If true, the value of damp is scaled up when the distance\n between values of X in an iteration does not increase.\n expand: The factor by which damp expands.\n disttype: Indicator variable for the method used to compute distance\n between Xvalue and Xnew\n 1: root mean squared differences (default)\n 2: mean absolute deviation\n 3: maximum absolute deviation\n display: If true, display iterations.\n \n The outputs are the fixed point, the last iteration's distanceand the\n number of iterations performed\n '''\n # initialize Xvalue\n Xvalue = Xguess\n # set initial distance measures\n dist = 1.0\n distold = 2.0\n # set counter\n count = 0\n # begin AK iterations\n print('Performing AK contraction mapping')\n while dist > ccrit:\n if count > maxiter:\n break\n Xnew = funcname(Xvalue, fparams)\n diff = Xnew - Xvalue\n if disttype == 2:\n dist = np.mean(np.absolute(diff))\n elif disttype == 3:\n dist = np.amax(np.absolute(diff))\n else:\n dist = (np.mean(diff**2))**.5\n # check if dist is falling, if not lower value of damp\n if (dist > distold) and (shrinkon):\n # shrink damp and redo with same Xvalue, do not update count\n damp = damp * shrink\n Xvalue = damp*Xnew + (1-damp)*Xvalue\n distold = dist\n else:\n # update Xvalue and count\n count = count + 1\n if expandon:\n # expand damp if it is < 1.0\n if damp < 1.0:\n damp = damp * expand\n else:\n damp = 1.0\n # take convex combination for new guess\n Xvalue = damp*Xnew + (1-damp)*Xvalue\n # replace old dist value\n distold = dist\n # show progress\n if display:\n print ('count: ', count, 'distance: ', dist, 'damp: ', damp)\n \n return Xvalue, dist, count","repo_name":"kerkphil/DSGE-Utilities","sub_path":"AK Fixed Point Solver/AKsolve.py","file_name":"AKsolve.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"3"} +{"seq_id":"17757597171","text":"# use unicode encoding for all literals by default (for python2.x)\nfrom __future__ import unicode_literals\n\n__author__ = \"Steffen Vogel\"\n__copyright__ = \"Copyright 2015-2017, Steffen Vogel\"\n__license__ = \"GPLv3\"\n__maintainer__ = \"Steffen Vogel\"\n__email__ = \"post@steffenvogel.de\"\n\n\"\"\"\n This file is part of transWhat\n\n transWhat is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n any later version.\n\n transwhat is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with transWhat. If not, see .\n\"\"\"\n\nimport Queue\nimport threading\n\n# This queue is for other threads that want to execute code in the main thread\neventQueue = Queue.Queue()\n\ndef runInThread(threadFunc, callback):\n\t\"\"\"\n\tExecutes threadFunc in a new thread. The result of threadFunc will be\n\tpass as the first argument to callback. callback will be called in the main\n\tthread.\n\t\"\"\"\n\tdef helper():\n\t\t# Execute threadfunc in new thread\n\t\tresult = threadFunc()\n\t\t# Queue callback to be call in main thread\n\t\teventQueue.put(lambda: callback(result))\n\tthread = threading.Thread(target=helper)\n\tthread.start()\n","repo_name":"stv0g/transwhat","sub_path":"transWhat/threadutils.py","file_name":"threadutils.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"3"} +{"seq_id":"18003000417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nv0.0.0 - Initial Version.\nv0.1.0 - Pipeline actually running.\n - To Do: Improve LaCosmic parameters.\nv0.1.1 - Fixed bug related to master bias and flat names.\n - Added prefix to reduced data.\nv0.1.2 - Fixed bug related to the logging system.\nv0.1.3 - Fixed bug that prevented reducing some filters.\n - At some point, it is useful to have the full filter name.\nv0.1.4 - Skipping existing ZERO and FLAT frames.\n - WCS is added to objects when merging amplifiers.\nv0.1.5 - sami_autoastrometry fixed (AGAIN!!).\n - Added try/except for missing RA/DEC.\n - Skipping existing object files.\n\"\"\"\nimport calendar\n\napi = 0\nfeature = 1\nbug = 5\n\nmonth = 7\nyear = 2018\n\nmonth = calendar.month_name[month]\n__str__ = \"{api:d}.{feature:d}.{bug:d} - {month:s}, {year:d}\".format(**locals())\n","repo_name":"soar-telescope/soar-optical-imager","sub_path":"soar_soi/tools/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39251087312","text":"from asyncio import get_event_loop\nfrom math import sqrt\nfrom os import environ, getpid\nfrom time import sleep\nimport warnings\nimport pandas\nfrom psutil import Process, net_io_counters, virtual_memory\nimport logging\nfrom workload_helper import load_dataset, run_send_thread\nfrom datetime import datetime, timedelta\nfrom hwcounter import Timer, count, count_end\n\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\nd = datetime.now() + timedelta(hours=2)\n\nlogger_filename = './log/' + d.strftime('%m_%d_%H_%M') + '_workload_logger.log'\nlogging.basicConfig(filename=logger_filename,\n encoding='utf-8', force=True, filemode='w')\n\n\n# workload_logger = setup_logger('workload_logger', './log/workload_logger.log')\n\n# By pass proxy eeror on cs dep vm\nenviron['no_proxy'] = '*'\n\n# first cpu call and net work to start counting before startign threads\np = Process(getpid())\np.cpu_percent()\n\n\n# Load coco dataset so that we can get the classes of the images,\n# the data is already since we had built it in the base image\ndataset = load_dataset()\n\nif 'Edges' not in environ:\n print('You did not specfy the edges. Please use the format \"Edges: edge1,edge2\"')\n exit(1)\n\nedges_str = environ['Edges']\n\nedges = edges_str.split(',')\n\n\nnum_of_images = [5 for _ in edges]\nnum_of_images.append(20) # last one is repeated\n\nif \"Images\" not in environ:\n print('You did not Images so sending 5 to all ')\nelse:\n images_ar = environ[\"Images\"].split(',')\n for i, val in enumerate(images_ar):\n num_of_images[i] = int(images_ar[i])\n\n\nsleep_time = 4\n\nif \"Monitor_sleep\" in environ:\n sleep_time = int(environ['Monitor_sleep'])\n\nworkloader_csv_name = './stats/' + \\\n d.strftime('%m_%d_%H_%M') + '_workload_monitor_'\nworkloader_csv_name2 = './stats/' + \\\n d.strftime('%m_%d_%H_%M') + '_workload_requests_'\n\nfor i, edge in enumerate(edges):\n workloader_filename = str(edge) + \\\n '_' + str(num_of_images[i]) + '_'\n\n workloader_csv_name += workloader_filename\n workloader_csv_name2 += workloader_filename\n\n\nworkloader_csv_name2 = workloader_csv_name2 + \\\n '_sleepTime_' + str(sleep_time) + '.csv'\n\nworkloader_csv_name = workloader_csv_name + \\\n '_sleepTime_' + str(sleep_time) + '.csv'\n\n\nprint('Starting workloader with :', p.cpu_percent(), '%')\n\n\ndf = pandas.DataFrame()\n\ndf.to_csv(workloader_csv_name)\ndf.to_csv(workloader_csv_name2)\n\nedge_urls = []\n\nfor i, edge in enumerate(edges):\n\n edge_url = 'http://' + edge + ':5000/endpoint'\n edge_urls.append(edge_url)\n\nget_event_loop().run_in_executor(\n None, run_send_thread, workloader_csv_name2, dataset, num_of_images, edge_urls) # fire and forget\n\n\nprint('Starting workloader monitor')\n\nbytes_sent_before = net_io_counters().bytes_sent\nstart_cpu = count()\n\nsleep(1)\ntry:\n while True:\n\n bytes_sent_after = net_io_counters().bytes_sent\n\n diff_sent = (bytes_sent_after - bytes_sent_before) / 1000\n\n bytes_sent_before = net_io_counters().bytes_sent\n start_cpu = count()\n\n elapsed = int(count_end() - start_cpu)\n mem = virtual_memory()\n\n vram_used = mem.used / 1024/1024 # MBytes\n ram_used = mem.active / 1024/1024 # MBytes\n\n data2 = {'cpu_cycles': elapsed, 'KBytes_sent': diff_sent,\n 'vram_used_MBytes': vram_used, 'ram_active_MBytes': ram_used}\n\n df = df.append(data2, ignore_index=True)\n df.to_csv(workloader_csv_name, mode='w')\n\n sleep(sleep_time)\nexcept Exception as e:\n print(e)\n","repo_name":"PanikosChristou99/Diplomatic_project","sub_path":"workload/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24747306548","text":"class Solution(object):\n def updateBoard(self, board, click):\n \"\"\"\n :type board: List[List[str]]\n :type click: List[int]\n :rtype: List[List[str]]\n \"\"\"\n \n\ns = Solution()\nprint(s.updateBoard(board=[['B', '1', 'E', '1', 'B'],\n ['B', '1', 'M', '1', 'B'],\n ['B', '1', '1', '1', 'B'],\n ['B', 'B', 'B', 'B', 'B']],click=[3,0]))","repo_name":"Victor-Alexandru/PrFrTagma","sub_path":"AmazonJanuary/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31040155363","text":"import sys\ninput = sys.stdin.readline\n\nboard=[list(map(int,input().split())) for _ in range(9)]\n\nboard_y=[ [0]*9 for _ in range(9)]\n\nboard_box=[[0]*9 for _ in range(9)]\n\nnum={1,2,3,4,5,6,7,8,9}\n\ndone=False\ndef dfs(n):\n global done\n if done:\n return\n for i in range(n,9):\n for j in range(9):\n if i==8 and j==8 and board[i][j]!=0 and done==False:\n for f in range(9):\n print(' '.join(map(str,board[f])))\n \n done=True\n\n if board[i][j]==0:\n tmp = num - set(board[i])\n tmp = tmp - set(board_y[j])\n tmp = tmp - set(board_box[(i//3)*3+j//3])\n if i==8 and j==8 and len(tmp)==1 and done==False:\n board[i][j]=list(tmp)[0]\n for f in range(9):\n print(' '.join(map(str,board[f])))\n \n done=True\n for k in tmp:\n board[i][j]=k\n dfs(i)\n board[i][j]=0\n\n\n\nfor i in range(9):\n for j in range(9):\n board_y[i][j]=board[j][i]\n\nfor i in range(9):\n for j in range(9):\n board_box[(i//3)*3+j//3][(i%3)*3+j%3]=board[i][j]\n\ndfs(0)","repo_name":"ske-kr/Myalgorithm","sub_path":"backtracking/wrong_sudoku.py","file_name":"wrong_sudoku.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5881005077","text":"import time\nimport subprocess\nimport async_timeout\nfrom pyrogram.errors import RPCError\nimport re\nimport collector\nimport cleaner\nimport export\nimport proxys\nfrom .Preprocessing import yaml_collect\nfrom .typechange import chg_type\nfrom .export_result import ExportResult\nfrom retry import retry\n\nsub_path = \"./temp/temp.yaml\"\n\n\nasync def testurl(client,task_queue,configs):\n taskname = task_queue.top()['task_name']\n msgchat_id = task_queue.top()['msgchat_id']\n msg_id = task_queue.top()['msg_id']\n bakmsg_id = task_queue.top()['bakmsg_id']\n clash_path = configs['path_clash']\n port = configs['mixed-port']\n api_port = configs['external-controller']\n subcvt = configs['api']\n color = configs['color']\n front = configs['front']\n print('进入函数')\n try:\n s1 = time.time()\n chat_id = msgchat_id\n info = {} # Netflix Youtube 等等\n # 获取订阅地址\n url =task_queue.top()['url']\n\n print(\"获取订阅链接:\"+url)\n\n # 启动下载配置文件\n suburl = url\n print(\"下载订阅:\")\n sub = yaml_collect(suburl,subcvt,api_port,port)\n down = await sub.downyaml()\n print(\"下载完成:\")\n print(down)\n # sub = collector.SubCollector(suburl=suburl)\n # config = await sub.getSubConfig()\n if not down:\n await client.edit_message_text(\n chat_id=chat_id,\n message_id=bakmsg_id,\n text=\"ERROR: 无法获取到订阅文件\"\n )\n return\n # 启动订阅清洗\n nodename,nodetype,node_sever,proxy_group = await sub.get_yaml() #返回结果\n print(nodename)\n if not nodename:\n await client.edit_message_text(\n chat_id=chat_id,\n message_id=bakmsg_id,\n text=\"ERROR: 无法获取到订阅文件\"\n )\n return\n newnode_type = chg_type(nodetype)\n # 启动clash进程\n print('启动clash')\n command = fr\"{clash_path} -f {sub_path}\"\n subp = subprocess.Popen(command.split(), encoding=\"utf-8\")\n time.sleep(2)\n # 进入循环,直到所有任务完成\n ninfo = [] # 存放所测节点Netflix的解锁信息\n youtube_info = []\n disneyinfo = []\n gpinginfo = []\n proxy_ping = {}\n fnode = []\n # 获取有延迟的node\n info_list = []\n progress = 0\n for n in nodename:\n resp = proxys.switchProxy(proxyName=n, proxyGroup=proxy_group,clashPort=api_port)\n cl = collector.Collector(n)\n print(\"切换节点: \",n)\n nodeinfo = await cl.start(n,api_port,proxy=\"http://127.0.0.1:{}\".format(port))\n nodeinfo['类型'] = newnode_type[progress]\n info_list.append(nodeinfo)\n p_text = \"%.2f\" % (progress / len(nodename) * 100)\n progress += 1\n if progress %5 == 0:\n await client.edit_message_text(\n chat_id=chat_id,\n message_id=bakmsg_id,\n text=\"╰(*°▽°*)╯流媒体测试进行中...\\n\\n\" +\n \"当前进度: \" + p_text + \" % [ \"+str(progress) +\"/\"+ str(len(nodename)) +\"]\"\n ) # 实时反馈进度\n # 关闭进程\n subp.kill()\n progress = 0\n new_y = []\n # 过滤None值\n for info in info_list:\n print(info)\n if info['netflix1'] =='解锁':\n if info['Netflix2'] =='解锁':\n info['Netflix'] = '解锁'\n else:\n info['Netflix'] = '自制'\n else:\n info['Netflix'] = '失败'\n new_data = sorted(info_list, key=lambda i: i[\"HTTPS Ping\"])\n nodename = [i['节点名称'] for i in new_data]\n nodetype = [i['类型'] for i in new_data]\n nodeping1 = [i['CLASH CHECK'] for i in new_data]\n nodeping2 = [i['HTTPS Ping'] for i in new_data]\n\n nodedalay1 = []\n for i in nodeping1:\n if i == 9999:\n i = -1\n delay = str(i) + 'ms'\n nodedalay1.append(delay)\n nodedalay2 = []\n usf_node = len(nodename)\n for i in nodeping2:\n if i == 9999:\n i = -1\n usf_node = usf_node-1\n delay = str(i) + 'ms'\n nodedalay2.append(delay)\n yt = [i['YouTube'] for i in new_data]\n nfx = [i['Netflix'] for i in new_data]\n disney = [i['Disney'] for i in new_data]\n info = {}\n info.update({'类型': nodetype})\n info.update({'CLASH CHECK': nodedalay1})\n info.update({'HTTPS Ping': nodedalay2})\n info.update({'Youtube': yt})\n info.update({'Netflix': nfx})\n info.update({'Disney': disney})\n wtime = \"%.1f\" % float(time.time() - s1)\n alive = str(usf_node) + '/' + str(len(nodename))\n book_dict = {}\n book_dict.update({'alive': alive})\n book_dict.update({'color': color})\n book_dict.update({'path_front': front})\n book_dict.update({'tasktime': wtime})\n book_dict.update({'taskname': \"%s-可乐瓶子--流媒体测试\"%taskname})\n book_dict.update({'thread_num': 8})\n book_dict.update({'timeout': 5})\n book_dict.update({'sort': 'Ping'})\n c1 = ExportResult(nodename, info, book_dict)\n export_time = c1.exportAsPng()\n # 计算测试消耗时间\n # 生成图片\n # 发送回TG\n with async_timeout.timeout(15):\n if len(nodename) > 35:\n await client.send_document(\n chat_id=chat_id,\n document=r\"./results/result-{}.png\".format(export_time),\n caption=\"⏱️总共耗时: {}s\".format(wtime)\n )\n else:\n await client.send_photo(\n chat_id=chat_id,\n photo=r\"./results/result-{}.png\".format(export_time),\n caption=\"⏱️总共耗时: {}s\".format(wtime)\n )\n except RPCError as r:\n print(r)\n await client.edit_message_text(\n chat_id=msgchat_id,\n message_id=bakmsg_id,\n text=\"出错啦\"\n )\n except KeyboardInterrupt:\n await client.edit_message_text(\n chat_id=msgchat_id,\n message_id=bakmsg_id,\n text=\"程序已被强行中止\"\n\n )\n subp.kill()\n","repo_name":"RenaLio/zusu","sub_path":"utils/streamingtest.py","file_name":"streamingtest.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74405565522","text":"import os, argparse, sklearn\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torchvision.transforms as transforms\r\nimport torchvision.datasets as datasets\r\nfrom tensorboardX import SummaryWriter\r\n\r\nfrom config import get_config\r\nfrom image_iter import FaceDataset\r\n\r\nfrom util.utils import (\r\n separate_irse_bn_paras,\r\n separate_resnet_bn_paras,\r\n separate_mobilefacenet_bn_paras,\r\n)\r\nfrom util.utils import (\r\n get_val_data,\r\n perform_val,\r\n get_time,\r\n buffer_val,\r\n AverageMeter,\r\n train_accuracy,\r\n)\r\n\r\nimport time\r\nfrom vit_pytorch import ViT_face\r\nfrom vit_pytorch import ViTs_face\r\nfrom vit_pytorch import NAT # Imported the NAT\r\n\r\n# from IPython import embed\r\nfrom timm.scheduler import create_scheduler\r\nfrom timm.optim import create_optimizer\r\n\r\n\r\n# ======= Added epoch_change boolean value, such that the checkpoint is saved when a new epoch starts =======#\r\ndef need_save(acc, highest_acc, is_epoch_change):\r\n if is_epoch_change:\r\n return True\r\n do_save = False\r\n save_cnt = 0\r\n if acc[0] > 0.49:\r\n do_save = True\r\n for i, accuracy in enumerate(acc):\r\n if accuracy > highest_acc[i]:\r\n highest_acc[i] = accuracy\r\n do_save = True\r\n if i > 0 and accuracy >= highest_acc[i] - 0.002:\r\n save_cnt += 1\r\n if save_cnt >= len(acc) * 3 / 4 and acc[0] > 0.99:\r\n do_save = True\r\n print(\"highest_acc:\", highest_acc)\r\n return do_save\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(\r\n description=\"for face verification\",\r\n )\r\n parser.add_argument(\r\n \"-w\",\r\n \"--workers_id\",\r\n help=\"gpu ids or cpu ['0', '1', '2', '3'] (default: )\",\r\n default=\"cpu\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"-e\",\r\n \"--epochs\",\r\n help=\"training epochs\",\r\n default=1,\r\n type=int,\r\n )\r\n parser.add_argument(\r\n \"-b\",\r\n \"--batch_size\",\r\n help=\"batch_size\",\r\n default=256,\r\n type=int,\r\n )\r\n parser.add_argument(\r\n \"-d\",\r\n \"--data_mode\",\r\n help=\"use which database, [casia, vgg, ms1m, retina, ms1mr]\",\r\n default=\"ms1m\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"-n\",\r\n \"--net\",\r\n help=\"which network, ['VIT','VITs','SWT','NAT]\",\r\n default=\"VITs\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"-head\",\r\n \"--head\",\r\n help=\"head type, ['Softmax', 'ArcFace', 'CosFace', 'SFaceLoss']\",\r\n default=\"ArcFace\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"-t\",\r\n \"--target\",\r\n help=\"verification targets\",\r\n default=\"lfw\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"-r\",\r\n \"--resume\",\r\n help=\"resume model\",\r\n default=\"\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"--outdir\",\r\n help=\"output dir\",\r\n default=\"\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"--model\",\r\n help=\"model name for nat\",\r\n default=\"nat_mini\",\r\n type=str,\r\n )\r\n parser.add_argument(\r\n \"--opt\",\r\n default=\"adamw\",\r\n type=str,\r\n metavar=\"OPTIMIZER\",\r\n help='Optimizer (default: \"adamw\")',\r\n )\r\n parser.add_argument(\r\n \"--opt-eps\",\r\n default=1e-8,\r\n type=float,\r\n metavar=\"EPSILON\",\r\n help=\"Optimizer Epsilon (default: 1e-8)\",\r\n )\r\n parser.add_argument(\r\n \"--opt-betas\",\r\n default=None,\r\n type=float,\r\n nargs=\"+\",\r\n metavar=\"BETA\",\r\n help=\"Optimizer Betas (default: None, use opt default)\",\r\n )\r\n parser.add_argument(\r\n \"--momentum\",\r\n type=float,\r\n default=0.9,\r\n metavar=\"M\",\r\n help=\"SGD momentum (default: 0.9)\",\r\n )\r\n parser.add_argument(\r\n \"--weight-decay\",\r\n type=float,\r\n default=0.05,\r\n help=\"weight decay (default: 0.05)\",\r\n )\r\n\r\n # ======= Learning rate schedule parameters =======#\r\n parser.add_argument(\r\n \"--sched\",\r\n default=\"cosine\",\r\n type=str,\r\n metavar=\"SCHEDULER\",\r\n help='LR scheduler (default: \"cosine\")',\r\n )\r\n parser.add_argument(\r\n \"--lr\",\r\n type=float,\r\n default=5e-4,\r\n metavar=\"LR\",\r\n help=\"learning rate (default: 5e-4)\",\r\n )\r\n parser.add_argument(\r\n \"--lr-noise\",\r\n type=float,\r\n nargs=\"+\",\r\n default=None,\r\n metavar=\"pct, pct\",\r\n help=\"learning rate noise on/off epoch percentages\",\r\n )\r\n parser.add_argument(\r\n \"--lr-noise-pct\",\r\n type=float,\r\n default=0.67,\r\n metavar=\"PERCENT\",\r\n help=\"learning rate noise limit percent (default: 0.67)\",\r\n )\r\n parser.add_argument(\r\n \"--lr-noise-std\",\r\n type=float,\r\n default=1.0,\r\n metavar=\"STDDEV\",\r\n help=\"learning rate noise std-dev (default: 1.0)\",\r\n )\r\n parser.add_argument(\r\n \"--warmup-lr\",\r\n type=float,\r\n default=1e-6,\r\n metavar=\"LR\",\r\n help=\"warmup learning rate (default: 1e-6)\",\r\n )\r\n parser.add_argument(\r\n \"--min-lr\",\r\n type=float,\r\n default=1e-5,\r\n metavar=\"LR\",\r\n help=\"lower lr bound for cyclic schedulers that hit 0 (1e-5)\",\r\n )\r\n\r\n parser.add_argument(\r\n \"--decay-epochs\",\r\n type=float,\r\n default=30,\r\n metavar=\"N\",\r\n help=\"epoch interval to decay LR\",\r\n )\r\n parser.add_argument(\r\n \"--warmup-epochs\",\r\n type=int,\r\n default=3,\r\n metavar=\"N\",\r\n help=\"epochs to warmup LR, if scheduler supports\",\r\n )\r\n parser.add_argument(\r\n \"--cooldown-epochs\",\r\n type=int,\r\n default=10,\r\n metavar=\"N\",\r\n help=\"epochs to cooldown LR at min_lr, after cyclic schedule ends\",\r\n )\r\n parser.add_argument(\r\n \"--patience-epochs\",\r\n type=int,\r\n default=10,\r\n metavar=\"N\",\r\n help=\"patience epochs for Plateau LR scheduler (default: 10)\",\r\n )\r\n parser.add_argument(\r\n \"--decay-rate\",\r\n \"--dr\",\r\n type=float,\r\n default=0.1,\r\n metavar=\"RATE\",\r\n help=\"LR decay rate (default: 0.1)\",\r\n )\r\n args = parser.parse_args()\r\n\r\n # ======= Hyperparameters & Data Loaders =======#\r\n cfg = get_config(args)\r\n\r\n SEED = cfg[\"SEED\"] # Random Seed for Reproduce results\r\n torch.manual_seed(SEED)\r\n\r\n DATA_ROOT = cfg[\r\n \"DATA_ROOT\"\r\n ] # The parent root where your train/val/test data are stored\r\n EVAL_PATH = cfg[\"EVAL_PATH\"]\r\n WORK_PATH = cfg[\r\n \"WORK_PATH\"\r\n ] # The root to buffer your checkpoints and to log your train/val status\r\n BACKBONE_RESUME_ROOT = cfg[\r\n \"BACKBONE_RESUME_ROOT\"\r\n ] # The root to resume training from a saved checkpoint\r\n\r\n BACKBONE_NAME = cfg[\"BACKBONE_NAME\"]\r\n HEAD_NAME = cfg[\r\n \"HEAD_NAME\"\r\n ] # Support: ['Softmax', 'ArcFace', 'CosFace', 'SFaceLoss']\r\n\r\n INPUT_SIZE = cfg[\"INPUT_SIZE\"]\r\n EMBEDDING_SIZE = cfg[\"EMBEDDING_SIZE\"] # Feature Dimension\r\n BATCH_SIZE = cfg[\"BATCH_SIZE\"]\r\n NUM_EPOCH = 125\r\n\r\n DEVICE = cfg[\"DEVICE\"]\r\n MULTI_GPU = cfg[\"MULTI_GPU\"] # Flag to use multiple GPUs\r\n GPU_ID = cfg[\"GPU_ID\"] # Specify GPU ids\r\n print(\"GPU_ID\", GPU_ID)\r\n TARGET = cfg[\"TARGET\"]\r\n print(\"=\" * 60)\r\n print(\"Overall Configurations:\")\r\n print(cfg)\r\n with open(os.path.join(WORK_PATH, \"config.txt\"), \"w\") as f:\r\n f.write(str(cfg))\r\n print(\"=\" * 60)\r\n\r\n writer = SummaryWriter(WORK_PATH) # Writer for buffering intermedium results\r\n torch.backends.cudnn.benchmark = True\r\n\r\n with open(os.path.join(DATA_ROOT, \"property\"), \"r\") as f:\r\n NUM_CLASS, h, w = [int(i) for i in f.read().split(\",\")]\r\n assert h == INPUT_SIZE[0] and w == INPUT_SIZE[1]\r\n\r\n dataset = FaceDataset(os.path.join(DATA_ROOT, \"train.rec\"), rand_mirror=True)\r\n trainloader = torch.utils.data.DataLoader(\r\n dataset,\r\n batch_size=BATCH_SIZE,\r\n shuffle=True,\r\n num_workers=len(GPU_ID),\r\n drop_last=True,\r\n )\r\n\r\n print(\"Number of Training Classes: {}\".format(NUM_CLASS))\r\n\r\n vers = get_val_data(EVAL_PATH, TARGET)\r\n highest_acc = [0.0 for t in TARGET]\r\n\r\n # embed()\r\n\r\n # ======= Model, Loss & Optimizer =======#\r\n BACKBONE_DICT = {\r\n \"VIT\": ViT_face(\r\n loss_type=HEAD_NAME,\r\n GPU_ID=GPU_ID,\r\n num_class=NUM_CLASS,\r\n image_size=112,\r\n patch_size=16,\r\n dim=512,\r\n depth=20,\r\n heads=8,\r\n mlp_dim=2048,\r\n dropout=0.1,\r\n emb_dropout=0.1,\r\n ),\r\n \"VITs\": ViTs_face(\r\n loss_type=HEAD_NAME,\r\n GPU_ID=GPU_ID,\r\n num_class=NUM_CLASS,\r\n image_size=112,\r\n patch_size=16,\r\n ac_patch_size=12,\r\n pad=4,\r\n dim=512,\r\n depth=20,\r\n heads=8,\r\n mlp_dim=2048,\r\n dropout=0.1,\r\n emb_dropout=0.1,\r\n ),\r\n # Used the NAT model. Used the nat_mini model with the required hyperparameters.\r\n \"NAT\": NAT(\r\n depths=[3, 4, 18, 5],\r\n num_heads=[2, 4, 8, 16],\r\n mlp_ratio=3,\r\n embed_dim=64,\r\n drop_path_rate=0.2,\r\n kernel_size=7,\r\n num_classes=NUM_CLASS,\r\n ),\r\n }\r\n\r\n BACKBONE = BACKBONE_DICT[BACKBONE_NAME]\r\n\r\n print(\"=\" * 60)\r\n print(BACKBONE)\r\n print(\"{} Backbone Generated\".format(BACKBONE_NAME))\r\n print(\"=\" * 60)\r\n\r\n LOSS = nn.CrossEntropyLoss()\r\n\r\n # embed()\r\n\r\n OPTIMIZER = create_optimizer(args, BACKBONE)\r\n print(\"=\" * 60)\r\n print(OPTIMIZER)\r\n print(\"Optimizer Generated\")\r\n print(\"=\" * 60)\r\n lr_scheduler, _ = create_scheduler(args, OPTIMIZER)\r\n\r\n epoch = 0 # Setting Epoch\r\n\r\n # Multi-GPU setting\r\n if MULTI_GPU:\r\n BACKBONE = nn.DataParallel(BACKBONE, device_ids=GPU_ID)\r\n BACKBONE = BACKBONE.to(DEVICE)\r\n\r\n # Single-GPU setting\r\n else:\r\n BACKBONE = BACKBONE.to(DEVICE)\r\n\r\n INITIAL = -1\r\n batch = 0 # Batch Index\r\n\r\n # Optionally resume from a checkpoint\r\n if BACKBONE_RESUME_ROOT:\r\n print(\"=\" * 60)\r\n print(BACKBONE_RESUME_ROOT)\r\n\r\n \"\"\"\r\n Loaded the checkpoint parameters, model state dictionary, optimizer dictionary, epoch, loss, batch\r\n \"\"\"\r\n\r\n if os.path.isfile(BACKBONE_RESUME_ROOT):\r\n print(\"Loading Backbone Checkpoint '{}'\".format(BACKBONE_RESUME_ROOT))\r\n checkpoint = torch.load(\r\n BACKBONE_RESUME_ROOT,\r\n map_location=DEVICE,\r\n )\r\n BACKBONE.load_state_dict(checkpoint[\"model_state_dict\"])\r\n OPTIMIZER.load_state_dict(checkpoint[\"optimizer_state_dict\"])\r\n epoch = checkpoint[\"epoch\"]\r\n INITIAL = checkpoint[\"epoch\"]\r\n LOSS = checkpoint[\"loss\"]\r\n BATCH = checkpoint[\"batch\"]\r\n else:\r\n print(\r\n \"No Checkpoint Found at '{}' . Please Have a Check or Continue to Train from Scratch\".format(\r\n BACKBONE_RESUME_ROOT\r\n )\r\n )\r\n print(\"=\" * 60)\r\n\r\n # ======= Train, Validation & Save checkpoint =======#\r\n DISP_FREQ = 10 # Frequency to display training loss & accuracy\r\n VER_FREQ = 100\r\n\r\n losses = AverageMeter()\r\n top1 = AverageMeter()\r\n\r\n batches = len(trainloader)\r\n\r\n BACKBONE.train() # Set to training mode\r\n\r\n # ======= The epoch starts from the checkpoint epoch =======#\r\n while epoch < NUM_EPOCH: # Start Training process\r\n lr_scheduler.step(epoch)\r\n\r\n last_time = time.time()\r\n\r\n for inputs, labels in iter(trainloader):\r\n if INITIAL == epoch and batch <= BATCH:\r\n batch += 1\r\n continue\r\n\r\n # ======= Compute output =======#\r\n inputs = inputs.to(DEVICE)\r\n labels = labels.to(DEVICE).long()\r\n\r\n \"\"\"\r\n The NAT backbone only requires the input values, thus when the backbone is NAT, pass the inputs, else the labels and the inputs is passed.\r\n \"\"\"\r\n\r\n if BACKBONE_NAME == \"NAT\":\r\n outputs = BACKBONE(inputs.float())\r\n else:\r\n outputs, emb = BACKBONE(inputs.float(), labels)\r\n\r\n # print(outputs.shape)\r\n\r\n loss = LOSS(outputs, labels)\r\n\r\n # print(\"outputs\", outputs, outputs.data)\r\n\r\n # Measure accuracy and record loss =======#\r\n prec1 = train_accuracy(outputs.data, labels, topk=(1,))\r\n\r\n losses.update(loss.data.item(), inputs.size(0))\r\n top1.update(prec1.data.item(), inputs.size(0))\r\n\r\n # ======= Compute Gradient Descent & do SGD step =======#\r\n OPTIMIZER.zero_grad()\r\n loss.backward()\r\n OPTIMIZER.step()\r\n\r\n # ======= Display training loss & acc every DISP_FREQ (buffer for visualization) =======#\r\n if ((batch + 1) % DISP_FREQ == 0) and batch != 0:\r\n epoch_loss = losses.avg\r\n epoch_acc = top1.avg\r\n\r\n writer.add_scalar(\"Training/Training_Loss\", epoch_loss, batch + 1)\r\n writer.add_scalar(\"Training/Training_Accuracy\", epoch_acc, batch + 1)\r\n\r\n batch_time = time.time() - last_time\r\n last_time = time.time()\r\n\r\n print(\r\n \"Epoch {} Batch {}\\t\"\r\n \"Speed: {speed:.2f} samples/s\\t\"\r\n \"Training Loss {loss.val:.4f} ({loss.avg:.4f})\\t\"\r\n \"Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\".format(\r\n epoch + 1,\r\n batch + 1,\r\n speed=inputs.size(0) * DISP_FREQ / float(batch_time),\r\n loss=losses,\r\n top1=top1,\r\n )\r\n )\r\n\r\n # print(\"=\" * 60)\r\n losses = AverageMeter()\r\n top1 = AverageMeter()\r\n\r\n # ======= Added another condition that when epoch changes i.e batch % number_of_batches is 0 =======#\r\n if (\r\n ((batch + 1) % VER_FREQ == 0) or batch % batches == 0\r\n ) and batch != 0: # Perform Validation & Save checkpoints (Buffer for Visualization)\r\n for params in OPTIMIZER.param_groups:\r\n lr = params[\"lr\"]\r\n break\r\n print(\"Learning rate %f\" % lr)\r\n print(\"Perform Evaluation on\", TARGET, \", and Save Checkpoints...\")\r\n acc = []\r\n for ver in vers:\r\n name, data_set, issame = ver\r\n accuracy, std, xnorm, best_threshold, roc_curve = perform_val(\r\n MULTI_GPU,\r\n DEVICE,\r\n EMBEDDING_SIZE,\r\n BATCH_SIZE,\r\n BACKBONE,\r\n data_set,\r\n issame,\r\n )\r\n buffer_val(\r\n writer,\r\n name,\r\n accuracy,\r\n std,\r\n xnorm,\r\n best_threshold,\r\n roc_curve,\r\n batch + 1,\r\n )\r\n print(\"[%s][%d]XNorm: %1.5f\" % (name, batch + 1, xnorm))\r\n print(\r\n \"[%s][%d]Accuracy-Flip: %1.5f+-%1.5f\"\r\n % (name, batch + 1, accuracy, std)\r\n )\r\n print(\r\n \"[%s][%d]Best-Threshold: %1.5f\"\r\n % (name, batch + 1, best_threshold)\r\n )\r\n acc.append(accuracy)\r\n\r\n is_epoch_change = False\r\n if batch % batches == 0:\r\n is_epoch_change = True\r\n\r\n # ======= Save checkpoints per epoch =======#\r\n if need_save(acc, highest_acc, is_epoch_change):\r\n if is_epoch_change:\r\n print(\"Saving on Epoch change...\")\r\n print(f\"After Epoch {epoch}\")\r\n\r\n \"\"\" While Saving, saved epoch, optimizer, model, loss, and batch \"\"\"\r\n\r\n if MULTI_GPU:\r\n torch.save(\r\n BACKBONE.module.state_dict(),\r\n os.path.join(\r\n WORK_PATH,\r\n \"Backbone_{}_checkpoint.pth\".format(\r\n BACKBONE_NAME,\r\n ),\r\n ),\r\n )\r\n else:\r\n torch.save(\r\n {\r\n \"epoch\": epoch,\r\n \"model_state_dict\": BACKBONE.state_dict(),\r\n \"optimizer_state_dict\": OPTIMIZER.state_dict(),\r\n \"loss\": LOSS,\r\n \"batch\": batch,\r\n },\r\n os.path.join(\r\n WORK_PATH,\r\n \"Backbone_{}_LR_checkpoint.pth\".format(BACKBONE_NAME),\r\n ),\r\n )\r\n\r\n BACKBONE.train() # Set to Training mode\r\n\r\n batch += 1 # Batch Index\r\n\r\n epoch += 1\r\n","repo_name":"SUPRIO24/Face-Transformer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32627212390","text":"\n\"\"\"\n*******************************************************************\n\nCheck whether the answer can be a node in the constituency graph.\n\n*******************************************************************\n\"\"\"\n\nimport os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = '7'\n\nimport json\nimport argparse\nfrom tqdm import tqdm\nfrom nltk.tokenize import sent_tokenize, word_tokenize, WhitespaceTokenizer\nimport spacy,benepar\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch_scatter\nfrom transformers import BatchEncoding\n\n\nprint('Initialize the spaCy model...')\n#initialize the parsing model\nspacy.prefer_gpu()\nnlp = spacy.load('en_core_web_trf')\nnlp.add_pipe('benepar', config={'model': 'benepar_en3_large'})\n\nif spacy.prefer_gpu():\n print(\"SpaCy is using GPU.\")\nelse:\n print(\"SpaCy does not use GPU.\")\n\ndef load_json(file):\n return json.load(open(file,'r'))\n\ndef processConstituency(pStr):\n nodes = []\n cur = \"\";\n stack = [];\n nid = 0;\n wordIndex = 0\n for i in range(len(pStr)):\n if(pStr[i] == ' ' or pStr[i] == '\\n'):\n if (len(cur) > 0):\n newNode = {\n \"nodeID\": nid,\n \"nodeType\": \"Internal\",\n \"name\": cur,\n \"children\": []\n }\n cur = \"\";\n nid += 1;\n if (len(stack) > 0):\n stack[len(stack) - 1][\"children\"].append(newNode);\n stack.append(newNode);\n nodes.append(newNode)\n elif pStr[i] == ')':\n if (len(cur) > 0):\n newNode = {\n \"nodeID\": nid,\n \"nodeType\": \"Leaf\",\n \"name\": cur,\n \"wordIndex\": wordIndex,\n \"children\": []\n }\n cur = \"\";\n nid += 1;\n wordIndex += 1;\n stack[len(stack) - 1][\"children\"].append(newNode);\n nodes.append(newNode)\n stack.pop();\n else:\n if (len(stack) == 1):\n root = stack[0]\n stack.pop();\n elif pStr[i] == '(':\n continue\n else:\n cur = cur + pStr[i];\n return nodes\n\ndef process_text(sent):\n return sent.strip(' ').strip('\\n').replace('″','\"').replace('…','...').replace('½','*').replace('\\n',' ').replace(' ',' ').replace('´','\\'').replace('fl','f').replace('№','No')\n\n# def get_sent_id(sents,answer_start_pos):\n# count=0\n# for i,sent in enumerate(sents):\n# if sent==\" \" or \"\":\n# continue\n# #print(sent+\"********\")\n# try:\n# count+=list(WhitespaceTokenizer().span_tokenize(sent))[-1][1]+1\n# except:\n# import pdb;pdb.set_trace()\n# if count > answer_start_pos:\n# break\n# return i\n\n# #meanshile, generate constituency parsing results\ndef get_sent_id(sents,answer_start_pos):\n count=0\n for i,sent in enumerate(sents):\n count+=list(WhitespaceTokenizer().span_tokenize(sent))[-1][1]+1\n if count > answer_start_pos:\n break\n return i\n\ndef parse_context(spacy_doc):\n sents=[]\n parsed_sents=[]\n for i,sent in enumerate(spacy_doc.sents):\n if str(sent) == \" \" or \"\":\n continue\n sents += [str(sent)]\n parsed_sents += [sent._.parse_string]\n return sents,parsed_sents\n\ndef get_leaves(node,words=[]):\n\n if node['children'] == []:\n words += [node['name']]\n else:\n for each in node['children']:\n get_leaves(each,words)\n return\n\ndef get_constituents(nodes):\n constituents=[]\n for node_id,node in enumerate(nodes):\n words=[]\n get_leaves(node,words)\n constituent = ' '.join(words)\n constituents += [(node_id,constituent)]\n return constituents\n\ndef reduce_nodes(nodes):\n\n reduced_nodes = []\n reduced_nodeid_mapping = {}\n\n for i,node in enumerate(nodes):\n if node['nodeType'] == 'Internal' and len(node['children'])==1 and node['children'][0]['nodeType'] == 'Leaf':\n continue\n else:\n #print(i,node['nodeID'])\n reduced_nodeid_mapping[node['nodeID']] = len(reduced_nodes)\n node['nodeID'] = len(reduced_nodes)\n reduced_nodes += [node]\n\n return reduced_nodes,reduced_nodeid_mapping\n\ndef update_nodeid(node,reduced_nodeid_mapping):\n if not node['children']:\n return\n else:\n for child in node['children']:\n if child['nodeID'] in reduced_nodeid_mapping:\n child['nodeID']=reduced_nodeid_mapping[child['nodeID']]\n update_nodeid(child,reduced_nodeid_mapping)\n\ndef main(args):\n\n data = load_json('../data/{}-v{}-modified.json'.format(args.data_split,args.squad_version))\n #parsed_by_qid = load_json('./all_con_parsed_by_qid_{}.json'.format(args.data_split))\n\n have_multiple_answer_nodes=[]\n cannot_find_answer_nodes=[]\n\n parsed_info={}\n\n for doc_id,doc in enumerate(tqdm(data['data'])):\n for para_id,para in enumerate(doc['paragraphs']):\n context = para['context']\n try:\n spacy_doc=nlp(context)\n except:\n try:\n spacy_doc=nlp(process_text(context))\n except:\n import pdb;pdb.set_trace\n sents,parsed_sents = parse_context(spacy_doc)\n for qa_id,qa in enumerate(para['qas']):\n qid = qa['id']\n answer = qa['answers'][0]['text']\n answer_start_pos = qa['answers'][0]['answer_start']\n\n # # already have the parsed results\n # sent_id = get_sent_id(doc,context,answer_start_pos)\n # parsed_sent = parsed_by_qid[qid]['parsed_context'][sent_id]\n\n #generate the parsed results\n\n sent_id = get_sent_id(sents,answer_start_pos)\n parsed_sent = parsed_sents[sent_id]\n nodes = processConstituency(parsed_sent)\n\n if args.reduce_nodes_operation:\n nodes,reduced_nodeid_mapping=reduce_nodes(nodes)\n for node in nodes:\n update_nodeid(node, reduced_nodeid_mapping)\n\n constituents = get_constituents(nodes)\n\n answer_nodes=[]\n for constituent in constituents:\n # predict among all virtual nodes\n if constituent[1] == answer and nodes[constituent[0]]['nodeType']=='Internal':\n # add this constraint -> only predict among the virtual nodes that do not represent the pos tags\n if len(nodes[constituent[0]]['children']) == 1 and nodes[constituent[0]]['children'][0]['nodeType']=='Leaf':\n continue\n answer_nodes += [constituent]\n\n\n if len(answer_nodes)>1:\n have_multiple_answer_nodes += [(doc_id,para_id,qa_id,qid)]\n continue\n\n if answer_nodes == []:\n cannot_find_answer_nodes += [(doc_id,para_id,qa_id,qid)]\n continue\n\n question = qa['question']\n try:\n spacy_q = nlp(question)\n except:\n spacy_q=nlp(process_text(context))\n q,parsed_q = parse_context(spacy_q)\n\n parsed_info[qid]={}\n parsed_info[qid]['doc_id'] = doc_id\n parsed_info[qid]['para_id'] = para_id\n parsed_info[qid]['qa_id'] = qa_id\n parsed_info[qid]['tokenized_context'] = sents\n parsed_info[qid]['conparsed_context'] = parsed_sents\n parsed_info[qid]['virtual_noodes'] = nodes\n parsed_info[qid]['answer_sent_id'] = sent_id\n parsed_info[qid]['answer_node_id'] = answer_nodes\n parsed_info[qid]['tokenized_question'] = q\n parsed_info[qid]['conparsed_question'] = parsed_q\n\n # with open('original_parsed_sents_part1'.format(\"train\"),'a') as fout:\n # for sent_idx,sent in enumerate(sents):\n # fout.write(str(doc_id)+' '+str(para_id)+' '+str(qa_id)+' '+qid+str(sent_idx)+' '+sent+'\\n')\n\n # with open('constituency_parsed_sents_part1'.format(\"train\"),'a') as fout:\n # for sent_idx,parsed_sent in enumerate(parsed_sents):\n # fout.write(str(doc_id)+' '+str(para_id)+' '+str(qa_id)+' '+qid+str(sent_idx)+' '+parsed_sent+'\\n')\n # if qid == '57339a5bd058e614000b5e91':\n # import pdb;pdb.set_trace()\n # except:\n # import pdb;pdb.set_trace()\n\n # with open('have_multiple_answer_nodes_{}_part2'.format(args.data_split),'w') as fh:\n # for line in have_multiple_answer_nodes:\n # for item in line[:-1]:\n # fh.write(str(item)+' ')\n # fh.write(line[-1]+'\\n')\n # #fout.write(str(doc_id)+' '+str(para_id)+' '+str(qa_id)+' '+qid+'\\n')\n\n # with open('cannot_find_answer_nodes_{}_part2'.format(args.data_split),'w') as fc:\n # #fout.write(str(doc_id)+' '+str(para_id)+' '+str(qa_id)+' '+qid+'\\n')\n # for line in cannot_find_answer_nodes:\n # for item in line[:-1]:\n # fc.write(str(item)+' ')\n # fc.write(line[-1]+'\\n')\n\n\n with open('have_multiple_answer_nodes_{}_reduced.pkl'.format(args.data_split),'wb') as fh:\n pickle.dump(have_multiple_answer_nodes,fh)\n\n with open('cannot_find_answer_nodes_{}_reduced.pkl'.format(args.data_split),'wb') as fc:\n pickle.dump(cannot_find_answer_nodes,fc)\n\n with open('parsed_info_original_{}_reduced.pkl'.format(args.data_split),'wb') as fout:\n pickle.dump(parsed_info,fout)\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_split','-d', default=None, required=True, type=str, help='dev or train')\n parser.add_argument('--squad_version','-v', default='1.1', type=str, help='1.1 or 2.0')\n parser.add_argument('--reduce_nodes_operation', '-reduce_nodes', action='store_true', help='whether reduce nodes')\n\n args = parser.parse_args()\n\n main(args)\n","repo_name":"summer1030/GraphQA","sub_path":"utils/ProcessAnswer.py","file_name":"ProcessAnswer.py","file_ext":"py","file_size_in_byte":10366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10447191390","text":"from plan2vec_experiments import instr, config_charts\nfrom plan2vec.plan2vec.plan2vec_streetlearn_2 import DEBUG, Args, main\nimport jaynes\n\n\ndef common_config():\n Args.seed = 5 * 100\n\n Args.num_epochs = 500\n Args.lr = 3e-5\n Args.gamma = 0.97\n Args.target_update = 0.9\n Args.top_k = None\n Args.plan_steps = 1\n Args.H = 50\n Args.r_scale = 0.2\n\n Args.optim_epochs = 32\n\n Args.latent_dim = 2\n\n # make this one to see early stage to make sure\n Args.visualization_interval = 10\n # turn off checkpointing b/c models are large\n Args.checkpoint_interval = None\n Args.binary_reward = None\n\n local_metric_exp_path = \"episodeyang/plan2vec/2019/06-20/streetlearn/local_metric/23.19/07.247751\"\n Args.load_local_metric = f\"/{local_metric_exp_path}/models/local_metric_400.pkl\"\n\n\ndef plan2vec(dataset, prefix):\n Args.data_path = f\"~/fair/streetlearn/processed-data/{dataset}\"\n\n DEBUG.pretrain_global = True\n DEBUG.value_fn_pretrain_global = True\n DEBUG.supervised_value_fn = True\n\n assert Args.binary_reward is None\n assert DEBUG.oracle_planning is False\n Args.term_r, DEBUG.ground_truth_success = 2e-4, True\n DEBUG.ground_truth_neighbor_r = 2e-4\n DEBUG.real_r_distance = False\n _ = instr(main, __postfix=f\"coord-value-pretrain/{prefix}\", **vars(Args), _DEBUG=vars(DEBUG), __up=-1)\n config_charts(path=\"coord-value-pretrain.charts.yml\")\n jaynes.run(_)\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n common_config()\n\n param_dict = {\n 'ResNet18L2': {\n \"lr\": [1e-6, 3e-6, 1e-7, 3e-7],\n },\n # 'GlobalMetricConvL2_s1': {\"lr\": [1e-6, 3e-6, 6e-6]},\n # 'GlobalMetricConvDeepL2': {\"lr\": [1e-6, 3e-6, 6e-6]},\n # 'GlobalMetricConvDeepL2_wide': {\"lr\": [1e-6, 3e-6, 6e-6]}\n }\n\n # ResNet requires much less memory than the other.\n Args.global_metric = 'ResNet18L2'\n _ = param_dict['ResNet18L2']\n\n jaynes.config(\"vector-gpu\")\n\n for key in ['tiny', 'small', 'medium', 'large', 'xl']:\n for lr in _['lr']:\n DEBUG.pretrain_lr = lr\n DEBUG.value_fn_pretrain_lr = lr\n Args.lr = lr / 10.\n\n plan2vec(f\"manhattan-{key}\", f\"manhattan-{key}/{Args.global_metric}/lr-({Args.lr})\")\n\n jaynes.listen()\n","repo_name":"geyang/plan2vec","sub_path":"plan2vec_experiments/streetlearn/gt_neighbor/latent-2d.py","file_name":"latent-2d.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"} +{"seq_id":"16319735771","text":"#Task 1\r\nprint(\"Hello World\")\r\n#Task 2\r\nprint(\"Task 2\")\r\nhello = \"Hello World\"\r\nprint(hello)\r\n#Task 3\r\nprint(\"Task 3\")\r\ndef printString(text):\r\n print(text)\r\nprintString(\"Hi people!\")\r\n#Task 4\r\nprint(\"Task 4\")\r\ndef addingUp(a, b):\r\n print(a+b)\r\naddingUp(1,2)\r\n#Task 5\r\nprint(\"Task 5\")\r\ndef addingIfTrue(c, d, trueSum):\r\n if (trueSum):\r\n result = c + d\r\n else:\r\n result = c * d\r\n print(result)\r\naddingIfTrue(5,6,False)\r\n#Task 6\r\nprint(\"Task 6\")\r\ndef makeSureNoZeroes(e, f, trueSum):\r\n if (e == 0):\r\n result = f\r\n elif (f == 0):\r\n result = e\r\n else: \r\n if (trueSum):\r\n result = e+f\r\n else:\r\n result = e*f\r\n print(result)\r\nmakeSureNoZeroes(3,4,False)\r\n#Task 7\r\nprint(\"Task 7\")\r\nfor g in range(0,10):\r\n makeSureNoZeroes(g,4,True)\r\n#Task 8\r\nprint(\"Task 8\")\r\nnumberList = [1,2,3,4,5,6,7,8,9,10]\r\nfor h in range(0,10):\r\n makeSureNoZeroes(numberList[h],numberList[-(h+1)],False)\r\n#Task 9\r\nprint(\"Task 9\")\r\nfor i in numberList:\r\n print(i)\r\n#Task 10\r\nprint(\"Task 10\")\r\nvalueList = []*10\r\nfor j in range(0,10):\r\n valueList.append(j)\r\n for j in valueList:\r\n print(j*10)\r\n#Task 11\r\nprint(\"Task 11\")\r\nprint(\"Please enter the size of the list you want\")\r\nlistSize = int(input(\"> \"))\r\ninputList = []\r\nfor k in range(listSize):\r\n inputList.append(k)\r\n for k in inputList:\r\n print(inputList)\r\n#Task 12\r\nprint(\"Task 12\")\r\nfrom functools import partial\r\ndef doublingUp(l,m):\r\n return l*m\r\nprint(\"Enter the number you wish to double and treble\")\r\nuserNumber = int(input(\"> \"))\r\ndouble = partial(doublingUp, 2)\r\ntriple = partial(doublingUp, 3)\r\nprint(\"Double is\",str(double(userNumber)), \"and treble is\"\\\r\n,str(triple(userNumber)))\r\n#Task 13\r\nprint(\"Task 13\")\r\ndef checkNumbers(n,o):\r\n if (n > 21 and o > 21):\r\n result = 0\r\n print(\"Both busted!\")\r\n elif (n > o and n <= 21):\r\n result = n\r\n print(n, \"is the winner!\")\r\n else:\r\n result = o\r\n print(o, \"is the winner!\")\r\n return result\r\ncheckNumbers(22, 19)\r\n#Task 14\r\nprint(\"Task 14\")\r\ndef uniqueSum(p,q,r):\r\n if (p == q or p == r):\r\n return r\r\n elif (q == r):\r\n result = p+r\r\n print(result)\r\n return result\r\n elif (r == p or r == q):\r\n result = p + q\r\n print(result)\r\n return result\r\n elif (p == q and q == r):\r\n print(\"0\")\r\n return 0\r\n else:\r\n result = p+q+r\r\n print(result)\r\nprint(uniqueSum(1,3,2))","repo_name":"OSiddiqi/Python-exercises","sub_path":"Exercisebooklet.py","file_name":"Exercisebooklet.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32807256876","text":"import numpy as np\nimport cv2\nimport argparse\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--img_mask_path\", type=str)\n args = parser.parse_args()\n return args\n\ndef save_rain_mask(img_mask_path):\n img_npy = np.load('rain_masks/' + img_mask_path)\n img_npy = np.squeeze(img_npy, 1)\n img_npy = np.transpose(img_npy, (1, 2, 0))\n img_npy = img_npy * 255\n\n path_to_save = 'visualize_rain_masks/' + img_mask_path.split('.')[0] + '.jpg'\n print(\"path: \", path_to_save)\n cv2.imwrite(path_to_save, img_npy)\n\nif __name__ == '__main__':\n print(\"reached step 1\")\n args = get_args()\n print(\"args: \", args)\n save_rain_mask(args.img_mask_path)\n\n\n\n\n\n\n","repo_name":"jainnidhi55/RaindropRemoval","sub_path":"baseline/DeRaindrop-master/visualize_rain_mask.py","file_name":"visualize_rain_mask.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43142529919","text":"from Contracts.ITransport import ITransport\nfrom Contracts.IHandler import IHandler\nimport logging\nfrom Cache.GlobalVariables import GlobalVariables\nimport Constant.constant as const\n\n\nclass SignalrDataHandler(IHandler):\n __logger: logging.Logger\n __mqtt: ITransport\n __signalr: ITransport\n __globalVariables: GlobalVariables\n\n def __init__(self, log: logging.Logger, mqtt: ITransport, signalr: ITransport):\n self.__logger = log\n self.__mqtt = mqtt\n self.__globalVariables = GlobalVariables()\n self.__signalr = signalr\n\n def handler(self, item):\n if self.__globalVariables.AllowChangeCloudAccountFlag:\n return\n \n dorId = item[0]\n entity = item[1]\n data = item[2]\n \n if dorId != self.__globalVariables.DormitoryId:\n return\n \n self.__logger.debug(f\"handler receive signal data in {entity} is {data}\")\n print(f\"handler receive signal data in {entity} is {data}\")\n try:\n switcher = {\n const.SIGNALR_APP_COMMAND_ENTITY: self.__handler_entity_command\n }\n func = switcher.get(entity)\n func(data)\n except:\n self.__logger.error(\"data receive from signal invalid\")\n print(\"data receive from signal invalid\")\n return\n\n def __handler_entity_command(self, data):\n self.__mqtt.send(const.MQTT_CONTROL_TOPIC, data)\n","repo_name":"phanvanhai/RD_HC","sub_path":"Handler/SignalrDataHandler.py","file_name":"SignalrDataHandler.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33408151520","text":"import vk_requests\nimport oauth\nimport time\n\n# Авторизация\nlogin = 'login'\npwd = 'password'\napp_id = 123456 # ID приложения VK\nscopes = 2097151\n\n# Настройки\nalbum_id = ['wall', 'profile', 'saved']\ntarger_id = 123456 # ID цели\n\n# Переменные\nphotos_ids = []\ni = 0\n\n# Начало скрипта\nvkapi = vk_requests.create_api(app_id, login, pwd, api_version='5.44', timeout=10)\n\nfor album in album_id:\n photos_list = vkapi.photos.get(owner_id=targer_id, album_id=album, extended=0, count=1000)\n print(photos_list)\n for photos in photos_list['items']:\n photos_ids.append(photos['id'])\n\ntotal = len(photos_ids)\nprint('Count: '+str(total))\nprint('start')\nfor each_id in photos_ids:\n try:\n i+=1\n vkapi.likes.add(type='photo', owner_id=targer_id, item_id=each_id)\n finally:\n print(str(i)+'/'+str(total))\n time.sleep(1)\nprint('finish')","repo_name":"fadedDexofan/vkLiker","sub_path":"photo_liker.py","file_name":"photo_liker.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4203273270","text":"# Queue: First in First out\n\nfrom collections import deque\n\nclass Task:\n def __init__(self, name, cost):\n self.name = name\n self.cost = cost\n\nclass RoundRobin:\n def __init__(self):\n self.time = 0\n self.queue = deque()\n self.num_of_task, self.quantum = map(int, input().split())\n \n def enqueue(self, task):\n self.queue.append(task)\n \n def dequeue(self):\n return self.queue.popleft()\n\n\nrr = RoundRobin()\n\nfor _ in range(rr.num_of_task):\n input_task = input().split()\n rr.enqueue(Task(input_task[0], int(input_task[1])))\n\nwhile len(rr.queue) > 0:\n task = rr.dequeue()\n if task.cost <= rr.quantum:\n rr.time += task.cost\n print(\"%s %d\"%(task.name, rr.time))\n else:\n task.cost -= rr.quantum\n rr.time += rr.quantum\n rr.enqueue(task)\n","repo_name":"skyeanka/aoj-exercise","sub_path":"ALDS1/Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29900426408","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n \nimport datetime\nimport pytz\n \nfrom sqlalchemy.orm.session import object_session\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.util.langhelpers import symbol\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql.expression import select, cast, and_\nfrom sqlalchemy.sql.sqltypes import String\nfrom sqlalchemy.sql.functions import func\n \nfrom togudb.logger.db import LoggerEntry, ChangedField\n \ndef get_identity(obj):\n ''' Идентификатор объекта '''\n \n state = inspect(obj)\n mapper = inspect(state.class_)\n ids = []\n for pkey in mapper.primary_key:\n ids.append(getattr(obj, pkey.key))\n if len(ids) == 1:\n identity = unicode(ids[0])\n else:\n identity = unicode(tuple(ids)) \n \n return identity\n \ndef get_identity_expr(entity):\n ''' Идентификатор объекта (выражение)'''\n \n mapper = inspect(entity)\n \n pkeys = mapper.primary_key\n \n if len(pkeys) == 1:\n return cast(pkeys[0], String)\n \n else: \n return '(' + func.concat_ws(', ', *pkeys) + ')' \n \ndef get_entity_name(obj):\n return inspect(obj).class_.__name__\n \nclass LoggedEntity(object):\n '''\n Класс для журналирования изменений.\n Для использования добавить в родительские классы модели sqlalchemy.\n Например:\n class MyModel(LoggedEntity, Base): \n '''\n \n def _get_changed_fields(self, creating=False):\n state = inspect(self)\n mapper = inspect(state.class_)\n fields = []\n for attr in state.attrs:\n if mapper.attrs[attr.key].info.get('logged', True) and attr.history.has_changes():\n old = None if creating else state.committed_state.get(attr.key, None)\n new = attr.value\n fields.append((unicode(attr.key),\n self._get_log_field_value(old),\n self._get_log_field_value(new)))\n \n return fields \n \n def _get_log_field_value(self, value):\n if isinstance(value, list):\n return '[{}]'.format(','.join(self._get_log_field_value(x) for x in value))\n else:\n return unicode(value)\n \n def _get_log_related_objects(self):\n state = inspect(self)\n mapper = inspect(state.class_)\n rels = set()\n for rel in mapper.relationships: \n if rel.direction == symbol('MANYTOONE') and rel.info.get('logged', True):\n attr = state.attrs[rel.key]\n if isinstance(attr.value, list):\n vals = attr.value\n elif attr.value is None:\n vals = []\n else:\n vals = [attr.value]\n \n for val in vals:\n rels.add(val)\n return list(rels) \n \n def _save_log(self,session, event_type, fields=None):\n entry = LoggerEntry(\n timestamp = datetime.datetime.now(pytz.utc),\n type = event_type,\n entity = get_entity_name(self),\n identity = get_identity(self),\n username = getattr(session, 'username', None),\n ip = getattr(session, 'ip', None),\n related_objects = [\n '{}|{}'.format(\n get_entity_name(val),\n get_identity(val)) for val in self._get_log_related_objects()\n ],\n )\n \n session.add(entry)\n \n if fields is not None:\n for f in fields:\n field = ChangedField(\n entry = entry,\n name = f[0],\n value_old = f[1],\n value_new = f[2],\n value_old_pretty = f[1],\n value_new_pretty = f[2], \n )\n session.add(field)\n \n def log_created(self, session): \n fields = self._get_changed_fields(True)\n self._save_log(session, 'create', fields)\n \n def log_changed(self, session, event_type='change'):\n fields = self._get_changed_fields(True)\n \n if not fields:\n return\n \n self._save_log(session, 'change', fields) \n \n def log_deleted(self, session):\n self._save_log(session, 'delete') \n \n @hybrid_property\n def created_timestamp(self): \n ''' Дата и время создания '''\n \n return object_session(self)\\\n .query(LoggerEntry.timestamp)\\\n .filter(LoggerEntry.entity == get_entity_name(self),\n LoggerEntry.identity == get_identity(self),\n LoggerEntry.type == 'create')\\\n .scalar()\n \n @created_timestamp.expression\n def created_timestamp(self): \n ''' Дата и время создания (выражение)'''\n \n return select([LoggerEntry.timestamp])\\\n .where(and_(\n LoggerEntry.type =='create',\n LoggerEntry.entity == self.__name__,\n LoggerEntry.identity == get_identity_expr(self)))\\\n .correlate(self)\\\n .as_scalar()\\\n .label('created_timestamp')\n","repo_name":"esquonk/examples","sub_path":"logging_mixin.py","file_name":"logging_mixin.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"535130263","text":"from math import sqrt\nimport heapq\n\n\nclass Solution:\n def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n\n heap = []\n heapq.heapify(heap)\n\n constant = (0, 0)\n\n ans = []\n\n for x in range(0, len(points)):\n pt = tuple(points[x])\n\n pt_dist = self.eucDist(constant, pt)\n entry = (pt_dist, pt)\n heapq.heappush(heap, entry)\n\n for x in range(K):\n popped = heapq.heappop(heap)\n ans.append(list(popped[1]))\n\n return ans\n\n def eucDist(self, constant, pt):\n ans = sqrt(\n (constant[0] - pt[0]) ** 2\n +\n (constant[1] - pt[1]) ** 2\n )\n\n return ans\n\n","repo_name":"SajinKowserSK/algorithms-practice","sub_path":"leetcode old&new/973. K Closest Points to Origin.py","file_name":"973. K Closest Points to Origin.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"34086886773","text":"from flask import *\nfrom models.client import *\nfrom models.marchandises import *\nfrom models.fournisseur import *\nfrom models.historique import *\nfrom mysql.connector import cursor\nfrom models.admin import *\n\napp = Flask(__name__)\napp.secret_key = \"super secret key\"\n\nconnection = mysql.connector.connect(host=\"localhost\",\n user=\"root\",\n password=\"\",\n database=\"gestion_stock\")\n\ncursor = connection.cursor()\n\n\n@app.route('/logout')\ndef logout():\n session.pop('loggedin', None)\n session.pop('adminName', None)\n return redirect(url_for('login'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n msg = ''\n if request.method == 'POST':\n adminName = request.form['adminName']\n adminPassword = request.form['adminPassword']\n cursor.execute('SELECT * FROM admin WHERE adminName= %s AND adminPassword=%s', (adminName, adminPassword), )\n record = cursor.fetchone()\n if record:\n session['loggedin'] = True\n session['adminName'] = record[1]\n return redirect(url_for('home'))\n else:\n msg = 'Name/password incorrect!'\n return render_template('authenticate.html',\n msg=msg)\n\n\n@app.route('/')\ndef authenticate():\n return render_template('authenticate.html')\n\n\n@app.route('/home')\ndef home():\n row = liste_marchandise()\n fournisseur = liste_fournisseur()\n client = liste_clients()\n return render_template('home.html', adminName=session['adminName'], row=row, fournisseur=fournisseur, client=client)\n\n\n@app.route('/liste_client')\ndef liste_client():\n row = liste_clients()\n return render_template('liste_clients.html', row=row)\n\n\n@app.route('/create_client', methods=['POST'])\ndef create_client():\n nom = request.form['nom']\n adresse = request.form['adresse']\n telephone = request.form['numéro']\n email = request.form['email']\n insert_client(nom, adresse, telephone, email)\n return redirect(\"/liste_client\")\n\n\n@app.route('/liste_fournisseur')\ndef liste_four():\n row = liste_fournisseur()\n return render_template('liste_fournisseur.html', row=row)\n\n\n@app.route('/create_fournisseur', methods=['POST'])\ndef create_fournisseur():\n nom = request.form['nom']\n adresse = request.form['adresse']\n telephone = request.form['numéro']\n email = request.form['email']\n creer_fournisseur(nom, adresse, telephone, email)\n return redirect(\"/liste_fournisseur\")\n\n\n@app.route('/create', methods=['POST'])\ndef create_item():\n Ref_Four = request.form['Ref_Four']\n Date_fourni = request.form['Date_fourni']\n Ref_Mar = request.form['Ref_Mar']\n Marchandises = request.form['Marchandises']\n Quantité = request.form['Quantité']\n inserer_marchandise(Ref_Mar, Marchandises, Quantité)\n insert_his_Fourni(Date_fourni, Quantité, Ref_Four, Ref_Mar)\n return redirect('/home')\n\n\n@app.route('/retrieve', methods=['POST'])\ndef retrieve():\n Ref_Client = request.form['Ref_Client']\n Date_Achat = request.form['Date_Achat']\n Quantité = 0 - int(request.form['Quantité'])\n Ref_Mar = request.form['Ref_Mar']\n insert_his_achat(Date_Achat, Quantité, Ref_Client, Ref_Mar)\n update_quantite(Quantité, Ref_Mar)\n return redirect('/home')\n\n\n@app.route('/supply', methods=['POST'])\ndef supply():\n Ref_Four = request.form['Ref_Four']\n Date_fourni = request.form['Date_fourni']\n Quantité = request.form['Quantité']\n Ref_Mar = request.form['Ref_Mar']\n insert_his_Fourni(Date_fourni, Quantité, Ref_Four, Ref_Mar)\n update_quantite(Quantité, Ref_Mar)\n return redirect('/home')\n\n\n@app.route('/Historique')\ndef historique():\n approvi = approvisionnements()\n achat = achats()\n return render_template('historique.html', approvi=approvi, achat=achat)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"gathluc/Gestion_de_stock","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13243677493","text":"from qgis.PyQt.QtCore import Qt\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtWidgets import QDockWidget, QSizePolicy, QLabel, QVBoxLayout, QWidget, QHBoxLayout, QPushButton\nfrom qgis.core import QgsMapLayer, QgsProject, QgsRectangle\nfrom qgis.gui import *\n\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.PyQt.QtCore import QEvent\n\nfrom qgis.PyQt.QtWidgets import QApplication, QDialog, QLabel, QVBoxLayout, QHBoxLayout, QLineEdit, QAction, QToolBar\n\ndef getAllVisibleLayers():\n project = QgsProject.instance()\n layer_tree = project.layerTreeRoot()\n layer_list = layer_tree.findLayers()\n visible_layers = []\n for layer in layer_list:\n if layer.isVisible():\n visible_layers.append(layer.layer())\n return visible_layers\n\nclass MinimapDock(QDockWidget):\n\n def __init__(self, iface):\n super().__init__()\n\n # Set some properties for the dock widget\n self.setWindowTitle(\"Minimap\")\n self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea )\n self.canvas = QgsMapCanvas()\n self.canvas.setCanvasColor(Qt.white)\n self.setFixedSize(400, 300)\n self.iface = iface\n\n # Create a widget to hold the contents of the dock widget\n self.contents = QWidget()\n self.layout = QVBoxLayout()\n self.contents.setLayout(self.layout)\n\n main_canvas = self.iface.mapCanvas()\n crs = main_canvas.mapSettings().destinationCrs()\n\n self.canvas.setDestinationCrs(crs)\n layers = getAllVisibleLayers()\n active_layer = self.iface.activeLayer()\n if active_layer is not None:\n self.canvas.setExtent(active_layer.extent())\n self.canvas.setLayers(layers)\n \n # Add the widget to the dock\n self.setWidget(self.canvas)\n\n\n self.actionZoomIn = QAction(QIcon(':/images/actions/zoom-in.png'), \"Zoom in\", self)\n self.actionZoomOut = QAction(QIcon(':/images/actions/zoom-out.png'), \"Zoom out\", self)\n self.actionPan = QAction(QIcon(':/images/actions/pan.png'), \"Pan\", self)\n self.actionZoomIn.setCheckable(True)\n self.actionZoomOut.setCheckable(True)\n self.actionPan.setCheckable(True)\n self.actionZoomIn.triggered.connect(self.zoomIn)\n self.actionZoomOut.triggered.connect(self.zoomOut)\n self.actionPan.triggered.connect(self.pan)\n # create the map tools\n self.toolPan = QgsMapToolPan(self.canvas)\n self.toolPan.setAction(self.actionPan)\n self.toolZoomIn = QgsMapToolZoom(self.canvas, False) # false = in\n self.toolZoomIn.setAction(self.actionZoomIn)\n self.toolZoomOut = QgsMapToolZoom(self.canvas, True) # true = out\n self.toolZoomOut.setAction(self.actionZoomOut)\n self.pan()\n\n \n\n\n def zoomIn(self):\n self.canvas.setMapTool(self.toolZoomIn)\n\n def zoomOut(self):\n self.canvas.setMapTool(self.toolZoomOut)\n\n def pan(self):\n self.canvas.setMapTool(self.toolPan)\n def mist(self, tett=False):\n pass\n \nclass MinimapPlugin:\n\n def __init__(self, iface):\n self.iface = iface\n self.layers = getAllVisibleLayers()\n self.my_dock = None\n\n def initGui(self):\n # Create a new dock widget instance\n self.my_dock = MinimapDock(self.iface)\n\n # Add the dock widget to the interface\n # Create a new toolbar instance\n self.toolbar = QToolBar(\"Minimap toolbar\")\n\n # Add a button to the toolbar\n self.action = QAction(QIcon(\"icon.png\"), \"Turn on minimap\", self.toolbar)\n self.action.setCheckable(True)\n self.toolbar.addAction(self.action)\n\n self.refresh_action = QAction(QIcon(\"icon.png\"),'Refresh', self.toolbar)\n self.toolbar.addAction(self.refresh_action)\n\n # Add the toolbar to the interface\n self.iface.addToolBar(self.toolbar)\n self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.my_dock)\n\n self.action.toggled.connect(self.on_button_toggled)\n self.refresh_action.triggered.connect(self.refresh)\n\n self.toolbar.show()\n\n def unload(self):\n self.my_dock.deleteLater()\n self.iface.removeDockWidget(self.my_dock)\n self.iface.removePluginMenu(\"My Dock\", self.action)\n self.iface.removeToolBarIcon(self.action)\n self.iface.removeToolBarIcon(self.refresh_action)\n\n self.toolbar.deleteLater()\n self.iface.mainWindow().removeToolBar(self.toolbar)\n\n def on_button_toggled(self, checked):\n if checked:\n self.my_dock.show()\n else:\n self.my_dock.hide()\n \n def refresh(self):\n self.my_dock.canvas.refresh()\n layers = getAllVisibleLayers()\n main_canvas = self.iface.mapCanvas()\n crs = main_canvas.mapSettings().destinationCrs()\n self.my_dock.canvas.setDestinationCrs(crs)\n self.my_dock.canvas.setLayers(layers)\n active_layer = self.iface.activeLayer()","repo_name":"mateuszrydzik/qgis-minimap","sub_path":"minimap.py","file_name":"minimap.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74898298297","text":"from geoprocessor.commands.abstract.AbstractCommand import AbstractCommand\n\nfrom geoprocessor.core.CommandError import CommandError\nfrom geoprocessor.core.CommandLogRecord import CommandLogRecord\nfrom geoprocessor.core.CommandParameterError import CommandParameterError\nfrom geoprocessor.core.CommandParameterMetadata import CommandParameterMetadata\nfrom geoprocessor.core.CommandPhaseType import CommandPhaseType\nfrom geoprocessor.core.CommandStatusType import CommandStatusType\nfrom geoprocessor.core.GeoLayer import GeoLayer\nfrom geoprocessor.core.QGISAlgorithmProcessingFeedbackHandler import QgisAlgorithmProcessingFeedbackHandler\nfrom geoprocessor.core.VectorGeoLayer import VectorGeoLayer\n\nimport geoprocessor.util.command_util as command_util\nimport geoprocessor.util.qgis_util as qgis_util\nimport geoprocessor.util.validator_util as validator_util\n\nimport logging\n\n# from plugins.processing.tools import general\n\n\nclass SetGeoLayerCRS(AbstractCommand):\n \"\"\"\n Sets a GeoLayer's coordinate reference system (CRS).\n\n * If the GeoLayer already has a CRS, this command will reset the GeoLayer's CRS to the new CRS.\n\n Command Parameters:\n\n * GeoLayerID (str, required): the ID of the input GeoLayer, the layer to set the CRS.\n * CRS (str, EPSG/ESRI code, required): the CRS to set for the GeoLayer.\n \"\"\"\n\n # Define the command parameters.\n __command_parameter_metadata: [CommandParameterMetadata] = [\n CommandParameterMetadata(\"GeoLayerID\", type(\"\")),\n CommandParameterMetadata(\"CRS\", type(\"\"))]\n\n # Command metadata for command editor display.\n __command_metadata = dict()\n __command_metadata['Description'] = \"Set the coordinate reference system (CRS) of a GeoLayer.\"\n __command_metadata['EditorType'] = \"Simple\"\n\n # Command Parameter Metadata.\n __parameter_input_metadata = dict()\n # GeoLayerID\n __parameter_input_metadata['GeoLayerID.Description'] = \"GeoLayer identifier\"\n __parameter_input_metadata['GeoLayerID.Label'] = \"GeoLayerID\"\n __parameter_input_metadata['GeoLayerID.Required'] = True\n __parameter_input_metadata['GeoLayerID.Tooltip'] = \"The ID of the GeoLayer.\"\n # CRS\n __parameter_input_metadata['CRS.Description'] = \"coordinate references system\"\n __parameter_input_metadata['CRS.Label'] = \"CRS\"\n __parameter_input_metadata['CRS.Required'] = True\n __parameter_input_metadata['CRS.Tooltip'] = (\n \"The coordinate reference system of the GeoLayer. \"\n \"EPSG or ESRI code format required (e.g. EPSG:4326, EPSG:26913, ESRI:102003).\")\n\n def __init__(self) -> None:\n \"\"\"\n Initialize the command.\n \"\"\"\n\n # AbstractCommand data.\n super().__init__()\n self.command_name = \"SetGeoLayerCRS\"\n self.command_parameter_metadata = self.__command_parameter_metadata\n\n # Command metadata for command editor display.\n self.command_metadata = self.__command_metadata\n\n # Command Parameter Metadata.\n self.parameter_input_metadata = self.__parameter_input_metadata\n\n # Class data.\n self.warning_count = 0\n self.logger = logging.getLogger(__name__)\n\n def check_command_parameters(self, command_parameters: dict) -> None:\n \"\"\"\n Check the command parameters for validity.\n\n Args:\n command_parameters: the dictionary of command parameters to check (key:string_value)\n\n Returns: None.\n\n Raises:\n ValueError if any parameters are invalid or do not have a valid value.\n The command status messages for initialization are populated with validation messages.\n \"\"\"\n warning_message = \"\"\n\n # Check that required parameters are non-empty, non-None strings.\n required_parameters = command_util.get_required_parameter_names(self)\n for parameter in required_parameters:\n parameter_value = self.get_parameter_value(parameter_name=parameter, command_parameters=command_parameters)\n if not validator_util.validate_string(parameter_value, False, False):\n message = \"Required {} parameter has no value.\".format(parameter)\n recommendation = \"Specify the {} parameter.\".format(parameter)\n warning_message += \"\\n\" + message\n self.command_status.add_to_log(CommandPhaseType.INITIALIZATION,\n CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))\n\n # Check for unrecognized parameters.\n # This returns a message that can be appended to the warning, which if non-empty triggers an exception below.\n warning_message = command_util.validate_command_parameter_names(self, warning_message)\n\n # If any warnings were generated, throw an exception.\n if len(warning_message) > 0:\n self.logger.warning(warning_message)\n raise CommandParameterError(warning_message)\n else:\n # Refresh the phase severity.\n self.command_status.refresh_phase_severity(CommandPhaseType.INITIALIZATION, CommandStatusType.SUCCESS)\n\n def check_runtime_data(self, geolayer_id: str, crs_code: str) -> bool:\n \"\"\"\n Checks the following:\n * The ID of the input GeoLayer is an actual GeoLayer (if not, log an error message & do not continue.)\n * The CRS is a valid coordinate reference system code.\n * The CRS is difference than the GeoLayer's CRS.\n\n Args:\n geolayer_id (str): the ID of the GeoLayer to add the new attribute\n crs_code (str): the CRS to set for the GeoLayer (EPSG or ESRI code)\n\n Returns:\n set_crs: Boolean. If TRUE, the CRS should be set. If FALSE, a check has failed & the CRS should not be set.\n \"\"\"\n\n # Boolean to determine if the CRS should be set. Set to TRUE until one or many checks fail.\n set_crs = True\n\n # Boolean to determine if the input GeoLayer id is a valid GeoLayer ID. Set to TRUE until proved False.\n input_geolayer_exists = True\n\n if self.command_processor.get_geolayer(geolayer_id) is None:\n # If the input GeoLayer does not exist, FAILURE.\n set_crs = False\n input_geolayer_exists = False\n self.warning_count += 1\n message = 'The input GeoLayer ID ({}) does not exist.'.format(geolayer_id)\n recommendation = 'Specify a valid GeoLayerID.'\n self.logger.warning(message)\n self.command_status.add_to_log(CommandPhaseType.RUN,\n CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))\n\n if qgis_util.parse_qgs_crs(crs_code) is None:\n # If the input CRS code is not a valid code, FAILURE.\n set_crs = False\n self.warning_count += 1\n message = 'The input CRS ({}) is not a valid CRS code.'.format(crs_code)\n recommendation = 'Specify a valid CRS code (EPSG codes are an approved format).'\n self.logger.warning(message)\n self.command_status.add_to_log(CommandPhaseType.RUN,\n CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))\n\n # If the input CRS code is that same as the GeoLayer's current CRS, raise a WARNING.\n if input_geolayer_exists and self.command_processor.get_geolayer(geolayer_id).get_crs_code():\n if crs_code.upper() == self.command_processor.get_geolayer(geolayer_id).get_crs_code().upper():\n set_crs = False\n self.warning_count += 1\n message = 'The input GeoLayer ({}) already is projected to the input' \\\n ' CRS ({}).'.format(geolayer_id, crs_code)\n recommendation = 'The SetGeoLayerCRS command will not run. Specify a different CRS code.'\n self.logger.warning(message)\n self.command_status.add_to_log(CommandPhaseType.RUN,\n CommandLogRecord(CommandStatusType.WARNING, message, recommendation))\n\n # Return the Boolean to determine if the crs should be set. If TRUE, all checks passed.\n # If FALSE, one or many checks failed.\n return set_crs\n\n def run_command(self) -> None:\n \"\"\"\n Run the command. Set the GeoLayer coordinate reference system.\n\n Returns:\n None.\n\n Raises:\n RuntimeError if any warnings occurred during run_command method.\n \"\"\"\n\n self.warning_count = 0\n\n # Obtain the parameter values.\n # noinspection PyPep8Naming\n pv_GeoLayerID = self.get_parameter_value(\"GeoLayerID\")\n # noinspection PyPep8Naming\n pv_CRS = self.get_parameter_value(\"CRS\")\n\n # Convert the pv_GeoLayerID parameter to expand for ${Property} syntax.\n # noinspection PyPep8Naming\n pv_GeoLayerID = self.command_processor.expand_parameter_value(pv_GeoLayerID, self)\n\n # Run the checks on the parameter values. Only continue if the checks passed.\n if self.check_runtime_data(pv_GeoLayerID, pv_CRS):\n # Run the process.\n # noinspection PyBroadException\n try:\n # Get the input GeoLayer.\n input_geolayer = self.command_processor.get_geolayer(pv_GeoLayerID)\n\n # Check if the input GeoLayer already has an assigned CRS.\n if input_geolayer.get_crs_code():\n # Reproject the GeoLayer.\n alg_parameters = {\n \"INPUT\": input_geolayer.qgs_layer,\n \"TARGET_CRS\": pv_CRS,\n \"OUTPUT\": \"memory:\"\n }\n feedback_handler = QgisAlgorithmProcessingFeedbackHandler(self)\n reprojected_output = qgis_util.run_processing(processor=self.command_processor.qgis_processor,\n algorithm=\"qgis:reprojectlayer\",\n algorithm_parameters=alg_parameters,\n feedback_handler=feedback_handler)\n self.warning_count += feedback_handler.get_warning_count()\n\n # Create a new GeoLayer and add it to the GeoProcessor's geolayers list.\n\n # In QGIS 2 the reprojected[\"OUTPUT\"] returned the full file pathname of the memory output layer\n # (saved in a QGIS temporary folder)\n # qgs_vector_layer = qgis_util.read_qgsvectorlayer_from_file(reprojected[\"OUTPUT\"])\n # new_geolayer = VectorGeoLayer(input_geolayer.id, qgs_vector_layer, GeoLayer.SOURCE_MEMORY)\n\n # In QGIS 3 the reprojected[\"OUTPUT\"] returns the QGS vector layer object:\n # - use the same name and description as the original\n new_geolayer = VectorGeoLayer(geolayer_id=input_geolayer.id,\n qgs_vector_layer=reprojected_output[\"OUTPUT\"],\n name=input_geolayer.name,\n description=input_geolayer.description,\n input_path_full=GeoLayer.SOURCE_MEMORY,\n input_path=GeoLayer.SOURCE_MEMORY)\n self.command_processor.add_geolayer(new_geolayer)\n\n else:\n alg_parameters = {\n \"INPUT\": input_geolayer.qgs_vector_layer,\n \"CRS\": pv_CRS\n }\n feedback_handler = QgisAlgorithmProcessingFeedbackHandler(self)\n reprojected_output = qgis_util.run_processing(processor=self.command_processor.qgis_processor,\n algorithm=\"qgis:definecurrentprojection\",\n algorithm_parameters=alg_parameters,\n feedback_handler=feedback_handler)\n self.warning_count += feedback_handler.get_warning_count()\n\n except Exception:\n # Raise an exception if an unexpected error occurs during the process.\n self.warning_count += 1\n message = \"Unexpected error setting CRS ({}) of GeoLayer ({})\".format(pv_CRS, pv_GeoLayerID)\n recommendation = \"Check the log file for details.\"\n self.logger.warning(message, exc_info=True)\n self.command_status.add_to_log(CommandPhaseType.RUN,\n CommandLogRecord(CommandStatusType.FAILURE, message, recommendation))\n\n # Determine success of command processing. Raise Runtime Error if any errors occurred.\n if self.warning_count > 0:\n message = \"There were {} warnings processing the command.\".format(self.warning_count)\n raise CommandError(message)\n\n else:\n # Set command status type as SUCCESS if there are no errors.\n self.command_status.refresh_phase_severity(CommandPhaseType.RUN, CommandStatusType.SUCCESS)\n","repo_name":"OpenWaterFoundation/owf-app-geoprocessor-python","sub_path":"src/geoprocessor/commands/vector/SetGeoLayerCRS.py","file_name":"SetGeoLayerCRS.py","file_ext":"py","file_size_in_byte":13392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"16444898543","text":"# -*- coding: utf-8 -*-\nimport click\nimport logging\nimport pandas as pd\nfrom pathlib import Path\nimport os\nfrom dotenv import find_dotenv, load_dotenv\nimport preprocessing\n\n\n\ndef clean_trailing_spaces(df:pd.DataFrame)->pd.DataFrame:\n '''\n Retrieves columns of type ''object'' (string) and strips trailing spaces in their values\n '''\n index_obj_cols = df.select_dtypes(include='object').columns\n for col in index_obj_cols:\n df[col] = df[col].str.strip()\n return df\n\ndef clean_missing(df: pd.DataFrame):\n pass\n\ndef clean_duplicates(data_df:pd.DataFrame)->pd.DataFrame:\n tr_df = data_df.pipe(clean_trailing_spaces)\n return tr_df\n\ndef clean_products(data_df:pd.DataFrame)->pd.DataFrame:\n pass\n\ndef clean_all(data_df:pd.DataFrame)->pd.DataFrame:\n tr_df = data_df.pipe(clean_trailing_spaces) \\\n .pipe(clean_duplicates)\\\n .pipe(clean_products)\n \n return tr_df\n\n@click.command()\n@click.argument('input_filepath', type=click.Path(exists=True))\n@click.argument('output_filepath', type=click.Path())\ndef main(input_filepath, output_filepath):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n \n logger = logging.getLogger(__name__)\n logger.info('Making final data set from raw data')\n file_name = ['Agribalise_Detail ingredient.csv']\n\n # Retrieve all files in from raw data folder and make basic cleaning\n data_df = pd.read_csv(os.path.join(input_filepath,file_name))\n data_df = clean_all(data_df)\n\n \n logging.info(f'Data set ready for training')\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n","repo_name":"aimorenov/fullstack_datascience","sub_path":"06_Personal_project_agribalyse/src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71552149496","text":"import json\nfrom pathlib import Path\n\nfrom ._version import __version__\nfrom .handlers import setup_handlers\n\n\n\nHERE = Path(__file__).parent.resolve()\n\n\ndef _jupyter_server_extension_points():\n return [{\n \"module\": \"jupyterlab_telemetry\"\n }]\n\n\ndef _load_jupyter_server_extension(server_app):\n \"\"\"Registers the API handler to receive HTTP requests from the frontend extension.\n\n Parameters\n ----------\n server_app: jupyterlab.labapp.LabApp\n JupyterLab application instance\n \"\"\"\n name = \"jupyterlab_telemetry\"\n setup_handlers(server_app.web_app)\n server_app.log.info(\"Registered {name} server extension\")\n\n\n# For backward compatibility with notebook server - useful for Binder/JupyterHub\nload_jupyter_server_extension = _load_jupyter_server_extension\n\n","repo_name":"jupyterlab/jupyterlab-telemetry","sub_path":"jupyterlab_telemetry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"22"} +{"seq_id":"10493219740","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom _signal import *\nfrom scipy.io import wavfile\nfrom scipy.signal import decimate, resample\n\nBITS_PER_SAMPLE = 5\nSAMPLING_RATE = 8000\nBER = 1e-5\nSIMULATION_SAMPLES = 80000\nTEST_TONE = 1e2 # Hz\nWAV_BITS = 16\nFILE = \"./speech.wav\"\n\ndef round_truncate_sample(sample, n_in, n_out):\n '''Takes int type sample of n_in bits, reduces it n_out bits with rounding\n '''\n delta_n = n_in-n_out\n sum_of_truncated = 0\n for n in range(delta_n):\n sum_of_truncated += sample&(2**n)\n if sum_of_truncated < 2**(delta_n-1):\n return sample >> delta_n\n else:\n return (sample >> delta_n) + 1\n\nv_round_truncate_sample = np.vectorize(round_truncate_sample, otypes=[np.int16])\n\ndef corrupt(sample, ber=BER, bits=BITS_PER_SAMPLE):\n '''Takes int type sample, corrupts individual bits of sample with\n probability ber\n '''\n corrupted = 0\n for bit in range(BITS_PER_SAMPLE):\n corrupted += ((2**bit)&sample)^(np.random.binomial(n=1,p=ber)< abs(audio_max) else abs(audio_max)\naudio_bits = math.log(2.0*audio_peak, 2)\neff_bits = int(math.ceil(audio_bits))\n# fix audio to maximize utilization of range available with eff_bits\n# i.e. remove DC offset and rescale so audio peak hits maximum code for ceil(eff_bits)\nfill_range_gain = (2**(eff_bits-1)-1)/float(audio_peak)\naudio = np.array((np.rint((audio-average)*fill_range_gain) + 2**(eff_bits-1)), dtype=np.int16)\n\nprint(\"* Source audio effective bits = %f,\\tceil(eff_bits) = %d\"%(audio_bits, eff_bits))\n\n\n#RATE = 10\n#N = 0\n#ber = RATE/float(SAMPLING_RATE)\n\n#for n in range(BITS_PER_SAMPLE):\n# print(\"* Generating autio with %d corruptions/s on bit %d of sample\"%(RATE, n))\n# reduced_audio = (audio-average+2**(eff_bits-1))>>(eff_bits-BITS_PER_SAMPLE)\n# reduced_audio = np.array(reduced_audio, dtype=np.int16)\n# reduced_audio = v_corrupt_nth_bit(reduced_audio, n=n, ber=ber)\n# reduced_audio = (reduced_audio << (WAV_BITS - BITS_PER_SAMPLE-1)) - 2**(WAV_BITS-2)\n#\n# wavfile.write(\"./speech_%dbit_bit%d_%d_per_sec.wav\"%(BITS_PER_SAMPLE,n,RATE), rate=fs, data=reduced_audio)\n\n#for rate_exp in range(-3,2,1):\n# N = 7\n# rate = (10.0**rate_exp)\n# ber = rate/float(SAMPLING_RATE)\n# print(\"* Generating autio with %.3f corruptions/s on bit %d of sample\"%(rate, N))\n# reduced_audio = (audio-average+2**(eff_bits-1))>>(eff_bits-BITS_PER_SAMPLE)\n# reduced_audio = np.array(reduced_audio, dtype=np.int16)\n# reduced_audio = v_corrupt_nth_bit(reduced_audio, n=N, ber=ber)\n# reduced_audio = (reduced_audio << (WAV_BITS - BITS_PER_SAMPLE-1)) - 2**(WAV_BITS-2)\n#\n# wavfile.write(\"./speech_%dbit_bit%d_%.3f_per_sec.wav\"%(BITS_PER_SAMPLE,N,rate), rate=fs, data=reduced_audio)\n\nprint(\"Generating audio with truncated sample sizes and decimated sampling rate\")\nfor decim_factor in [1,2]:\n if decim_factor != 1:\n _audio = decimate(x=audio, q=decim_factor)\n _audio = np.array(np.rint(_audio), dtype=np.int16)\n else:\n _audio = audio\n for n_bits in range(4,11,1):\n reduced_audio = v_round_truncate_sample(_audio, n_in=eff_bits, n_out=n_bits)\n reduced_audio = np.array(reduced_audio, dtype=np.int16)\n #reduced_audio = v_corrupt(reduced_audio)\n reduced_audio = (reduced_audio << (WAV_BITS - n_bits - 1)) - 2**(WAV_BITS-2)\n print(\"* sampling rate = %d Hz,\\tnumber of bits = %d\"%(fs/decim_factor,n_bits))\n wavfile.write(\"./speech_%d_Hz_%d_bits.wav\"%(fs/decim_factor,n_bits), rate=fs/decim_factor, data=reduced_audio)\n\n\ntime = np.arange(len(audio))/float(fs)\n\n#plt.plot(time, audio)\nplt.plot(time, reduced_audio)\nplt.show()\n\n\n","repo_name":"nielscol/radiocomms","sub_path":"old/generate_audio_error_by_nsb.py","file_name":"generate_audio_error_by_nsb.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6423935519","text":"'''\nhttps://www.acmicpc.net/problem/11726\n'''\n\n# 제출 답안 2\nimport sys\nn = int(sys.stdin.readline())\na,b = 1,1\nfor i in range(n):\n a,b = b,a+b\nprint(a%10007)\n\n# 제출 답안 1\nimport sys\ndp = [0,1,2]\nn = int(sys.stdin.readline())\nfor i in range(3,n+1):\n dp.append(dp[i-2]+dp[i-1])\nprint(dp[n]%10007)\n","repo_name":"Hankyul-k/BOJ","sub_path":"silver3/11726_2xN타일링.py","file_name":"11726_2xN타일링.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72173951417","text":"import requests\nimport base64\nimport configparser\n\n\n# Load configuration file\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\n\ndef send(no):\n url=config['DEFAULT2']['URL']\n auth_token=config['DEFAULT2']['AUTH_TOKEN']\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept-Language\": \"en-US\"\n }\n data = {\n \"identity\": {\n \"type\": \"number\",\n \"endpoint\": no,\n \"message\": \"make lode\"\n },\n \"method\": \"sms\"\n }\n\n # Encode the authentication token in base64\n auth_token_bytes = auth_token.encode('utf-8')\n encoded_auth_token = base64.b64encode(auth_token_bytes).decode('utf-8')\n auth_header = f\"Basic {encoded_auth_token}\"\n\n headers['Authorization'] = auth_header\n\n response = requests.post(url, headers=headers, json=data)\n\n if response.status_code == 200:\n print(\"SMS verification request sent successfully.\")\n \n else:\n print(f\"Failed to send SMS verification request. Status code: {response.status_code}\")\n print(response.text)\n\n\ndef verify(no,otp):\n \n urll = config.get('DEFAULT3', 'URL')\n # Replace {no} with the actual value in the URL\n url = urll.replace('{no}', no)\n auth_token=config['DEFAULT3']['AUTH_TOKEN']\n headers = {\n \"Content-Type\": \"application/json\"\n }\n data = {\n \"method\": \"sms\",\n \"sms\": {\n \"code\": otp\n }\n }\n\n # Encode the authentication token in base64\n auth_token_bytes = auth_token.encode('utf-8')\n encoded_auth_token = base64.b64encode(auth_token_bytes).decode('utf-8')\n auth_header = f\"Basic {encoded_auth_token}\"\n\n headers['Authorization'] = auth_header\n\n response = requests.put(url, headers=headers, json=data)\n\n if response.status_code == 200:\n print(f\"Phone number {no} Verified\")\n \n else:\n print(f\"Failed to verify phone number. Status code: {response.status_code}\")\n \n\n","repo_name":"Risriddle/sendVerifyOTP","sub_path":"msg.py","file_name":"msg.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11788917335","text":"\"\"\"Create UMAP plots from a directory of DamID deseq data.\"\"\"\n\nimport collections\nimport os\nimport math\nfrom functools import reduce\n\nimport hdbscan\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport umap\n\nsns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\nDIR = 'deseq2 datasets/'\nLOG2_COLUMN = 2\n\n_Result = collections.namedtuple('ClusteringResult', 'embedding clusterable_embedding labels')\n\nclass Result(object):\n\n def __init__(self, embedding=None, clusterable_embedding=None, labels=None, figure=None):\n self.embedding = embedding\n self.clusterable_embedding = clusterable_embedding\n self.labels = labels\n self.figure = figure\n\n\ndef plot_umap(embedding,\n data,\n protein,\n fig,\n ax,\n result,\n alpha=0.03,\n cmap='RdYlBu_r',\n vmin=None,\n vmax=None,\n clip=False,\n auto_color_scale=True,\n **kwargs):\n \"\"\"Dispatch to plotting log2 intensity over 2D representation or histogram of clustered GATC density\"\"\"\n if protein == 'density':\n plot_density_umap(embedding, fig=fig, ax=ax)\n elif protein == 'cluster':\n plot_cluster(embedding, data, fig=fig, ax=ax, result=result)\n else:\n if auto_color_scale:\n vmin = data.min().min()\n vmax = data.max().max()\n plot_chrom_umap(embedding,\n data,\n protein,\n fig=fig,\n ax=ax,\n alpha=alpha,\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n clip=clip,\n **kwargs)\n label_cluster(result=result, ax=ax)\n return result\n\n\ndef plot_cluster(embedding, data, fig, ax, result):\n result.clusterable_embedding = create_clusterable_embedding(df=data)\n result.labels = hdbscan.HDBSCAN(\n min_samples=10,\n min_cluster_size=2500,\n ).fit_predict(result.clusterable_embedding)\n clustered = (result.labels >= 0)\n ax.scatter(embedding[~clustered, 0],\n embedding[~clustered, 1],\n c=(0.5, 0.5, 0.5),\n s=0.1,\n alpha=0.5)\n mappable = ax.scatter(embedding[clustered, 0],\n embedding[clustered, 1],\n c=result.labels[clustered],\n s=0.1,\n cmap='Spectral')\n\n plot_colorbar(mappable, fig, ax)\n\n\ndef label_cluster(result, ax):\n if result.labels is not None:\n s = pd.Series(result.labels)\n for label in s.unique():\n ax.annotate(str(label),\n (result.embedding[s == label][:, 0].mean(),\n result.embedding[s == label][:, 1].mean()),\n horizontalalignment='center',\n verticalalignment='center',\n weight='bold',\n color='black',\n )\n\n\ndef plot_chrom_umap(embedding,\n data,\n protein,\n fig=None,\n ax=None,\n alpha=0.03,\n cmap='RdYlBu_r',\n vmin=None,\n vmax=None,\n clip=False,\n **kwargs):\n \"\"\"Plot UMAP visulatization.\"\"\"\n if fig is None:\n fig, ax = plt.subplots()\n cmap = matplotlib.cm.get_cmap(cmap)\n normalize = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=clip)\n mappable = ax.scatter(embedding[:, 0], embedding[:, 1], s=0.1, c=data[protein], cmap=cmap, norm=normalize)\n plot_colorbar(mappable, fig, ax)\n ax.set_title(protein)\n return ax\n\n\ndef plot_density_umap(embedding, fig, ax, alpha=0.03, cmap='RdYlBu_r', title='GATC density'):\n \"\"\"Plot GATC density.\"\"\"\n mappable = ax.hist2d(embedding[:, 0], embedding[:, 1], bins=[100, 100], cmap=cmap, normed=True)[-1]\n plot_colorbar(mappable, fig, ax)\n ax.set_title(title)\n\n\ndef plot_colorbar(mappable, fig, ax):\n \"\"\"Plot colorbar\"\"\"\n cbar = fig.colorbar(mappable, ax=ax)\n cbar.set_alpha(1)\n cbar.draw_all()\n\n\ndef read_data_from_direcotry(path):\n datasets = [os.path.join(path, d) for d in os.listdir(path)]\n dataframes = [\n pd.read_csv(d, sep='\\t', header=None, names=['index', os.path.basename(d)[len('DESeq2 '):-len('.tabular')]],\n index_col=0, usecols=[0, LOG2_COLUMN]) for d in datasets]\n df_final = reduce(lambda left, right: pd.merge(left, right, on='index'), dataframes)\n return df_final\n\ndef create_clusterable_embedding(df, n_neighbors=30, min_dist=0.0, n_components=2, random_state=42, metric='canberra', **kwargs):\n return umap.UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n random_state=random_state,\n metric=metric,\n **kwargs\n ).fit_transform(df)\n\n\ndef fit_transform(df, random_state=42, **kwargs):\n reducer = umap.UMAP(random_state=random_state, **kwargs)\n embedding = reducer.fit_transform(df)\n return reducer, embedding\n\n\ndef load_embedding(path):\n return np.array(pd.read_csv(path, sep='\\t'))\n\n\ndef save_embedding(embedding, path):\n pd.DataFrame.from_records(embedding).to_csv(path, sep='\\t', index=None)\n\n\ndef plot_proteins(embedding, df, density=True, cluster=False, clip=False, **kwargs):\n elements = list(df.columns)\n if density:\n elements.append('density')\n if cluster:\n elements.append('cluster')\n nrows = int(math.ceil(len(elements) / 2.0))\n fig, axes = plt.subplots(ncols=2, nrows=nrows, figsize=(20, 15))\n result = Result(embedding=embedding, figure=fig)\n for ax, protein in zip(axes.flat, reversed(elements)):\n result = plot_umap(embedding=embedding, data=df, protein=protein, ax=ax, fig=fig, result=result, clip=clip, **kwargs)\n fig.tight_layout()\n return result\n","repo_name":"mvdbeek/seaplotlib","sub_path":"seaplotlib/umap_plot.py","file_name":"umap_plot.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74677573496","text":"#! python3\n\n\"\"\"Mission Manager\"\"\"\n\nfrom collections import OrderedDict\nfrom threading import Lock\n\nfrom worker import current\n\nfrom .safeprint import print\nfrom .mission import create_mission, mission_lock\nfrom .episode import Episode\nfrom .io import backup, json_load, json_dump\nfrom .profile import get as profile\nfrom .channel import mission_ch\nfrom .episode_loader import cleanup_episode\n\nclass MissionManager:\n\t\"\"\"Since check_update thread might grab mission from mission_manager, we\n\thave to make it thread safe.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"Construct.\"\"\"\n\t\tself.pool = {}\n\t\tself.view = OrderedDict()\n\t\tself.library = OrderedDict()\n\t\tself.edit = False\n\t\tself.lock = Lock()\n\t\t\n\t\tself.load()\n\n\t\tthread = current()\n\t\tmission_ch.sub(thread)\n\t\t@thread.listen(\"MISSION_PROPERTY_CHANGED\")\n\t\tdef _(event):\n\t\t\t\"\"\"Set the edit flag after mission changed.\"\"\"\n\t\t\tself.edit = True\n\n\tdef cleanup(self):\n\t\t\"\"\"Cleanup unused missions\"\"\"\n\t\tmain_pool = set(self.pool)\n\t\tview_pool = set(self.view)\n\t\tlibrary_pool = set(self.library)\n\n\t\tfor url in main_pool - (view_pool | library_pool):\n\t\t\tcleanup_episode(self.pool[url])\n\t\t\tdel self.pool[url]\n\n\tdef save(self):\n\t\t\"\"\"Save missions to json.\"\"\"\n\t\tif not self.edit:\n\t\t\treturn\n\n\t\twith mission_lock:\n\t\t\tjson_dump(list(self.pool.values()), profile(\"pool.json\"))\n\t\t\tjson_dump(list(self.view), profile(\"view.json\"))\n\t\t\tjson_dump(list(self.library), profile(\"library.json\"))\n\t\t\t\n\t\tself.edit = False\n\t\tprint(\"Session saved\")\n\n\tdef load(self):\n\t\t\"\"\"Load mission from json.\n\n\t\tIf failing to load missions, create json backup .\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself._load()\n\t\texcept Exception:\n\t\t\tprint(\"Failed to load session!\")\n\t\t\tbackup(profile(\"*.json\"))\n\t\t\traise\n\t\tself.cleanup()\n\n\tdef _load(self):\n\t\t\"\"\"Load missions from json. Called by MissionManager.load.\"\"\"\n\t\tpool = json_load(profile(\"pool.json\")) or []\n\t\tview = json_load(profile(\"view.json\")) or []\n\t\tlibrary = json_load(profile(\"library.json\")) or []\n\n\t\tfor m_data in pool:\n\t\t\t# reset state\n\t\t\tif m_data[\"state\"] in (\"DOWNLOADING\", \"ANALYZING\"):\n\t\t\t\tm_data[\"state\"] = \"ERROR\"\n\t\t\t# build episodes\n\t\t\t# compatible 2016.6.4\n\t\t\tif m_data[\"episodes\"]:\n\t\t\t\tepisodes = []\n\t\t\t\tfor ep_data in m_data[\"episodes\"]:\n\t\t\t\t\t# compatible 2016.4.3\n\t\t\t\t\tif \"total\" not in ep_data:\n\t\t\t\t\t\tif not ep_data[\"current_url\"]:\n\t\t\t\t\t\t\tep_data[\"total\"] = 0\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telif ep_data[\"url\"] == ep_data[\"current_url\"]:\n\t\t\t\t\t\t\t# first page crawler\n\t\t\t\t\t\t\tep_data[\"total\"] = ep_data[\"current_page\"] - 1\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# per page crawler\n\t\t\t\t\t\t\tep_data[\"total\"] = ep_data[\"current_page\"] - 1\n\t\t\t\t\t\t\tep_data[\"current_page\"] = 1\n\t\t\t\t\t\t\n\t\t\t\t\t\tif ep_data[\"complete\"]:\n\t\t\t\t\t\t\tep_data[\"total\"] += 1\n\t\t\t\t\t\t\t\n\t\t\t\t\tepisodes.append(Episode(**ep_data))\n\t\t\t\tm_data[\"episodes\"] = episodes\n\t\t\tmission = create_mission(**m_data)\n\t\t\t\n\t\t\tself.pool[mission.url] = mission\n\n\t\tfor url in view:\n\t\t\tself.view[url] = self.pool[url]\n\n\t\tfor url in library:\n\t\t\tself.library[url] = self.pool[url]\n\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", self.view)\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", self.library)\n\n\tdef add(self, pool_name, *missions):\n\t\t\"\"\"Add missions to pool.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\n\t\twith self.lock:\n\t\t\tfor mission in missions:\n\t\t\t\tif mission.url not in self.pool:\n\t\t\t\t\tmission_ch.pub(\"MISSION_ADDED\", mission)\n\t\t\t\tself.pool[mission.url] = mission\t\t\t\t\t\n\t\t\t\tpool[mission.url] = mission\n\t\t\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef remove(self, pool_name, *missions):\n\t\t\"\"\"Remove missions from pool.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\n\t\t# check mission state\n\t\tmissions = [m for m in missions if m.state not in (\"ANALYZING\", \"DOWNLOADING\")]\n\n\t\twith self.lock:\n\t\t\tfor mission in missions:\n\t\t\t\tif mission.url in pool:\n\t\t\t\t\tdel pool[mission.url]\n\t\t\tself.cleanup()\n\t\t\t\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef lift(self, pool_name, *missions):\n\t\t\"\"\"Lift missions to the top.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\t\twith self.lock:\n\t\t\tfor mission in reversed(missions):\n\t\t\t\tpool.move_to_end(mission.url, last=False)\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\n\tdef drop(self, pool_name, *missions):\n\t\t\"\"\"Drop missions to the bottom.\"\"\"\n\t\tpool = getattr(self, pool_name)\n\t\twith self.lock:\n\t\t\tfor mission in missions:\n\t\t\t\tpool.move_to_end(mission.url)\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\t\t\n\tdef sort(self, pool_name, key, reverse=False):\n\t\tpool = getattr(self, pool_name)\n\t\twith self.lock:\n\t\t\tfor mission in sorted(pool.values(), key=key):\n\t\t\t\tpool.move_to_end(mission.url, last=not reverse)\n\t\tmission_ch.pub(\"MISSION_LIST_REARRANGED\", pool)\n\t\tself.edit = True\n\t\t\n\tdef get_all(self, pool_name, test=None):\n\t\t\"\"\"Get all missions matching condition.\"\"\"\n\t\twith self.lock:\n\t\t\treturn [m for m in getattr(self, pool_name).values() if not test or test(m)]\n\t\t\t\n\tdef get(self, pool_name, test=None):\n\t\t\"\"\"Get the first mission matching condition.\"\"\"\n\t\twith self.lock:\n\t\t\tfor mission in getattr(self, pool_name).values():\n\t\t\t\tif not test or test(mission):\n\t\t\t\t\treturn mission\n\n\tdef get_by_url(self, url, pool_name=None):\n\t\t\"\"\"Get mission by url.\"\"\"\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]\n\nmission_manager = MissionManager()\n","repo_name":"eight04/ComicCrawler","sub_path":"comiccrawler/mission_manager.py","file_name":"mission_manager.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","stars":244,"dataset":"github-code","pt":"22"} +{"seq_id":"34811624054","text":"#!/usr/bin/env python\n\n# Imports\nimport datetime\nimport os\n#import shlex\nimport subprocess\nimport sys\nimport time\n\n# Variable. This will be used as unique identifier for a pcap file name.\ntoday = datetime.datetime.now().strftime(\"%Y-%m-%d--%H:%M\")\n\n\ndef how_to():\n print(\"Usage:\")\n print(\" Simply run\")\n print(\" \\\"python packetnoid.py\\\"\")\n print(\"[-0--<^_^>--0-]\")\n\n\n# Simmple function which uses subprocess to send nmap a command to \n# find all live hosts in the newtork. It then greps to find the line\n# which contain an IP address, cutting only to get the IP address.\n# The IPs are then converted into a list.\ndef get_hosts():\n try:\n print(\"[+] Finding the live hosts in your network!\")\n cmd_1 = 'nmap -sP 192.168.1.1-254 | grep \"report\" | cut -d\" \" -f5'\n \n # Perform the actual nmap command.\n p1 = subprocess.Popen(cmd_1, stdout=subprocess.PIPE, stderr=\\\n subprocess.STDOUT, shell=True)\n \n # Take the first returned value of communicate and split the lines.\n p1_list = p1.communicate()[0].splitlines()\n\n print(\"[+] Done!\")\n print(\"[+] Here is what I found:\\n\")\n \n # Print live hosts found (for information only).\n for ip in p1_list:\n print(\" \" + ip)\n\n # Return IPs in a list.\n return p1_list\n\n except Exception:\n sys.exit(\"[-] Problem finding your live hosts!\")\n\n\n# This funtion takes the list of IPs created by get_hosts()\n# and spits out a new list, formatted for tcpdump.\ndef preparer(p1_list):\n try:\n print(\"[+] Trying to prepare the command for tcpdump.\")\n new_ip_list = []\n\n # The aim here is to ascertain the indeces of each list element.\n # There are three possibilities:\n # 1) if the index is zero, then we prefix the IP with the string \"host\";\n # 2) if the index is not of the last list element, append \"or \\\"; and\n # 3) if the index is that of the last element, add \"&\" to the command\n for ip in p1_list:\n if p1_list.index(ip) == 0:\n new_ip_list.append(\"host \" + ip + \" or \")\n elif p1_list.index(ip) in range(1, (len(p1_list) - 1)):\n new_ip_list.append(ip + \" or \")\n elif p1_list.index(ip) == (len(p1_list) - 1):\n new_ip_list.append(ip + \" &\")\n\n ip_str = ''.join(new_ip_list)\n\n raw_cmd = \"sudo /usr/sbin/tcpdump -i wlan0 -lenx -X -s 0 -w \"\\\n + os.getcwd() + \"/tcpdump-\" + today + \".pcap \" + ip_str\n \n print(\"[+] Done!\")\n print(\"[+] Here is the command:\\n\")\n print(\" \" + raw_cmd)\n\n # Return the command as a string. \"shell=True\" must be passed \n # as a subprocess' argument.\n return raw_cmd\n\n except Exception:\n sys.exit(\"[-] Could not prepare the IP list for tcpdump.\")\n\n\ndef monitore(raw_cmd):\n #while True:\n try:\n f_name = os.getcwd() + \"/tcpdump-\" + today + \".pcap\"\n\n print(\"[+] Firing tcpdump now.\")\n p2 = subprocess.Popen(raw_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, shell=True)\n\n # Putting the script to sleep for 24 hours whilst tcpdump\n # runs on the background.\n time.sleep(86400)\n p2.terminate()\n \n # try:\n # print(\"[+] Zipping pcap file.\")\n # p3 = subprocess.Popen(gzip, stdout=subprocess.PIPE, shell=True)\n # #p3.terminate()\n # print(\"[+] Done!\\n\")\n # except Exception:\n # sys.exit(\"[-] I have managed to run tcpdump, but could not zip the file\")\n\n print(\"[+] Done! tcpdump has run successfully.\")\n print(\"[+] pcap file saved as \\\"%s\\\".\\n\" % f_name)\n except Exception:\n sys.exit(\"[-] Could not run the actual tcpdump command.\") \n\n\ndef main():\n if len(sys.argv) > 1: \n sys.exit(how_to())\n\n print(\"\\nThe default network to be scanned is:\")\n print(\" 192.168.1.0/24\")\n print(\"\\n----------------------------------------------------\")\n \n try:\n p1_list = get_hosts()\n except Exception:\n sys.exit(\"[-] Main function could not get the return of get_hosts().\")\n \n print(\"\\n----------------------------------------------------\")\n try:\n raw_cmd = preparer(p1_list)\n except Exception:\n sys.exit(\"[-] Main function could not get the return of preparer().\")\n\n print(\"\\n----------------------------------------------------\")\n try:\n monitore(raw_cmd)\n except Exception:\n sys.exit(\"[-] Main function could not call monitore().\")\n\nif __name__ != \"__main__\":\n sys.exit(\"[!] NO WAY!\")\nelse: \n main()","repo_name":"sorebyte/packetnoid","sub_path":"packetnoid.py","file_name":"packetnoid.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26842536619","text":"from __future__ import print_function\n\nimport os.path\nimport sys\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\n\nSCOPES = ['https://www.googleapis.com/auth/script.projects']\n\nextensions = {\n 'SERVER_JS': 'js',\n 'JSON': 'json',\n 'HTML': 'html'\n}\n\n\ndef save_sources(files, local_folder: str):\n for file_info in files:\n #print(file_info)\n filename = file_info['name'] + \".\" + (\n extensions[file_info['type']] if file_info['type'] in extensions else file_info['type'])\n print(f\"Exporting file {filename}...\")\n with open(os.path.join(local_folder, filename), encoding=\"utf-8\", mode=\"w\") as fw:\n fw.write(file_info['source'])\n\n\ndef get_file_type(filename: str) -> str:\n if filename.lower().endswith('.js'):\n return 'SERVER_JS'\n if filename.lower().endswith('.json'):\n return 'JSON'\n if filename.lower().endswith('.html'):\n return 'HTML'\n raise Exception(f'Unrecognized file type: {filename}')\n\n\ndef upload_sources(service: str, script_id: str, local_folder: str):\n request = {\n 'files': []\n }\n for filename in os.listdir(local_folder):\n with open(os.path.join(local_folder, filename), mode='r', encoding=\"utf-8\") as f:\n request['files'].append({\n 'name': os.path.splitext(filename)[0],\n 'type': get_file_type(filename),\n 'source': f.read()\n })\n response = service.projects().updateContent(\n body=request,\n scriptId=script_id).execute()\n print(response)\n\n\ndef main():\n if len(sys.argv) != 4:\n raise Exception('Not enough command line arguments, should be 3')\n cmd: str = sys.argv[1]\n if cmd not in ['download', 'upload']:\n raise Exception('Wrong command argument, should be one of \"download\" or \"upload\"')\n script_id: str = sys.argv[2]\n local_folder: str = sys.argv[3]\n\n try:\n service = build('script', 'v1', credentials=get_credentials())\n\n if cmd == 'download':\n request = service.projects().getContent(scriptId=script_id)\n response = request.execute()\n save_sources(response['files'], local_folder)\n\n if cmd == 'upload':\n upload_sources(service, script_id, local_folder)\n\n print(\"That's all, folks!\")\n except HttpError as err:\n print(err)\n\n\ndef get_credentials() -> Credentials:\n credentials = None\n if os.path.exists('token.json'):\n credentials = Credentials.from_authorized_user_file('token.json', SCOPES)\n if not credentials or not credentials.valid:\n if credentials and credentials.expired and credentials.refresh_token:\n credentials.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n credentials = flow.run_local_server(port=0)\n with open('token.json', 'w') as token:\n token.write(credentials.to_json())\n return credentials\n\n\nif __name__ == '__main__':\n print(\"usage: sync-gas.py \")\n main()\n","repo_name":"xuthus/google-apps-script-sync","sub_path":"sync-gas.py","file_name":"sync-gas.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"14171321300","text":"'''\nРеализовать функцию, принимающую несколько параметров, описывающих данные пользователя:\nимя, фамилия, год рождения, город проживания, email, телефон.\nФункция должна принимать параметры как именованные аргументы.\nРеализовать вывод данных о пользователе одной строкой.\n\n'''\ndef user_date(name, surname, birth_year, city, email, phone):\n if email.endswith('@mail.ru'):\n print(f'name - {name}; surname - {surname}; birth_year - {birth_year}; city - {city}; email -{email}; phone - {phone}')\n else:\n print('Некорректно введен email')\n#user_date(name=\"Jon\", surname=\"Snow\", birth_year=1986, city=\"Winterfell\", email=\"j.snow@mail.ru\", phone=12345)\n\nname = input('Введите ваше имя: ')\nsurname = input('Введите вашу фамилию: ')\nbirth_year = int(input('Введите дату вашего рождения: '))\ncity = input('Введите город проживания: ')\nemail = input('Введите Email: ')\nphone = int(input('Введите номер телефона: '))\n","repo_name":"anmalch/python","sub_path":"lesson_3_2.py","file_name":"lesson_3_2.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34156496068","text":"import tkinter\nfrom tkinter import filedialog\nfrom PIL import ImageTk\nimport PIL.Image\nimport os\nimport re\nimport cv2\n\nBACKGROUND_COLOR = '#ededed'\n\nWINDOW_WIDTH = 580\nWINDOW_HEIGHT = 600\n\nPAD_SMALL = 2\nPAD_MEDIUM = 4\nPAD_LARGE = 8\nPAD_EXTRA_LARGE = 14\n\n\nclass Application(tkinter.Frame):\n\n def __init__(self, master):\n\n tkinter.Frame.__init__(self, master=master)\n\n self.image_name = None\n self.image_dir = None\n\n self.master.minsize(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)\n\n file_chooser_frame = tkinter.Frame(self.master, bg=BACKGROUND_COLOR)\n file_chooser_frame.pack(\n fill=tkinter.X,\n expand=False,\n anchor=tkinter.N,\n padx=PAD_MEDIUM,\n pady=PAD_MEDIUM\n )\n\n file_chooser_button = tkinter.Button(\n file_chooser_frame,\n text='Choose Image File...',\n command=self.choose_files\n )\n file_chooser_button.pack(side=tkinter.LEFT)\n\n clear_regions_button = tkinter.Button(\n file_chooser_frame,\n text='Clear Regions',\n command=self.clear_rectangles\n )\n clear_regions_button.pack(side=tkinter.RIGHT, anchor=tkinter.N)\n\n self.snip_string = tkinter.StringVar()\n snip_label = tkinter.Label(\n file_chooser_frame,\n text=\"Snip Label: \",\n bg=BACKGROUND_COLOR\n )\n snip_label_entry = tkinter.Entry(\n file_chooser_frame,\n textvariable=self.snip_string\n )\n snip_label_entry.pack(side=tkinter.RIGHT)\n snip_label.pack(side=tkinter.RIGHT)\n\n # the canvas frame's contents will use grid b/c of the double\n # scrollbar (they don't look right using pack), but the canvas itself\n # will be packed in its frame\n canvas_frame = tkinter.Frame(self.master, bg=BACKGROUND_COLOR)\n canvas_frame.grid_rowconfigure(0, weight=1)\n canvas_frame.grid_columnconfigure(0, weight=1)\n canvas_frame.pack(\n fill=tkinter.BOTH,\n expand=True,\n anchor=tkinter.N,\n padx=PAD_MEDIUM,\n pady=PAD_MEDIUM\n )\n\n self.canvas = tkinter.Canvas(canvas_frame, cursor=\"cross\")\n\n self.scrollbar_v = tkinter.Scrollbar(\n canvas_frame,\n orient=tkinter.VERTICAL\n )\n self.scrollbar_h = tkinter.Scrollbar(\n canvas_frame,\n orient=tkinter.HORIZONTAL\n )\n self.scrollbar_v.config(command=self.canvas.yview)\n self.scrollbar_h.config(command=self.canvas.xview)\n\n self.canvas.config(yscrollcommand=self.scrollbar_v.set)\n self.canvas.config(xscrollcommand=self.scrollbar_h.set)\n\n self.canvas.grid(\n row=0,\n column=0,\n sticky=tkinter.N + tkinter.S + tkinter.E + tkinter.W\n )\n self.scrollbar_v.grid(row=0, column=1, sticky=tkinter.N + tkinter.S)\n self.scrollbar_h.grid(row=1, column=0, sticky=tkinter.E + tkinter.W)\n\n # setup some button and key bindings\n self.canvas.bind(\"\", self.on_draw_button_press)\n self.canvas.bind(\"\", self.on_draw_move)\n\n self.canvas.bind(\"\", self.on_pan_button_press)\n self.canvas.bind(\"\", self.pan_image)\n self.canvas.bind(\"\", self.on_pan_button_release)\n\n # save our sub-region snippet\n self.master.bind(\"\", self.extract_region)\n\n self.rect = None\n\n self.start_x = None\n self.start_y = None\n\n self.pan_start_x = None\n self.pan_start_y = None\n\n self.image = None\n self.tk_image = None\n\n self.pack()\n\n def on_draw_button_press(self, event):\n # starting coordinates\n self.start_x = self.canvas.canvasx(event.x)\n self.start_y = self.canvas.canvasy(event.y)\n\n # create a new rectangle if we don't already have one\n if self.rect is None:\n self.rect = self.canvas.create_rectangle(\n self.start_x,\n self.start_y,\n self.start_x,\n self.start_y,\n outline='#00ff00',\n width=2\n )\n\n def on_draw_move(self, event):\n cur_x = self.canvas.canvasx(event.x)\n cur_y = self.canvas.canvasy(event.y)\n\n # update rectangle size with mouse position\n self.canvas.coords(self.rect, self.start_x, self.start_y, cur_x, cur_y)\n\n def on_pan_button_press(self, event):\n self.canvas.config(cursor='fleur')\n\n # starting position for panning\n self.pan_start_x = int(self.canvas.canvasx(event.x))\n self.pan_start_y = int(self.canvas.canvasy(event.y))\n\n def pan_image(self, event):\n self.canvas.scan_dragto(\n event.x - self.pan_start_x,\n event.y - self.pan_start_y,\n gain=1\n )\n\n # noinspection PyUnusedLocal\n def on_pan_button_release(self, event):\n self.canvas.config(cursor='cross')\n\n def clear_rectangles(self):\n self.canvas.delete(\"rect\")\n self.canvas.delete(self.rect)\n self.rect = None\n\n # noinspection PyUnusedLocal\n def extract_region(self, event):\n if self.rect is None:\n return\n\n output_dir = \"/\".join(\n [\n self.image_dir,\n self.snip_string.get().strip()\n ]\n )\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n corners = self.canvas.coords(self.rect)\n corners = tuple([int(c) for c in corners])\n region = self.image.crop(corners)\n\n match = re.search('(.+)\\.(.+)$', self.image_name)\n output_filename = \"\".join(\n [\n match.groups()[0],\n '_',\n str(corners[0]),\n ',',\n str(corners[1])\n ]\n )\n output_filename = \".\".join([output_filename, match.groups()[1]])\n\n output_file_path = \"/\".join([output_dir, output_filename])\n\n region.save(output_file_path)\n\n self.canvas.create_rectangle(\n corners[0],\n corners[1],\n corners[2],\n corners[3],\n outline='#ff1493',\n width=2,\n tag='rect'\n )\n\n self.canvas.delete(self.rect)\n self.rect = None\n\n def choose_files(self):\n self.canvas.delete(self.rect)\n self.rect = None\n\n selected_file = filedialog.askopenfile('r')\n\n cv_img = cv2.imread(selected_file.name)\n\n self.image = PIL.Image.fromarray(\n cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB),\n 'RGB'\n )\n height, width = self.image.size\n self.canvas.config(scrollregion=(0, 0, height, width))\n self.tk_image = ImageTk.PhotoImage(self.image)\n self.canvas.create_image(0, 0, anchor=tkinter.NW, image=self.tk_image)\n\n self.image_name = os.path.basename(selected_file.name)\n self.image_dir = os.path.dirname(selected_file.name)\n\nroot = tkinter.Tk()\napp = Application(root)\nroot.mainloop()\n","repo_name":"whitews/image-subregion-extractor","sub_path":"image_subregion_extractor.py","file_name":"image_subregion_extractor.py","file_ext":"py","file_size_in_byte":7158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75289137335","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nfrom .shortcuts import InptType\nfrom .shortcuts import get_desc\n\n\ndef get_bytes(inpt: InptType) -> bytes:\n \"\"\"\n Returns a bytes object from .'inpt', no matter what 'inpt' is.\n\n For ``ioBase`` classes, its contents is read.\n If the read input is ``bytes`` or ``bytearray``, it is returned as is.\n For string inputs, it is encoded using ``sys.getdefaultencoding``.\n If inpt is a string pointing to a file,\n a ``PathLike`` or ``PosixPath`` object,\n The bytes contained in that file are returned.\n\n Args:\n inpt: bytes, bytearray, str, os.PathLike, typing.io, object\n The object or file to convert to bytes.\n\n Returns: bytes\n \"\"\"\n\n if hasattr(inpt, 'read'):\n inpt = inpt.read()\n if isinstance(inpt, (bytes, bytearray)):\n return inpt\n if os.path.isfile(inpt):\n return Path(inpt).read_bytes()\n if isinstance(inpt, str):\n return inpt.encode(sys.getdefaultencoding())\n else:\n print(\"unsupported input type\")\n\n\ndef main():\n desc, help_msgs = get_desc('get_bytes')\n parser = ArgumentParser(prog='get_bytes',\n usage=desc,\n description=get_bytes.__doc__.splitlines()[0])\n parser.add_argument('inpt', type=InptType, nargs=1, help=help_msgs[0])\n args = parser.parse_args()\n get_bytes(args.inpt[0])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"FrancoisNadeau/csvfix","sub_path":"get_bytes.py","file_name":"get_bytes.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34361619332","text":"from haystack.preview import Pipeline\nfrom haystack.preview.components.builders.prompt_builder import PromptBuilder\n\ndef initialize_simple_pipeline(llm_generator, llm_generator_name, prompt_template):\n # Creating a pipeline\n pipeline = Pipeline()\n\n # Adding a PromptBuilder\n prompt_builder = PromptBuilder(template=prompt_template)\n pipeline.add_component(instance=prompt_builder, name=\"prompt_builder\")\n\n # Adding a GPT-based Generator\n # Ensure that you have the OPENAI_API_KEY environment variable set\n gpt_generator = llm_generator # GPTGenerator(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n pipeline.add_component(instance=gpt_generator, name=llm_generator_name) #\"gpt_generator\")\n\n # Connecting the components\n pipeline.connect(\"prompt_builder\",llm_generator_name)\n\n return pipeline","repo_name":"PacktPublishing/Building-Natural-Language-Pipelines","sub_path":"ch2/scripts/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"31101051502","text":"#Grabs data from a serial connection\nimport serial\nimport time\nimport sys\n\nimport logging\nlogging.basicConfig()\nlogger = logging.getLogger(\"serial_parser.py\")\nlogger.setLevel(logging.DEBUG)\n\nfrom data import plot_storage\n\ndef screen_to_terminal():\n ser = serial.Serial(sys.argv[1], 19200)\n\n #Waits for the begin command\n while 1:\n begin = False\n while (not begin):\n try:\n string_line = ser.readline().decode().strip(\"\\n\").split(\":\")\n except UnicodeDecodeError:\n continue\n \n if(string_line[0] == \"PLOTTER\"):\n if (string_line[1] == \"begin\"):\n begin = True\n\n restart = False\n while (not restart):\n try:\n string_line = ser.readline().decode().strip(\"\\n\").split(\":\")\n except UnicodeDecodeError:\n #Have to go back and wait for begin signal again\n restart = True\n continue\n\n if(string_line[0] == \"PLOTTER\"):\n if (string_line[1] == \"add_line\"):\n #dispatch to a handler for add_line\n pass\n\n elif(string_line[1] == \"add_points\"):\n #should dispatch to a add_points handler\n logger.debug(f\"{time.time()} -> {string_line}\")\n\ndef screen_to_data_storage(port_name):\n\n #Keyed on incoming descriptor, Value is plot_storage descriptor\n line_d_mapping = dict()\n\n ser = serial.Serial(port_name, 115200)\n\n while 1:\n\n #Waits for the begin command\n begin = False\n while (not begin):\n try:\n string_line = ser.readline().decode().strip(\"\\n\").split(\":\")\n except UnicodeDecodeError:\n continue\n \n if(string_line[0] == \"PLOTTER\"):\n if (string_line[1] == \"begin\"):\n begin = True\n\n #Goes until reset or exit\n restart = False\n while (not restart):\n\n if(plot_storage.kill_update_thread):\n logger.debug(\"Received kill.\")\n sys.exit(0)\n\n try:\n string_line = ser.readline().decode().strip(\"\\n\").split(\":\")\n #logger.debug(string_line)\n except UnicodeDecodeError:\n #Have to go back and wait for begin signal again\n restart = True\n continue\n\n if(string_line[0] == \"PLOTTER\"):\n if (string_line[1] == \"add_line\"):\n #Unpack the rest of the commands\n incoming_descriptor = int(string_line[2])\n x_fp_digits = int(string_line[3])\n y_fp_digits = int(string_line[4])\n\n line_d_mapping[incoming_descriptor] = plot_storage.add_line(x_fp_digits, y_fp_digits)\n\n logger.debug(f\"Added line to mapping. {line_d_mapping}\")\n\n elif(string_line[1] == \"add_points\"):\n #should dispatch to a add_points handler\n storage_line_d = line_d_mapping[int(string_line[2])]\n x_buffer = eval(string_line[3])\n y_buffer = eval(string_line[4])\n\n #logger.debug(storage_line_d)\n #logger.debug(x_buffer)\n #logger.debug(y_buffer)\n\n plot_storage.add_points(x_buffer, y_buffer, storage_line_d)\n\n logger.debug(f\"Recv buffer at {time.time()}\")\n\nif __name__ == \"__main__\":\n #screen_to_terminal()\n logger.info(\"Serial Parser was run directly. Screening to (unused) data storage object.\")\n screen_to_data_storage()","repo_name":"adityanarayanan03/plotter_logger","sub_path":"python_src/serial_parser.py","file_name":"serial_parser.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4264661931","text":"import itertools\r\nimport numpy\r\n\r\n\r\ndef check_input_is_valid_number(input_string):\r\n \"\"\"\r\n Validate the input string given by the user, in order to see if it is a number in the suitable range.\r\n :param input_string: the string given by the user.\r\n :return: the integer form of the input_string in case it is a number, else an exception will be raised,\r\n that means there are no operations to be done, so the application will stop.\r\n \"\"\"\r\n\r\n # first check if the number is a non-zero natural number\r\n if input_string.isnumeric():\r\n input_number = int(input_string)\r\n\r\n # then check if it is lower than 1 or bigger than 4\r\n if input_number < 2 or input_number > 5:\r\n raise ValueError(\"The number's value should be an integer between 2 and 5!\")\r\n\r\n else:\r\n raise ValueError(\"The given value must be a natural number(no extra spaces or any other characters)!\")\r\n\r\n return input_number\r\n\r\n\r\ndef get_cartesian_product_tuples_of_0_and_1s(number_of_columns):\r\n \"\"\"\r\n Create a list with all non-zero existing vectors in Z2^sequence_length.\r\n :return: a list with all non-zero existing vectors.\r\n :pre-condition: the sequence length should be a non-zero natural number.\r\n \"\"\"\r\n\r\n # create the cartesian product list of the given length, which represents all the rows\r\n # with which you can form the matrices\r\n cartesian_product_list = [list(index) for index in itertools.product([0, 1], repeat=number_of_columns)]\r\n\r\n return cartesian_product_list\r\n\r\n\r\ndef check_reduced_row_echelon_form(given_matrix):\r\n \"\"\"\r\n Given a matrix, check if it is in row echelon form, and then check again if it also checks the reduced property.\r\n :param given_matrix: a matrix(an array of lists).\r\n :return: True if the matrix is in the reduced echelon form.\r\n :pre-condition: given_matrix is a list of lists(a matrix).\r\n \"\"\"\r\n\r\n previous_row_number_of_0 = 0\r\n column_index = 0\r\n\r\n # remember the index of the columns which have a leading 1\r\n leading_entry_column_index = []\r\n\r\n # get the numbers of 0s on the first line before the leading 1\r\n while column_index < len(given_matrix[0]):\r\n if given_matrix[0][column_index] == 0:\r\n previous_row_number_of_0 += 1\r\n else:\r\n leading_entry_column_index.append(column_index)\r\n break\r\n\r\n column_index += 1\r\n\r\n # compare the number of 0s from the first line with the others(the consecutive ones)\r\n row = 1\r\n\r\n while row < len(given_matrix):\r\n actual_row_numbers_of_0 = 0\r\n column_index = 0\r\n\r\n while column_index < len(given_matrix[row]):\r\n if given_matrix[row][column_index] == 0:\r\n actual_row_numbers_of_0 += 1\r\n else:\r\n if column_index not in leading_entry_column_index:\r\n leading_entry_column_index.append(column_index)\r\n break\r\n else:\r\n return False\r\n column_index += 1\r\n\r\n if actual_row_numbers_of_0 <= previous_row_number_of_0:\r\n return False\r\n\r\n previous_row_number_of_0 = actual_row_numbers_of_0\r\n row += 1\r\n\r\n # check if there are more 1s on the same column where you can find a leading 1 element\r\n # if there are, it means the matrix is not in row reduced echelon form\r\n given_matrix = numpy.array(given_matrix)\r\n number_of_1s = numpy.count_nonzero(given_matrix == 1, axis=0)\r\n for column in leading_entry_column_index:\r\n if number_of_1s[column] > 1:\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef get_number_of_reduced_echelon_form_matrices_and_their_format(all_possible_matrix_rows_format, number_of_rows,\r\n number_of_columns):\r\n \"\"\"\r\n Given a tuple with all non-zero existing vectors in Z2^sequence_length and the dimension of the basis,\r\n check all the possible permutations of the vectors which can create a basis.\r\n :return: number of the bases found and their content.\r\n :pre-condition: all_non_zero_possible_vectors should be a tuple with all non-zero existing vectors that can\r\n be a part of the basis, basis_dimension should be a positive integer.\r\n \"\"\"\r\n\r\n # store the bases(in the tuple format) found in a list\r\n matrices_found = list()\r\n\r\n # check for each possible matrix if it is in the reduced echelon form\r\n for matrix in itertools.permutations(all_possible_matrix_rows_format, number_of_rows):\r\n matrix = list(matrix)\r\n if check_reduced_row_echelon_form(matrix) and matrix not in matrices_found:\r\n matrix = tuple(tuple(i) for i in matrix)\r\n matrices_found.append(matrix)\r\n\r\n # add the zero matrix to the list, since it is in raw echelon form for any given dimension\r\n zero_matrix = [[0] * number_of_columns] * number_of_rows\r\n zero_matrix = tuple(tuple(i) for i in zero_matrix)\r\n\r\n if zero_matrix not in matrices_found:\r\n matrices_found.append(zero_matrix)\r\n\r\n # store the number of bases found\r\n number_of_matrices_found = len(matrices_found)\r\n\r\n return number_of_matrices_found, matrices_found\r\n\r\n\r\ndef run_algorithm():\r\n # get the input string from the first line of the file\r\n with open(\"input.txt\") as file:\r\n input_from_file = file.readline()\r\n\r\n m_value, n_value = input_from_file.split()\r\n\r\n # open the output file in the 'write' mode, in order to clean the data that existed before the run\r\n file = open(\"output.txt\", \"w\")\r\n\r\n # check if the input passed is valid\r\n try:\r\n number_of_rows = check_input_is_valid_number(m_value)\r\n number_of_columns = check_input_is_valid_number(n_value)\r\n except ValueError as error:\r\n file.write(str(error))\r\n return\r\n\r\n # get the values necessary for both sub-points\r\n all_possible_matrix_rows_format = get_cartesian_product_tuples_of_0_and_1s(number_of_columns)\r\n number_of_matrices_found, matrices_found = \\\r\n get_number_of_reduced_echelon_form_matrices_and_their_format \\\r\n (all_possible_matrix_rows_format, number_of_rows, number_of_columns)\r\n\r\n # open the output file again, but in the 'append' mode, to keep the data displayed earlier unmodified\r\n file = open(\"output.txt\", \"a\")\r\n\r\n # print the corresponding output\r\n file.write(\r\n f\"1. the number of matrices A from M{number_of_rows},{number_of_columns}(Z2) in reduced echelon form is {number_of_matrices_found}.\\n\")\r\n\r\n file.write(f\"2. the matrices A from M{number_of_rows},{number_of_columns}(Z2) in reduced echelon form are: \\n\")\r\n\r\n # print the matrices in the reduced row echelon form found\r\n for matrix in matrices_found:\r\n for row in matrix:\r\n file.write(f\"{row}\\n\")\r\n file.write(f\"\\n\")\r\n\r\n\r\nif __name__ == '__main__':\r\n run_algorithm()\r\n","repo_name":"trutadan/University-Work","sub_path":"Semester 1/Algebra/Bonus projects/Project 5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23475982879","text":"from scrape_indeed import *\nfrom kafka import KafkaProducer\nfrom numpy import record\nimport pandas as pd\nimport json\n\nprint(\"Envoie des données vers kafka en cours...\")\n\n#msg = f'[{companies},{titles},{jobType},{descriptions},{links}]'\ndata = dataPost\n\n\n#data_list = data.to_dict(orient=\"records\")\n\n\ndef connect_kafka_producer():\n producer = None\n try:\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'], api_version=(0, 10))\n except Exception as ex:\n print('Exception lors de la connexion avec kafka', producer)\n finally:\n return producer\nproducer=connect_kafka_producer()\n\ndef publish_message(prod, topic_name, val):\n try:\n \n b_value = bytes(val, encoding='utf-8')\n \n prod.send(topic_name, value=b_value)\n prod.flush()\n except Exception as ex:\n print(str(ex))\n\nfor i in range(len(dataPost)):\n message_to_kafka = {\"companies\":dataPost[\"companies\"].iloc[i],\"job_title\":dataPost[\"job title\"].iloc[i], \"job_type\":dataPost[\"job Type\"].iloc[i],\"job_Description\":dataPost[\"job Description\"].iloc[i],\"job_link\":dataPost[\"job link\"].iloc[i]}\n json_data = json.dumps(message_to_kafka)\n publish_message(producer, 'INDEED', json_data)\n\nprint(\"Kafka Producer Application Completed. \")\n\n#, encoding='utf-8'","repo_name":"Djeinaba2019/Finding_My_next_Job_Posting","sub_path":"Get_MyFuture_job/Kafka_stream_producer_indeed.py","file_name":"Kafka_stream_producer_indeed.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40787811349","text":"from odoo import models, fields, api, Command\n\n\nclass SignRequest(models.Model):\n _inherit = 'sign.request'\n\n ticket_id = fields.Many2one('helpdesk.ticket', string=\"Ticket\")\n\n @api.model\n def initialize_new(self, template_id, signers, followers, reference, subject, ticket_id, message, message_cc=None, attachment_ids=None, send=True, without_mail=False):\n sign_users = self.env['res.users'].search([('partner_id', 'in', [signer['partner_id'] for signer in signers])]).filtered(lambda u: u.has_group('sign.group_sign_employee'))\n sign_request = self.create({'template_id': template_id,\n 'reference': reference,\n 'subject': subject,\n 'message': message,\n 'message_cc': message_cc,\n 'ticket_id': ticket_id})\n if attachment_ids:\n attachment_ids.write({'res_model': sign_request._name, 'res_id': sign_request.id})\n sign_request.write({'attachment_ids': [Command.set(attachment_ids.ids)]})\n sign_request.message_subscribe(partner_ids=followers)\n sign_request.activity_update(sign_users)\n sign_request.set_signers(signers)\n if send:\n sign_request.action_sent()\n if without_mail:\n sign_request.action_sent_without_mail()\n return {\n 'id': sign_request.id,\n 'token': sign_request.access_token,\n 'sign_token': sign_request.request_item_ids.filtered(lambda r: r.partner_id == self.env.user.partner_id)[:1].access_token,\n }\n","repo_name":"lumitec-solutions/lt_helpdesk_enhancement","sub_path":"lt_cb_ticket_to_esign/models/sign_request.py","file_name":"sign_request.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29517381075","text":"\"\"\"module for Steps\"\"\"\n\nfrom typing import Dict\nfrom os import path\nfrom yaml import load, FullLoader\nfrom json import dumps\n\nGLOBALS_PATH = path.join(path.dirname(path.abspath(__file__)), \"../globals.yml\")\n\nclass Step:\n\n def __init__(self, cfg: Dict, params: Dict) -> None:\n \"\"\"\n Base class for steps. The base class handles parsing of the config into a function-specific\n step. Common conventions for defining steps should also be defined here. \n \n Finally, the base class handles injection of params (noted in the config file as $param,\n and a key corresponding to the value in the params file) into the data structure of the Step.\n The key can also be a $ref, referring to another file relative to the path of the params\n file, which will be loaded and injected to the params file before it is itself injected\n into the step.\n\n Therefore, every step can have one $param, which can support an arbitrary number of keys\n and a single $ref, which can itself support an arbitrary number of keys.\n\n Args:\n cfg (Dict): The contents of the config Dictionary for the individual step\n params (Dict): The params file, after deserializing into a Dictionary\n \"\"\"\n with open(GLOBALS_PATH) as fp:\n self.globals = load(fp, Loader=FullLoader)\n\n self.action = cfg.pop(\"action\")\n self.action_details = cfg\n\n global_diagnostic_mode = self.globals.get(\"diagnostic_mode\")\n step_level_diagnostic_mode = cfg.pop(\"diagnostic_mode\", False)\n \n self.is_diagnostic_mode = global_diagnostic_mode or step_level_diagnostic_mode\n\n self.param = cfg.pop(\"$param\", None)\n\n if self.param:\n injected_param = params.get(self.param)\n _param_path = params.get(\"_params_path\")\n\n if \"$ref\" in injected_param:\n\n param_dir = path.dirname(path.abspath(_param_path))\n ref_path = injected_param['$ref']\n\n with open(path.join(param_dir, ref_path)) as fp:\n ref_data = load(fp, Loader=FullLoader)\n injected_param = {**injected_param, **ref_data}\n\n self.action_details = {**self.action_details, **injected_param}\n \n self.columns = self.action_details.pop(\"columns\", None)\n self.comment = self.action_details.pop(\"comment\", None)\n \n def _make_log(self, workflow, log_stub):\n\n if self.is_diagnostic_mode:\n\n if self.globals.get(\"diagnostic_mode_show_count\", False):\n log_stub[\"row_count\"] = workflow.df.count()\n \n if self.globals.get(\"diagnostic_mode_show_columns\", False):\n log_stub[\"observed_columns\"] = workflow.df.columns\n \n if self.globals.get(\"diagnostic_mode_show_column_diff\", False):\n old_columns = set(getattr(workflow, \"_columns\", set()))\n new_columns = set(workflow.df.columns)\n\n columns_removed = old_columns - new_columns\n columns_added = new_columns - old_columns\n\n log_stub[\"columns_added\"] = list(columns_added)\n log_stub[\"columns_removed\"] = list(columns_removed)\n\n if self.globals.get(\"diagnostic_mode_show_preview\", False):\n print(\"showing preview for step:\")\n print(dumps(log_stub, indent=4))\n\n preview_rows = self.globals.get(\"diagnostic_mode_show_preview_rows_count\", 20)\n workflow.df.show(preview_rows, False)\n \n if self.comment:\n log_stub[\"comment\"] = self.comment\n\n workflow._columns = workflow.df.columns \n workflow.workflow_report[\"steps\"].append(log_stub)","repo_name":"leozqin/etl-markup-toolkit","sub_path":"etl_markup_toolkit/actions/step.py","file_name":"step.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"74582010614","text":"# -*-coding:utf-8-*-\n\nimport logging\nimport sys\n\nfrom shixun.settings import LOG_FMT, LOG_LEVEL, LOG_FILENAME, LOG_DATEFMT\n\n\nclass Logger(object):\n\n def __init__(self):\n # 获取一个logger对象\n self._logger = logging.getLogger()\n # 设置format对象\n self.formatter = logging.Formatter(fmt=LOG_FMT, datefmt=LOG_DATEFMT)\n # 设置日志输出模式\n # 设置文件日志模式\n self._logger.addHandler(self._get_file_handler(LOG_FILENAME))\n # 设置终端日志模式\n self._logger.addHandler(self._get_console_handler())\n # 设置日志等级\n self._logger.setLevel(LOG_LEVEL)\n\n def _get_file_handler(self, filename):\n '''返回一个文件日志handler'''\n # 获取一个文件日志handler\n filehandler = logging.FileHandler(filename=filename, encoding=\"utf-8\")\n # 设置日志格式\n filehandler.setFormatter(self.formatter)\n return filehandler\n\n def _get_console_handler(self):\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(self.formatter)\n return console_handler\n\n @property\n def logger(self):\n return self._logger\n\n\nlogger = Logger().logger\n\nif __name__ == '__main__':\n logging.debug('调试信息')\n logging.info('logger info message')\n logging.warning('logger warning message')\n logging.error('logger error message')\n logging.critical('logger critical message')\n","repo_name":"domekisuzi/python-project-","sub_path":"new-2021-06-26/shixun/shixun/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14171364340","text":"\"\"\"\nСоздать текстовый файл (не программно),\nсохранить в нем несколько строк,\nвыполнить подсчет количества строк,\nколичества слов в каждой строке.\n\n\"\"\"\nimport re\n\nwith open('second_file.txt', 'w') as f:\n f.writelines(['Мороз и солнце!\\n', 'День чудесный!\\n', 'Ещё ты дремлешь, друг прелестный?\\n'])\n\nwith open('second_file.txt', 'r') as f:\n lines = f.readlines()\n for i_str, val in enumerate(lines, start=1):\n split = re.split('[ !,?\\n]+', val.strip('[ !,?\\n]+'))\n num_words = len(split)\n print(f'В {i_str} строке {num_words} слов/слова')","repo_name":"anmalch/python","sub_path":"lesson_5_2.py","file_name":"lesson_5_2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1805463843","text":"# # class dog:\n# #\n# # def __init__(self):\n# # return\n# #\n# # def dog(self):\n# # return\n# #\n# #\n# # if __name__ == '__main__':\n# # myDog=dog()\n# # print(myDog)\n#\n# str = \"very long long string\"\n#\n# for i in range(len(str)):\n# pass\n# # print(str[i])\n#\n# print(str[0:4])\n#\n#\n# def revstr(inStr):\n# newstr = \"\"\n# le = len(inStr)\n# i = le - 1\n# while i >= 0:\n# newstr += inStr[i]\n# i -= 1\n# return newstr\n#\n#\n# print(revstr(str))\n# print(str[len(str) - 1:0])\n#\n# mylist = [2, 3, 4, 5, 6, 0]\n# print(mylist)\n# mylist.sort()\n# print(mylist)\n# mylist.clear()\n# try:\n# newlist = [i for i in range(10)]\n# except:\n# print(\"An error occured\")\n# exit(1)\n# else:\n# print(f'New list : {newlist}')\n#\n# mytuple=(1,2,3,6)\n# print(mytuple)\n# for m in mytuple:\n# print(m)\n\nmyDict = {\n \"1\": {\n \"name\": \"brian\",\n \"age\": 20\n },\n\n \"2\": {\n \"name\": \"ben\",\n \"age\": 15\n }\n}\n\nfor id in myDict:\n for obj in id:\n # print(f'_id:{id}\\n\\t')\n pass\n\nx = 20\ny = 2\n# exponent operator or to the power of\n'''\nprint(x**y)\n\nprint(x/3)\nprint(x//3)\n\nx=-20\n#performs math floor on the result\nprint(x//3)\n\nmyl=[2,3,4,5,6]\n\n# x=eval(input())\n\n\nif x not in myl:\n print(\"not found\")\nelse:\n print(\"found\")\n'''\n\nname=\"nr\"\n# format specifiers\nprint(\"%s\"%name)\nname='b'\nprint(\"%c\"%name)\n\ndef printL(l):\n for el in l:\n print(el,end=\" \")\n print(\"\\n\")\n\nmyl = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\nprintL(myl)\nprint(len(myl))\ndel myl[0]\nprintL(myl)\nprint(len(myl))\n\nmyl.remove(0)\nprintL(myl)\nprint(len(myl))\n\ndata={\n \"name\":\"brian\",\n \"age\":20\n}\n\ndata2={\n \"name\":\"brian\",\n \"age\":20\n}\n\nprint(str(data))\n\nfor dt in data:\n pass\n #print(type(data[dt]))\ndatac=data.copy()\ndata.clear()\n\nprint(data)\nprint(datac)\nprint(datac.keys())\n#print(type(x))\n\n","repo_name":"junrdev/python","sub_path":"test1/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32089408608","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\nfrom aliyunsdkdysmsapi.request.v20170525 import QuerySendDetailsRequest\nfrom aliyunsdkcore.client import AcsClient\nimport uuid\nfrom moduleGlobal import app\n\nclass sendSMS(object):\n def __init__(self,type, number, msg):\n\n self.TemplateCode = app.config.get('SMS_MODEL_ID_CODE')\n self.para = msg\n if type=='noti_v':\n self.TemplateCode = app.config.get('SMS_MODEL_NOTI_CODE_V')\n if type=='noti_a':\n self.TemplateCode = app.config.get('SMS_MODEL_NOTI_CODE_A')\n\n\n self.access_key_id = app.config.get('SMS_ACCESS_KEY')\n self.access_key_secret = app.config.get('SMS_ACCESS_SECRET')\n self.server_address = app.config.get('SMS_URL')\n self.region = \"cn-hangzhou\" # 暂时不支持多region\n self.num = int(number)\n self.SignName = app.config.get('SMS_SIGN_NAME').encode('utf-8')\n self.acs_client = AcsClient(self.access_key_id, self.access_key_secret, self.region)\n self.uuid = uuid.uuid1()\n def send(self):\n smsRequest = SendSmsRequest.SendSmsRequest()\n smsRequest.set_TemplateCode(self.TemplateCode)\n if self.para is not None:\n smsRequest.set_TemplateParam(self.para)\n smsRequest.set_OutId(self.uuid)\n smsRequest.set_SignName( self.SignName)\n smsRequest.set_PhoneNumbers(self.num )\n smsResponse = self.acs_client.do_action_with_exception(smsRequest)\n return smsResponse\n # # 定义参数\n # user_params = {'Action': 'SingleSendSms', 'ParamString': '%s' % self.para, 'RecNum': '%d' % self.num,\n # 'SignName': self.SignName,\n # 'TemplateCode': self.TemplateCode}\n # self.make_request(user_params)\n\n # def percent_encode(self,encodeStr):\n # encodeStr = str(encodeStr)\n #\n # res = urllib.quote(encodeStr.decode('utf-8').encode('utf-8'), '')\n # res = res.replace('+', '%20')\n # res = res.replace('*', '%2A')\n # res = res.replace('%7E', '~')\n # return res\n #\n # def compute_signature(self,parameters, access_key_secret):\n # sortedParameters = sorted(parameters.items(), key=lambda parameters: parameters[0])\n # canonicalizedQueryString = ''\n # for (k, v) in sortedParameters:\n # canonicalizedQueryString += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)\n # stringToSign = 'GET&%2F&' + self.percent_encode(canonicalizedQueryString[1:])\n # print \"stringToSign: \" + stringToSign\n # h = hmac.new(access_key_secret + \"&\", stringToSign, sha1)\n # signature = base64.encodestring(h.digest()).strip()\n # return signature\n #\n # def compose_url(self,user_params):\n # timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(time.time()))\n # parameters = {\n # 'Format': 'JSON',\n # 'Version': '2016-09-27',\n # 'AccessKeyId': self.access_key_id,\n # 'SignatureVersion': '1.0',\n # 'SignatureMethod': 'HMAC-SHA1',\n # 'SignatureNonce': str(uuid.uuid1()),\n # 'RegionId': 'cn-hangzhou',\n # 'Timestamp': timestamp\n # }\n # for key in user_params.keys():\n # parameters[key] = user_params[key]\n # signature = self.compute_signature(parameters, self.access_key_secret)\n # parameters['Signature'] = signature\n # print parameters\n # url = self.server_address + \"/?\" + urllib.urlencode(parameters)\n # return url\n #\n # def make_request(self,user_params, quiet=False):\n # url = self.compose_url(user_params)\n # request = urllib2.Request(url)\n # try:\n # conn = urllib2.urlopen(request)\n # response = conn.read()\n # except urllib2.HTTPError, e:\n # print(e.read().strip())\n # try:\n # obj = json.loads(response)\n # if quiet:\n # return obj\n # except ValueError, e:\n # raise SystemExit(e)\n # json.dump(obj, sys.stdout, sort_keys=True, indent=2)\n # sys.stdout.write('\\n')\n","repo_name":"superdun/guijia","sub_path":"smsModule.py","file_name":"smsModule.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18798395685","text":"N = int(input())\nnum_list = list()\ndef DecimalToBinary(num, num_list):\n if num >= 1:\n DecimalToBinary(num // 2, num_list)\n num_list.append(num % 2)\n return num_list\n\nnew_list = DecimalToBinary(N, num_list)\nfor i in range(len(new_list)):\n if new_list[i] == 1:\n new_list[i] = 2\nanswer = ''\nfor number in new_list:\n number = str(number)\n answer += number\nprint(answer)","repo_name":"clareyong/atcoder-solutions","sub_path":"abc234c.py","file_name":"abc234c.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22164626539","text":"from app import app\nimport sys\nfrom flask import Flask, render_template, url_for, request\nimport pymysql\n\n\n# Connect to the database\nconnection = pymysql.connect(port=3306,\n host='db',\n user='jakkam',\n password='password',\n db='calculations',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n\n# Recent 10 calculations from db\ndef get_calculations():\n data = ''\n with connection.cursor() as cursor:\n calculations = cursor.execute('SELECT * FROM calculations.executions ORDER BY id DESC LIMIT 10;')\n if calculations > 0:\n data = cursor.fetchall()\n cursor.close()\n return data\n\n\n# main route\n@app.route('/')\ndef main():\n return render_template('simple_calculator.html', results=get_calculations())\n\n\n# calculations route \n@app.route(\"/calculation_result\", methods=['GET', 'POST'])\ndef calculation_result():\n if request.method == 'POST':\n details = request.form\n first_number = int(details['firstNumber'])\n operator = details['operation']\n second_number = int(details['secondNumber'])\n note = ''\n color = 'alert-success'\n\n try:\n if operator == '+':\n result = first_number + second_number\n note = f'{first_number} + {second_number} = {result}'\n elif operator == '-':\n result = first_number - second_number\n note = f'{first_number} - {second_number} = {result}'\n elif operator == 'x':\n result = first_number * second_number\n note = f'{first_number} * {second_number} = {result}'\n elif operator == '/':\n result = first_number / second_number\n note = f'{first_number} / {second_number} = {result}'\n \n with connection.cursor() as cursor:\n sql_query = f\"INSERT INTO calculations.executions(first_num, operator, second_num, result) VALUES ({first_number}, '{operator}', {second_number}, {result});\"\n cursor.execute(sql_query)\n connection.commit()\n cursor.close()\n return render_template('simple_calculator.html', note=note, color=color, results=get_calculations())\n\n except:\n note = sys.exc_info()[0]\n color = 'alert-danger'\n return render_template('simple_calculator.html', note=note, color=color, results=get_calculations())\n\n elif request.method == 'GET':\n return render_template('simple_calculator.html', results=get_calculations())\n","repo_name":"AJakkam38/number-cruncher-app","sub_path":"flask/app/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"1603272600","text":"\n\n\ndef get_sum(target):\n res_list = []\n def dfs(target, path):\n if sum(path) == target and sorted(path) not in res_list:\n res_list.append(sorted(path[:]))\n return\n if sum(path) > target:\n return\n for num in range(1, target+1):\n path.append(num)\n dfs(target, path)\n path.pop()\n print(res_list)\n return len(res_list)\n return dfs(target, [])\n\nprint(get_sum(10))","repo_name":"peachch/peachch-AlgorithmSolutions","sub_path":"dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4832943175","text":"import os\n# Para não exibir as mensagens do TensorFlow\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport sys\nimport tensorflow\nfrom tensorflow import keras\nfrom tensorflow.keras.models import model_from_json\nimport numpy as np\n\nimport sklearn\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Conv1D, AveragePooling1D, Flatten, Reshape, LSTM\nfrom tensorflow.keras.losses import MeanAbsoluteError\n\n# Ler os dados que são passados como parâmetro\nif len(sys.argv) == 2:\n raw_data = str(sys.argv[1])\nelse:\n print('{\"error\":\"There are no data received\"}')\n exit()\n \n\n# Transformar a string em uma matriz\nraw_data = raw_data.split(';')\ndata_rows = []\nfor row in raw_data:\n if row != \"\":\n data_rows.append(row.split(','))\nraw_data = data_rows\n\n# Definir as colunas de entrada que são úteis\nuseful_headers = ['H.A','L.A','H.B','L.B','M.G','L.G','THE','DEL']\n# Definir o número de leituras que o modelo utiliza para fazer a predição\nreadings_number = 100\n\n# Verificar se as colunas necessárias estão presentes entre cabeçalhos\ndata_headers = raw_data[0]\nraw_data = raw_data[1:]\ndef verify_headers(useful_headers, data_headers):\n for header in useful_headers:\n if header not in data_headers: return False\n return True\nif not verify_headers(useful_headers, data_headers):\n print('{\"error\":\"The sent data does not have all the necessary headers with signals names\"}')\n exit()\n \n# Verificar se os dados tem leituras o suficiente dos sinais\nif len(raw_data) < readings_number:\n print('{\"error\":\"The sent data does not have enough observations, it must have at least %i observations for each signal\"}' %readings_number)\n exit()\n\n# Padronizar para os dados tenham o shape para o qual o modelo foi treinado, que é (1,100,16)\n# (1 amostra, 100 medições, 16 canais (8 de entrada e 8 calculados))\n# Selecionar apenas as linhas necessárias\nif len(raw_data) > readings_number:\n raw_data = raw_data[int(len(raw_data)/4):int(len(raw_data)/4)+readings_number]\n# Selecionar apenas as colunas necessárias\ndata = []\nfor row in raw_data:\n data_row = []\n for header in useful_headers:\n data_row += [row[data_headers.index(header)]]\n data += [data_row]\n \n# Para enriquecer a base e disponibilizar mais informações para a modelagem, vamos adicionar colunas que informem as variações dos sinais em relação ao tempo, comparando o registro atual com o anterior\nrich_data = []\nfor row in range(len(data)):\n row_data = []\n for col in range(len(useful_headers)):\n row_data += [data[row][col]]\n for col in range(len(useful_headers)):\n if row == 0:\n row_data += [0]\n else:\n row_data += [int(data[row][col])-int(data[row-1][col])]\n rich_data += [row_data]\n\n# Convertendo os dados enriquecidos para o formato np.array\ndata = np.array([rich_data], dtype=np.int32)\n\n# Identificar o caminho para o diretório dos arquivos dos modelos\ncurrent_directory = os.getcwd().split('/')\nfull_directory = '/home/jonasmarinho/brain/api/predict'.split('/')\ndirectory_path = ''\nfor directory_index in range(len(full_directory)):\n if directory_index >= len(current_directory):\n directory_path += full_directory[directory_index] + '/'\n\n\n# Carregar a arquitetura do modelo classificador\njson_classifier = open(directory_path + 'brain_classifier.json', 'r')\nbrain_classifier = model_from_json(json_classifier.read())\njson_classifier.close()\n# Carregar os pesos do modelo classificador\nbrain_classifier.load_weights(directory_path + 'brain_classifier.h5')\n\n# Carregar a arquitetura do modelo regressor\njson_regressor = open(directory_path + 'brain_regressor.json', 'r')\nbrain_regressor = model_from_json(json_regressor.read())\njson_regressor.close()\n# Carregar os pesos do modelo regressor\nbrain_regressor.load_weights(directory_path + 'brain_regressor.h5')\n\n# Realizar as predições e retornar os valores\nclassifier_prediction = brain_classifier.predict(data)[0][0][1]\nregressor_prediction = brain_regressor.predict(data)[0][0]\nprint('{\"classifier\":%.2f,\"regressor\":%.2f}' %(classifier_prediction, regressor_prediction))\nexit()\n","repo_name":"jonas-marinho/brain","sub_path":"api/predict/brain_predict.py","file_name":"brain_predict.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6102913693","text":"from django import template\nfrom Python import Hashing, init\nimport json\nfrom Register.models import Users\nfrom Articles.models import Posts\nfrom Python.Tags import *\n\nregister = template.Library()\n\n\n# GetNotification\n@register.filter(name='GetNotification')\ndef GetNotification(Notification):\n Result = Hashing.GetAllFromHashing([\n {'Type': 'Notifications', 'Data': Notification.User_Email, 'Key': 'Email'},\n {'Type': '', 'Data': Notification.Type, 'Key': 'Type'},\n {'Type': 'Notifications', 'Data': Notification.Message, 'Key': 'Message'}])\n\n if Result['Result'] == -1:\n return ''\n\n Data = json.loads(Result['Data']['Message'])\n User = Users.objects.filter(Email=Hashing.Hash_Users(Data['Email']), Deleted=0)\n if not User.exists():\n User = ''\n else:\n Hash = Hashing.Get_Hashed_Users(User[0].Name)\n if Hash['Result'] == -1:\n User = ''\n else:\n User = Hash['Data']\n\n Post = Posts.objects.filter(id=Data['PostID'], Deleted=0)\n if not Post.exists():\n Title = ''\n else:\n Hash = Hashing.Get_Hashed_Articles(Post[0].ArticleTitle)\n if Hash['Result'] == -1:\n Title = ''\n else:\n Title = Hash['Data']\n\n User = Span(User, 'Green')\n Title = Span(Title, 'Green')\n if Result['Data']['Type'] == 1:\n Message = 'User : ' + User + ' Liked Your Post : ' + Title\n elif Result['Data']['Type'] == 2:\n Message = 'User : ' + User + ' DisLiked Your Post : ' + Title\n elif Result['Data']['Type'] == 3:\n Message = 'User : ' + User + ' Commented in Your Post : ' + Title\n else:\n Message = 'User : ' + User + ' Added New Tag in Your Post : ' + Title\n\n return Div(P(Message))\n\n\n@register.filter(name='CheckNotifications')\ndef CheckNotifications(Notifications):\n return '' if len(Notifications) else Div(P('No Notifications'))\n\n\n@register.filter(name='GetPicture')\ndef GetPicture(Notifications):\n for Notification in Notifications:\n if Notification.See == 0:\n return init.Notification\n return init.NoNotification\n\n\n@register.filter(name='GetNotificationsNumber')\ndef GetNotificationsNumber(Notifications):\n return len(Notifications)\n\n\n#################################################################################\n@register.filter(name='GetTheWholeNotification')\ndef GetTheWholeNotification(Notification):\n Result = Hashing.GetAllFromHashing([\n {'Type': 'Notifications', 'Data': Notification.User_Email, 'Key': 'Email'},\n {'Type': '', 'Data': Notification.Type, 'Key': 'Type'},\n {'Type': 'Notifications', 'Data': Notification.Message, 'Key': 'Message'},\n {'Type': 'Date', 'Data': Notification.Date, 'Key': 'Date'}])\n\n if Result['Result'] == -1:\n return ''\n\n Data = json.loads(Result['Data']['Message'])\n Comment = Data['Comment'] if 'Comment' in Data else ''\n User = Users.objects.filter(Email=Hashing.Hash_Users(Data['Email']), Deleted=0, Activate=1)\n if not User.exists():\n UserName = ''\n UserPicture = init.OfflineUser\n UserID = ''\n else:\n Hash = Hashing.GetAllFromHashing([\n {'Type': 'Users', 'Data': User[0].Name, 'Key': 'Name'},\n {'Type': '', 'Data': User[0].id, 'Key': 'ID'},\n {'Type': 'Users', 'Data': User[0].Picture, 'Key': 'Picture'}])\n\n if Hash['Result'] == -1:\n UserName = ''\n UserPicture = init.OfflineUser\n UserID = ''\n else:\n UserName = Hash['Data']['Name']\n UserPicture = Hash['Data']['Picture']\n UserID = Hash['Data']['ID']\n\n Post = Posts.objects.filter(id=Data['PostID'], Deleted=0)\n if not Post.exists():\n Title = ''\n else:\n Hash = Hashing.Get_Hashed_Articles(Post[0].ArticleTitle)\n if Hash['Result'] == -1:\n Title = ''\n else:\n Title = Hash['Data']\n\n if Notification.See == 0:\n Class = 'Notification DidNotSeeNotification'\n else:\n Class = 'Notification'\n\n from Register.models import Notifications\n Notifications.objects.filter(id=Notification.id).update(See=1)\n\n Title = Strong(Title)\n if Result['Data']['Type'] == 1:\n Message = 'This User Liked Your Post : ' + Title\n elif Result['Data']['Type'] == 2:\n Message = 'This User DisLiked Your Post : ' + Title\n elif Result['Data']['Type'] == 3:\n Message = 'This User Commented in Your Post : ' + Title\n else:\n Message = 'This User Added New Tag To Your Post : ' + Title\n\n return Div(Div(A(init.User+str(UserID), InputImage(UserPicture)) +\n Div(P(Strong('By : ') + UserName) +\n P(Strong('Date : ')+Result['Data']['Date']))) +\n Div(P(Message) +\n A(GetLink(Data['PostID'], Result['Data']['Type'], Comment),\n 'The Link For Article')\n ), Class)\n\n\ndef GetLink(ID, Type, Comment):\n return init.Article + str(ID) + ('#Comment'+str(Comment) if Type == 3 or Type == 4 else '')\n\n\n@register.filter(name='CheckNotificationsNumber')\ndef CheckNotificationsNumber(Notifications):\n if len(Notifications) < 7:\n return ''\n return Div(Input('button', 'Show More Notifications', '', '',\n 'GetMoreNotifications();'), 'Show_More_Div')\n","repo_name":"Hady-Eslam/Articles_Analyzing","sub_path":"Register/templatetags/NotificationsFilters.py","file_name":"NotificationsFilters.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"538091780","text":"from datetime import date\nfrom unittest import TestCase\n\nfrom hypothesis import given, strategies\n\nfrom flask_profiler import calendar\n\n\nclass CalendarDaysTests(TestCase):\n def setUp(self) -> None:\n super().setUp()\n self.calendar = calendar.Calendar()\n\n def test_that_the_correct_number_of_days_is_caluclated_between_two_days(\n self,\n ) -> None:\n samples = [\n (date(2000, 1, 1), date(1999, 12, 31), 0),\n (date(2000, 1, 1), date(2000, 1, 2), 1),\n (date(2000, 1, 1), date(2000, 1, 3), 2),\n ]\n for since, until, expected_days in samples:\n with self.subTest():\n assert (\n len(self.calendar.day_interval(since=since, until=until))\n == expected_days\n )\n\n @given(since=strategies.dates())\n def test_that_the_start_date_is_included_in_the_interval(self, since: date) -> None:\n assert since in self.calendar.day_interval(since=since, until=date.max)\n\n @given(until=strategies.dates())\n def test_that_the_end_date_is_not_included_in_the_interval(\n self, until: date\n ) -> None:\n assert until not in self.calendar.day_interval(since=date.min, until=until)\n\n @given(\n since=strategies.dates(), until=strategies.dates(), element=strategies.dates()\n )\n def test_date_considered_in_interval_if_greater_or_equal_then_since_and_lower_then_until(\n self, since: date, until: date, element: date\n ) -> None:\n interval = self.calendar.day_interval(since=since, until=until)\n assert (element in interval) == (since <= element and element < until)\n\n def test_that_2nd_of_jan_is_included_in_interval_from_1st_to_3rd_of_jan(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 1), until=date(2000, 1, 3)\n )\n assert date(2000, 1, 2) in list(interval)\n\n def test_that_7th_of_jan_is_included_in_interval_from_6th_to_9th_of_jan(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 6), until=date(2000, 1, 9)\n )\n assert date(2000, 1, 7) in list(interval)\n\n def test_that_7th_of_jan_is_included_in_interval_from_5th_to_9th_of_jan(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 5), until=date(2000, 1, 9)\n )\n assert date(2000, 1, 7) in list(interval)\n\n def test_that_8th_of_jan_is_included_in_interval_from_1st_to_9th_of_jan(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 1), until=date(2000, 1, 9)\n )\n assert date(2000, 1, 8) in list(interval)\n\n def test_that_8th_of_jan_is_not_included_in_interval_from_1st_to_8th_of_jan(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 1), until=date(2000, 1, 8)\n )\n assert date(2000, 1, 8) not in list(interval)\n\n def test_that_8th_of_jan_2000_is_included_in_interval_from_min_to_max(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date.min,\n until=date.max,\n )\n assert date(2000, 1, 8) in list(interval)\n\n def test_that_8th_of_oct_is_included_in_interval_from_1st_of_jan_to_9th_of_oct(\n self,\n ) -> None:\n interval = self.calendar.day_interval(\n since=date(2000, 1, 1), until=date(2000, 10, 9)\n )\n assert date(2000, 10, 8) in list(interval)\n","repo_name":"seppeljordan/flask-profiler","sub_path":"tests/test_calendar.py","file_name":"test_calendar.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5860665834","text":"\"\"\"A partially retroactive priority queue.\n\nThis queue can be used independently, but it is intended to be\nnested within a fully retroactive priority queue.\n\nBased on:\n Erik D. Demaine, John Iacono, and Stefan Langerman, \"Retroactive Data\n Structures\", ACM Transactions on Algorithms, volume 3, number 2, May 2007,\n Article 13. (URL: https://erikdemaine.org/papers/Retroactive_TALG/)\n\"\"\"\nfrom typing import TypeVar, Generic, Union, Tuple, Optional, Generator\nfrom retroactive_pq.range_scapegoat import SumRangeTree\nfrom retroactive_pq.wb_btree import WBBTree\nfrom retroactive_pq.insert_tree import InsertTree\nfrom retroactive_pq.prefix_sum_tree import PrefixSumTree\n\nTS = float\nTS_ZERO = 0.0\nTS_EPSILON = 1\nV = TypeVar('V')\n\nINSERT = 'insert'\nDELETE_MIN = 'delete-min'\n\nEvent = str\nInsertEvent = Tuple[Event, V]\nDeleteMinEvent = Event\nEventType = Union[InsertEvent, DeleteMinEvent]\n\n\nclass PRPriorityQueue(Generic[V]):\n def __init__(self):\n self.t_max = TS_ZERO\n self.t_next = TS_ZERO\n self.now: WBBTree[V, TS] = WBBTree() # Qnow\n self.deleted: WBBTree[V, TS] = WBBTree() # Qdel\n self.events: WBBTree[TS, EventType] = WBBTree()\n self.inserts: InsertTree[TS, V] = InsertTree()\n self.updates: PrefixSumTree[TS, int] = PrefixSumTree(0)\n\n def insert(self, val: V, t: Optional[TS] = None) -> None:\n \"\"\"Inserts a value in the priority queue at time `t`.\n\n If `t` is not specified, the value is inserted strictly\n after the latest event time in the queue.\"\"\"\n if t is None:\n self.t_next += TS_EPSILON\n t = self.t_next\n elif t <= TS_ZERO:\n raise ValueError(f'timestamp must be > {TS_ZERO}.')\n self.events.insert(t, (INSERT, val))\n\n bridge = self.updates.last_node_with_sum(t, 0)\n if bridge is None:\n t_bridge = TS_ZERO\n else:\n t_bridge = bridge.min\n absent_val, absent_t = self.inserts.max_absent_in_range(\n t_bridge, self.t_max)\n if val == absent_val:\n raise ValueError(f'Value {val} not unique.')\n if absent_val is None or val > absent_val:\n self.now.insert(val, t)\n self.inserts.insert(t, val)\n self.updates.insert(t, 0)\n self.inserts.mark_present(t)\n else:\n self.now.insert(absent_val, absent_t)\n self.inserts.insert(t, val)\n self.inserts.mark_absent(t)\n self.inserts.mark_present(absent_t)\n self.updates.insert(t, 1)\n self.deleted.insert(val, t)\n self.t_max = max(t, self.t_max)\n\n def delete_min(self, t: Optional[TS] = None) -> None:\n \"\"\"Inserts a delete-min operation at time `t`.\"\"\"\n if t is None:\n self.t_next += TS_EPSILON\n t = self.t_next\n elif t <= TS_ZERO:\n raise ValueError(f'timestamp must be > {TS_ZERO}.')\n\n bridge = self.updates.first_node_with_sum(t, 0)\n if bridge is None:\n t_bridge = self.t_max\n else:\n t_bridge = bridge.min\n present_val, present_t = self.inserts.min_present_in_range(\n TS_ZERO, t_bridge)\n self.events.insert(t, DELETE_MIN)\n self.updates.insert(t, -1)\n if present_t is not None:\n self.updates.remove(present_t)\n self.updates.insert(present_t, 1)\n self.now.remove(present_val)\n self.inserts.mark_absent(present_t)\n self.deleted.insert(present_val, present_t)\n\n def delete_op(self, t: TS) -> None:\n \"\"\"Deletes the operation at time `t` from the queue.\n\n If no event exists at time `t`, a `ValueError` is raised.\n \"\"\"\n event = self.events.find(t)\n if event is None:\n raise ValueError(f'No event at time {t}.')\n if event == DELETE_MIN:\n self._delete_delete_min(t)\n else:\n self._delete_insert(t)\n max_event = self.events.max()\n if max_event is None:\n self.t_max = TS_ZERO\n else:\n self.t_max = max_event[0]\n\n def _delete_delete_min(self, t: TS) -> None:\n \"\"\"Deletes a delete-min operation at time `t`.\"\"\"\n bridge = self.updates.last_node_with_sum(t, 0)\n if bridge is None:\n t_bridge = TS_ZERO\n else:\n t_bridge = bridge.min\n absent_val, absent_t = self.inserts.max_absent_in_range(\n t_bridge, self.t_max)\n self.events.remove(t)\n self.updates.remove(t)\n self.now.insert(absent_val, absent_t)\n self.inserts.mark_present(absent_t)\n self.updates.remove(absent_t)\n self.updates.insert(absent_t, 0)\n self.deleted.remove(absent_val)\n\n def _delete_insert(self, t: TS) -> None:\n \"\"\"Deletes an insert operation at time `t`.\"\"\"\n val = self.inserts.find(t)\n self.events.remove(t)\n if self.now.find(val):\n # Case: The element to delete is still in Qnow.\n self.now.remove(val)\n self.inserts.remove(t)\n self.updates.remove(t)\n self.deleted.insert(val, t)\n else:\n # Case: The element to delete is now longer in Qnow.\n bridge = self.updates.first_node_with_sum(t, 0)\n if bridge is None:\n t_bridge = self.t_max\n else:\n t_bridge = bridge.min\n present_val, present_t = self.inserts.min_present_in_range(\n TS_ZERO, t_bridge)\n self.now.remove(present_val)\n self.inserts.remove(present_t)\n self.updates.remove(present_t)\n self.deleted.insert(present_val, present_t)\n\n def all(self) -> Generator[V, None, None]:\n \"\"\"Generates all the elements currently in the queue.\"\"\"\n yield from (v for v, _ in self.now.all())\n\n def __repr__(self):\n status = 'Qnow: ' + ' '.join([str(k) for k, _ in self.now.all()])\n status += '\\nevents:\\n'\n for t, event in self.events.all():\n if event == DELETE_MIN:\n status += f'{t}: delete min\\n'\n else:\n status += f'{t}: insert {event[1]}\\n'\n return status\n","repo_name":"6851-2021/retroactive-pq","sub_path":"retroactive_pq/partial_pq.py","file_name":"partial_pq.py","file_ext":"py","file_size_in_byte":6210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36001854813","text":"#https://www.acmicpc.net/problem/10165\n#백준-10165-버스 노선\n\nimport sys, heapq\n\ninput = sys.stdin.readline\n\nn = int(input())\nm = int(input())\nroutes = []\nfor i in range(m):\n a, b = map(int, input().split())\n if a < b:\n heapq.heappush(routes, (a, b, i))\n heapq.heappush(routes, (a+n, b+n, i))\n else:\n heapq.heappush(routes, (a, b+n, i))\n \nis_included = [False] * m\na1, b1, i1 = heapq.heappop(routes)\nwhile routes:\n a2, b2, i2 = heapq.heappop(routes)\n if a1 == a2:\n is_included[i1] = True\n b1, i1 = b2, i2\n elif b1 >= b2:\n is_included[i2] = True\n else:\n a1, b1, i1 = a2, b2, i2\n \nprint(' '.join(map(str, [i+1 for i in range(len(is_included)) if is_included[i] == False])))","repo_name":"DeveloperAcademy-POSTECH/Algorithm","sub_path":"TEAM B - Afternoon, TEAM B1 (Season 1, 2)/Week 7/Benny/[백준-10165]버스 노선.py","file_name":"[백준-10165]버스 노선.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"3275796328","text":"#!/usr/bin/python2\n\"\"\"\nProject\t \t: SIM800 test script \nDate&Time\t: 08th August 2019.\nDescription\t: This module consists all API's nececeary for testing SIMcom SIM800H module\n\t\thttp://simcomm2m.com/En/module/detail.aspx?id=75\n\"\"\"\nimport serial\nimport logging\nimport time, sys, codecs\n\n\nclass SIM800H:\n def __init__(\n self,\n portName=\"\",\n baudRate=115200,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n timeout=5,\n ):\n self.portName = portName\n self.baudRate = baudRate\n self.bytesize = bytesize\n self.parity = parity\n self.stopbits = stopbits\n self.timeout = timeout\n\n def openComPort(self):\n try:\n self.ser = serial.Serial(\n self.portName,\n self.baudRate,\n timeout=self.timeout,\n bytesize=self.bytesize,\n parity=self.parity,\n stopbits=self.stopbits,\n )\n time.sleep(0.5)\n except:\n logging.error(\"Couldn't open desired tty port: \" + self.portName)\n sys.exit()\n\n def closeComPort(self):\n try:\n self.ser.close()\n except:\n logging.error(\"Couldn't close tty port\")\n sys.exit()\n\n def sendAtCommand(self, command):\n self.command = command\n try:\n self.ser.write(command + \"\\r\")\n received = self.ser.read(20)\n logging.debug(received)\n if \"ERROR\" in received:\n return False\n return received\n except:\n print(\"Couldn't write on \" + self.portName)\n return False\n\n def checkCommunication(self):\n if not self.sendAtCommand(\"AT\"):\n return False\n return True\n\n def sendSms(self):\n try:\n number = raw_input(\"To >> \")\n except Exception as e:\n logging.error(\"Error: \" + str(e))\n return False\n try:\n message = raw_input(\"Insert Message >> \")\n except Exception as e:\n logging.error(\"Error: \" + str(e))\n return False\n\n print(\"\\n\\r...sending SMS\")\n if not self.sendAtCommand(\"AT+CMGF=1\"):\n logging.error(\"To send AT command: AT+CMGF=1\")\n return False\n if not self.sendAtCommand('AT+CMGS=\"' + number + '\"'):\n logging.error(\"To send AT command: AT+CMGS=\")\n return False\n if not self.sendAtCommand(message):\n logging.error(\"To send AT command: message content\")\n return False\n\n if not self.sendAtCommand(\"1A\".decode(\"hex\")):\n logging.error(\"To send AT command: Ctrl+Z\")\n return False\n\n return True\n\n def call(self):\n try:\n number = raw_input(\"Insert Number >> \")\n except Exception as e:\n logging.error(str(e))\n return False\n\n print(\"\\n\\r...processing call\")\n if not self.sendAtCommand(\"ATD\" + number + \";\"):\n logging.error(\"To send AT command: ATD\")\n return False\n if not self.sendAtCommand(\"ATL9\"):\n logging.error(\"To send AT command: ATL\")\n return False\n if not self.sendAtCommand(\"ATM9\"):\n logging.error(\"To send AT command: ATM\")\n return False\n\n try:\n number = raw_input(\"Call established press ENTER if want to END call >> \")\n except Exception as e:\n logging.error(str(e))\n return False\n\n if not self.sendAtCommand(\"ATH\"):\n logging.error(\"To send AT command: ATH\")\n return False\n\n return True\n","repo_name":"srdjanStankovic/SIM800-Test-Script","sub_path":"sim800h_api.py","file_name":"sim800h_api.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25338382690","text":"\nfrom tsstate_building.classes import StateBuilder, EveryMixin, SessionMixin, OnceMixin\n\nclass SessionStartTimeStateBuilder(SessionMixin, StateBuilder):\n \"\"\" just remember the start Time of this session \"\"\"\n def __init__(self):\n StateBuilder.__init__(self,\n name = \"SessionStartTimeStateBuilder\",\n dep = [],\n inkeys = ['LogTime'],\n outkeys = ['session__SessionStartTime'])\n def __call__(self, newInput, oldState, newState, newSession = False, reset = False):\n if reset or newSession:\n if 'SessionStartTime' in oldState.data.session:\n newState.data.prev['SessionStartTime'] = oldState.data.session['SessionStartTime']\n starttime = RetrieveValue(newInput, 'LogTime', None, 'datetime')\n newState.data.session['SessionStartTime'] = starttime\n","repo_name":"mzoll/slearn","sub_path":"slearn/extra/click_stream/state_builders/sessionstart.py","file_name":"sessionstart.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15871788491","text":"try:\r\n def encode(msg):\r\n encoded_msg= \"\"\r\n for char in msg: #loop checks if vowel, and replaces with adjacent value\r\n if char ==\"a\":\r\n encoded_msg=encoded_msg+\"1\"\r\n elif char ==\"e\":\r\n encoded_msg=encoded_msg+\"2\"\r\n elif char ==\"i\":\r\n encoded_msg=encoded_msg+\"3\"\r\n elif char ==\"o\":\r\n encoded_msg=encoded_msg+\"4\"\r\n elif char ==\"u\":\r\n encoded_msg=encoded_msg+\"5\"\r\n else:\r\n encoded_msg+=char #if not vowel, simply add\r\n return encoded_msg\r\n\r\n def decode(msg):\r\n decoded_msg= \"\"\r\n for char in msg: #loop checks if vowel, and replaces with adjacent value\r\n if char ==\"1\":\r\n decoded_msg=decoded_msg+\"a\"\r\n elif char ==\"2\":\r\n decoded_msg=decoded_msg+\"e\"\r\n elif char ==\"3\":\r\n decoded_msg=decoded_msg+\"i\"\r\n elif char ==\"4\":\r\n decoded_msg=decoded_msg+\"o\"\r\n elif char ==\"5\":\r\n decoded_msg=decoded_msg+\"u\"\r\n else:\r\n decoded_msg+=char #if not vowel, simply add\r\n return decoded_msg\r\n\r\n if __name__==\"__main__\":\r\n msg=input(\"Enter your message: \")\r\n c= int(input(\"Select your choice: \\nPress 1 to encode and Press 2 to decode : \"))\r\n if c == 1:\r\n a= encode(msg)\r\n print(\"Your Encoded message is :\", a)\r\n if c== 2:\r\n b= decode(msg)\r\n print(\"Your Decoded message is :\", b)\r\n\r\n print(\"\\nTestCase for encode 'Hello'\")\r\n print(encode(\"hello\"))\r\n print(\"\\nTestCase for decode 'h2ll4'\")\r\n print(decode(\"h2ll4\"))\r\n \r\n\r\n \r\nexcept:\r\n print(\"Please Enter the input correclty and select you choice as 1 and 2 only\")","repo_name":"AlkeshKothar/SKillEdge-Coding-Battle","sub_path":"Coding Battle 8/CB8_SkyFlame/CB8_Q4_SkyFlame.py","file_name":"CB8_Q4_SkyFlame.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72956173815","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom tqdm import tqdm\nimport re\n\n# Use tqdm to show progress of an pandas function we use\ntqdm.pandas()\n\nfrom gensim.models import KeyedVectors as kv\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nembedding_path_dict= {'googlenews':{\n 'path':'../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin',\n 'format':'word2vec',\n 'binary': True\n },\n 'glove':{\n 'path':'../input/embeddings/glove.840B.300d/glove.840B.300d.txt',\n 'format': 'glove',\n 'binary': ''\n },\n 'glove_word2vec':{\n 'path':'../input/glove.840B.300d.txt.word2vec',\n 'format': 'word2vec',\n 'binary': False\n },\n 'wiki':{\n 'path': '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec',\n 'format': 'word2vec',\n 'binary': False\n },\n 'paragram':{\n 'path': '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt',\n 'format': '',\n 'binary': False\n }\n }\n\n\n\n\ntrain=pd.read_csv(\"../input/train.csv\")\ntest= pd.read_csv(\"../input/test.csv\")\nprint(\"Train shape:\", train.shape)\nprint(\"Test shape:\", test.shape)\n\n\n\n\ntrain.head()\n\n\n\n\ntrain = train.loc[train.question_text.str.len()>100]\n\n\n\n\nlen(train.loc[train['target']==0])\n\n\n\n\nnum_pos= len(train.loc[train['target']==1])\nprint(num_pos)\n\n\n\n\nlen(train['target'])\n\n\n\n\n# Get word embeddings\ndef get_embeddings(embedding_path_dict, emb_name):\n \"\"\"\n :params embedding_path_dict: a dictionary containing the path, binary flag, and format of the desired embedding,\n emb_name: the name of the embedding to retrieve\n :return embedding index: a dictionary containing the embeddings\"\"\"\n \n def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')\n \n embeddings_index = {}\n if (emb_name == 'googlenews'):\n emb_path = embedding_path_dict[emb_name]['path']\n bin_flag = embedding_path_dict[emb_name]['binary']\n embeddings_index = kv.load_word2vec_format(emb_path, binary=bin_flag).vectors\n elif (emb_name in ['glove', 'wiki']):\n embeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(embedding_path_dict[emb_name]['path']) if len(o)>100) \n elif (emb_name == 'paragram'):\n embeddings_index = dict(get_coefs(*o.split(\" \")) for o in open(embedding_path_dict[emb_name]['path'], encoding=\"utf8\", errors='ignore'))\n return embeddings_index\n\n#Convert GLoVe format into word2vec format\ndef glove_to_word2vec(embedding_path_dict, emb_name='glove', output_emb='glove_word2vec'):\n \"\"\"\n Convert the GLOVE embedding format to a word2vec format\n :params embedding_path_dict: a dictionary containing the path, binary flag, and format of the desired embedding,\n glove_path: the name of the GLOVE embedding\n output_file_path: the name of the converted embedding in embedding_path_dict. \n :return output from the glove2word2vec script\n \"\"\"\n glove_input_file = embedding_path_dict[emb_name]['path']\n word2vec_output_file = embedding_path_dict[output_emb]['path'] \n return glove2word2vec(glove_input_file, word2vec_output_file)\n\n\n\n\n# Get stats of a given embeddings index\ndef get_emb_stats(embeddings_index):\n\n # Put all embeddings in a numpy matrix\n all_embs= np.stack(embeddings_index.values())\n\n # Get embedding stats\n emb_mean = all_embs.mean()\n emb_std = all_embs.std()\n \n num_embs = all_embs.shape[0]\n \n emb_size = all_embs.shape[1]\n \n return emb_mean,emb_std, num_embs, emb_size \n\n\n\n\n# Converts sentences into lists of tokens\n# We use this function to allow more control over what constitutes a word\n# It also allows us to explore ways to cover more the pre-defined word embeddings.\n\ndef tokenize(sentences, restrict_to_len=-1):\n \"\"\"\n :params sentence_list: list of strings\n :returns tok_sentences: list of list of tokens\n \"\"\"\n \n if restrict_to_len>0:\n tok_sentences = [re.findall(r\"[\\w]+[']*[\\w]+|[\\w]+|[.,!?;]\", x ) for x in sentences if len(x)>restrict_to_len]\n else:\n tok_sentences = [re.findall(r\"[\\w]+[']*[\\w]+|[\\w]+|[.,!?;]\", x ) for x in sentences] \n return tok_sentences\n\n#Build the vocabulary given a list of sentence words\ndef get_vocab(sentences, verbose= True):\n \"\"\"\n :param sentences: a list of list of words\n :return: a dictionary of words and their frequency \n \"\"\"\n vocab={}\n for sentence in tqdm(sentences, disable = (not verbose)):\n for word in sentence:\n try:\n vocab[word] +=1\n except KeyError:\n vocab[word] = 1\n return vocab\n\ndef repl(m):\n return '#' * len(m.group())\n\n#Convert numerals to a # sign\ndef convert_num_to_pound(sentences):\n return sentences.progress_apply(lambda x: re.sub(\"[1-9][\\d]+\", repl, x)).values\n\n\n\n\n\n#find words in common between a given embedding and our vocabulary\ndef compare_vocab_and_embeddings(vocab, embeddings_index):\n \"\"\"\n :params vocab: our corpus vocabulary (a dictionary of word frquencies)\n embeddings_index: a genim object containing loaded embeddings.\n :returns in_common: words in common,\n in_common_freq: total frequency in the corpus vocabulary of \n all words in common\n oov: out of vocabulary words\n oov_frequency: total frequency in vocab of oov words\n \"\"\"\n in_common={}\n oov=[]\n in_common=[]\n in_common_freq = 0\n oov_freq = 0\n \n # Compose the vocabulary given the sentence tokens\n vocab = get_vocab(sentences)\n\n for word in tqdm(vocab):\n if word in embeddings_index:\n in_common.append(word)\n in_common_freq += vocab[word]\n else: \n oov.append(word)\n oov_freq += vocab[word]\n \n print('Found embeddings for {:.2%} of vocab'.format(len(in_common) / len(vocab)))\n print('Found embeddings for {:.2%} of all text'.format(in_common_freq / (in_common_freq + oov_freq)))\n\n return sorted(in_common)[::-1], sorted(oov)[::-1], in_common_freq, oov_freq, vocab\n\n# print the list of out-of-vocabulary words sorted by their frequency in teh training text\ndef show_oov_words(oov, vocab, num_to_show=15):\n # Sort oov words by their frequency in the text\n sorted_oov= sorted(oov, key =lambda x: vocab[x], reverse=True )\n\n # Show oov words and their frequencies\n if (len(sorted_oov)>0):\n print(\"oov words:\")\n for word in sorted_oov[:num_to_show]:\n print(\"%s\\t%s\"%(word, vocab[word]))\n else:\n print(\"No words were out of vocabulary.\")\n \n return len(sorted_oov);\n\n\n\n\nembedding_name = 'glove'\nembeddings_index= get_embeddings(embedding_path_dict, embedding_name)\nimport gc; gc.collect()\n\n\n\n\n# Get embedding stats\nemb_mean,emb_std, num_embs, emb_size = get_emb_stats(embeddings_index)\nprint(\"mean: %5.5f\\nstd: %5.5f\\nnumber of embeddings: %d\\nembedding vector size:%d\" %(emb_mean,emb_std, num_embs, emb_size))\n\n\n\n\nquestion_text = train[\"question_text\"]\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(question_text)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\ncontr_dict={\"I\\'m\": \"I am\",\n \"won\\'t\": \"will not\",\n \"\\'s\" : \"\", \n \"\\'ll\":\"will\",\n \"\\'ve\":\"have\",\n \"n\\'t\":\"not\",\n \"\\'re\": \"are\",\n \"\\'d\": \"would\",\n \"y'all\": \"all of you\"}\n\ndef replace_contractions(sentences, contr_dict=contr_dict):\n res_sentences=[]\n for sent in sentences:\n for contr in contr_dict:\n sent = sent.replace(contr, \" \"+contr_dict[contr])\n res_sentences.append(sent)\n return res_sentences\n\n\n\n\n# start by replacing contractions\nsentences = replace_contractions(question_text)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nprint(\"Is 'Quora' in the wiki embeddings index?\",'Quora' in embeddings_index)\nprint(\"Is 'quora' in the wiki embeddings index?\",'quora' in embeddings_index)\n\n\n\n\nw_quoran_contr_dict={\"I\\'m\": \"I am\",\n \"won\\'t\": \"will not\",\n \"\\'s\" : \"\", \n \"\\'ll\":\"will\",\n \"\\'ve\":\"have\",\n \"n\\'t\":\"not\",\n \"\\'re\": \"are\",\n \"\\'d\": \"would\",\n \"y'all\": \"all of you\",\n \"Quoran\": \"Quora contributor\",\n \"quoran\": \"quora contributor\"\n }\n\n\n\n\n# replace contractions using a contr dict containing replacement for Quoran\nsentences = replace_contractions(question_text, contr_dict = w_quoran_contr_dict)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nprint(\"0 in embedding index?\", ('0' in embeddings_index))\nprint(\"Other digits?\", ('1' in embeddings_index) and ('2' in embeddings_index))\n\n\n\n\nimport re\n\ndef convert_height(sentences):\n res_sentences = []\n for sent in sentences:\n res_sent = re.sub( \"(\\d+)\\'(\\d+)\", \"\\1 foot \\2\", sent)\n res_sentences.append(res_sent)\n return res_sentences\n\n\n\n\n# start by converting heights such as 5'4 to longer format 5 foot 4\nsentences = convert_height(question_text)\n\n# replace contractions\nsentences = replace_contractions(sentences)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nembedding_name = 'paragram'\nembeddings_index= get_embeddings(embedding_path_dict, embedding_name)\nimport gc; gc.collect()\n\n\n\n\n# Get embedding stats\nemb_mean,emb_std, num_embs, emb_size = get_emb_stats(embeddings_index)\nprint(\"mean: %5.5f\\nstd: %5.5f\\nnumber of embeddings: %d\\nembedding vector size:%d\" %(emb_mean,emb_std, num_embs, emb_size))\n\n\n\n\nquestion_text = train[\"question_text\"]\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(question_text)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\ndef convert_to_lower(sentences):\n res_sentences = []\n for sent in sentences:\n lower_sent = sent.lower()\n res_sentences.append(lower_sent)\n return res_sentences\n\n\n\n\n# convert capitals to lowercase\nsentences = convert_to_lower(question_text)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\n# start by converting capitals to lowercase\nsentences = convert_to_lower(question_text)\n\n# replace contractions\nsentences = replace_contractions(sentences)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\n# start by replacing heights such as 5'4 to a longer format (5 foot 4)\nsentences = convert_height(question_text)\n\n# convert capitals to lowercase\nsentences = convert_to_lower(sentences)\n\n# replace contractions\nsentences = replace_contractions(sentences)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nembedding_name = 'wiki'\nembeddings_index= get_embeddings(embedding_path_dict, embedding_name)\nimport gc; gc.collect()\n\n\n\n\n# Get embedding stats\nemb_mean,emb_std, num_embs, emb_size = get_emb_stats(embeddings_index)\nprint(\"mean: %5.5f\\nstd: %5.5f\\nnumber of embeddings: %d\\nembedding vector size:%d\" %(emb_mean,emb_std, num_embs, emb_size))\n\n\n\n\nquestion_text = train[\"question_text\"]\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(question_text)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\n# start by replacing contractions\nsentences = replace_contractions(question_text)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nprint(\"Is 'Quora' in the wiki embeddings index?\",'Quora' in embeddings_index)\nprint(\"Is 'quora' in the wiki embeddings index?\",'quora' in embeddings_index)\n\n\n\n\n# start by replacing contractions using the contractions dict containing replacements for Quoran\nsentences = replace_contractions(question_text, contr_dict = w_quoran_contr_dict)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n\n\n\nprint(\"0 in embedding index?\", ('0' in embeddings_index))\nprint(\"Other digits?\", ('1' in embeddings_index) and ('2' in embeddings_index))\n\n\n\n\n# start by converting height to longer form\nsentences = convert_height(question_text)\n\n# replace contractions\nsentences = replace_contractions(sentences, contr_dict = w_quoran_contr_dict)\n\n# Get a list of token for each question text\n# restrict_to_len is approximately the mean sentence length+ 0.5std\nsentences = tokenize(sentences)\n\n\n\n\n# Does our tokenization method produce a good match with \n# the words in the selected embedding type?\n\n# Get words in common and out of vocabulary words\nin_common, oov, in_common_freq, oov_freq, vocab = compare_vocab_and_embeddings(sentences, embeddings_index)\n\n# Print a sorted list of the oov words\nshow_oov_words(oov, vocab)\n\n","repo_name":"aorursy/lost-nb","sub_path":"alhalimi_tokenization-and-word-embedding-compatibility.py","file_name":"alhalimi_tokenization-and-word-embedding-compatibility.py","file_ext":"py","file_size_in_byte":17857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7156740108","text":"# coding=utf-8\n\n'''\n\tSimple code for IR sensor.\n\tUscita:\n\t- livello logico alto se non c'è nessun rilevamento\n\t- livello logico basso se c'è un rilevamento\n\tLa distanza può essere regolata tramite il trimmer, \n\truotando in senso orario la distanza aumenta; \n\truotando in senso antiorario la distanza di rilevamento diminuisce.\n\t\n\tDa esperimenti si è osservato che la distanza massima rilevata è:\n\t- 1.5 / 2 cm da un oggetto nero\n\t- 6 / 6.5 cm da un oggetto bianco\n''' \n\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\nIR_PIN = 37 \nGPIO.setup(IR_PIN, GPIO.IN)\n\ndef main() :\t\n\twhile True :\n\t\tbooleanValue = GPIO.input(IR_PIN)\n\t\tif booleanValue :\n\t\t\tprint(\"Nessun rilevamento!\")\n\t\telse :\n\t\t\tprint(\"Qualcosa è stato rilevato!\")\n\t\ttime.sleep(0.5)\n\tGPIO.cleanup()\n\n# main function\nmain()\n \n \n","repo_name":"fGuarina/roboticsProject","sub_path":"Debug/IR_sensor.py","file_name":"IR_sensor.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12541603634","text":"def solve():\n number_of_records = int(input())\n\n records = []\n after_launch = set()\n before_launch = set()\n for _ in range(number_of_records):\n type_, id = input().split()\n records.append((type_, id))\n\n if type_ == \"+\":\n after_launch.add(id)\n if type_ == \"-\":\n if id not in after_launch:\n before_launch.add(id)\n\n min_capacity = len(before_launch)\n proceedings = set()\n for type_, id in records:\n if type_ == \"+\":\n proceedings.add(id)\n if type_ == \"-\":\n if id in before_launch:\n before_launch.remove(id)\n else:\n proceedings.remove(id)\n\n min_capacity = max(min_capacity, len(proceedings) + len(before_launch))\n\n print(min_capacity)\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"ffekirnew/a2sv-contests","sub_path":"camp-ii-contest-2/B_Berland_National_Library.py","file_name":"B_Berland_National_Library.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42137123514","text":"from __future__ import unicode_literals\n\nimport os\nimport re\nimport hashlib\nimport base64\n\nfrom PyQt5.QtCore import pyqtSignal, Qt, QObject, QByteArray, QDateTime, \\\n QUrl, QCryptographicHash, QFile, QIODevice, QTextStream, QDate, QTime, \\\n qVersion\nfrom PyQt5.QtNetwork import QNetworkReply\n\nfrom E5Gui import E5MessageBox\n\nimport Utilities\nimport Preferences\n\n\nclass AdBlockSubscription(QObject):\n \"\"\"\n Class implementing the AdBlock subscription.\n \n @signal changed() emitted after the subscription has changed\n @signal rulesChanged() emitted after the subscription's rules have changed\n @signal enabledChanged(bool) emitted after the enabled state was changed\n \"\"\"\n changed = pyqtSignal()\n rulesChanged = pyqtSignal()\n enabledChanged = pyqtSignal(bool)\n \n def __init__(self, url, custom, parent=None, default=False):\n \"\"\"\n Constructor\n \n @param url AdBlock URL for the subscription (QUrl)\n @param custom flag indicating a custom subscription (boolean)\n @param parent reference to the parent object (QObject)\n @param default flag indicating a default subscription (boolean)\n \"\"\"\n super(AdBlockSubscription, self).__init__(parent)\n \n self.__custom = custom\n self.__url = url.toEncoded()\n self.__enabled = False\n self.__downloading = None\n self.__defaultSubscription = default\n \n self.__title = \"\"\n self.__location = QByteArray()\n self.__lastUpdate = QDateTime()\n self.__requiresLocation = \"\"\n self.__requiresTitle = \"\"\n \n self.__updatePeriod = 0 # update period in hours, 0 = use default\n self.__remoteModified = QDateTime()\n \n self.__rules = [] # list containing all AdBlock rules\n \n self.__networkExceptionRules = []\n self.__networkBlockRules = []\n self.__domainRestrictedCssRules = []\n self.__elementHidingRules = \"\"\n self.__documentRules = []\n self.__elemhideRules = []\n \n self.__checksumRe = re.compile(\n r\"\"\"^\\s*!\\s*checksum[\\s\\-:]+([\\w\\+\\/=]+).*\\n\"\"\",\n re.IGNORECASE | re.MULTILINE)\n self.__expiresRe = re.compile(\n r\"\"\"(?:expires:|expires after)\\s*(\\d+)\\s*(hour|h)?\"\"\",\n re.IGNORECASE)\n self.__remoteModifiedRe = re.compile(\n r\"\"\"!\\s*(?:Last modified|Updated):\\s*(\\d{1,2})\\s*\"\"\"\n r\"\"\"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\\s*\"\"\"\n r\"\"\"(\\d{2,4})\\s*((\\d{1,2}):(\\d{2}))?\"\"\",\n re.IGNORECASE)\n \n self.__monthNameToNumber = {\n \"Jan\": 1,\n \"Feb\": 2,\n \"Mar\": 3,\n \"Apr\": 4,\n \"May\": 5,\n \"Jun\": 6,\n \"Jul\": 7,\n \"Aug\": 8,\n \"Sep\": 9,\n \"Oct\": 10,\n \"Nov\": 11,\n \"Dec\": 12\n }\n \n self.__parseUrl(url)\n \n def __parseUrl(self, url):\n \"\"\"\n Private method to parse the AdBlock URL for the subscription.\n \n @param url AdBlock URL for the subscription (QUrl)\n \"\"\"\n if url.scheme() != \"abp\":\n return\n \n if url.path() != \"subscribe\":\n return\n \n if qVersion() >= \"5.0.0\":\n from PyQt5.QtCore import QUrlQuery\n urlQuery = QUrlQuery(url)\n self.__title = QUrl.fromPercentEncoding(\n QByteArray(urlQuery.queryItemValue(\"title\").encode()))\n self.__enabled = urlQuery.queryItemValue(\"enabled\") != \"false\"\n self.__location = QByteArray(QUrl.fromPercentEncoding(\n QByteArray(urlQuery.queryItemValue(\"location\").encode()))\n .encode(\"utf-8\"))\n \n # Check for required subscription\n self.__requiresLocation = QUrl.fromPercentEncoding(\n QByteArray(urlQuery.queryItemValue(\n \"requiresLocation\").encode()))\n self.__requiresTitle = QUrl.fromPercentEncoding(\n QByteArray(urlQuery.queryItemValue(\"requiresTitle\").encode()))\n if self.__requiresLocation and self.__requiresTitle:\n import Helpviewer.HelpWindow\n Helpviewer.HelpWindow.HelpWindow.adBlockManager()\\\n .loadRequiredSubscription(self.__requiresLocation,\n self.__requiresTitle)\n \n lastUpdateString = urlQuery.queryItemValue(\"lastUpdate\")\n self.__lastUpdate = QDateTime.fromString(lastUpdateString,\n Qt.ISODate)\n else:\n self.__title = \\\n QUrl.fromPercentEncoding(url.encodedQueryItemValue(b\"title\"))\n self.__enabled = QUrl.fromPercentEncoding(\n url.encodedQueryItemValue(b\"enabled\")) != \"false\"\n self.__location = QByteArray(QUrl.fromPercentEncoding(\n url.encodedQueryItemValue(b\"location\")).encode(\"utf-8\"))\n \n # Check for required subscription\n self.__requiresLocation = QUrl.fromPercentEncoding(\n url.encodedQueryItemValue(b\"requiresLocation\"))\n self.__requiresTitle = QUrl.fromPercentEncoding(\n url.encodedQueryItemValue(b\"requiresTitle\"))\n if self.__requiresLocation and self.__requiresTitle:\n import Helpviewer.HelpWindow\n Helpviewer.HelpWindow.HelpWindow.adBlockManager()\\\n .loadRequiredSubscription(self.__requiresLocation,\n self.__requiresTitle)\n \n lastUpdateByteArray = url.encodedQueryItemValue(b\"lastUpdate\")\n lastUpdateString = QUrl.fromPercentEncoding(lastUpdateByteArray)\n self.__lastUpdate = QDateTime.fromString(lastUpdateString,\n Qt.ISODate)\n \n self.__loadRules()\n \n def url(self):\n \"\"\"\n Public method to generate the URL for this subscription.\n \n @return AdBlock URL for the subscription (QUrl)\n \"\"\"\n url = QUrl()\n url.setScheme(\"abp\")\n url.setPath(\"subscribe\")\n \n queryItems = []\n queryItems.append((\"location\", bytes(self.__location).decode()))\n queryItems.append((\"title\", self.__title))\n if self.__requiresLocation and self.__requiresTitle:\n queryItems.append((\"requiresLocation\", self.__requiresLocation))\n queryItems.append((\"requiresTitle\", self.__requiresTitle))\n if not self.__enabled:\n queryItems.append((\"enabled\", \"false\"))\n if self.__lastUpdate.isValid():\n queryItems.append((\"lastUpdate\",\n self.__lastUpdate.toString(Qt.ISODate)))\n if qVersion() >= \"5.0.0\":\n from PyQt5.QtCore import QUrlQuery\n query = QUrlQuery()\n query.setQueryItems(queryItems)\n url.setQuery(query)\n else:\n url.setQueryItems(queryItems)\n return url\n \n def isEnabled(self):\n \"\"\"\n Public method to check, if the subscription is enabled.\n \n @return flag indicating the enabled status (boolean)\n \"\"\"\n return self.__enabled\n \n def setEnabled(self, enabled):\n \"\"\"\n Public method to set the enabled status.\n \n @param enabled flag indicating the enabled status (boolean)\n \"\"\"\n if self.__enabled == enabled:\n return\n \n self.__enabled = enabled\n self.enabledChanged.emit(enabled)\n \n def title(self):\n \"\"\"\n Public method to get the subscription title.\n \n @return subscription title (string)\n \"\"\"\n return self.__title\n \n def setTitle(self, title):\n \"\"\"\n Public method to set the subscription title.\n \n @param title subscription title (string)\n \"\"\"\n if self.__title == title:\n return\n \n self.__title = title\n self.changed.emit()\n \n def location(self):\n \"\"\"\n Public method to get the subscription location.\n \n @return URL of the subscription location (QUrl)\n \"\"\"\n return QUrl.fromEncoded(self.__location)\n \n def setLocation(self, url):\n \"\"\"\n Public method to set the subscription location.\n \n @param url URL of the subscription location (QUrl)\n \"\"\"\n if url == self.location():\n return\n \n self.__location = url.toEncoded()\n self.__lastUpdate = QDateTime()\n self.changed.emit()\n \n def requiresLocation(self):\n \"\"\"\n Public method to get the location of a required subscription.\n \n @return location of a required subscription (string)\n \"\"\"\n return self.__requiresLocation\n \n def lastUpdate(self):\n \"\"\"\n Public method to get the date and time of the last update.\n \n @return date and time of the last update (QDateTime)\n \"\"\"\n return self.__lastUpdate\n \n def rulesFileName(self):\n \"\"\"\n Public method to get the name of the rules file.\n \n @return name of the rules file (string)\n \"\"\"\n if self.location().scheme() == \"file\":\n return self.location().toLocalFile()\n \n if self.__location.isEmpty():\n return \"\"\n \n sha1 = bytes(QCryptographicHash.hash(\n self.__location, QCryptographicHash.Sha1).toHex()).decode()\n dataDir = os.path.join(\n Utilities.getConfigDir(), \"browser\", \"subscriptions\")\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n fileName = os.path.join(\n dataDir, \"adblock_subscription_{0}\".format(sha1))\n return fileName\n \n def __loadRules(self):\n \"\"\"\n Private method to load the rules of the subscription.\n \"\"\"\n fileName = self.rulesFileName()\n f = QFile(fileName)\n if f.exists():\n if not f.open(QIODevice.ReadOnly):\n E5MessageBox.warning(\n None,\n self.tr(\"Load subscription rules\"),\n self.tr(\n \"\"\"Unable to open adblock file '{0}' for reading.\"\"\")\n .format(fileName))\n else:\n textStream = QTextStream(f)\n header = textStream.readLine(1024)\n if not header.startswith(\"[Adblock\"):\n E5MessageBox.warning(\n None,\n self.tr(\"Load subscription rules\"),\n self.tr(\"\"\"AdBlock file '{0}' does not start\"\"\"\n \"\"\" with [Adblock.\"\"\")\n .format(fileName))\n f.close()\n f.remove()\n self.__lastUpdate = QDateTime()\n else:\n from .AdBlockRule import AdBlockRule\n \n self.__updatePeriod = 0\n self.__remoteModified = QDateTime()\n self.__rules = []\n self.__rules.append(AdBlockRule(header, self))\n while not textStream.atEnd():\n line = textStream.readLine()\n self.__rules.append(AdBlockRule(line, self))\n expires = self.__expiresRe.search(line)\n if expires:\n period, kind = expires.groups()\n if kind:\n # hours\n self.__updatePeriod = int(period)\n else:\n # days\n self.__updatePeriod = int(period) * 24\n remoteModified = self.__remoteModifiedRe.search(line)\n if remoteModified:\n day, month, year, time, hour, minute = \\\n remoteModified.groups()\n self.__remoteModified.setDate(\n QDate(int(year),\n self.__monthNameToNumber[month],\n int(day))\n )\n if time:\n self.__remoteModified.setTime(\n QTime(int(hour), int(minute)))\n self.__populateCache()\n self.changed.emit()\n elif not fileName.endswith(\"_custom\"):\n self.__lastUpdate = QDateTime()\n \n self.checkForUpdate()\n \n def checkForUpdate(self):\n \"\"\"\n Public method to check for an update.\n \"\"\"\n if self.__updatePeriod:\n updatePeriod = self.__updatePeriod\n else:\n updatePeriod = Preferences.getHelp(\"AdBlockUpdatePeriod\") * 24\n if not self.__lastUpdate.isValid() or \\\n (self.__remoteModified.isValid() and\n self.__remoteModified.addSecs(updatePeriod * 3600) <\n QDateTime.currentDateTime()) or \\\n self.__lastUpdate.addSecs(updatePeriod * 3600) < \\\n QDateTime.currentDateTime():\n self.updateNow()\n \n def updateNow(self):\n \"\"\"\n Public method to update the subscription immediately.\n \"\"\"\n if self.__downloading is not None:\n return\n \n if not self.location().isValid():\n return\n \n if self.location().scheme() == \"file\":\n self.__lastUpdate = QDateTime.currentDateTime()\n self.__loadRules()\n return\n \n import Helpviewer.HelpWindow\n from Helpviewer.Network.FollowRedirectReply import FollowRedirectReply\n self.__downloading = FollowRedirectReply(\n self.location(),\n Helpviewer.HelpWindow.HelpWindow.networkAccessManager())\n self.__downloading.finished.connect(self.__rulesDownloaded)\n \n def __rulesDownloaded(self):\n \"\"\"\n Private slot to deal with the downloaded rules.\n \"\"\"\n reply = self.sender()\n \n response = reply.readAll()\n reply.close()\n self.__downloading = None\n \n if reply.error() != QNetworkReply.NoError:\n if not self.__defaultSubscription:\n # don't show error if we try to load the default\n E5MessageBox.warning(\n None,\n self.tr(\"Downloading subscription rules\"),\n self.tr(\n \"\"\"

Subscription rules could not be\"\"\"\n \"\"\" downloaded.

Error: {0}

\"\"\")\n .format(reply.errorString()))\n else:\n # reset after first download attempt\n self.__defaultSubscription = False\n return\n \n if response.isEmpty():\n E5MessageBox.warning(\n None,\n self.tr(\"Downloading subscription rules\"),\n self.tr(\"\"\"Got empty subscription rules.\"\"\"))\n return\n \n fileName = self.rulesFileName()\n QFile.remove(fileName)\n f = QFile(fileName)\n if not f.open(QIODevice.ReadWrite):\n E5MessageBox.warning(\n None,\n self.tr(\"Downloading subscription rules\"),\n self.tr(\n \"\"\"Unable to open adblock file '{0}' for writing.\"\"\")\n .file(fileName))\n return\n f.write(response)\n f.close()\n self.__lastUpdate = QDateTime.currentDateTime()\n if self.__validateCheckSum(fileName):\n self.__loadRules()\n else:\n QFile.remove(fileName)\n self.__downloading = None\n reply.deleteLater()\n \n def __validateCheckSum(self, fileName):\n \"\"\"\n Private method to check the subscription file's checksum.\n \n @param fileName name of the file containing the subscription (string)\n @return flag indicating a valid file (boolean). A file is considered\n valid, if the checksum is OK or the file does not contain a\n checksum (i.e. cannot be checked).\n \"\"\"\n try:\n f = open(fileName, \"r\", encoding=\"utf-8\")\n data = f.read()\n f.close()\n except (IOError, OSError):\n return False\n \n match = re.search(self.__checksumRe, data)\n if match:\n expectedChecksum = match.group(1)\n else:\n # consider it as valid\n return True\n \n # normalize the data\n data = re.sub(r\"\\r\", \"\", data) # normalize eol\n data = re.sub(r\"\\n+\", \"\\n\", data) # remove empty lines\n data = re.sub(self.__checksumRe, \"\", data) # remove checksum line\n \n # calculate checksum\n md5 = hashlib.md5()\n md5.update(data.encode(\"utf-8\"))\n calculatedChecksum = base64.b64encode(md5.digest()).decode()\\\n .rstrip(\"=\")\n if calculatedChecksum == expectedChecksum:\n return True\n else:\n res = E5MessageBox.yesNo(\n None,\n self.tr(\"Downloading subscription rules\"),\n self.tr(\n \"\"\"

AdBlock subscription {0} has a wrong\"\"\"\n \"\"\" checksum.
\"\"\"\n \"\"\"Found: {1}
\"\"\"\n \"\"\"Calculated: {2}
\"\"\"\n \"\"\"Use it anyway?

\"\"\")\n .format(self.__title, expectedChecksum,\n calculatedChecksum))\n return res\n \n def saveRules(self):\n \"\"\"\n Public method to save the subscription rules.\n \"\"\"\n fileName = self.rulesFileName()\n if not fileName:\n return\n \n f = QFile(fileName)\n if not f.open(QIODevice.ReadWrite | QIODevice.Truncate):\n E5MessageBox.warning(\n None,\n self.tr(\"Saving subscription rules\"),\n self.tr(\n \"\"\"Unable to open adblock file '{0}' for writing.\"\"\")\n .format(fileName))\n return\n \n textStream = QTextStream(f)\n if not self.__rules or not self.__rules[0].isHeader():\n textStream << \"[Adblock Plus 1.1.1]\\n\"\n for rule in self.__rules:\n textStream << rule.filter() << \"\\n\"\n \n def match(self, req, urlDomain, urlString):\n \"\"\"\n Public method to check the subscription for a matching rule.\n \n @param req reference to the network request (QNetworkRequest)\n @param urlDomain domain of the URL (string)\n @param urlString URL (string)\n @return reference to the rule object or None (AdBlockRule)\n \"\"\"\n for rule in self.__networkExceptionRules:\n if rule.networkMatch(req, urlDomain, urlString):\n return None\n \n for rule in self.__networkBlockRules:\n if rule.networkMatch(req, urlDomain, urlString):\n return rule\n \n return None\n \n def adBlockDisabledForUrl(self, url):\n \"\"\"\n Public method to check, if AdBlock is disabled for the given URL.\n \n @param url URL to check (QUrl)\n @return flag indicating disabled state (boolean)\n \"\"\"\n for rule in self.__documentRules:\n if rule.urlMatch(url):\n return True\n \n return False\n \n def elemHideDisabledForUrl(self, url):\n \"\"\"\n Public method to check, if element hiding is disabled for the given\n URL.\n \n @param url URL to check (QUrl)\n @return flag indicating disabled state (boolean)\n \"\"\"\n if self.adBlockDisabledForUrl(url):\n return True\n \n for rule in self.__elemhideRules:\n if rule.urlMatch(url):\n return True\n \n return False\n \n def elementHidingRules(self):\n \"\"\"\n Public method to get the element hiding rules.\n \n @return element hiding rules (string)\n \"\"\"\n return self.__elementHidingRules\n \n def elementHidingRulesForDomain(self, domain):\n \"\"\"\n Public method to get the element hiding rules for the given domain.\n \n @param domain domain name (string)\n @return element hiding rules (string)\n \"\"\"\n rules = \"\"\n \n for rule in self.__domainRestrictedCssRules:\n if rule.matchDomain(domain):\n rules += rule.cssSelector() + \",\"\n \n return rules\n \n def rule(self, offset):\n \"\"\"\n Public method to get a specific rule.\n \n @param offset offset of the rule (integer)\n @return requested rule (AdBlockRule)\n \"\"\"\n if offset >= len(self.__rules):\n return None\n \n return self.__rules[offset]\n \n def allRules(self):\n \"\"\"\n Public method to get the list of rules.\n \n @return list of rules (list of AdBlockRule)\n \"\"\"\n return self.__rules[:]\n \n def addRule(self, rule):\n \"\"\"\n Public method to add a rule.\n \n @param rule reference to the rule to add (AdBlockRule)\n @return offset of the rule (integer)\n \"\"\"\n self.__rules.append(rule)\n self.__populateCache()\n self.rulesChanged.emit()\n \n return len(self.__rules) - 1\n \n def removeRule(self, offset):\n \"\"\"\n Public method to remove a rule given the offset.\n \n @param offset offset of the rule to remove (integer)\n \"\"\"\n if offset < 0 or offset > len(self.__rules):\n return\n \n del self.__rules[offset]\n self.__populateCache()\n self.rulesChanged.emit()\n \n def replaceRule(self, rule, offset):\n \"\"\"\n Public method to replace a rule given the offset.\n \n @param rule reference to the rule to set (AdBlockRule)\n @param offset offset of the rule to remove (integer)\n @return requested rule (AdBlockRule)\n \"\"\"\n if offset >= len(self.__rules):\n return None\n \n self.__rules[offset] = rule\n self.__populateCache()\n self.rulesChanged.emit()\n \n return self.__rules[offset]\n \n def __populateCache(self):\n \"\"\"\n Private method to populate the various rule caches.\n \"\"\"\n self.__networkExceptionRules = []\n self.__networkBlockRules = []\n self.__domainRestrictedCssRules = []\n self.__elementHidingRules = \"\"\n self.__documentRules = []\n self.__elemhideRules = []\n \n for rule in self.__rules:\n if not rule.isEnabled():\n continue\n \n if rule.isCSSRule():\n if rule.isDomainRestricted():\n self.__domainRestrictedCssRules.append(rule)\n else:\n self.__elementHidingRules += rule.cssSelector() + \",\"\n elif rule.isDocument():\n self.__documentRules.append(rule)\n elif rule.isElementHiding():\n self.__elemhideRules.append(rule)\n elif rule.isException():\n self.__networkExceptionRules.append(rule)\n else:\n self.__networkBlockRules.append(rule)\n \n def canEditRules(self):\n \"\"\"\n Public method to check, if rules can be edited.\n \n @return flag indicating rules may be edited (boolean)\n \"\"\"\n return self.__custom\n \n def canBeRemoved(self):\n \"\"\"\n Public method to check, if the subscription can be removed.\n \n @return flag indicating removal is allowed (boolean)\n \"\"\"\n return not self.__custom and not self.__defaultSubscription\n \n def setRuleEnabled(self, offset, enabled):\n \"\"\"\n Public method to enable a specific rule.\n \n @param offset offset of the rule (integer)\n @param enabled new enabled state (boolean)\n @return reference to the changed rule (AdBlockRule)\n \"\"\"\n if offset >= len(self.__rules):\n return None\n \n rule = self.__rules[offset]\n rule.setEnabled(enabled)\n if rule.isCSSRule():\n import Helpviewer.HelpWindow\n self.__populateCache()\n Helpviewer.HelpWindow.HelpWindow.mainWindow()\\\n .reloadUserStyleSheet()\n \n return rule\n","repo_name":"pycom/Pymakr","sub_path":"Helpviewer/AdBlock/AdBlockSubscription.py","file_name":"AdBlockSubscription.py","file_ext":"py","file_size_in_byte":24948,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"22"} +{"seq_id":"26247393038","text":"\"\"\"\nTested with:\nPython 3.7.7\nscikit-learn==0.24.2\n\nhttps://scikit-learn.org/0.23/auto_examples/feature_selection/plot_select_from_model_diabetes.html\n\"\"\"\n\n###\n# Requirements\n# 1. First you need to create a teachable here: https://app.teachablehub.com/create\n# 2. Create Deploy and Serving keys\n# https://app.teachablehub.com///settings/deploy-keys\n# https://app.teachablehub.com///settings/serving-keys\n###\n\n# training packages\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\n# deployment packages\nfrom teachablehub.deployments.sklearn import TeachableDeployment\nfrom teachablehub.clients import TeachableHubPredictAPI\n\n# environment info\nimport platform\nfrom sklearn import __version__ as sklearn_version\n\n###\n# Training\n###\n\ndiabetes = datasets.load_diabetes() # load data\nX_train, X_test, y_train, y_test = train_test_split(diabetes.data, diabetes.target, test_size=0.2, random_state=0)\n\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\n\n###\n# Deployment\n###\n\ndeployment = TeachableDeployment(\n teachable=\"user/teachable\",\n environment=\"production\",\n deploy_key=\"your-deploy-key-here\",\n)\n\ndeployment.model(model)\n\n# HTTP Request schema + validation\ndeployment.schema({\n \"features\": {\n \"age\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"sex\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"bmi\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"bp\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"s1\": {\n \"type\": \"float\",\n \"max\": 0.1,\n \"min\": -0.1,\n \"help\": \"What is this feature about, where we can get it. how to prepare it, how to generate it?\",\n },\n \"s2\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"s3\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"s4\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"s5\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n \"s6\": {\"type\": \"float\", \"max\": 0.1, \"min\": -0.1},\n\n },\n \"ndarray\": [[\"age\", \"sex\", \"bmi\", \"bp\", \"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\"]]\n})\n\ndeployment.samples(\n ndarray=X_train[0],\n features={\n \"age\": 0.01264814,\n \"sex\": 0.05068012,\n \"bmi\": 0.00241654,\n \"bp\": 0.05630106,\n \"s1\": 0.02732605,\n \"s2\": 0.01716188,\n \"s3\": 0.04127682,\n \"s4\": -0.03949338,\n \"s5\": 0.00371174,\n \"s6\": 0.07348023\n }\n)\n\ndeployment.context({\n \"script\": \"deploy-regression-advanced.py\",\n \"scikit-learn\": sklearn_version,\n \"python\": platform.python_version(),\n \"local_hostname\": platform.node(),\n \"os_info\": platform.version()\n})\n\ndeployment.deploy(\n summary=\"Automatic deployment from {}\".format(platform.node()),\n activate=True\n)\n\nprint(\"v{} successfuly deployed.\".format(deployment.version()))\n\n###\n# Predict\n###\n\nteachable = TeachableHubPredictAPI(\n teachable=\"user/teachable\",\n environment=\"production\",\n serving_key=\"your-serving-key-here\"\n)\n\n# predict with ndarray\n# predictions = teachable.predict([[0.03, 0.05, -0.002, -0.01, 0.04, 0.01, 0.08, -0.04, 0.005, -0.1]])\n\n# predict with features\npredictions = teachable.predict({\n \"age\": 0.03,\n \"sex\": 0.05,\n \"bmi\": -0.002,\n \"bp\": -0.01,\n \"s1\": 0.04,\n \"s2\": 0.01,\n \"s3\": 0.08,\n \"s4\": -0.04,\n \"s5\": 0.005,\n \"s6\": -0.1\n })\n\nprint(predictions)\n\n\"\"\"\nResult:\n[\n 106.38885834176024\n]\n\n\"\"\"\n","repo_name":"teachablehub/python-sdk","sub_path":"examples/sklearn-train-deploy-regression-advanced.py","file_name":"sklearn-train-deploy-regression-advanced.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"5819382316","text":"import requests\nfrom datetime import datetime,date\nfrom bs4 import BeautifulSoup\nfrom mysqls.pandasql import Links\nfrom dateutil.parser import parse\nfrom all_link.page.rss import getList\ndef link43():\n getList(\n numero=\"43\",\n LA_name=\"Devon\",\n LA_pr=\"https://www.devonnewscentre.info/\",\n links=\"https://www.devonnewscentre.info/feed/\",\n listas=\"item\",\n datesss=\"pubDate\",\n replaceDate=None,\n titles=\"title\",\n getBody=getBody,\n imajinasi=\"sam\",\n linkedin=\"\",\n href=\"link\",\n linkedin2=\"\")\ndef getBody(link,**kwargs):\n panda1=\"\"\n image=\"\"\n try:\n r = requests.get(link, timeout=15,verify=False)\n soup = BeautifulSoup(r.text, 'html.parser')\n a=soup.select(\"div#content p\")\n image=soup.select_one(\"div.single-thumbnail.pull-right img\").get(\"src\") if soup.select_one(\"div.single-thumbnail.pull-right img\") else \"\"\n s=\"\"\n c=0\n for j in a[1:len(a)]:\n s+=j.getText().replace('\\n', ' ').replace('\\r', '').strip() if j else \"\"\n panda1=s\n \n \n return [panda1,image]\n \n except:\n return None","repo_name":"gakpenting/scrape-python-news","sub_path":"all_link/link43.py","file_name":"link43.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28985716204","text":"import environ\n\nenv = environ.Env(\n # set casting, default value\n DEBUG=(bool, True),\n CELERY_TASK_ALWAYS_EAGER=(bool, False),\n BOT_NUMBER_OF_USERS=(int, 5),\n BOT_MAX_POSTS_PER_USER=(int, 5),\n BOT_MAX_LIKES_PER_USER=(int, 5),\n)\nbase = environ.Path(__file__) - 3\nenviron.Env.read_env(env_file=base(\".env\")) # reading .env file\nDEBUG = env(\"DEBUG\")\n","repo_name":"BakanovKirill/test_social_network","sub_path":"app/settings/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33737455768","text":"import ast\nimport re\nimport sys\n\nfrom string import punctuation\nfrom porter import PorterStemmer\n\nargs = sys.argv\nif len(args) != 5:\n sys.exit('Usage: python3 tc_test.py stopword-list model test-list test-class-list')\n\nk = 1\nmax_compromise = 0\nlines_to_write = []\n\nstopword_list_file, model_file, test_list_file, test_class_list_file = args[1:]\np = PorterStemmer()\n\ndef strip_and_filter_line(ln):\n if all(x in ln for x in [':', '@']):\n return []\n tokens = map(lambda t: t.strip().strip(punctuation).lower(), ln.split(' '))\n return list(filter(lambda t: t and len(t) > 2 and t.isalpha() and t not in stop_list, tokens))\n\ndef get_word_to_count(word_list):\n word_to_count = {}\n num_words = len(word_list)\n prev_unigram = word_list[0]\n for i in range(1, num_words):\n curr_unigram = word_list[i]\n ngrams = [curr_unigram, '{} {}'.format(prev_unigram, curr_unigram)]\n for ngram in ngrams:\n if ngram not in word_to_count:\n word_to_count[ngram] = 1\n else:\n word_to_count[ngram] += 1\n prev_unigram = curr_unigram\n return word_to_count\n\ndef get_weaker_word_to_count(word_to_count):\n fin_word_to_count = {}\n for compromise in range(1, max_compromise + 1):\n if fin_word_to_count:\n break\n fin_word_to_count = { word: count for word, count in word_to_count.items() \\\n if count >= k - compromise }\n for len_gram in range(2, 0, -1):\n fin_word_to_count = { word: count for word, count in fin_word_to_count.items() \\\n if len(word.split(' ')) >= len_gram }\n if fin_word_to_count:\n break\n return fin_word_to_count\n\ndef get_activation(row, weights):\n activation = weights[0]\n for i in range(len(row) - 1):\n activation += weights[i + 1] * row[i]\n return activation\n\ndef predict(activation):\n return 1 if activation >= 0 else 0\n\n'''\ndef predict(row, weights):\n activation = weights[0]\n for i in range(len(row) - 1):\n activation += weights[i + 1] * row[i]\n return 1 if activation >= 0 else 0\n'''\n\nwith open(stopword_list_file, 'r') as s:\n stop_list = list(map(lambda ln: ln.strip(), s.readlines()))\n\nwith open(model_file, 'r') as m:\n lines = list(map(lambda w: ast.literal_eval(w), m.readlines()))\n class_list, class_to_feat_to_index, class_to_weights = lines\n\nwith open(test_list_file, 'r') as t:\n # lines = map(lambda ln: ln.strip(), t.readlines())\n lines = map(lambda ln: ln.strip().split(' ')[0], t.readlines())\n for ln in lines:\n file = ln\n # text = file.split('/')[-1]\n text = re.split('[(\\\\\\\\)(\\\\)(\\/)]', file)[-1]\n flat_text = []\n with open(file, 'r') as f:\n for line in map(lambda ln: strip_and_filter_line(ln), f.readlines()):\n flat_text.extend(list(map(lambda word: p.stem(word, 0, len(word) - 1), line)))\n word_to_count = get_word_to_count(flat_text)\n fin_word_to_count = { word: count for word, count in word_to_count.items() if count >= k }\n if not fin_word_to_count:\n fin_word_to_count = get_weaker_word_to_count(word_to_count)\n sum_count = sum(fin_word_to_count.values())\n normalized_word_to_count = { word: count / sum_count for word, count in fin_word_to_count.items() }\n instance_class_to_output = { c: 0 for c in class_list }\n for c in class_list:\n feat_vec = [0 for i in range(len(class_to_feat_to_index[c]))]\n for w in class_to_feat_to_index[c]:\n if w in normalized_word_to_count:\n index = class_to_feat_to_index[c][w]\n feat_vec[index] = normalized_word_to_count[w]\n instance_class_to_output[c] = get_activation(feat_vec, class_to_weights[c])\n # instance_class_to_output[c] = predict(get_activation(feat_vec, class_to_weights[c]))\n instance_class_to_output = sorted(instance_class_to_output.items(), key = lambda x: x[1], reverse = True)\n instance_class_to_output = list(filter(lambda x: x[1] != 0, instance_class_to_output))\n predicted_class = instance_class_to_output[0][0]\n lines_to_write.append('{} {}\\n'.format(file, predicted_class))\n\nwith open(test_class_list_file, 'w') as f:\n f.writelines(lines_to_write)\n","repo_name":"jia1/pyceptron","sub_path":"tc_test.py","file_name":"tc_test.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44299789192","text":"from string import ascii_lowercase\n\nascii_lowercase = [i for i in ascii_lowercase]\ngreek_lowercase = [u'\\u03B1', u'\\u03B2', u'\\u03B3', u'\\u03B4', u'\\u03B5',\n u'\\u03B6', u'\\u03B7', u'\\u03B8', u'\\u03B9', u'\\u03BA',\n u'\\u03BB', u'\\u03BC', u'\\u03BD', u'\\u03BE', u'\\u03BF',\n u'\\u03C0', u'\\u03C1', u'\\u03C3', u'\\u03C4', u'\\u03C5',\n u'\\u03C6', u'\\u03C7', u'\\u03C8', u'\\u03C9']\nletters = ascii_lowercase + greek_lowercase\ntex_letters = ascii_lowercase\n\n\ndef bool_to_machine(b):\n if b:\n return \"1\"\n return \"0\"\n\n\ndef bool_to_str(b):\n if b:\n return \"True\"\n return \"False\"\n\n\nclass Symbol:\n def __init__(self, n, machine, ascii=None, unicode=None, tex=None,\n name=None):\n self.ascii = ascii\n self.unicode = unicode\n self.machine = machine\n self.tex = tex\n self.n = n\n self.replacements = []\n self.name = name\n\n def __str__(self):\n return self.ascii\n\n def __eq__(self, other):\n return self.n == other.n\n\n\nclass Bool(Symbol):\n def __init__(self, n, bool):\n super().__init__(n, bool_to_machine(bool), ascii=bool_to_machine(bool),\n unicode=bool_to_machine(bool),\n tex=\"\\\\textsc{\" + bool_to_str(bool) + \"}\")\n\n\nclass UnarySymbol(Symbol):\n def __init__(self, n, truth_table, machine, **kwargs):\n super().__init__(n, machine, **kwargs)\n for i, res in truth_table:\n self.replacements.append((self.machine + bool_to_machine(i),\n bool_to_machine(res)))\n\n\nclass BinarySymbol(Symbol):\n def __init__(self, n, truth_table, machine, **kwargs):\n super().__init__(n, machine, **kwargs)\n for i, j, res in truth_table:\n self.replacements.append((\"(\" + bool_to_machine(i)\n + self.machine\n + bool_to_machine(j) + \")\",\n bool_to_machine(res)))\n\n\nclass Variable(Symbol):\n def __init__(self, n, var_n):\n if var_n < len(ascii_lowercase):\n ascii = ascii_lowercase[var_n]\n else:\n ascii = None\n if var_n < len(letters):\n unicode = letters[var_n]\n else:\n unicode = None\n if var_n < len(tex_letters):\n tex = tex_letters[var_n]\n else:\n tex = None\n super().__init__(n, \"VAR[\"+str(var_n)+\"]\", ascii=ascii,\n unicode=unicode, tex=tex)\n self.var_n = var_n\n\n\nclass Symbols:\n def __init__(self, **params):\n self._next_machine = \"A\"\n self._symbols = []\n self._unary = []\n self._binary = []\n self._bool = []\n self._variables = []\n\n self.add_unary([(False, True), (True, False)],\n \"NOT\", ascii=\"-\", unicode=u\"\\u00AC\", tex=\"\\\\lnot\",\n name=\"not\")\n\n self.add_binary([(True, True, True), (True, False, False),\n (False, True, False), (False, False, False)],\n \"AND\", ascii=\"+\", unicode=u\"\\u2227\", tex=\"\\\\land\",\n name=\"and\")\n self.add_binary([(True, True, True), (True, False, True),\n (False, True, True), (False, False, False)],\n \"OR\", ascii=\"/\", unicode=u\"\\u2228\", tex=\"\\\\lor\",\n name=\"or\")\n self.add_binary([(True, True, True), (True, False, False),\n (False, True, False), (False, False, True)],\n \"IFF\", ascii=\"=\", unicode=u\"\\u21FF\",\n tex=\"\\\\leftrightarrow\", name=\"if and only if\")\n self.add_binary([(True, True, True), (True, False, False),\n (False, True, True), (False, False, True)],\n \"IMP\", ascii=\">\", unicode=u\"\\u21FE\",\n tex=\"\\\\rightarrow\", name=\"implies\")\n\n self.add_symbol(\"(\", ascii=\"(\", unicode=\"(\", tex=\"(\")\n self._open = self._symbols[-1]\n self.add_symbol(\")\", ascii=\")\", unicode=\")\", tex=\")\")\n self._close = self._symbols[-1]\n\n if \"include_bools\" in params and params[\"include_bools\"]:\n self.add_bool(True)\n self.add_bool(False)\n if \"allow_not_bool\" in params:\n self.allow_not_bool = params[\"allow_not_not\"]\n else:\n self.allow_not_bool = True\n if \"allow_not_not\" in params:\n self.allow_not_not = params[\"allow_not_not\"]\n else:\n self.allow_not_not = True\n\n self.replacements = []\n for i in self._symbols:\n self.replacements += i.replacements\n\n def ascii_key(self):\n key = \"# KEY\\n\"\n for s in self._unary:\n key += \"# \" + s.ascii + \" \" + s.name + \"\\n\"\n for s in self._binary:\n key += \"# \" + s.ascii + \" \" + s.name + \"\\n\"\n key += \"# a-z represent variables\"\n return key\n\n def next(self, prev, current):\n follow = self.follow(prev)\n return follow[follow.index(current)+1]\n\n def follow(self, prev=[]):\n \"\"\"Returns a list of characters that could follow prev.\"\"\"\n # If this is the first character\n if len(prev) == 0:\n return self._unary + [self._open]\n # If no brackets have been opened\n if prev.count(self._open) == 0:\n if self.allow_not_not:\n return self._unary + [self._open]\n else:\n return [i for i in self._unary if i != prev[-1]] + [self._open]\n\n # If all brackets are closed, this is invalid, so just return )\n if prev.count(self._open) <= prev.count(self._close):\n return [self._close]\n\n # If last character is (\n if prev[-1] == self._open:\n return (self._unary + self._bool + [self._open]\n + self.variables_follow(prev))\n # If last character is a binary symbol\n if isinstance(prev[-1], BinarySymbol):\n return (self._unary + self._bool + [self._open]\n + self.variables_follow(prev))\n # If last character is a unary symbol\n if isinstance(prev[-1], UnarySymbol):\n if self.allow_not_not:\n u = self._unary\n else:\n u = [i for i in self._unary if i != prev[-1]]\n if self.allow_not_bool:\n return (u + self._bool + [self._open]\n + self.variables_follow(prev))\n else:\n return u + [self._open] + self.variables_follow(prev)\n # If the last character is a variable, bool or )\n assert (isinstance(prev[-1], Bool) or isinstance(prev[-1], Variable)\n or prev[-1] == self._close)\n op = 0\n for i in prev[::-1]:\n if i == self._open:\n if op == 0:\n break\n op -= 1\n if i == self._close:\n op += 1\n if op == 0 and isinstance(i, BinarySymbol):\n return [self._close]\n return self._binary\n\n def variables_follow(self, prev):\n used = max([-1] + [i.var_n for i in prev if isinstance(i, Variable)])\n return [self.get_variable(i) for i in range(used+2)]\n\n def __len__(self):\n return len(self._symbols)\n\n def __getitem__(self, i):\n return self._symbols[i]\n\n def get_machine_name(self):\n out = self._next_machine\n self._next_machine += \"A\"\n return out\n\n def get_variable(self, n):\n while len(self._variables) <= n:\n self.add_variable()\n return self._variables[n]\n\n def add_variable(self):\n self._symbols.append(Variable(len(self._symbols),\n len(self._variables)))\n self._variables.append(self._symbols[-1])\n\n def add_symbol(self, machine=None, **kwargs):\n if machine is None:\n machine = self.get_machine_name()\n self._symbols.append(Symbol(len(self._symbols), machine, **kwargs))\n\n def add_binary(self, truth_table, machine=None, **kwargs):\n if machine is None:\n machine = self.get_machine_name()\n self._symbols.append(BinarySymbol(len(self._symbols), truth_table,\n machine, **kwargs))\n self._binary.append(self._symbols[-1])\n\n def add_unary(self, truth_table, machine=None, **kwargs):\n if machine is None:\n machine = self.get_machine_name()\n self._symbols.append(UnarySymbol(len(self._symbols), truth_table,\n machine, **kwargs))\n self._unary.append(self._symbols[-1])\n\n def add_bool(self, bool):\n self._symbols.append(Bool(len(self._symbols), bool))\n self._bool.append(self._symbols[-1])\n\n def get_from_ascii(self, a):\n if a in letters:\n return self.get_variable(letters.index(a))\n for s in self._symbols:\n if s.ascii == a:\n return s\n raise ValueError(\"Unknown character \" + a)\n","repo_name":"mscroggs/Logic-Bot","sub_path":"logic/symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"} +{"seq_id":"9898175605","text":"import numpy as np\n\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\n\ntop_words = 5000\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n\n# Dataset info\nX = np.concatenate((X_train, X_test), axis=0)\ny = np.concatenate((y_train, y_test), axis=0)\nprint(f\"Classes: {np.unique(y)}\")\nprint(\"Number of words: \")\nprint(len(np.unique(np.hstack(X))))\nprint(\"Review length: \")\nresult = [len(x) for x in X]\nprint(\"Mean %.2f words (%f)\" % (np.mean(result), np.std(result)))\n\nmax_words = 500\nX_train = sequence.pad_sequences(X_train, maxlen=max_words)\nX_test = sequence.pad_sequences(X_test, maxlen=max_words)\n\nmodel = Sequential()\nmodel.add(Embedding(top_words, 32, input_length=max_words))\nmodel.add(Flatten())\nmodel.add(Dense(250, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(\n X_train,\n y_train,\n validation_data=(X_test, y_test),\n epochs=2,\n batch_size=128,\n verbose=2,\n)\n\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1] * 100))\n","repo_name":"alecordev/data-science","sub_path":"src/examples/nlp/keras/mlp_single_multi_layer_perceptron.py","file_name":"mlp_single_multi_layer_perceptron.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"9453938152","text":"from multiprocessing import context\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .models import Task\n\n# Create your views here.\n\n\ndef list_carro(request):\n task = Task.objects.all()\n context = {\n \"task\": task[::-1],\n \"update_from\": None\n }\n return render(request, 'list_carro.html', context)\n\n\ndef insert(request):\n try:\n task_modelo = request.POST['modelo']\n task_año = request.POST['año']\n task_placa = request.POST['placa']\n task_chasis = request.POST['chasis']\n task_propietario = request.POST['propietario']\n if task_modelo == \"\" or task_año == \"\" or task_placa == \"\" or task_chasis == \"\" or task_propietario == \"\":\n raise ValueError(\"El texto no puede estar vacio.\")\n task = Task(modelo=task_modelo, año=task_año, placa=task_placa,\n chasis=task_chasis, propietario=task_propietario)\n task.save()\n return redirect('/carro/')\n except ValueError as err:\n print(err)\n return redirect('/carro/')\n\n\ndef update(request):\n task_id = request.POST[\"id\"]\n task_modelo = request.POST['modelo']\n task_año = request.POST['año']\n task_placa = request.POST['placa']\n task_chasis = request.POST['chasis']\n task_propietario = request.POST['propietario'] \n task = Task.objects.get(pk=task_id)\n task.modelo = task_modelo\n task.año = task_año\n task.placa = task_placa\n task.chasis = task_chasis\n task.propietario = task_propietario\n task.save()\n return redirect('/carro/')\n\n\ndef update_from(request, task_id):\n task = Task.objects.all()\n task_only = Task.objects.get(pk=task_id)\n print(task_only)\n context = {\n \"task\": task[::-1],\n \"update\": task_only\n }\n return render(request, 'list_carro.html', context)\n\n\ndef delete_task(request, task_id):\n task = Task.objects.filter(id=task_id)\n task.delete()\n return redirect('/carro/')\n","repo_name":"Joel7Anthony/Editar","sub_path":"carro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36904067366","text":"# A MySQL Backup Instance Object\n# is responsible for managing the\n# mysql backup files associated with it.\n# It also functions a place to abstract\n# the creation of new backups.\n\nimport mysql_backup\nimport time\n\n\nclass MysqlBackupInstance:\n\n backup_logger = None\n\n def __init__(self, db_name, date_string=None, bkup_file_objs=()):\n \"\"\"There are two methods to initialize.\n 1: Pass only db_name = trigger a new backup to be created and become an instance.\n (Still verify the crap out of the new instance)\n 2: Pass a tuple of backup file objects to bkup_file_objs = initialize an instance\n of an existing backup, making every effort to make sure things are valid or\n self destructing (removing all files) with a RuntimeError\"\"\"\n\n MysqlBackupInstance.backup_logger = mysql_backup.mysql_backup.MysqlBackup.backup_logger\n\n self.db_name = db_name\n self.date_string = date_string\n\n # These are things managed within the instance itself\n self.bkup_file_objs = list(bkup_file_objs)\n\n # The following two values will always be initialized after the\n # call to set_proper_instance_state\n self.checksum = None\n self.incremental_backup_file_obj = None\n\n # A convenient way to be sure proper instance has been attempted\n # at least once, which should almost always be sufficient\n self.set_proper_instance_state_called_at_least_once = False\n\n if bkup_file_objs and date_string is not None:\n if self.any_files_being_written():\n msg = \"Files are being written. Can not instantiate %s\" % self\n # MysqlBackupInstance.backup_logger(msg, extra={'object': self})\n raise RuntimeError(msg)\n else:\n self.set_proper_instance_state()\n\n elif not bkup_file_objs and date_string is None:\n # Create a new backup\n results = mysql_backup.MysqlBackupFileFactory.create_file_object(self.db_name)\n self.bkup_file_objs = results.values()\n self.date_string = results.values()[0].date_string\n validated_instance_file_objects = self.clean_bad_files_return_good_file_objects_or_fail()\n self.checksum = validated_instance_file_objects.get(\"checksumfileobj\").get_checksum()\n\n else:\n msg = \"Improper combination of arguments.\"\n # MysqlBackupInstance.backup_logger(msg, extra={'object': self})\n raise ValueError(msg)\n\n def __eq__(self, other):\n \"\"\"If the checksums of two instances are equal\n the backups are equal.\"\"\"\n if not isinstance(other, MysqlBackupInstance):\n AssertionError(\"Invalid comparison attempted.\")\n return self.checksum == other.checksum\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __str__(self):\n return self.db_name + \" \" + self.date_string\n\n # Get stuff\n\n def get_age_secs(self):\n # current time stamp\n now = int(time.time())\n\n #time stamp based on the backup files naming\n backup_time = mysql_backup.MysqlBackup.ts_from_human_readable_date(self.date_string)\n\n age_in_secs = now - backup_time\n\n return age_in_secs\n\n def get_all_files(self):\n \"\"\"Return a list of all files (full paths) associated\n with this backup instance\"\"\"\n return_list = list()\n for obj in self.bkup_file_objs:\n return_list.append(obj.file_name_full_path)\n\n if self.is_a_long_term_version():\n return_list.append(self.incremental_backup_file_obj.get_long_term_backup_full_name())\n return return_list\n\n def is_a_long_term_version(self):\n \"\"\"Does this backup instance exist in the long\n term backup path\"\"\"\n return self.incremental_backup_file_obj.is_a_long_term_version()\n\n # Validate or (do stuff (with a RuntimeError) and die trying)\n\n def set_proper_instance_state(self):\n \"\"\"void (but throws RunTiimeException on error) either here or in a called method.\n Backups could die mid run or compression.\n In any case this method will make an effort to\n resolve situations that should not exist and\n ensure backup instances exist in a proper state.\n If this is not possible, CLEAN EVERYTHING UP, SELF DESTRUCT,\n and throw a RunTimeError\"\"\"\n\n validated_instance_file_objects = self.clean_bad_files_return_good_file_objects_or_fail()\n\n self.checksum = validated_instance_file_objects.get(\"checksumfileobj\").get_checksum()\n self.incremental_backup_file_obj = validated_instance_file_objects.get(\"bkupfileobj\")\n self.set_compression_state()\n\n self.set_proper_instance_state_called_at_least_once = True\n\n def self_destruct(self):\n \"\"\"Delete all files associated with this instance\"\"\"\n for bkfobj in self.bkup_file_objs:\n bkfobj.self_destruct()\n\n def clean_bad_files_return_good_file_objects_or_fail(self):\n \"\"\"Part of the initialization phase of an existing backup\n and should be called when a new backup will be kept\n ie. the checksums are different than the last one.\n\n If this does not fail, it will\n return a dict of\n\n {\n 'checksumfileobj':CheckSumFile,\n 'bkupfileobj':ActualBackupFileObj (UncompressedFile or CompressedFile)\n }\"\"\"\n\n has_checksum_file = False\n checksum_file_has_content = False\n has_uncompressed_file = False\n has_compresssed_file = False\n # less obvious but noting these things are also being factored\n # self.should_be_long_term_version\n # mysql_backup.MysqlBackup.compression_enabled\n\n for bkup_file_obj in self.bkup_file_objs:\n if isinstance(bkup_file_obj, mysql_backup.CheckSumFile):\n has_checksum_file = True\n result = bkup_file_obj.get_checksum()\n if isinstance(result, str):\n checksum_file_has_content = True\n elif isinstance(bkup_file_obj, mysql_backup.UncompressedFile):\n has_uncompressed_file = True\n elif isinstance(bkup_file_obj, mysql_backup.CompressedFile):\n has_compresssed_file = True\n\n # Missing a checksum object, game over. Backup not to be trusted\n # Missing a checksum object, game over. Backup not to be trusted\n if not has_checksum_file:\n self.self_destruct()\n msg = \"Checksum file missing. This backup is invalid.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n if not checksum_file_has_content:\n self.self_destruct()\n msg = \"Checksum file exists but had no content. Checksum file is not valid.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n\n # If both files exist, this is strange. The compressed one is\n # not to be trusted but let's assume it's a failure during the\n # compression of step\n if has_compresssed_file and has_uncompressed_file:\n for bkup_file_obj in self.bkup_file_objs:\n if isinstance(bkup_file_obj, mysql_backup.CompressedFile):\n bkup_file_obj.self_destruct()\n if bkup_file_obj.exists():\n msg = \"Tried to delete the backup file but failed.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n has_uncompressed_file = False\n\n # If neither a compressed or uncompressed version exists\n # this is a bad backup and should not be trusted\n if True not in (has_uncompressed_file, has_compresssed_file):\n\n self.self_destruct()\n msg = \"No backups actually exist. Self destructing this instance.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n # At this point the expectation is that there is one backup file\n # and one checksum file. To be really, really sure, let's double check\n # and while we are at it, set up the return values\n bkpfileobj = None\n checksumfileobj = None\n bkup_file_obj_count = 0\n\n for bkup_file_obj in self.bkup_file_objs:\n if isinstance(bkup_file_obj, (mysql_backup.UncompressedFile, mysql_backup.CompressedFile)):\n bkup_file_obj_count += 1\n bkpfileobj = bkup_file_obj\n elif isinstance(bkup_file_obj, mysql_backup.CheckSumFile):\n checksumfileobj = bkup_file_obj\n\n if bkup_file_obj_count != 1 or checksumfileobj is None:\n self.self_destruct()\n msg = \"An error occur when validating the the backup file state.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n returndict = {\n 'checksumfileobj': checksumfileobj,\n 'bkupfileobj': bkpfileobj,\n }\n return returndict\n\n def any_files_being_written(self):\n for bkup_file_obj in self.bkup_file_objs:\n if mysql_backup.MysqlBackup.is_file_open(bkup_file_obj.file_name_full_path):\n return True\n return False\n\n def set_compression_state(self):\n \"\"\"return: void or fail\n If backups should or should not be compressed,\n do the right thing and make it so. Should something\n change here, the self.incremental_backup_file_obj\n will be updated to become the modified object and\n the old one cleaned up.\n\n This would typically only be called after\n clean_bad_files_return_good_file_objects_or_fail\n and after self.incremental_backup_file_obj has\n been initialized\"\"\"\n\n if self.incremental_backup_file_obj is None:\n self.self_destruct()\n msg = \"This should never be called without the incremental_backup_file_obj initialized.\" \\\n \"This is a weird error that is never to be expected. Did you run \" \\\n \"clean_bad_files_return_good_file_objects_or_fail and initialize \" \\\n \"incremental_backup_file_obj?\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n else:\n\n # When compression should exist, make it so\n if mysql_backup.MysqlBackup.compression_enabled and isinstance(self.incremental_backup_file_obj,\n mysql_backup.UncompressedFile):\n cmpf = mysql_backup.MysqlBackupFileFactory.create_file_object(self.db_name,\n ucpf=self.incremental_backup_file_obj)\n # Add the compressed file object as managed by this instance\n self.bkup_file_objs.append(cmpf)\n\n # at this point the uncompressed file should have been removed. Let's double check or fail.\n # before removing it from the backup file objects here and pointing to the new file\n\n if self.incremental_backup_file_obj.exists():\n msg = \"Attempted to delete the compressed file but failed.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n # it is now safe to drop the uncompressed file object as managed by this instance.\n # and set the new incremental_backup_file_obj to the compressed file object\n self.bkup_file_objs = [bkobj for bkobj in self.bkup_file_objs\n if not isinstance(bkobj, mysql_backup.UncompressedFile)]\n self.incremental_backup_file_obj = cmpf\n\n # When compression should not exist, make it so\n elif not mysql_backup.MysqlBackup.compression_enabled and isinstance(self.incremental_backup_file_obj,\n mysql_backup.CompressedFile):\n ucmf = self.incremental_backup_file_obj.decompress()\n\n # Add the decompressed file object as managed by this instance\n self.bkup_file_objs.append(ucmf)\n\n # at this point the decompressed file should have been removed. Let's double check or fail.\n # before removing it from the backup file objects here and pointing to the new file\n\n if self.incremental_backup_file_obj.exists():\n msg = \"Attempted to delete the decompressed file but failed.\"\n MysqlBackupInstance.backup_logger.error(msg, extra={'object': self})\n raise RuntimeError(msg)\n\n # it is now safe to drop the uncompressed file object as managed by this instance.\n # and set the new incremental_backup_file_obj to the compressed file object\n self.bkup_file_objs = [bkobj for bkobj in self.bkup_file_objs\n if not isinstance(bkobj, mysql_backup.CompressedFile)]\n self.incremental_backup_file_obj = ucmf\n\n def set_as_long_term_version(self, lt_state):\n \"\"\"Input: lt_state (bool)\n Result: This instace will either become the long\n term backup or remove itself as being the long term backup\"\"\"\n\n lt_cur_state = self.is_a_long_term_version()\n if lt_cur_state != lt_state:\n if lt_state:\n self.incremental_backup_file_obj.copy_to_long_term_backup()\n else:\n self.incremental_backup_file_obj.remove_long_term_version()\n","repo_name":"dmatthewsbnd251/mysql-backup","sub_path":"mysql_backup/mysql_backup_instance.py","file_name":"mysql_backup_instance.py","file_ext":"py","file_size_in_byte":13933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41776429478","text":"import tkinter as tk\r\nfrom tkinter import colorchooser, simpledialog\r\n\r\nclass GraphicsApp:\r\n def __init__(self):\r\n self.root = tk.Tk()\r\n self.root.title(\"Lab3\")\r\n self.canvas = tk.Canvas(self.root, width=400, height=400, bg=\"white\")\r\n self.canvas.pack()\r\n self.current_shape = None\r\n self.start_x = None\r\n self.start_y = None\r\n\r\n # Create a context menu\r\n self.context_menu = tk.Menu(self.root, tearoff=0)\r\n self.context_menu.add_command(label=\"Change Color\", command=self.change_color)\r\n self.context_menu.add_command(label=\"Change Line Thickness\", command=self.change_line_thickness)\r\n self.context_menu.add_command(label=\"Change Line Type\", command=self.change_line_type)\r\n self.context_menu.add_command(label=\"Change Background Color\", command=self.change_background_color)\r\n\r\n # Bind the context menu to the canvas\r\n self.canvas.bind(\"\", self.show_context_menu)\r\n\r\n self.shape = self.canvas.create_rectangle(50, 50, 150, 150, fill=\"red\")\r\n self.move_enabled = True\r\n self.move_start_x = None\r\n self.move_start_y = None\r\n\r\n # Bind the left mouse button to enable shape movement\r\n self.canvas.tag_bind(self.shape, \"\", self.enable_move)\r\n\r\n def run(self):\r\n self.root.mainloop()\r\n\r\n def show_context_menu(self, event):\r\n self.context_menu.post(event.x_root, event.y_root)\r\n\r\n def change_color(self):\r\n color = tk.colorchooser.askcolor(title=\"Select Color\")\r\n if color[1]:\r\n self.canvas.itemconfig(self.shape, outline=color[1])\r\n\r\n def change_line_thickness(self):\r\n thickness = tk.simpledialog.askinteger(\"line\", \"Enter Line Thickness\")\r\n if thickness:\r\n self.canvas.itemconfig(self.shape, width=thickness)\r\n\r\n def change_line_type(self):\r\n self.canvas.itemconfig(self.shape, dash=(4, 4))\r\n\r\n def change_background_color(self):\r\n color = tk.colorchooser.askcolor(title=\"Select Color\")\r\n if color[1]:\r\n self.canvas.itemconfig(self.shape, fill=color[1])\r\n\r\n def enable_move(self, event=None):\r\n self.move_enabled = True\r\n self.move_start_x = event.x\r\n self.move_start_y = event.y\r\n self.canvas.bind(\"\", self.move_shape)\r\n self.canvas.bind(\"\", self.disable_move)\r\n\r\n def move_shape(self, event):\r\n if self.move_enabled:\r\n dx = event.x - self.move_start_x\r\n dy = event.y - self.move_start_y\r\n self.canvas.move(self.shape, dx, dy)\r\n self.move_start_x = event.x\r\n self.move_start_y = event.y\r\n\r\n def disable_move(self, event):\r\n self.move_enabled = False\r\n self.canvas.unbind(\"\")\r\n self.canvas.unbind(\"\")\r\n\r\n\r\napp = GraphicsApp()\r\napp.run()\r\n","repo_name":"ARTYsas/univer-semestr-4","sub_path":"Algorithms and methods for representing graphical information/Creating applications using Windows Forms/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23752686953","text":"import inspect\nimport configobj\nimport numpy as np\nimport pandas as pd\nfrom abc import ABCMeta, abstractmethod\nfrom helper.util import catch_exception\nfrom helper.util import get_attribute\nfrom helper.util import get_logger\nimport sklearn.preprocessing as preprocessing\nimport talib\n\n\nLOGGER = get_logger(__name__)\n\n\nclass Action(metaclass=ABCMeta):\n\n @staticmethod\n @abstractmethod\n def fire(self, data_frame, col_order):\n raise NotImplementedError\n\n\nclass Fetch(Action):\n\n @staticmethod\n @abstractmethod\n def fire(self, data_frame=None, col_order=None):\n raise NotImplementedError\n\n\nclass PreAnalyze(Action):\n\n @staticmethod\n @abstractmethod\n def fire(self, origin_frame, col_order=None):\n print('this is pre analyze')\n # raise NotImplementedError\n\n\nclass Analyze(Action):\n @staticmethod\n @abstractmethod\n def fire(self, pre_frame, label):\n for state_code in self._state_codes:\n analyze_frames = IndicatorAnalysis(pre_frame[state_code], self._indicators, label).add_analysis()\n self._analyze_frames[state_code] = analyze_frames\n\nclass PostAnalyze(Action):\n @staticmethod\n @abstractmethod\n def fire(self, data_frame, col_order=None):\n raise NotImplementedError\n # print(\"this is PostAnalyze\")\n # for state_code in self._state_codes:\n # self._analyze_frames[state_code].to_csv(\"../../back_testing/data/{}.csv\".format(state_code))\n\n\nclass FetchCSVConfig(Fetch):\n @staticmethod\n def fire(self):\n source_root = self.cfg['General']['source']\n if self._state_codes is None:\n raise TypeError(\"you should add correct inst_list\")\n\n for state_code in self._state_codes:\n file_path = source_root + state_code + \".csv\"\n try:\n df_ori = pd.read_csv(file_path)\n except Exception:\n print (state_code, \"the source file has error path\")\n continue\n mask = (df_ori.date > self._start_date) & (df_ori.date <= self._end_date)\n self._origin_frames[state_code] = df_ori.loc[mask]\n self._dates[state_code] = self._origin_frames[state_code]['date']\n self._origin_frames[state_code].set_index('date', inplace=True)\n\n\nclass PreAnalyzeDefault(PreAnalyze):\n @staticmethod\n def fire(self, origin_frames, col_order=None):\n self._pre_frames = origin_frames\n\nclass PostAnalyzeDefault(PostAnalyze):\n ### single csv data\n @staticmethod\n def fire(self, analyze_frames, col_order=None):\n # print(\"this is PostAnalyzeDefault\")\n scales = self._scaler\n for state_code in self._state_codes:\n post_frame = analyze_frames[state_code].copy()\n scales.fit(post_frame)\n post_frame = scales.transform(post_frame)\n self._post_frames[state_code] = pd.DataFrame(data=post_frame, index=self._dates[state_code],\n columns=analyze_frames[state_code].columns)\n self._origin_frames[state_code] = self._origin_frames[state_code].dropna(axis=0)\n self._post_frames[state_code] = self._post_frames[state_code].dropna(axis=0)\n self._dates[state_code] = list(self._post_frames[state_code].index)\n\n\ndef fetchcfg(cfg):\n # config = configobj.ConfigObj(cfg, encoding='UTF8')\n module = cfg['General']['module']\n fetch = cfg['PreProcess']['fetch']\n pre_analyze = cfg['PreProcess']['pre_analyze']\n post_analyze = cfg['PreProcess']['post_analyze']\n label = cfg['Analyze']['label']\n indicators = cfg['Analyze']['indicators']\n if label is None:\n label = 'close'\n action_fetch = get_attribute('.'.join([module, fetch]))\n action_pre_analyze = get_attribute('.'.join([module, pre_analyze]))\n action_post_analyze = get_attribute('.'.join([module, post_analyze]))\n return action_fetch, action_pre_analyze, indicators, action_post_analyze, label\n\n\nclass ProcessStrategy(object):\n def __init__(self, cfg):\n self.cfg = cfg\n self._fetch, self._pre_analyze, self._indicators, self._post_analyze, self._label = fetchcfg(self.cfg)\n self._analyze = Analyze # the analyze function in this module\n if 'instList' in cfg['General']:\n self._state_codes = list(pd.read_csv(cfg['General']['instList'])['instCode'])\n else:\n self._state_codes = None\n\n if 'start_date' in cfg['Parameter']:\n self._start_date = cfg['Parameter']['start_date']\n else:\n self._start_date = '2007-01-01'\n\n if 'end_date' in cfg['Parameter']:\n self._end_date = cfg['Parameter']['end_date']\n else:\n self._end_date = '2020-07-30'\n # self._end_date = end_date\n # self._indicators = action_analyze\n if 'scaler' in cfg['Setting']:\n self._scaler = getattr(preprocessing, cfg['Setting']['scaler'])()\n else:\n self._scaler = preprocessing.StandardScaler()\n\n # self._scaler = scaler\n self._dates = dict()\n self._origin_frames = dict()\n self._pre_frames = dict()\n self._analyze_frames = dict() # with analysis added\n self._post_frames = dict()\n self._col_order = cfg['Analyze']['indicators']\n # self._scaled_frames = dict()\n\n @classmethod\n def get_instance(cls, data_source, state_codes, start_date, end_date, indicators, scaler, **kwargs):\n cls = get_attribute(inspect.getmodule(cls).__name__ + '.Process' + data_source)\n return cls(data_source, state_codes, start_date, end_date, indicators, scaler, **kwargs)\n\n def process(self):\n self._fetch.fire(self)\n self._pre_analyze.fire(self, self._origin_frames)\n self._analyze.fire(self, self._pre_frames, self._label)\n self._post_analyze.fire(self, self._analyze_frames, self._col_order)\n return self._dates, self._post_frames, self._origin_frames, self._post_frames\n\n\nclass IndicatorAnalysis:\n def __init__(self, origin_frame, indicators, label):\n self._origin_frame = origin_frame\n self._indicators = indicators\n self._index = origin_frame.index.values\n self._label = label\n\n # def rsi(self, *args):\n # para = args[0]\n # result = talib.RSI(self._origin_frame[para[0]], int(para[1]))\n # return pd.DataFrame(result, columns=['rsi_{}'.format(para[1])])\n #\n # def macd(self, *args):\n # para = args[0]\n # result = talib.MACD(self._origin_frame[para[0]], int(para[1]), int(para[2]), int(para[3]))\n # return pd.DataFrame(result[0], columns=['macd'])\n\n # def stoch(self, *args):\n # # df_indicators = pd.DataFrame()\n #\n # para = args[0]\n # result = talib.STOCH(self._origin_frame['high'], self._origin_frame['low'], self._origin_frame[para[0]],\n # fastk_period=int(para[1]), slowk_period=int(para[2]), slowd_period=int(para[3]))\n #\n # for idx, res in enumerate(result):\n # if idx == 0:\n # df_result = pd.DataFrame(res, columns=['stoch' + str(idx)])\n # else:\n # df_result = df_result.join(pd.DataFrame(res, columns=['stoch' + '_' + str(idx)]))\n #\n # return df_result\n\n\n def trend(self, *args):\n \"\"\" \u0015 If closing price value leads its MA 15 and MA 15 is rising for last 5 days then trend is Uptrend\n i.e. trend signal is 1.\n\n If closing price value lags its MA 15 and MA 15 is falling for last 5 days then trend is Downtrend\n i.e. trend signal is 0.\n\n For up trend:\n Tr_i = [(cp_i - min cp)/(max cp - min cp)] * 0.5 + 0.5\n\n For down trend:\n Tr_i = [(cp_i - min cp)/(max cp - min cp)] * 0.5\n\n min cp = min(cp_i, cp_i+1, cp_i+2)\n max cp = max(cp_i, cp_i+1, cp_i+2)\n\n \"\"\"\n\n TREND_DOWN = -1\n TREND_NO = 0\n TREND_UP = 1\n\n def determine_trend_ma(targets, trend_bars_idx, current_val):\n # determine the trend based on the move average.\n # e.x. if the target falling in last 5(trend_bars_idx) days and current value lower than mv, trend is down\n latest_trend = None\n for idx in range(trend_bars_idx):\n\n # if trend_bars_idx - idx - 2 == 0:break\n\n # if the current target is larger than the previous one\n if targets[trend_bars_idx - idx] >= targets[trend_bars_idx - idx - 1]:\n trend = TREND_UP\n if latest_trend == TREND_DOWN:\n return TREND_NO\n latest_trend = trend\n if targets[trend_bars_idx - idx] < targets[trend_bars_idx - idx - 1]:\n trend = TREND_DOWN\n if latest_trend == TREND_UP:\n return TREND_NO\n latest_trend = trend\n\n if trend == TREND_UP and current_val < targets[trend_bars_idx]:\n return TREND_NO\n elif trend == TREND_DOWN and current_val > targets[trend_bars_idx]:\n return TREND_NO\n\n return trend\n\n def calculate_up_trend(current_val, target_future_bars):\n if max(target_future_bars) == min(target_future_bars):\n breakpoint()\n return (1-((current_val - min(target_future_bars)) / (max(target_future_bars) - min(target_future_bars)))) * 0.5 + 0.5\n # return ((current_val - min(target_future_bars)) / (max(target_future_bars) - min(target_future_bars))) * 0.5 + 0.5\n\n def calculate_down_trend(current_val, target_future_bars):\n return (1-((current_val - min(target_future_bars)) / (max(target_future_bars) - min(target_future_bars)))) * 0.5\n # return ((current_val - min(target_future_bars)) / (max(target_future_bars) - min(target_future_bars))) * 0.5\n\n # we calculate the trend\n target = args[0]\n result = target.copy()\n\n ma_bars = int(args[1])\n trend_bars = int(args[2])\n future_bars = int(args[3])\n\n # get moving average\n ma = talib.MA(target, timeperiod=ma_bars)\n\n last_trend = None\n for curr_idx, val in enumerate(target):\n result[curr_idx] = None\n if curr_idx >= ma_bars + trend_bars-1 and curr_idx < (len(target)-future_bars):\n target_trend_bars = ma[curr_idx-trend_bars: curr_idx+1]\n\n # determine the trend based on the move average.\n # e.x. if falling in last 5 days, trend is down\n ma_trend = determine_trend_ma(target_trend_bars, trend_bars, val)\n\n # if trend is down and price is lower than the ma we calculate trend with down formula\n if ma_trend == TREND_DOWN:\n last_trend = TREND_DOWN\n result[curr_idx] = calculate_down_trend(val, target[curr_idx: curr_idx+future_bars])\n\n # if trend is up and price is higher than the ma we calculate trend with up formula\n elif ma_trend == TREND_UP:\n last_trend = TREND_UP\n result[curr_idx] = calculate_up_trend(val, target[curr_idx: curr_idx+future_bars])\n elif ma_trend == TREND_NO:\n # if have no trend, we get the last trend and calculate the trend\n if last_trend == TREND_DOWN:\n result[curr_idx] = calculate_down_trend(val, target[curr_idx: curr_idx + future_bars])\n elif last_trend == TREND_UP:\n result[curr_idx] = calculate_up_trend(val, target[curr_idx: curr_idx + future_bars])\n\n # return pd.DataFrame(result).rename(columns={'close': 'trend_{}'.format(args[1:4])})\n # self._label = 'trend_{}'.format(\"_\".join([str(v) for v in args[1:4]]))\n return pd.DataFrame(data=result, index=self._index.flatten(), columns=[self._label])\n\n\n def trend_backward(self, *args):\n \"\"\" \u0015 If closing price value leads its MA 15 and MA 15 is rising for last 5 days then trend is Uptrend\n i.e. trend signal is 1.\n\n If closing price value lags its MA 15 and MA 15 is falling for last 5 days then trend is Downtrend\n i.e. trend signal is 0.\n\n For up trend:\n Tr_i = [(cp_i - min cp)/(max cp - min cp)] * 0.5 + 0.5\n\n For down trend:\n Tr_i = [(cp_i - min cp)/(max cp - min cp)] * 0.5\n\n min cp = min(cp_i, cp_i-1, cp_i-2)\n max cp = max(cp_i, cp_i-1, cp_i-2)\n\n \"\"\"\n\n TREND_DOWN = -1\n TREND_NO = 0\n TREND_UP = 1\n\n def determine_trend_ma(targets, trend_bars_idx, current_val):\n # determine the trend based on the move average.\n # e.x. if the target falling in last 5(trend_bars_idx) days and current value lower than mv, trend is down\n latest_trend = None\n for idx in range(trend_bars_idx):\n\n # if trend_bars_idx - idx - 2 == 0:break\n\n # if the current target is larger than the previous one\n if targets[trend_bars_idx - idx] >= targets[trend_bars_idx - idx - 1]:\n trend = TREND_UP\n if latest_trend == TREND_DOWN:\n return TREND_NO\n latest_trend = trend\n if targets[trend_bars_idx - idx] < targets[trend_bars_idx - idx - 1]:\n trend = TREND_DOWN\n if latest_trend == TREND_UP:\n return TREND_NO\n latest_trend = trend\n\n if trend == TREND_UP and current_val < targets[trend_bars_idx]:\n return TREND_NO\n elif trend == TREND_DOWN and current_val > targets[trend_bars_idx]:\n return TREND_NO\n return trend\n\n def calculate_up_trend(current_val, target_past_bars):\n # if max(target_past_bars) == min(target_past_bars):\n # breakpoint()\n # return (1 - ((current_val - min(target_past_bars)) / (max(target_past_bars) - min(target_past_bars)))) * 0.5 + 0.5\n return ((current_val - min(target_past_bars)) / (max(target_past_bars) - min(target_past_bars))) * 0.5 + 0.5\n\n def calculate_down_trend(current_val, target_past_bars):\n # return (1 - ((current_val - min(target_past_bars)) / (max(target_past_bars) - min(target_past_bars)))) * 0.5\n return ((current_val - min(target_past_bars)) / (max(target_past_bars) - min(target_past_bars))) * 0.5\n\n # we calculate the trend\n target = args[0]\n result = target.copy()\n\n ma_bars = int(args[1])\n trend_bars = int(args[2])\n past_bars = int(args[3])\n input_col = args[4]\n\n # get moving average\n ma = talib.MA(target, timeperiod=ma_bars)\n\n last_trend = None\n for curr_idx, val in enumerate(target):\n result[curr_idx] = None\n if curr_idx >= ma_bars + trend_bars-1 and curr_idx >= past_bars:\n target_trend_bars = ma[curr_idx-trend_bars: curr_idx+1]\n\n # determine the trend based on the move average.\n # e.x. if falling in last 5 days, trend is down\n ma_trend = determine_trend_ma(target_trend_bars, trend_bars, val)\n\n # if trend is down and price is lower than the ma we calculate trend with down formula\n if ma_trend == TREND_DOWN:\n last_trend = TREND_DOWN\n result[curr_idx] = calculate_down_trend(val, target[curr_idx-past_bars+1: curr_idx+1])\n\n # if trend is up and price is higher than the ma we calculate trend with up formula\n elif ma_trend == TREND_UP:\n last_trend = TREND_UP\n result[curr_idx] = calculate_up_trend(val, target[curr_idx-past_bars+1: curr_idx+1])\n elif ma_trend == TREND_NO:\n # if have no trend, we get the last trend and calculate the trend\n if last_trend == TREND_DOWN:\n result[curr_idx] = calculate_down_trend(val, target[curr_idx-past_bars+1: curr_idx+1])\n elif last_trend == TREND_UP:\n result[curr_idx] = calculate_up_trend(val, target[curr_idx-past_bars+1: curr_idx+1])\n\n # return pd.DataFrame(result).rename(columns={'close': 'trend_{}'.format(args[1:4])})\n # self._label = 'trend_{}'.format(\"_\".join([str(v) for v in args[1:4]]))\n # return pd.DataFrame(data=result, index=self._index.flatten(), columns=[self._label])\n args_str = \"_\".join([str(v) for v in args[1:4]])\n return pd.DataFrame(data=result, index=self._index.flatten(), columns=[\"trend_backward|{}|{}\".format(input_col[0], args_str)])\n\n # Fourier transformation\n def fft(self, *args):\n target = args[0]\n input_col = args[-1][0]\n # args_str = [\"FTT{}Comps\".format(x) for x in args[1:]]\n result = target.copy()\n fft_list = np.fft.fft(result)\n df_all = pd.DataFrame(index=self._index.flatten())\n for num_ in args[1:-1]:\n col_name_str = \"FFT{}|{}Comps\".format(input_col, num_)\n fft_list_temp = np.copy(fft_list)\n fft_list_temp[num_:-num_] = 0\n fft_list_temp = np.abs(np.fft.ifft(fft_list_temp))\n df_all[col_name_str] = fft_list_temp\n return df_all\n\n\n #input (p_d_q)\n #p: periods taken for auto-regressive model\n #d: Intergrated order, difference\n #q: moving average, periods in moving average model\n def arima(self, *args):\n from statsmodels.tsa.arima_model import ARIMA\n target = args[0]\n p = args[1]\n d = args[2]\n q = args[3]\n input_col = args[-1][0]\n train = target[:11]\n test = target[11:]\n history = [x for x in train]\n predictions = list()\n for t in range(len(test)):\n try:\n model = ARIMA(history, order=(p, d, q))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0]\n except Exception:\n yhat = np.nan\n predictions.append(yhat)\n obs = test[t]\n history.append(obs)\n predictions = [np.nan] * 11 + predictions\n print(predictions)\n args_str = \"_\".join([str(v) for v in args[1:4]])\n return pd.DataFrame(data=predictions, index=self._index.flatten(),\n columns=[\"arima|{}|{}\".format(input_col, args_str)])\n\n @catch_exception(LOGGER)\n def analyze(self):\n df_indicators = pd.DataFrame()\n # instance = self.get_instance()\n for indicator in self._indicators:\n indicator = indicator.lower()\n meta_info = indicator.split('|')\n # if len(meta_info) == 4 and meta_info[3] == \"label\":\n # self._label = \"_\".join([meta_info[0],meta_info[2]])\n method_name = meta_info[0]\n input_col = meta_info[1].split('_')\n # method_name = meta_info[0]\n # del meta_info[0]\n # input_col = [val for val in paras if val.startswith(input_selector)]\n # remove the columns, only arguments remained\n # args = list((arg for arg in paras if not any(col == arg for col in input_col)))\n args = []\n if len(meta_info) == 3:\n args = meta_info[2].split('_')\n args = list(map(int, args)) # convert from string to int\n\n # input_col_final = [col.replace(input_selector, '') for col in input_col]\n input = self._origin_frame[input_col].transpose().values\n\n method = getattr(self, method_name, None)\n if method is not None:\n\n result = method(*input, *args, input_col)\n\n if df_indicators.empty:\n df_indicators = result if result is not None else df_indicators\n else:\n df_indicators = df_indicators.join(result) if result is not None else df_indicators\n\n else:\n # try to get the method from talib\n method = get_attribute('.'.join(['talib', method_name.upper()]))\n # # get input columns\n # input_col = [val for val in meta_info if val.startswith(source_selector)]\n #\n # # remove the columns, only arguments remained\n # args = list((arg for arg in meta_info if not any(col == arg for col in input_col)))\n # args = list(map(int, args)) # convert from string to int\n # input_col_final = [col.replace(source_selector, '') for col in input_col]\n # input = self._origin_frame[input_col_final].transpose().values\n result = method(*input, *args)\n args_str = \"_\".join([str(v) for v in args])\n # if isinstance(result, pd.core.series.Series):\n if not isinstance(result, tuple):\n df_result = pd.DataFrame(data=result, index=self._index.flatten(), columns=[\"{}|{}|{}\".format(method_name, input_col[0], args_str)])\n else:\n for idx, res in enumerate(result):\n if idx == 0:\n df_result = pd.DataFrame(data=res, index=self._index.flatten(),\n columns=[\"{}_{}_{}\".format(method_name,args_str, str(idx))])\n else:\n df_result = df_result.join(pd.DataFrame(data=res, index=self._index.flatten(),\n columns=[\"{}_{}_{}\".format(method_name,args_str, str(idx))]))\n # df_result = pd.DataFrame(result[0], columns=[method_name])\n if df_indicators.empty:\n df_indicators = df_result\n else:\n df_indicators = df_indicators.join(df_result)\n\n df_result = None # clean the data frame\n\n return df_indicators\n\n def add_analysis(self):\n return self._origin_frame.join(self.analyze())\n\n def get_instance(self):\n \"\"\" initialize a instance \"\"\"\n ds_cls = get_attribute(\n inspect.__package__ + inspect.getmodulename(__file__) + '.{}'.format(self.__class__.__name__))\n return ds_cls\n\n","repo_name":"wilsonZWS/ETLDL","sub_path":"base/pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":22460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15168011857","text":"import sys\nsys.path.append('../')\n\nimport jieba\nimport jieba.analyse\nfrom optparse import OptionParser\n\n\"\"\"\n參考[Python2文檔 - 15.5. optparse — Parser for command line options]\n(https://docs.python.org/2/library/optparse.html#creating-the-parser)\n此處USAGE參數代表的是help message\n\"\"\"\nUSAGE = \"usage: python extract_tags.py [file name] -k [top k]\"\n\nparser = OptionParser(USAGE)\n\"\"\"\nfrom https://docs.python.org/2/library/optparse.html#optparse.Option.dest:\nIf the option’s action implies writing or modifying a value somewhere, \nthis tells optparse where to write it: \ndest names an attribute of the options object that optparse builds \nas it parses the command line.\n\n總結一下,就是當使用者輸入-k xxx時,parser.parse_args()回傳的opt的topK屬性就會被設為xxx\n\"\"\"\nparser.add_option(\"-k\", dest=\"topK\")\n\"\"\"\nfrom https://docs.python.org/2/library/optparse.html#module-optparse:\nparse_args() returns two values:\noptions, an object containing values for all of your options—e.g. if --file takes a single string argument, then options.file will be the filename supplied by the user, or None if the user did not supply that option\nargs, the list of positional arguments leftover after parsing options\nparser.parse_args()會回傳options及args兩個物件\noptions代表可選參數,args代表位置參數\n在本例中[file name]為位置參數,-k [top k]為可選參數\n\nAs it parses the command line, optparse sets attributes of the options \nobject returned by parse_args() based on user-supplied command-line values.\nparser.parse_args()會回傳一個opt物件,opt的topK屬性由使用者輸入的參數決定\n\"\"\"\nopt, args = parser.parse_args()\n\n\n#代表使用者沒有輸入位置參數[file name]\nif len(args) < 1:\n print(USAGE)\n sys.exit(1)\n\nfile_name = args[0]\n\n# 使用opt.topK來獲取可選參數topK\nif opt.topK is None:\n topK = 10\nelse:\n topK = int(opt.topK)\n\ncontent = open(file_name, 'rb').read()\n\ntags = jieba.analyse.extract_tags(content, topK=topK)\n\nprint(\",\".join(tags))\n","repo_name":"keineahnung2345/jieba-code-reading-notes-zh","sub_path":"test/extract_tags.py","file_name":"extract_tags.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"24890890605","text":"animals =['hen','horse','zebra','chicken']\n# animals.append('man')\n# animals.insert(2,'man')\n# animals.pop()\n# animals.sort()\nnumbers = [1,3,4,5]\n# numbers.reverse()\nnew = numbers.index(4)\n\n# new = [str(x) for x in numbers] + animals\n# x=10\n# list1=[ x for x in range(10) if x %2 ==0]\nnmnber = int(input(\"Enter an integer: \"))\n\n# a=[]\n# for x in number\n# a.append(x)\n\nlist1=[1,2,3,4,5]\ntuple1=(4,5,6,7,8)\nset1={7,8,9,10}\n\n\nnew_list = list1+list(tuple1)+list(set1)\nnew_set= set(new_list)\nnew =[]\nsum=0\nfor i in new_list:\n sum+=i\n\nprint(sum)\n\n\nprint(new)\n","repo_name":"AjithMthomas/Data-Stuture-Algorithum","sub_path":"list/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"71608658617","text":"import sys\nimport random\n\n\nclass Hasher:\n\tdef __init__(self, sbox):\n\t\tself.sbox = sbox\n\n\tdef transform(self, key, data):\n\t\t# data is an array of size 4\n\t\tt = bytearray([key[i] ^ data[i] for i in range(4)])\n\t\th = self.sbox[0][t[0]] + self.sbox[1][t[1]]\n\t\th ^= self.sbox[2][t[2]]\n\t\th += self.sbox[3][t[3]]\n\t\th &= 0xFFFFFFFF # take care of overflow\n\t\treturn h.to_bytes(4, 'little')\n\n\nclass Feistel:\n\tdef __init__(self, keys, roundf):\n\t\tself.keys = keys\n\t\tself.roundf = roundf\n\t\n\n\tdef encode(self, plain):\n\t\t# plain is an array of length 8\n\t\tcipher = bytearray(plain)\n\n\t\t# write code here\n\n\t\treturn cipher\n\n\n\tdef decode(self, cipher):\n\t\t# cipher is a byte array of length 8\n\t\tplain = bytearray(cipher)\n\n\t\t# write code here\n\n\t\treturn plain\n\n\ndef main(argv):\n\tsbox = [[random.getrandbits(32) for r in range(256)] for i in range(4)]\n\thasher = Hasher(sbox) \n\n\tkeys = [random.getrandbits(32).to_bytes(4, 'little') for i in range(int(argv[2]))]\n\tf = Feistel(keys, hasher.transform)\n\n\tmsg = argv[1]\n\tprint('Message:', msg)\n\n\tcipher = f.encode(msg.encode())\n\tprint('After encoding:', cipher)\n\n\tplain = f.decode(cipher)\n\tprint('After decoding:', plain)\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) != 3 or len(sys.argv[1]) != 8:\n\t\tprint('usage: python %s message rounds' % sys.argv[0])\n\t\tprint('message should be 8 characters')\n\telse:\n\t\tmain(sys.argv)\n","repo_name":"azriel-stephen/cyber_security_base_2023","sub_path":"mooc-cyber-advanced-topics-2023/part3-05.feistel/src/feistel.py","file_name":"feistel.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26338083614","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n#import time\n\nfrom random_walk import RandomWalk\n\nrw = RandomWalk(1000)\n\nplt.ion()\n\nfig, ax = plt.subplots()\ncmap = mpl.cm.get_cmap(\"Blues\")\nsc = ax.scatter(rw.x_values, rw.y_values, c = [1], cmap = cmap, s = 10)\n# c = list(range(len(rw.x_values))), cmap = plt.cm.Blues, edgecolor = 'none', s = 10\n\nplt.xlim(-1, 1)\nplt.ylim(-1, 1)\n\nplt.draw()\n\nwhile len(rw.x_values) < rw.num_points:\n if rw.fill_walk():\n sc.set_offsets(np.c_[rw.x_values, rw.y_values])\n\n # Using Normalize to make a normalised colour list based on the relevant data, \n # mapping it to a ScalarMappable, and using that to set the face colour and \n # c limits on each frame of the animation.\n n = mpl.colors.Normalize(vmin = 1, vmax = len(rw.x_values))\n m = mpl.cm.ScalarMappable(norm=n, cmap=cmap)\n sc.set_facecolor(m.to_rgba(list(range(1, len(rw.x_values) + 1))))\n\n plt.xlim(min(rw.x_values)-5, max(rw.x_values)+5)\n plt.ylim(min(rw.y_values)-5, max(rw.y_values)+5)\n\n fig.canvas.draw()\n #plt.pause(0.001)\n","repo_name":"sunduda/python_data_visualisation","sub_path":"rw_visual.py","file_name":"rw_visual.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27602951295","text":"# -*- coding: utf-8 -*-\n'''\n.. versionadded:: v0.12.0\n'''\nfrom PySide2 import QtGui, QtCore, QtWidgets\nimport cv2\n\nfrom .invoker import Invoker\n\n\nclass ImageViewer(QtWidgets.QGraphicsView):\n '''View ``QtGui.QPixmap``; automatically fit in frame with pan and zoom.\n\n See: https://stackoverflow.com/a/35514531/345236\n '''\n imageClicked = QtCore.Signal(QtCore.QPoint)\n\n def __init__(self, parent):\n super(ImageViewer, self).__init__(parent)\n self._zoom = 0\n self._empty = True\n self._scene = QtWidgets.QGraphicsScene(self)\n self._photo = QtWidgets.QGraphicsPixmapItem()\n self._scene.addItem(self._photo)\n self.setScene(self._scene)\n self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(30, 30, 30)))\n self.setFrameShape(QtWidgets.QFrame.NoFrame)\n\n def hasImage(self):\n return not self._empty\n\n def fitInView(self, scale=True):\n rect = QtCore.QRectF(self._photo.pixmap().rect())\n if not rect.isNull():\n self.setSceneRect(rect)\n if self.hasImage():\n unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))\n self.scale(1 / unity.width(), 1 / unity.height())\n viewrect = self.viewport().rect()\n scenerect = self.transform().mapRect(rect)\n factor = min(viewrect.width() / scenerect.width(),\n viewrect.height() / scenerect.height())\n self.scale(factor, factor)\n self._zoom = 0\n\n def setPhoto(self, pixmap=None):\n if pixmap and not pixmap.isNull():\n self._empty = False\n self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)\n self._photo.setPixmap(pixmap)\n else:\n self._empty = True\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n self._photo.setPixmap(QtGui.QPixmap())\n\n def wheelEvent(self, event):\n if self.hasImage():\n if event.angleDelta().y() > 0:\n factor = 1.125\n self._zoom += 1\n else:\n factor = 0.875\n self._zoom -= 1\n if self._zoom == 0:\n self.fitInView()\n else: # self._zoom > 0:\n self.scale(factor, factor)\n\n def toggleDragMode(self):\n if self.dragMode() == QtWidgets.QGraphicsView.ScrollHandDrag:\n self.setDragMode(QtWidgets.QGraphicsView.NoDrag)\n elif not self._photo.pixmap().isNull():\n self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag)\n\n def mousePressEvent(self, event):\n if self._photo.isUnderMouse():\n self.imageClicked.emit(QtCore.QPoint(event.pos()))\n super(ImageViewer, self).mousePressEvent(event)\n\n\nclass QCVideoViewer(ImageViewer):\n '''Show latest frame received from a ``frame-ready`` blinker signal.\n '''\n def __init__(self, parent, signals):\n super(QCVideoViewer, self).__init__(parent)\n self._signals = signals\n self._invoker = Invoker()\n signals.signal('frame-ready').connect(self.on_frame_ready)\n self._frame = None\n\n def on_frame_ready(self, sender, **record):\n frame = record['frame']\n rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n def draw_frame(rgb_frame):\n image = QtGui.QImage(rgb_frame, rgb_frame.shape[1],\n rgb_frame.shape[0],\n rgb_frame.shape[1] * 3,\n QtGui.QImage.Format_RGB888)\n pix = QtGui.QPixmap(image)\n self.setPhoto(pix)\n if self._frame is None:\n self.fitInView()\n self._frame = rgb_frame\n\n self._invoker.invoke(draw_frame, rgb_frame)\n\n def resizeEvent(self, event):\n self.fitInView()\n return super(QCVideoViewer, self).resizeEvent(event)\n","repo_name":"sci-bots/dropbot-chip-qc","sub_path":"src/dropbot_chip_qc/ui/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34781875171","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.views.i18n import JavaScriptCatalog\n\nfrom ajax_select import urls as ajax_select_urls\nfrom rest_framework.authtoken import views\nfrom rest_framework.documentation import include_docs_urls\nfrom rest_framework_swagger.views import get_swagger_view\n\nfrom .server.routers import router, device_router\nfrom .catalog.routers import router as catalog_router\nfrom .stats.routers import router as stats_router\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nTITLE = 'Migasfree REST API'\n\nswagger_schema_view = get_swagger_view(title=TITLE)\n\nurlpatterns = [\n url(r'^', include('django.contrib.auth.urls')),\n url(r'^docs/', swagger_schema_view, name='docs'),\n url(r'^api-docs/', include_docs_urls(title=TITLE)),\n url(r'^token-auth/$', views.obtain_auth_token),\n url(r'^rest-auth/', include('rest_auth.urls')),\n url(r'^api/v1/token/', include(router.urls)),\n url(r'^api/v1/token/', include(stats_router.urls)),\n url(r'^api/v1/token/devices/', include(device_router.urls)),\n url(r'^api/v1/token/catalog/', include(catalog_router.urls)),\n\n url(r'', include('migasfree.server.urls')),\n url(r'', include('migasfree.stats.urls')),\n\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', admin.site.urls),\n\n url(r'^admin/lookups/', include(ajax_select_urls)),\n url(r'^markdownx/', include('markdownx.urls')),\n\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n]\n\nif settings.DEBUG:\n try:\n import debug_toolbar\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n except ImportError:\n pass\n\n if settings.STATIC_ROOT is not None:\n urlpatterns += static(\n settings.STATIC_URL,\n document_root=settings.STATIC_ROOT\n )\n\n if settings.MEDIA_ROOT is not None:\n urlpatterns += static(\n settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT,\n show_indexes=True\n )\n","repo_name":"migasfree/migasfree","sub_path":"migasfree/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"22"} +{"seq_id":"46762034959","text":"def enq(item):\n global last\n last += 1\n tree[last] = item\n c = last\n p = c//2\n while p>=1 and tree[c] < tree[p]:\n tree[c], tree[p] = tree[p], tree[c]\n c = p\n p = c//2\n\nTC = int(input())\nfor tc in range(1, TC+1):\n\n N = int(input())\n lst = list(map(int, input().split()))\n tree = [0]*(N+1)\n last = 0\n for item in lst:\n enq(item)\n print(tree)\n\n\n lastt = tree[-1]\n sumV = 0\n while True:\n idx = tree.index(lastt)\n idx = idx//2\n sumV += tree[idx]\n if idx == 1:\n break\n # sumV += tree[idx]\n lastt = tree[idx]\n\n print(f'#{tc} {sumV}')\n\n\n","repo_name":"ChoiWoooJin/SWEA","sub_path":"5177 이진힙.py","file_name":"5177 이진힙.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16443053203","text":"from app import db\nimport cx_Oracle\nimport pandas as pd\n\nfrom app import searchQuery\nimport traceback, socket\n\ndef query(win,logic,date1,val1,val2,val3):\n \"\"\"메인 쿼리\"\"\"\n try:\n if win is not None:\n cur = connect_hkfund()\n if win == 1 and logic == 1:\n \"\"\"date1:날짜\"\"\"\n view1table1 = searchQuery.returnSQL('tab5_view1Table1')\n view1table2 = searchQuery.returnSQL('tab5_view1Table2')\n sql = searchQuery.returnSQL('tab5_viewCommonSearchQuery').format(table1=view1table1, table2=view1table2, val='suik_group', date=date1)\n elif win == 1 and logic == 2:\n \"\"\"date1:날짜\"\"\"\n view2table = searchQuery.returnSQL('tab5_view2Table')\n sql = searchQuery.returnSQL('tab5_viewCommonSearchQuery').format(table1=view2table, table2=view2table, val='SUIK_FUND_TYPE', date=date1)\n elif win == 2 and logic == 1:\n \"\"\"date1:날짜,val1:본부, val2:수익그룹, val3:PN_NPS\"\"\"\n sql = searchQuery.returnSQL('tab5_groupCommonSearchQuery').format(date=date1, mg_bu=val1, val='SUIK_FUND_TYPE',val2='suik_group',select=val2,nps=val3)\n elif win == 2 and logic == 2:\n \"\"\"date1:날짜,val1:본부, val2:항목, val3:PN_NPS\"\"\"\n sql = searchQuery.returnSQL('tab5_groupCommonSearchQuery').format(date=date1, mg_bu=val1, val='suik_group',val2='SUIK_FUND_TYPE',select=val2,nps=val3)\n elif win == 3 and logic == 1:\n \"\"\"date1:날짜\"\"\"\n sql = searchQuery.returnSQL('tab5_itemCommonSearchQuery').format(date=date1)\n elif win == 3 and logic == 2:\n \"\"\"date1:날짜\"\"\"\n sql = searchQuery.returnSQL('tab5_itemCommonSearchQuery').format(date=date1)\n elif win == 1 and logic == 99:\n \"\"\"val1:수익자\"\"\"\n sql = searchQuery.returnSQL('find_SuikjaSearchQuery').format(suikja=val1)\n elif win == 2 and logic == 99:\n \"\"\"date1:날짜,val1:수익자\"\"\"\n if val1=='전체':\n suikja=''\n else:\n suikja=val1\n sql = searchQuery.returnSQL('tab5_suikjaSearchQuery1').format(date=date1,suikja=suikja)\n else:\n print('미구현')\n print(win,logic,date1,val1,val2,val3)\n # print(sql)\n cur.execute(sql)\n row = cur.fetchall()\n return row\n except:\n print(traceback.format_exc())\n\ndef dateQuery(gubun,module,win,date1):\n \"\"\"날짜 관련 쿼리\"\"\"\n try:\n sql=''\n cur = connect_hkfund()\n if module == 'recently':\n \"\"\"DB 최근자료 날짜 가져옴\"\"\"\n sql = searchQuery.returnSQL('tab5_recentlyDateSearchQuery')\n elif module == 'header':\n \"\"\"조회 쿼리의 기준별 일자 조회\"\"\"\n query = 'tab5_headerDateSearchQuery'\n sql = searchQuery.returnSQL(query).format(date=date1)\n elif module == 'parity':\n \"\"\"조회값 하나펀드 자료테이블과 비교\"\"\"\n query = 'tableParityCheck'\n sql = searchQuery.returnSQL(query).format(date=date1)\n # print(sql)\n cur.execute(sql)\n row = cur.fetchall()\n df=pd.DataFrame(row)\n\n if module == 'header':\n df.columns = ['null', 'str', 'today', 'lastmonth', 'lastquater', 'lastyear', 'last2year']\n if gubun == 'tab5_suikja':\n df=df[['str','null','today','today','lastmonth','lastquater','lastyear','last2year','lastmonth','lastquater',\n 'lastyear','last2year']]\n elif win == 1:\n df=df[['str','today','today','lastmonth','lastquater','lastyear','today','lastmonth','lastquater','lastyear']]\n elif win == 2:\n df = df[['str', 'today', 'today', 'lastmonth', 'lastquater', 'lastyear', 'last2year', 'lastmonth', 'lastquater',\n 'lastyear', 'last2year']]\n elif win == 3:\n df = df[['str', 'null', 'today', 'lastmonth', 'lastquater', 'lastyear', 'last2year', 'lastmonth', 'lastquater',\n 'lastyear', 'last2year']]\n return df\n except:\n print(traceback.format_exc())\n\ndef etcQuery(module,val1):\n \"\"\"기타 쿼리들\"\"\"\n try:\n sql=''\n cur = connect_hkfund()\n if module == 'group':\n \"\"\"DB 최근자료 날짜 가져옴\"\"\"\n query = 'findGroup'\n sql = searchQuery.returnSQL(query).format(suikja=val1)\n # print(sql)\n cur.execute(sql)\n row = cur.fetchall()\n df = pd.DataFrame(row)\n return df\n except:\n print(traceback.format_exc())\n\ndef connect_hkfund():\n \"\"\"오라클 DB에 접속\"\"\"\n try:\n conn={}\n server = [{'id': 'system', 'pw': '1234', 'connect': 'localhost:1521/xe'},\n {'id': 'HKCL', 'pw': 'hkcl', 'connect': '11.10.5.11:1521/hkfund'}]\n userinfo = {}\n ip=str(socket.gethostbyname(socket.gethostname()))\n if ip[0:2] == '19':\n userinfo.update(server[0])\n elif ip[0:2] == '11':\n userinfo.update(server[1])\n else:\n print(socket.gethostbyname(socket.gethostname()))\n conn = cx_Oracle.connect(userinfo['id'], userinfo['pw'], userinfo['connect'])\n cur = conn.cursor()\n return cur\n except:\n print(traceback.format_exc())\n# conn = cx_Oracle.connect(\"HKCL\", \"hkcl\", \"11.10.5.11:1521/hkfund\")\n# conn = cx_Oracle.connect(\"system\", \"1234\", \"localhost:1521/xe\")\n# https://wikidocs.net/81051","repo_name":"ainesof/study_python","sub_path":"webproject1/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74222360697","text":"\"\"\"\nSimple example on how to log scalars and images to tensorboard without tensor ops.\n\nLicense: Copyleft\n\"\"\"\n\n__author__ = \"Michael Gygli\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom io import BytesIO\n\n\ndef log_scalar(callback, tag, value, step):\n \"\"\"Log a scalar variable.\n\n Parameter\n ----------\n tag : basestring\n Name of the scalar\n value\n step : int\n training iteration\n \"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag,\n simple_value=value)])\n callback.writer.add_summary(summary, step)\n\n\ndef log_images(callback, tag, images, step):\n \"\"\"Logs a list of images.\"\"\"\n\n im_summaries = []\n for nr, img in enumerate(images):\n # Write the image to a string\n s = BytesIO()\n plt.imsave(s, img, format='png')\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),\n image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=im_summaries)\n callback.writer.add_summary(summary, step)\n\n\ndef log_histogram(callback, tag, values, step, bins=1000):\n \"\"\"Logs the histogram of a list/vector of values.\"\"\"\n # Convert to a numpy array\n values = np.array(values)\n\n # Create histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill fields of histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]\n # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30\n # Thus, we drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n callback.writer.add_summary(summary, step)\n callback.writer.flush()","repo_name":"luogen1996/MCN","sub_path":"utils/tensorboard_logging.py","file_name":"tensorboard_logging.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"22"} +{"seq_id":"15401525461","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport json\n\nfrom oslo_log import log\nfrom template.exceptions import tmp_except\n\nLOG = log.getLogger(__name__)\n\ntest = 1\n\nclass Req(object):\n # 目前默认是http\n _scheme = \"http\"\n '''\n _method={\n \"POST\": self._post,\n \"GET\": self._get,\n \"DELETE\": self._delete\n }\n '''\n def __init__(self,url):\n self._endpoint = url\n self._method={\n \"POST\": self._post,\n \"GET\": self._get,\n \"DELETE\": self._delete\n }\n\n def _get_endpoint(self):\n if self._scheme == \"https\":\n tcloud_url = \"https://\" + self._endpoint\n else:\n tcloud_url = \"http://\" + self._endpoint\n return tcloud_url\n\n def _post(self,**kwargs):\n body = kwargs.get(\"body\",None)\n headers = kwargs.get(\"headers\",None)\n cookie = kwargs.get(\"cookies\",None)\n para = kwargs.get(\"para\",None)\n \n try:\n resp = requests.post(self._get_endpoint(),params=para,data=json.dumps(body),headers=headers,cookies=cookie)\n except requests.ConnectTimeout as err:\n LOG.error(\"Connection Timeout Error ,ERR: %s\" % err)\n raise tmp_except.TemplateInternalException(tmp_except.InternalError,\"内部错误\")\n except reuqests.ConnectionError as err:\n LOG.error(\"Connection Error ,ERR: %s\" % err)\n raise tmp_except.TemplateInternalException(tmp_except.InternalError,\"内部错误\")\n except:\n LOG.error(\"Connection Unknown Error.\")\n raise tmp_except.TemplateInternalException(tmp_except.InternalError,\"内部错误\")\n \n if resp.status_code != 200:\n raise tmp_except.TemplateInternalException(tmp_except.InternalError,\"内部错误\")\n\n resp_content = resp.content.decode(\"utf-8\")\n resp_json = json.loads(resp_content)\n\n return resp_json\n \n\n def _get(self,**kwargs):\n \"\"\"\n To do\n \"\"\"\n pass\n\n def _delete(self,**kwargs):\n \"\"\"\n To do\n \"\"\"\n pass\n\n def make_request(self,method,**kwargs):\n resp = self._method[method](**kwargs)\n return resp\n\ndef send(method,url=None,**kwargs):\n \"\"\"\n Call http request\n \"\"\"\n global test\n if test == 1:\n return {'ret':0}\n rq = Req(url)\n try:\n rsp = rq.make_request(method,**kwargs)\n except tmp_except.TemplateInternalException as err:\n raise\n\n return rsp","repo_name":"C2python/template","sub_path":"template/api/auth/_request.py","file_name":"_request.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31550359514","text":"from .DatatypeHandler import DatatypeHandler\nimport numpy as np\n\ndef eulerToQuaternion(roll, pitch, yaw):\n print(\"Roll={0}\\nPitch={1}\\nYaw={2}\".format(roll, pitch, yaw))\n cr = np.cos(roll * 0.5)\n sr = np.sin(roll * 0.5)\n cp = np.cos(pitch * 0.5)\n sp = np.sin(pitch * 0.5)\n cy = np.cos(yaw * 0.5)\n sy = np.sin(yaw * 0.5)\n\n # https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n w = cr * cp * cy + sr * sp * sy\n x = sr * cp * cy - cr * sp * sy\n y = cr * sp * cy + sr * cp * sy\n z = cr * cp * sy - sr * sp * cy\n\n return np.array([w,x,y,z])\n\nclass EulerAngleHandler(DatatypeHandler):\n def get_quaternions(self, data, start_column, time_column):\n \"\"\"\n Overrides get_quaternions method of DatatypeHandler.\n Assumes input data is in the form of Euler angles in degrees,\n in (Roll, Pitch, Yaw) order.\n \"\"\"\n RADIANS_PER_DEGREE = (2 * np.pi) / 360.0\n quaternion_list = []\n for line in data:\n if len(line) <= start_column + 2 or line[0] == '#':\n continue\n roll_raw = line[start_column]\n pitch_raw = line[start_column+1]\n yaw_raw = line[start_column+2]\n if roll_raw == 'NONE' or pitch_raw == 'NONE' or yaw_raw == 'NONE':\n quaternion_list.append(np.array([1,0,0,0]))\n continue\n roll = float(roll_raw) * RADIANS_PER_DEGREE\n pitch = float(pitch_raw) * RADIANS_PER_DEGREE\n yaw = float(yaw_raw) * RADIANS_PER_DEGREE\n quaternion_list.append(eulerToQuaternion(roll, pitch, yaw))\n return quaternion_list\n","repo_name":"jpiland16/hmv_test","sub_path":"src/server_side/python_programs/datatype_handlers/EulerAngleHandler.py","file_name":"EulerAngleHandler.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"24562341894","text":"__author__ = 'pablogsal'\r\n# -*- coding: utf-8 -*-\r\n\r\nimport logging\r\nimport telegram\r\nimport cleverbot\r\nimport string\r\nimport tools\r\nimport time\r\nimport os\r\nimport threading\r\nimport uuid\r\nimport bot_commands\r\nfrom collections import Counter\r\n\r\n#Create logger for module\r\nmodule_logger = logging.getLogger('Python_granada_bot.bot_library')\r\n\r\nclass MasterBot(object):\r\n \"\"\"This class manages the bot at a local level. A brief list of taks of this class is:\r\n -Echo to the server for updates.\r\n -Manage the creation and deletion of new and old conversations.\r\n -Organize conversations in new and old depending of the status of the conversation list.\r\n -Update the status of the conversation if needed (parallelization)\r\n\r\n\r\n When instantiate the class you must provide nothing. ToDo: Provide the\r\n\r\n Properties:\r\n\r\n bot -> The telegram API high level wrapper (object of class telegram.bot)\r\n command_library -> An instantiation of the command library class (mainly a dict with the functions).\r\n chat_engine -> The Markovian cleverbot chat engine class (object).\r\n \"\"\"\r\n\r\n def __init__(self,bot_key):\r\n\r\n # Create custom logger to the master to keep track of general things in a master log.\r\n self.logger = logging.getLogger('Python_granada_bot.bot_library.MasterBot')\r\n self.logger.debug('Instanciating class Masterbot.')\r\n #Instantiate the telegram bot\r\n self.bot = telegram.Bot(bot_key)\r\n #Create empty list of active conversations\r\n self.active_conversations = []\r\n\r\n try:\r\n self.last_update_ID = self.bot.getUpdates()[0].update_id\r\n except IndexError:\r\n self.last_update_ID = None\r\n\r\n self.chat_engine = cleverbot.Cleverbot()\r\n\r\n\r\n def echo(self):\r\n \"\"\"\r\n This method queries the server for updates. In the case that we find updatesm then we\r\n start n threads (where n is the number of updates to process) to manage each one).\r\n\r\n When each thread is started, we increase the update_number, so when we query the server again\r\n the server will know that we are done with the processed updates.\r\n\r\n At the end of each update group, we wait for all threads to end and repeat.\r\n\r\n :return: None\r\n \"\"\"\r\n\r\n #Get number of new updates -> This can crash because of reasons. List of possible crashes:\r\n #\r\n # Telegram Error\r\n # No Json possible to decode\r\n\r\n try:\r\n num_updates = len(self.bot.getUpdates(offset=self.last_update_ID))\r\n except Exception as exception:\r\n self.logger.critical('Exception when catching messages: '+str(exception))\r\n time.sleep(60*5) # Sleep 5 minutes.\r\n return # Exit the echo function\r\n\r\n\r\n #If we have updates we do stuff\r\n if num_updates > 0:\r\n\r\n self.logger.debug('Received '+str(num_updates)+' messages to process.')\r\n\r\n # To avoid problems with sequenciality, if there is more that one message of the same user,\r\n # manage the updates in serial mode, if not, run in parallel.\r\n\r\n #Initialize list of ids for the users\r\n list_of_ids=[]\r\n\r\n # Loop over the updates to get the ID's and then construct a dictionary using Counter\r\n for update in self.bot.getUpdates(offset=self.last_update_ID):\r\n list_of_ids.append(update.message.chat_id)\r\n list_of_ids=Counter(list_of_ids)\r\n\r\n #If we have more than one message for the same user\r\n if any([item > 1 for item in list_of_ids.values()]):\r\n\r\n self.logger.debug('Processing updates in serial.')\r\n update_num=1\r\n # Bucle para gestionar cada una de las updates que tenemos\r\n for update in self.bot.getUpdates(offset=self.last_update_ID):\r\n\r\n self.process_update(update=update,update_num=update_num)\r\n update_num = update_num +1\r\n self.last_update_ID = update.update_id + 1\r\n\r\n #If we NOT have more than one message for the same user\r\n else:\r\n self.logger.debug('Processing updates in parallel.')\r\n update_num=1\r\n # Bucle para gestionar cada una de las updates que tenemos\r\n for update in self.bot.getUpdates(offset=self.last_update_ID):\r\n\r\n #Paralelizing threads\r\n thr=threading.Thread(target=self.process_update, args=(update,update_num), kwargs={})\r\n thr.start()\r\n update_num = update_num +1\r\n self.last_update_ID = update.update_id + 1\r\n\r\n\r\n thr.join() # This will wait until the last one is done! :)\r\n\r\n\r\n\r\n def process_update(self,update,update_num):\r\n\r\n \"\"\"\r\n This method process each update. The tasks are organisez as follows:\r\n\r\n 1) Check if the message is text, if not, send a error message to the user.\r\n 2) If is text:\r\n 3) Look in the conversation list to see if we have already a conversation pending with the user\r\n 3.1) If we have a conversation, call the ManageUpdate method in the old conversation and mark\r\n the need_for_new_conversation flag False.\r\n 3.2) If wee do not find and old conversation (the need_for_new_conversation flag is True) create\r\n a new conversation and call the ManageUpdate method in it.\r\n 4) Look for ended conversations in the list and delete them. -> This must be done here because as the\r\n updates run in parallel we need to delete old conversation to avoid the case when for updating a\r\n conversation we must look up in the list for active conversations and find some of them that are ended.\r\n\r\n\r\n :param update: Each (JSON) update to process (property of telegram bot class). - bot.getUpdates property\r\n :param update_num: The update number in the update group (for logger pourposes) - Integer\r\n :return: Nothing\r\n \"\"\"\r\n\r\n self.logger.info('Analizing update '+str(update_num)+'.')\r\n # Cogemos el chat_id de la conversacion y el mensaje\r\n chat_id = update.message.chat_id\r\n message = update.message.text.encode('utf-8')\r\n\r\n if (message): # If the message is made out of text, we can answer it\r\n need_for_new_conversation = True\r\n for conversation in self.active_conversations:\r\n if conversation.chatID == chat_id:\r\n self.logger.info('The message is part of an old conversation')\r\n need_for_new_conversation = False\r\n self.logger.info('Updating the status of the conversation.')\r\n conversation.ManageUpdate(bot=self.bot, raw_message=message,\r\n chat_engine=self.chat_engine,conversation_list=self.active_conversations)\r\n break\r\n\r\n if need_for_new_conversation:\r\n self.logger.info('Creating new conversation for the message')\r\n new_conversation =ActiveConversation(chat_id,message)\r\n self.active_conversations.append( new_conversation )\r\n self.logger.info('There are '+str(len(self.active_conversations))+ ' active conversations')\r\n self.logger.info('Updating the status of the conversation.')\r\n new_conversation.ManageUpdate(bot=self.bot, raw_message=message,\r\n chat_engine=self.chat_engine,conversation_list=self.active_conversations)\r\n\r\n else: # If is not a text message\r\n self.bot.sendMessage(chat_id=chat_id,text='Other formats than text are not supported yet')\r\n\r\n for conversation in self.active_conversations:\r\n\r\n if not conversation.active:\r\n #conversation.logger.handlers = [] # Delte the object logger to avoid duplicate messages\r\n self.active_conversations.remove(conversation)\r\n self.logger.info('Deleting conversation')\r\n self.logger.info('There are '+str(len(self.active_conversations))+ ' active conversations')\r\n\r\n\r\nclass ActiveConversation(bot_commands.BotCommands):\r\n \"\"\"\r\n This class represents each active conversation. To initialize the class you must provide:\r\n\r\n -The ChatId representing the userID of the message -> Integer\r\n -The rawmessage to process -> The text message in raw format to process -> String\r\n \"\"\"\r\n def __init__(self,chatID,raw_message):\r\n\r\n #Instantiate the command library from the parent class with Super!\r\n self.commands_dict = super(ActiveConversation, self).get_commands_dict()\r\n # Instantiate properties with the ChatId and Message\r\n self.chatID = chatID\r\n self.active = True\r\n self.ActualMessage = raw_message\r\n #Instantiate phase indicator and get Unique id\r\n self.conversation_phase = 0 #For multiple-phase conversations\r\n self.uniqueID = uuid.uuid4().get_hex()\r\n #Set conversation logger.\r\n newlogger = tools.setup_logger(self.uniqueID,os.path.dirname(os.path.realpath(__file__))\r\n +'/logs/'+str(self.chatID)+'.log')\r\n self.logger = logging.getLogger(self.uniqueID)\r\n #Set error counter\r\n self.errorcounter = 0\r\n\r\n #Classify the creation command. Do we need the chat engine or we know the message command?\r\n\r\n if self.commandsQ(raw_message):\r\n\r\n self.function = self.AssignCommand(raw_message)\r\n self.logger.info('Conversation marked as command.')\r\n self.function_type = 'BotCommand'\r\n\r\n else:\r\n\r\n self.function = None\r\n self.function_type = 'ChatEngine'\r\n self.logger.info('Conversation marked as chat.')\r\n\r\n\r\n self.cache =[]\r\n\r\n def ManageUpdate(self,bot,raw_message,chat_engine,conversation_list):\r\n \"\"\"\r\n This method manages each conversation update depending of the conversation nature.\r\n\r\n Usually this is always called from MasterBot, but can be also called from other parts of the code,\r\n for example from the bot functions in bot_commands. This last case is very usefull for example\r\n when you want to send a message to a lot of people using another comand like \"/sendq\".\r\n\r\n :param bot: An instance of the Telegram bot. (Object of Telegram.Bot)\r\n :param raw_message: The raw message of the recieved update. (String)\r\n :param chat_engine: The conversational engine. (Object of class CleverBot)\r\n :param conversation_list: The list of active conversations from the class MasterBot (for example). (List of\r\n Active Conversation objects)\r\n :return: None\r\n\r\n Exaple of usage from process_update method of class Masterbot:\r\n\r\n :>:>:> new_conversation.ManageUpdate(bot=self.bot,raw_message=message,\r\n chat_engine=self.chat_engine,conversation_list=self.active_conversations)\r\n\r\n \"\"\"\r\n\r\n self.logger.info('Received: '+raw_message+' from '+str(self.chatID)+'.')\r\n\r\n # Al these things can fail if Telegram not available. So we need to catch these exceptions in a try, except:\r\n\r\n try:\r\n\r\n #If the conversation is classified at chat we use the chat engine\r\n\r\n if self.function_type == 'ChatEngine':\r\n self.args = raw_message\r\n cleverbot_answer=chat_engine.ask(self.args)\r\n self.logger.info('Answering: '+cleverbot_answer+'.')\r\n\r\n # Sometimes the chat_engine gives empty strings. We cannot send that to the user because it will raise\r\n # a Telegram Error, so we say that we are sleeping.\r\n if cleverbot_answer == \"\":\r\n cleverbot_answer = \"I am sleeping now. Try it later or use a command from /start\"\r\n\r\n bot.sendMessage(chat_id=self.chatID,text=cleverbot_answer)\r\n\r\n # Marc conversation as ended.\r\n self.active = False\r\n\r\n # If the conversation is classified as command, we execute the command (that is saved in self.function).\r\n\r\n if self.function_type == 'BotCommand':\r\n\r\n # First, separate the command from the args if needed.\r\n\r\n if '/' in raw_message:\r\n self.args = string.join(raw_message.split(' ')[1:],' ')\r\n else:\r\n self.args = raw_message\r\n\r\n # Execute the command and recieve the status and the cache\r\n\r\n talk_status, self.cache = self.function(bot,self.chatID,self.args,self.conversation_phase,self.cache\r\n ,conversation_list)\r\n\r\n self.logger.info('Talk status code recieved: '+talk_status+'.')\r\n\r\n # Update the phase with the new information.\r\n\r\n if talk_status == 'Next_phase':\r\n is_the_conversation_ended = False\r\n self.conversation_phase = self.conversation_phase + 1\r\n\r\n elif talk_status == 'Same_phase':\r\n is_the_conversation_ended = False\r\n\r\n else :\r\n is_the_conversation_ended = True\r\n\r\n #Marc the conversation as ended if needed\r\n\r\n if is_the_conversation_ended :\r\n\r\n self.active = False\r\n\r\n except telegram.TelegramError:\r\n\r\n # If we catch a expection, we try 5 times more after sleep 4 seconds. If failed, delete the\r\n # conversation.\r\n\r\n self.errorcounter += 1\r\n\r\n if self.errorcounter < 5:\r\n self.logger.info('Telegram Error. Going to sleep 4 seconds.')\r\n time.sleep(4)\r\n self.ManageUpdate(bot,raw_message,chat_engine,conversation_list)\r\n else:\r\n\r\n self.active = False\r\n\r\n\r\n\r\n\r\n def commandsQ(self,raw_message):\r\n \"\"\"\r\n Utility function to know if a message from the user is in the message list.\r\n :param raw_message: The raw message of the user (String).\r\n :return: Boolean indicating if we know the message (Boolean).\r\n \"\"\"\r\n\r\n command = raw_message.split(' ')[0]\r\n if command in self.commands_dict: # If we recognise the message\r\n return True\r\n else: # If we not recognise the message\r\n return False\r\n\r\n def AssignCommand(self,raw_message):\r\n \"\"\"\r\n Utility function to assign command given the user message\r\n\r\n :param raw_message: The raw message of the user (String).\r\n :return: Function from the bot_commands collection. (Callable).\r\n \"\"\"\r\n\r\n command = raw_message.split(' ')[0] # Get the /command part of the message\r\n\r\n return self.commands_dict[command] # To get the actual command and args\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"o7s8r6/Python_Granada_bot","sub_path":"bot_library.py","file_name":"bot_library.py","file_ext":"py","file_size_in_byte":15161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43277729151","text":"from collections.abc import Iterable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom DQN import DQNAgent\nimport gin\n\nclass ginDQN(DQNAgent):\n\n def __init__(self, params):\n super().__init__(params)\n\n self.win_reward = params['win_reward']\n self.loss_reward = params['loss_reward']\n self.no_winner_reward = params['no_winner_reward']\n \n ## #####################################################\n ## ###############################################################\n\n def set_reward(self, ginhand:gin.GinHand, player):\n \"\"\"\n Return the reward.\n \"\"\"\n self.reward = 0\n\n hand_winner, winner_score, is_done = ginhand.ginScore()\n\n if is_done:\n \"\"\"\n if ginhand.winner==None:\n ginHand_winner=\"None\"\n else:\n ginHand_winner=ginhand.winner.player.name\n playing = ginhand.playing[player.name]\n otherPlaying = ginhand.otherPlaying(playing)\n print(f\"setting reward for {player.name} when hand is done: \")\n print(f\"ginhand.winner={ginHand_winner}\")\n print(f\"{player.name}'s hand={playing.playerHand}\")\n print(f\"{player.name}'s deadwood={playing.playerHand.deadwood()}\")\n print(f\"{otherPlaying.player.name}'s hand={otherPlaying.playerHand}\")\n print(f\"{otherPlaying.player.name}'s deadwood={otherPlaying.playerHand.deadwood()}\")\n print(f\"ginhand.ginScore() returned: hand_winner={hand_winner.player.name}, winner_score={winner_score}, is_done={is_done} \")\n if not ((ginhand.winner==None) \n or (hand_winner.player.name==ginHand_winner)):\n print(f\"## ## ## winner mismatch ## ## ##\")\n \"\"\" \n \n if ginhand.playing[player.name] == hand_winner:\n # I won!\n #self.reward = self.win_reward\n self.reward = winner_score\n elif ginhand.currentlyPlaying.playerHand == hand_winner:\n # I lost!\n # self.reward = self.loss_reward\n self.reward = -winner_score\n else:\n # no winner - not helping\n #self.reward = self.no_winner_reward\n self.reward = -ginhand.playing[player.name].playerHand.deadwood()\n\n # normalize the reward so that is is between 0 and 1\n # the highest possible score is 10Xgin.\n denom=10*gin.HAND_SIZE+gin.GinHand.GIN_BONUS\n self.reward = round(self.reward*(1/denom),4)\n\n if (('use_cheat_rewards' in self.params)\n and (self.params['use_cheat_rewards'])):\n self.reward = self.get_cheat_reward(ginhand, player, self.reward)\n\n return self.reward\n\n def get_cheat_reward(self, ginhand:(gin.GinHand), player, normal_reward):\n if not hasattr(self, 'prev_cheat_score'):\n self.prev_cheat_score = 0\n if self.reward > 0:\n return self.reward\n myhand = ginhand.playing[player.name].playerHand\n cards = []\n pretty = myhand.prettyStr().split()\n for cstr in pretty:\n cards.append(gin.Card.fromStr(cstr))\n size=0\n for i in range(1,len(cards)):\n if ((cards[i].rank == cards[i-1].rank+1) and \n (cards[i].suit == cards[i-1].suit)):\n size+=1 # run\n if cards[i].rank == cards[i-1].rank:\n size+=1 # match\n cheat_score = float(size)/float(len(cards))\n cheat_reward = cheat_score - self.prev_cheat_score\n if cheat_reward!=0:\n print(f\"cheat_reward={cheat_reward}, size={size} hand={myhand.prettyStr()} prev={self.prev_cheat_score}\")\n self.prev_cheat_score = cheat_score\n return cheat_reward\n\n## ###############################################\n\n def init_input_size(self,params):\n self.input_size = params['input_size']\n if isinstance(self.input_size,Iterable):\n self.input_size = tuple(self.input_size)\n\n def create_layers(self):\n self.layers = nn.ModuleList(self.create_default_layers(self.input_size,\n self.output_size))\n\n def create_default_layers(self, prev_layer_size, output_size):\n # Layers\n llayers = []\n for layer_size in self.layer_sizes:\n llayers.append(nn.Linear(prev_layer_size, layer_size))\n prev_layer_size = layer_size\n llayers.append(nn.Linear(prev_layer_size, output_size))\n return llayers\n\n def forward(self, x):\n # Linear Layers\n for layer in self.layers[:-1]:\n if 'no_relu' in self.params and self.params['no_relu']:\n x = layer(x)\n else:\n x = F.relu(layer(x))\n x = self.layers[-1](x) # last layer\n return x\n\n## ###############################################\n## ###############################################\n","repo_name":"black-ejs/rlgin","sub_path":"ginDQN.py","file_name":"ginDQN.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43065281113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 12 17:22:43 2015\n\n@author: Brian\n\"\"\"\n\nimport hardio as fw\nimport utilities as general_utilities\nimport numba as nb\nimport numpy as np\nimport copy\nimport chemistry\nimport geometry\nimport mechanics\nfrom hardio import BASIC_INFO, DYNAMICS_INFO, VOL_EX_INFO, DETAILED_VOL_EX_INFO\n\n\n# ----------------------------------------------------------------------------------------\ndef pack_state_array(\n phase_var_indices,\n ode_cellwide_phase_var_indices,\n system_info_at_tstep):\n phase_var_array = (\n np.transpose(system_info_at_tstep[:, phase_var_indices])\n ).flatten()\n ode_cellwide_phase_var_array = system_info_at_tstep[\n 0, ode_cellwide_phase_var_indices\n ]\n\n return np.append(phase_var_array, ode_cellwide_phase_var_array)\n\n\n# -----------------------------------------------------------------------------------------\ndef interpret_state_array(rac_act_ix, rac_inact_ix, rho_act_ix,\n rho_inact_ix, x_ix, y_ix, state_array):\n phase_vars = state_array\n\n rac_mem_active_start_ix = rac_act_ix * 16\n rac_mem_active_end_ix = rac_mem_active_start_ix + 16\n\n rac_acts = phase_vars[\n rac_mem_active_start_ix:rac_mem_active_end_ix\n ]\n\n rac_mem_inactive_start_ix = rac_inact_ix * 16\n rac_mem_inactive_end_ix = rac_mem_inactive_start_ix + 16\n\n rac_inacts = phase_vars[\n rac_mem_inactive_start_ix:rac_mem_inactive_end_ix\n ]\n\n rho_mem_active_start_ix = rho_act_ix * 16\n rho_mem_active_end_ix = rho_mem_active_start_ix + 16\n\n rho_acts = phase_vars[\n rho_mem_active_start_ix:rho_mem_active_end_ix\n ]\n\n rho_mem_inactive_start_ix = rho_inact_ix * 16\n rho_mem_inactive_end_ix = rho_mem_inactive_start_ix + 16\n\n rho_inacts = phase_vars[\n rho_mem_inactive_start_ix:rho_mem_inactive_end_ix\n ]\n\n x_start_ix = x_ix * 16\n x_end_ix = x_start_ix + 16\n\n x = phase_vars[x_start_ix:x_end_ix]\n\n y_start_ix = y_ix * 16\n y_end_ix = y_start_ix + 16\n\n y = phase_vars[y_start_ix:y_end_ix]\n\n poly = general_utilities.make_verts_array_given_xs_and_ys(x, y\n )\n return rac_acts, rho_acts, rac_inacts, rho_inacts, poly\n\n\n# ----------------------------------------------------------------------------------------\ndef unpack_state_array(num_phase_var_indices, state_array):\n # reversing append\n node_phase_var_array = state_array\n ode_cellwide_phase_vars = np.array([])\n\n # reversing flatten\n phase_vars = np.transpose(np.array(np.split(\n node_phase_var_array,\n num_phase_var_indices)))\n\n return phase_vars, ode_cellwide_phase_vars\n\n\n# ----------------------------------------------------------------------------------------\ndef pack_state_array_from_system_history(\n phase_var_indices,\n ode_cellwide_phase_var_indices,\n system_info):\n state_array = pack_state_array(\n phase_var_indices,\n ode_cellwide_phase_var_indices,\n system_info)\n\n return state_array\n\n\n# ----------------------------------------------------------------------------------------\n@nb.jit(nopython=True)\ndef calculate_sum(num_elements, sequence):\n result = 0\n for i in range(num_elements):\n result += sequence[i]\n\n return result\n\n\n# ----------------------------------------------------------------------------------------\ndef eulerint(f, current_state, t0, t1, args, num_int_steps, cell_ix,\n curr_tpoint, rac_act_ix, rac_inact_ix,\n rho_act_ix, rho_inact_ix,\n x_ix, y_ix, writer):\n focus_verts = [0, 15]\n states = np.zeros(\n (2,\n current_state.shape[0]),\n dtype=np.float64)\n\n states[0] = copy.deepcopy(current_state)\n # logging.log(level=BASIC_INFO, msg=\"-----------------------------------\")\n # logging.log(level=BASIC_INFO, msg=\"curr_tpoint: {}, cell: {}\"\n # .format(curr_tpoint, cell_ix))\n dt = (t1 - t0) / num_int_steps\n\n for int_step in range(num_int_steps):\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\")\n _, _, _, _, init_poly \\\n = interpret_state_array(rac_act_ix, rac_inact_ix,\n rho_act_ix, rho_inact_ix,\n x_ix, y_ix, current_state)\n # for ix in focus_verts:\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"init poly[{}]: {}\".format(ix,\n # init_poly[\n # ix]))\n deltas, sum_forces, edge_forces_plus, edge_forces_minus, \\\n rgtp_forces, cyto_forces, verts_before_ve, verts_after_ve = f(\n focus_verts,\n curr_tpoint, int_step, dt, writer, cell_ix, current_state, *args)\n current_state = current_state + dt * deltas\n _, _, _, _, final_poly = interpret_state_array(rac_act_ix,\n rac_inact_ix,\n rho_act_ix,\n rho_inact_ix,\n x_ix, y_ix,\n current_state)\n\n # for ix in focus_verts:\n # logging \\\n # .log(level=DYNAMICS_INFO,\n # msg=\"actual Delta poly({}): {}\"\n # .format(ix,\n # verts_before_ve[ix] -\n # init_poly[ix])\n # )\n # logging \\\n # .log(level=DYNAMICS_INFO,\n # msg=\"Delta poly after VE({}): {}\"\n # .format(ix, verts_after_ve[ix] - init_poly[ix])\n # )\n # logging \\\n # .log(level=DYNAMICS_INFO,\n # msg=\"final poly[{}]: {}\"\n # .format(ix, final_poly[0])\n # )\n curr_tpoint += dt\n\n states[1] = copy.deepcopy(current_state)\n\n return states\n\n\ndef cell_dynamics(\n focus_verts,\n tpoint,\n int_step,\n dt,\n writer,\n cell_ix,\n state_array,\n num_cells,\n all_cells_verts,\n num_phase_vars,\n rac_act_ix,\n rest_edge_len,\n rac_inact_ix,\n rho_act_ix,\n rho_inact_ix,\n x_ix,\n y_ix,\n kgtp_rac,\n kdgtp_rac,\n kgtp_rho,\n kdgtp_rho,\n kgtp_rac_auto,\n kgtp_rho_auto,\n kdgtp_rho_on_rac,\n kdgtp_rac_on_rho,\n k_mem_on_vertex,\n k_mem_off,\n halfmax_vertex_rgtp_conc,\n diffusion_rgtp,\n vertex_eta,\n stiffness_edge,\n halfmax_vertex_rgtp,\n const_protrusive,\n const_retractive,\n rest_area,\n stiffness_cyto,\n x_coas,\n close_point_smoothness_factors,\n x_cils,\n halfmax_tension_inhib,\n tension_inhib,\n rac_rands,\n coa_updates,\n cil_updates\n):\n phase_vars = state_array\n\n rac_mem_active_start_ix = rac_act_ix * 16\n rac_mem_active_end_ix = rac_mem_active_start_ix + 16\n\n rac_acts = phase_vars[\n rac_mem_active_start_ix:rac_mem_active_end_ix\n ]\n\n rac_mem_inactive_start_ix = rac_inact_ix * 16\n rac_mem_inactive_end_ix = rac_mem_inactive_start_ix + 16\n\n rac_inacts = phase_vars[\n rac_mem_inactive_start_ix:rac_mem_inactive_end_ix\n ]\n\n rho_mem_active_start_ix = rho_act_ix * 16\n rho_mem_active_end_ix = rho_mem_active_start_ix + 16\n\n rho_acts = phase_vars[\n rho_mem_active_start_ix:rho_mem_active_end_ix\n ]\n\n rho_mem_inactive_start_ix = rho_inact_ix * 16\n rho_mem_inactive_end_ix = rho_mem_inactive_start_ix + 16\n\n rho_inacts = phase_vars[\n rho_mem_inactive_start_ix:rho_mem_inactive_end_ix\n ]\n\n x_start_ix = x_ix * 16\n x_end_ix = x_start_ix + 16\n\n x = phase_vars[x_start_ix:x_end_ix]\n\n y_start_ix = y_ix * 16\n y_end_ix = y_start_ix + 16\n\n y = phase_vars[y_start_ix:y_end_ix]\n\n poly = general_utilities.make_verts_array_given_xs_and_ys(x, y\n )\n\n rac_cyto = (\n 1\n - calculate_sum(16, rac_acts)\n - calculate_sum(16, rac_inacts)\n )\n rho_cyto = (\n 1\n - calculate_sum(16, rho_acts)\n - calculate_sum(16, rho_inacts)\n )\n\n sum_forces, edge_forces_plus, edge_forces_minus, uevs, rgtp_forces, \\\n cyto_forces, \\\n edge_strains, local_strains, \\\n uivs = mechanics.calculate_forces(\n poly,\n rac_acts,\n rho_acts,\n rest_edge_len,\n stiffness_edge,\n halfmax_vertex_rgtp,\n const_protrusive,\n const_retractive,\n rest_area,\n stiffness_cyto,\n )\n\n sum_forces_x = sum_forces[:, 0]\n sum_forces_y = sum_forces[:, 1]\n\n only_tensile_local_strains = np.zeros_like(local_strains)\n for i in range(16):\n local_strain = local_strains[i]\n if local_strain > 0:\n only_tensile_local_strains[i] = local_strain\n\n edgeplus_lengths = geometry.calculate_edgeplus_lengths(poly)\n avg_edge_lengths = geometry.calculate_average_edge_length_around_nodes(\n edgeplus_lengths\n )\n conc_rac_acts = chemistry.calc_concs(rac_acts, avg_edge_lengths)\n\n kgtps_rac = chemistry.calculate_kgtp_rac(\n conc_rac_acts,\n halfmax_vertex_rgtp_conc,\n kgtp_rac,\n kgtp_rac_auto,\n x_coas,\n rac_rands,\n x_cils,\n close_point_smoothness_factors,\n )\n\n conc_rho_acts = chemistry.calc_concs(rho_acts, avg_edge_lengths\n )\n\n global_tension = np.sum(only_tensile_local_strains) / 16\n if global_tension < 0.0:\n global_tension = 0.0\n strain_inhibition = tension_inhib * \\\n chemistry.hill_function3(\n halfmax_tension_inhib,\n global_tension\n )\n\n kdgtps_rac = chemistry.calculate_kdgtp_rac(\n conc_rho_acts,\n halfmax_vertex_rgtp_conc,\n kdgtp_rac,\n kdgtp_rho_on_rac,\n x_cils,\n halfmax_tension_inhib,\n tension_inhib,\n only_tensile_local_strains,\n )\n\n kgtps_rho = chemistry.calculate_kgtp_rho(\n conc_rho_acts,\n x_cils,\n halfmax_vertex_rgtp_conc,\n kgtp_rho,\n kgtp_rho_auto,\n )\n\n kdgtps_rho = chemistry.calculate_kdgtp_rho(\n conc_rac_acts,\n halfmax_vertex_rgtp_conc,\n kdgtp_rho,\n kdgtp_rac_on_rho,\n )\n\n conc_rac_inacts = chemistry.calc_concs(rac_inacts, avg_edge_lengths)\n conc_rho_inact = chemistry.calc_concs(rho_inacts, avg_edge_lengths)\n\n rac_act_net_fluxes = chemistry.calculate_net_fluxes(\n conc_rac_acts,\n diffusion_rgtp,\n edgeplus_lengths,\n )\n rac_inact_net_fluxes = chemistry.calculate_net_fluxes(\n conc_rac_inacts,\n diffusion_rgtp,\n edgeplus_lengths,\n )\n rho_act_net_fluxes = chemistry.calculate_net_fluxes(\n conc_rho_acts,\n diffusion_rgtp,\n edgeplus_lengths,\n )\n rho_inact_net_fluxes = chemistry.calculate_net_fluxes(\n conc_rho_inact,\n diffusion_rgtp,\n edgeplus_lengths,\n )\n\n delta_rac_activated = np.zeros(16, dtype=np.float64)\n delta_rac_inactivated = np.zeros(16, dtype=np.float64)\n\n delta_rac_cytosol_to_membrane = np.zeros(16, dtype=np.float64)\n\n delta_rho_activated = np.zeros(16, dtype=np.float64)\n delta_rho_inactivated = np.zeros(16, dtype=np.float64)\n\n delta_rho_cytosol_to_membrane = np.zeros(16, dtype=np.float64)\n\n delta_x = np.zeros(16, dtype=np.float64)\n delta_y = np.zeros(16, dtype=np.float64)\n new_verts = np.zeros((16, 2), dtype=np.float64)\n np.zeros(2, dtype=np.float64)\n np.zeros(2, dtype=np.float64)\n\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"tstep: {}, int_step: {}\".format(tpoint, int_step))\n # logging.log(level=DYNAMICS_INFO, msg=\"eta: {}\".format(vertex_eta))\n # logging.log(level=DYNAMICS_INFO, msg=\"1/eta: {}\".format(1 / vertex_eta))\n # for ix in focus_verts:\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"rgtp_forces[{}]: {}\".format(ix, rgtp_forces[ix]))\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"edge_forces[{}]: {}\".format(ix,\n # edge_forces_plus[ix]))\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"cyto_forces[{}]: {}\".format(ix, cyto_forces[ix]))\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"expected sum forces ({}) = {}\".format(ix,\n # rgtp_forces[\n # ix] +\n # edge_forces_plus[\n # ix] +\n # edge_forces_minus[\n # ix] +\n # cyto_forces[\n # ix]))\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"sum_forces[{}]: {}\".format(ix, sum_forces[ix]))\n\n poly_area = geometry.calculate_polygon_area(poly)\n data = [(\"tpoint\", tpoint),\n (\"poly\", [[float(v) for v in x] for x in poly]),\n (\"rac_acts\", [float(v) for v in rac_acts]),\n (\"rac_inacts\", [float(v) for v in rac_inacts]),\n (\"rho_acts\", [float(v) for v in rho_acts]),\n (\"rho_inacts\", [float(v) for v in rho_inacts]),\n (\"sum_forces\", [list([float(x), float(y)]) for x, y in\n zip(sum_forces_x, sum_forces_y)]),\n (\"uivs\", [[float(v) for v in x] for x in uivs]),\n (\"rgtp_forces\", [[float(v) for v in x] for x in rgtp_forces]),\n (\"edge_forces\", [[float(v) for v in x] for x in edge_forces_plus]),\n (\"cyto_forces\", [[float(v) for v in x] for x in cyto_forces]),\n (\"kgtps_rac\", [float(v) for v in kgtps_rac]),\n (\"kdgtps_rac\", [float(v) for v in kdgtps_rac]),\n (\"kgtps_rho\", [float(v) for v in kgtps_rho]),\n (\"kdgtps_rho\", [float(v) for v in kdgtps_rho]),\n (\"x_cils\", [float(v) for v in x_cils]),\n (\"x_coas\", [float(v) for v in x_coas]),\n (\"edge_strains\", [float(v) for v in local_strains]),\n (\"avg_tens_strain\", [float(global_tension) for _ in local_strains]),\n (\"poly_area\", poly_area),\n (\"rac_act_net_fluxes\", [float(v) for v in rac_act_net_fluxes]),\n (\"rac_inact_net_fluxes\", [float(v) for v in rac_inact_net_fluxes]),\n (\"rho_act_net_fluxes\", [float(v) for v in rho_act_net_fluxes]),\n (\"rho_inact_net_fluxes\", [float(v) for v in rho_inact_net_fluxes]),\n (\"x_tens\", [float(strain_inhibition) for _ in local_strains])]\n # for d in data:\n # logging.log(level=99, msg=\"{}: {}\".format(d[0], d[1]))\n writer.save_int_step(data)\n\n for ni in range(16):\n old_coord = poly[ni]\n\n new_verts[ni][0] = old_coord[0] + dt * sum_forces_x[ni] / vertex_eta\n new_verts[ni][1] = old_coord[1] + dt * sum_forces_y[ni] / vertex_eta\n\n # for ix in focus_verts:\n # logging.log(level=DYNAMICS_INFO, msg=\"delta.poly[{}]: {}\"\n # .format(ix, (new_verts[0] - poly[0]) / dt)\n # )\n # logging.log(level=DYNAMICS_INFO,\n # msg=\"expected Delta poly 0 ({}): {}\"\n # .format(ix, dt * sum_forces[0] / vertex_eta)\n # )\n\n verts_before_ve = copy.deepcopy(new_verts)\n\n # calculate volume exclusion effects\n num_bisection_iterations = 10\n max_movement_mag = dt * const_protrusive / vertex_eta\n\n for other_ci in range(num_cells):\n if other_ci != cell_ix:\n # logging.log(level=VOL_EX_INFO, msg=\"testing poly: {}\".format(\n # other_ci))\n # logging.log(level=VOL_EX_INFO,\n # msg=\"coords: {}\".format(all_cells_verts[other_ci]))\n # logging.log(level=VOL_EX_INFO,\n # msg=\"max movement mag: {}\".format(max_movement_mag))\n are_new_nodes_inside_other_cell = \\\n geometry.are_points_inside_polygon(\n new_verts, all_cells_verts[other_ci]\n )\n # logging.log(level=VOL_EX_INFO, msg=\"in poly: {}\".format(\n # [i for (i, x) in\n # enumerate(are_new_nodes_inside_other_cell) if x])\n # )\n for ni in range(16):\n if are_new_nodes_inside_other_cell[ni]:\n # logging.log(level=VOL_EX_INFO,\n # msg=\"fixing vertex {} violation (current: {})\".format(\n # ni, new_verts[ni]))\n new_verts[ni] = enforce_volume_exclusion_for_vertex(\n poly[ni],\n new_verts[ni],\n uivs[ni],\n all_cells_verts[other_ci],\n num_bisection_iterations,\n max_movement_mag,\n )\n verts_after_ve = copy.deepcopy(new_verts)\n\n for ni in range(16):\n new_coord = new_verts[ni]\n old_coord = poly[ni]\n\n delta_x[ni] = (new_coord[0] - old_coord[0]) / dt\n delta_y[ni] = (new_coord[1] - old_coord[1]) / dt\n\n for ni in range(16):\n # finish assigning chemistry variables\n delta_rac_activated[ni] = kgtps_rac[ni] * rac_inacts[ni]\n delta_rac_inactivated[ni] = kdgtps_rac[ni] * rac_acts[ni]\n\n delta_rac_on = k_mem_on_vertex * rac_cyto\n delta_rac_off = k_mem_off * rac_inacts[ni]\n delta_rac_cytosol_to_membrane[ni] = delta_rac_on - delta_rac_off\n\n delta_rho_activated[ni] = kgtps_rho[ni] * rho_inacts[ni]\n delta_rho_inactivated[ni] = kdgtps_rho[ni] * rho_acts[ni]\n\n delta_rho_on = k_mem_on_vertex * rho_cyto\n delta_rho_off = k_mem_off * rho_inacts[ni]\n delta_rho_cytosol_to_membrane[ni] = delta_rho_on - delta_rho_off\n\n # set up ode array\n ode_array = np.empty(num_phase_vars * 16)\n\n for i in range(16):\n ode_array[i] = (\n delta_rac_activated[i]\n - delta_rac_inactivated[i]\n + rac_act_net_fluxes[i]\n )\n\n ode_array[i + 16] = (\n delta_rac_inactivated[i]\n - delta_rac_activated[i]\n + rac_inact_net_fluxes[i]\n + delta_rac_cytosol_to_membrane[i]\n )\n\n ode_array[i + 2 * 16] = (\n delta_rho_activated[i]\n - delta_rho_inactivated[i]\n + rho_act_net_fluxes[i]\n )\n\n ode_array[i + 3 * 16] = (\n delta_rho_inactivated[i]\n - delta_rho_activated[i]\n + rho_inact_net_fluxes[i]\n + delta_rho_cytosol_to_membrane[i]\n )\n\n ode_array[i + 4 * 16] = delta_x[i]\n\n ode_array[i + 5 * 16] = delta_y[i]\n\n return ode_array, sum_forces, edge_forces_plus, edge_forces_minus, \\\n rgtp_forces, cyto_forces, verts_before_ve, verts_after_ve\n\n\n# -----------------------------------------------------------------\ndef enforce_volume_exclusion_for_vertex(\n old_coord,\n new_coord,\n unit_inside_pointing_vector,\n polygon,\n num_bisection_iterations,\n max_movement_mag,\n):\n # min_x, max_x, min_y, max_y = geometry.calculate_polygon_bb(\n # polygon)\n\n is_old_in_poly = geometry.is_point_in_polygon_without_bb_check(\n old_coord, polygon\n )\n\n while is_old_in_poly:\n old_coord = old_coord + max_movement_mag * \\\n unit_inside_pointing_vector\n\n # logging.log(level=DETAILED_VOL_EX_INFO, msg=\"trial old v: {}\".format(\n # old_coord))\n # num_bisection_iterations = int(num_bisection_iterations*1.5)\n is_old_in_poly = geometry.is_point_in_polygon_without_bb_check(\n old_coord, polygon)\n\n # if we have reached here, then we know that the old_coord is in the\n # polygon, and the new coord is not in the polygon\n ok_coord = old_coord\n\n # logging.log(level=DETAILED_VOL_EX_INFO,\n # msg=\"settling with okay v: {} (in poly: {})\".format(\n # old_coord,\n # geometry.is_point_in_polygon_without_bb_check(\n # old_coord,\n # polygon)))\n problem_coord = new_coord\n np.zeros(2, dtype=np.float64)\n\n # logging.log(level=DETAILED_VOL_EX_INFO,\n # msg=\"problem v: {}\".format(problem_coord))\n for i in range(num_bisection_iterations):\n test_coord = 0.5 * (ok_coord + problem_coord)\n\n # logging.log(level=DETAILED_VOL_EX_INFO,\n # msg=\"testing: {}\".format(test_coord))\n\n if geometry.is_point_in_polygon_without_bb_check(\n test_coord, polygon\n ):\n # logging.log(level=DETAILED_VOL_EX_INFO, msg=\"setting as problem\")\n problem_coord = test_coord\n else:\n # logging.log(level=DETAILED_VOL_EX_INFO, msg=\"setting as ok\")\n ok_coord = test_coord\n\n # logging.log(level=DETAILED_VOL_EX_INFO,\n # msg=\"returning ok: {}\".format(ok_coord))\n return ok_coord\n","repo_name":"bzm3r/rust-ncc","sub_path":"py_model/dynamics.py","file_name":"dynamics.py","file_ext":"py","file_size_in_byte":21986,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"72023537975","text":"from opentelemetry import trace, baggage\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as OTLPSpanGrpcExporter\nfrom opentelemetry.sdk.resources import SERVICE_NAME, Resource, HOST_NAME\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\nfrom opentelemetry.trace import SpanKind\n\n\ndef inner_method():\n tracer = trace.get_tracer(__name__)\n with tracer.start_as_current_span(\"child_span\", kind=SpanKind.CLIENT):\n print(\"hello world\")\n\n\ndef outer_method():\n tracer = trace.get_tracer(__name__)\n with tracer.start_as_current_span(\"parent_span\", kind=SpanKind.SERVER):\n inner_method()\n\n\ndef baggage_and_attribute_usage():\n tracer = trace.get_tracer(__name__)\n global_ctx = baggage.set_baggage(\"key\", \"value_from_global_ctx\") # 使用baggage api,在不同span之间传递数据\n with tracer.start_as_current_span(name='baggage_parent_span', attributes={'attribute_key': 'value'},\n kind=SpanKind.SERVER):\n parent_ctx = baggage.set_baggage(\"key\", \"value_from_parent_ctx\")\n with tracer.start_as_current_span(name='baggage_child_span', context=parent_ctx,\n kind=SpanKind.INTERNAL):\n child_ctx = baggage.set_baggage(\"key\", \"value_from_child_ctx\")\n\n print(baggage.get_baggage(\"key\", global_ctx))\n print(baggage.get_baggage(\"key\", parent_ctx))\n print(baggage.get_baggage(\"key\", child_ctx))\n\n\ndef init_opentelemetry():\n # 设置服务名、主机名\n resource = Resource(attributes={\n SERVICE_NAME: \"PythonTest\",\n HOST_NAME: \"MyComputer\",\n \"token\": \"xxxxxxxxxx\" # 替换成控制台上的 Token\n })\n\n # 使用GRPC协议上报\n span_processor = BatchSpanProcessor(OTLPSpanGrpcExporter(\n endpoint=\"http://ap-guangzhou.apm.tencentcs.com:4317\", # 替换成控制台上的接入点\n ))\n\n trace_provider = TracerProvider(resource=resource, active_span_processor=span_processor)\n trace.set_tracer_provider(trace_provider)\n\n\nif __name__ == '__main__':\n init_opentelemetry()\n outer_method()\n baggage_and_attribute_usage()\n","repo_name":"TencentCloud/tencentcloud-opentelemetry-demo-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16277763374","text":"# -*- coding: utf-8 -*-\n# ***********************************\n# Author: Pedro Jorge De Los Santos \n# E-mail: delossantosmfq@gmail.com \n# License: MIT License\n# ***********************************\nimport numpy as np\nfrom nusa import *\nimport itertools\nimport matplotlib.pyplot as plt\n\ndef pairwise(iterable):\n #~ \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\n# Input data \nE = 29e6 # psi\nI = 10\nL = 10\nP = 10e3\n\nnelm = 10\nparts = np.linspace(0, L, nelm + 1)\n\nnodos = []\nfor xc in parts:\n cn = Node((xc,0))\n nodos.append(cn)\n\nelementos = []\nfor x in pairwise(nodos):\n ni,nj = x[0], x[1]\n ce = Beam((ni,nj),E,I)\n elementos.append(ce)\n\nm = BeamModel()\n\nfor n in nodos: m.add_node(n)\nfor e in elementos: m.add_element(e)\n\nm.add_constraint(nodos[0], ux=0, uy=0, ur=0)\nm.add_force(nodos[-1], (-P,))\nm.solve()\n\nm.plot_disp(1, label=\"Approx.\")\n\nxx = np.linspace(0,L)\nd = ((-P*xx**2.0)/(6.0*E*I))*(3*L - xx)\nplt.plot(xx, d, label=\"Classic\")\nplt.legend()\nplt.axis(\"auto\")\nplt.xlim(0,L+1)\n\nm.show()\n\n\n\n","repo_name":"JorgeDeLosSantos/nusa","sub_path":"examples/beam/beam_6_encastre.py","file_name":"beam_6_encastre.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"22"} +{"seq_id":"21378969174","text":"from datetime import datetime, timedelta\n\nfrom fastapi import Depends\nfrom fastapi.security import OAuth2PasswordBearer\nfrom sqlalchemy.orm import Session\nfrom redis import Redis\nfrom redis.exceptions import RedisError\n\nfrom app.database import get_db\nfrom app.common.redis_util import get_redis\nfrom app.api import user, commanage\nfrom app.api.auth.schema import Token\nfrom app.api.auth.token_util import TokenUtil, JwtTokenType\nfrom app.common.passwd_util import verify_password\n\nfrom app.api.auth.exception import TokenInvalidateErr\nfrom app.api.exception import api_error, crud_error\n\nfrom app.configs.log import logger\nfrom app.configs.config import settings\n\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=settings.TOKEN_URL)\n\n\ndef authenticate(user_id: str, user_pw: str, db: Session) -> None:\n \"\"\"\n 아이디와 비밀번호로 인증\n :param user_id: 사용자 아이디\n :param user_pw: 사죶아 비밀번호\n :param db: db session\n :return: None\n \"\"\"\n try:\n get_user = user.crud.UserCRUD(db).get(\n user.schema.UserGet(user_id=user_id)\n )\n except crud_error.DatabaseGetErr:\n logger.error(f\"[auth-service] UserCRUD get error\")\n raise api_error.ServerError(f\"[auth-service] UserCRUD error\")\n\n if not get_user:\n logger.error(f\"[auth-service] user[{user_id} is not found\")\n raise api_error.UserNotFound(user_id=user_id)\n\n if not verify_password(plain_password=user_pw, hashed_password=get_user.user_pw):\n logger.error(f\"[auth-service] user password is invalid\")\n raise api_error.Unauthorized()\n\n if get_user.deleted:\n logger.error(f\"[auth-service] user[{user_id}] is deleted user\")\n raise api_error.Unauthorized()\n\n logger.info(f\"[auth-service] authenticate success. {user_id}\")\n\n\ndef create_token(db: Session, redis: Redis, user_id: str, host_id: int = 0) -> Token:\n \"\"\"\n token 생성\n :param db: db session\n :param redis: redis session\n :param user_id: 사용자 아이디\n :param host_id: 호스트 아이디\n :return: Token 스키마\n \"\"\"\n if host_id != 0:\n # host id 가 있을경우 commanage용 토큰 생성\n try:\n result = commanage.crud.CommanageCRUD(db).get(\n commanage.schema.ComManageByHost(host_id=host_id)\n )\n except crud_error.DatabaseGetErr:\n logger.error(f\"[auth-service] CommanageCRUD get error\")\n raise api_error.ServerError(f\"[auth-service] CommanageCRUD error\")\n\n if not result:\n logger.error(f\"[auth-service] host[{host_id}] is not found\")\n raise api_error.CommanageNotFound(host_id=host_id)\n\n token_util = TokenUtil(user_id=user_id, host_id=host_id)\n access_token = token_util.create(token_type=JwtTokenType.ACCESS)\n refresh_token = token_util.create(token_type=JwtTokenType.REFRESH)\n\n # refresh 저장\n try:\n redis.set(name=user_id, value=refresh_token)\n except RedisError as err:\n logger.error(f\"[auth-service] redis error : {err}\")\n raise api_error.ServerError(f\"[auth-service] redis error\")\n\n return Token(access_token=access_token, refresh_token=refresh_token)\n\n\ndef renew_token(token: str, redis: Redis) -> Token:\n \"\"\"\n token 갱신\n :param token: 토큰(리프레시 토큰)\n :param redis: redis session\n :return: Token 스키마\n \"\"\"\n try:\n token_util = TokenUtil.from_token(token)\n except TokenInvalidateErr as err:\n logger.error(f\"[auth-service] TokenUtil error : {err}\")\n raise api_error.Unauthorized()\n\n if token_util.token_type != JwtTokenType.REFRESH:\n logger.error(f\"[auth-service] current Token is not Refresh-token\")\n raise api_error.Unauthorized()\n\n # refresh token 만료전 체크일자\n compare_timedelta = (datetime.utcnow() + timedelta(days=settings.DATE_BEFORE_EXPIRATION)).timestamp()\n if token_util.is_expired(compare_timedelta):\n logger.info(\"[auth-service] refresh token's expiration date is approaching. Renew the token\")\n refresh_token = token_util.create(token_type=JwtTokenType.REFRESH)\n\n try:\n redis.set(name=token_util.user_id, value=refresh_token)\n except RedisError as err:\n logger.error(f\"[auth-service] redis error : {err}\")\n raise api_error.ServerError(f\"[auth-service] redis error\")\n else:\n refresh_token = None\n\n access_token = token_util.create(token_type=JwtTokenType.ACCESS)\n return Token(access_token=access_token, refresh_token=refresh_token)\n\n\ndef remove_token(token: str, redis: Redis) -> None:\n \"\"\"\n token 제거\n :param token: 제거할 토큰\n :param redis: redis session\n :return: None\n \"\"\"\n try:\n token_util = TokenUtil.from_token(token)\n except TokenInvalidateErr as err:\n logger.error(f\"[auth-service] TokenUtil error : {err}\")\n raise api_error.Unauthorized()\n\n expire_time = 60 * settings.ACCESS_TOKEN_EXPIRE_MINUTES\n\n try:\n redis.delete(token_util.user_id)\n redis.setex(name=f\"{token_util.user_id}_logout\",\n value=token,\n time=expire_time)\n except RedisError as err:\n logger.error(f\"[auth-service] redis error : {err}\")\n raise api_error.ServerError(f\"[auth-service] redis error\")\n\n\ndef verify_token(db: Session = Depends(get_db),\n redis: Redis = Depends(get_redis),\n token: str = Depends(oauth2_scheme)):\n \"\"\"\n token 인증\n :param db: db session\n :param redis: redis session\n :param token: 인증할 token\n :return:\n \"\"\"\n try:\n token_util = TokenUtil.from_token(token)\n except TokenInvalidateErr as err:\n logger.error(f\"[auth-service] TokenUtil error : {err}\")\n raise api_error.Unauthorized()\n\n try:\n get_user = user.crud.UserCRUD(db).get(\n user.schema.UserGet(user_id=token_util.user_id)\n )\n except crud_error.DatabaseGetErr:\n logger.error(f\"[auth-service] UserCRUD get error\")\n raise api_error.ServerError(f\"[auth-service] UserCRUD error\")\n\n if not get_user:\n logger.error(f\"[auth-service] user[{token_util.user_id} is not found\")\n raise api_error.UserNotFound(user_id=token_util.user_id)\n\n if get_user.deleted:\n logger.error(f\"[auth-service] user[{token_util.user_id} is deleted user\")\n raise api_error.Unauthorized()\n\n try:\n if redis.get(f\"{token_util.user_id}_logout\"):\n logger.error(f\"[auth-service] user[{token_util.user_id} is logout user\")\n raise api_error.Unauthorized()\n except RedisError as err:\n logger.error(f\"[auth-service] redis error : {err}\")\n raise api_error.ServerError(f\"[auth-service] redis error\")\n\n return token\n","repo_name":"f-lab-edu/ComMoni","sub_path":"server/app/api/auth/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33977683611","text":"import pytest\nfrom types import SimpleNamespace\nfrom corona_radio.storage.connection_factory import DatabaseConnectionFactory\nfrom corona_radio.storage.subscription import SubscriptionStorage\nfrom datetime import datetime\n\n@pytest.fixture\ndef databaseConnection():\n factory = None \n try:\n factory = DatabaseConnectionFactory(dbfile=':memory:')\n yield factory.getConnection()\n finally:\n if factory is not None:\n factory.shutdown()\n\n@pytest.fixture\ndef cursor(databaseConnection):\n cursor = None\n try:\n cursor = databaseConnection.cursor()\n yield cursor\n finally:\n if cursor is not None:\n cursor.close()\n\ndef test_finalAll(databaseConnection, cursor):\n storage = SubscriptionStorage()\n\n timestamp = datetime.utcnow()\n cursor.execute('''INSERT INTO subscription\n (`title`, `link`, `latest_content`, `created_at`, `updated_at`)\n values (?, ?, ?, ?, ?)''', ['eltit', 'knil', None, timestamp, timestamp])\n databaseConnection.commit()\n\n actual = storage.findAll(cursor, databaseConnection)\n assert actual is not None\n assert len(actual) == 1\n \n subscription = actual[0]\n assert subscription.title == 'eltit'\n assert subscription.link == 'knil'\n assert subscription.latestContent is None\n assert subscription.createdAt == timestamp\n assert subscription.updatedAt == timestamp\n\ndef test_insert(databaseConnection, cursor):\n storage = SubscriptionStorage()\n\n timestamp = datetime.utcnow()\n entity = SimpleNamespace(\n title = 'Title',\n link = 'Link',\n latestContent = 'ABCDEFG',\n createdAt = timestamp,\n updatedAt = timestamp)\n\n actual = storage.save(cursor, databaseConnection, entity)\n\n assert hasattr(actual, 'id') and actual.id is not None\n assert actual.title == 'Title'\n assert actual.link == 'Link'\n assert actual.latestContent == 'ABCDEFG'\n assert actual.createdAt is not None and type(actual.createdAt) is datetime\n assert timestamp == actual.createdAt\n assert actual.updatedAt is not None\n\n cursor = databaseConnection.execute('select id, title, latest_content, created_at, updated_at '\n 'from subscription '\n 'where id = ? '\n 'and title = ? '\n 'and link = ?',[actual.id, 'Title', 'Link'])\n\n record = cursor.fetchone()\n assert record is not None\n assert record[2] == 'ABCDEFG'\n assert type(record[3]) == datetime and type(record[3]) == datetime\n assert record[3] == timestamp\n assert record[3] == record[4]\n","repo_name":"duetocode/corona_radio","sub_path":"test/storage/subscription_storage_test.py","file_name":"subscription_storage_test.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74898614777","text":"from hamcrest import *\nimport requests\nfrom behave import given, then, step\nfrom utils import random_string, filter_list_by_parameter_start_with, safe_load_json, remove_empty_from_json, \\\n threading_wait_until, UtilsManager, create_tags_set, is_json, values_to_boolean\nfrom local_agent import get_orb_agent_logs\nfrom test_config import TestConfig\nfrom datetime import datetime\nfrom control_plane_datasets import create_new_dataset, list_datasets\nfrom random import choice, choices, sample\nfrom deepdiff import DeepDiff\nimport json\nimport ciso8601\n\npolicy_name_prefix = \"test_policy_name_\"\nconfigs = TestConfig.configs()\norb_url = configs.get('orb_url')\nverify_ssl_bool = eval(configs.get('verify_ssl').title())\n\n\n@step(\"a new policy is requested to be created with the same name as an existent one and: {kwargs}\")\ndef create_policy_with_conflict_name(context, kwargs):\n if kwargs.split(\", \")[-1].split(\"=\")[-1] == \"flow\":\n kwargs_dict = parse_flow_policy_params(kwargs)\n else:\n kwargs_dict = parse_policy_params(kwargs)\n if kwargs_dict[\"handler\"] == \"flow\":\n policy_json = make_policy_flow_json(context.policy['name'], kwargs_dict['handle_label'], kwargs_dict['handler'],\n kwargs_dict['description'],\n kwargs_dict['tap'], kwargs_dict['input_type'], kwargs_dict['port'],\n kwargs_dict['bind'], kwargs_dict['flow_type'],\n kwargs_dict['sample_rate_scaling'], kwargs_dict['only_devices'],\n kwargs_dict['only_ips'], kwargs_dict['only_ports'],\n kwargs_dict['only_interfaces'], kwargs_dict['geoloc_notfound'],\n kwargs_dict['asn_notfound'], kwargs_dict['backend_type'])\n else:\n policy_json = make_policy_json(context.policy['name'], kwargs_dict['handle_label'],\n kwargs_dict[\"handler\"], kwargs_dict[\"description\"], kwargs_dict[\"tap\"],\n kwargs_dict[\"input_type\"], kwargs_dict[\"host_specification\"],\n kwargs_dict[\"bpf_filter_expression\"], kwargs_dict[\"pcap_source\"],\n kwargs_dict[\"only_qname_suffix\"], kwargs_dict[\"only_rcode\"],\n kwargs_dict[\"exclude_noerror\"], kwargs_dict[\"backend_type\"])\n\n context.error_message = create_policy(context.token, policy_json, expected_status_code=409)\n\n\n@step(\"a {handler} policy {input_type} with tap_selector matching {match_type} tag(s) of the tap from {condition}, \"\n \"{metric_groups_enabled} metric_groups enabled, {metric_groups_disabled} metric_groups disabled and settings: {\"\n \"settings} is applied to the group\")\ndef apply_policy_using_tap_selector(context, handler, input_type, match_type, condition, metric_groups_enabled,\n metric_groups_disabled, settings):\n module_name = f\"{handler}_{random_string(5)}\"\n policy_name = policy_name_prefix + random_string(10)\n if condition == \"0 agent\" and match_type == \"any\":\n tags = create_tags_set(\"3\", tag_prefix='testtaptag', string_mode='lower')\n elif condition == \"0 agent\" and match_type == \"all\":\n tags = list(context.tap_tags.values())[0]\n tags.update(create_tags_set(\"1\", tag_prefix='testtaptag', string_mode='lower'))\n elif condition == \"1 agent (1 tag matching)\":\n chosen_key = choice(list(context.tap_tags.keys()))\n tags = context.tap_tags[chosen_key]\n elif condition == \"1 agent (1 tag matching + 1 random tag)\":\n tags = create_tags_set(\"1\", tag_prefix='testtaptag', string_mode='lower')\n chosen_key = choice(list(context.tap_tags.keys()))\n tags.update(context.tap_tags[chosen_key])\n elif condition == \"an agent\":\n tags = list(context.tap_tags.values())[0]\n else:\n raise ValueError(\"Invalid selector condition\")\n\n policy = Policy(policy_name, f\"description: {condition}\", 'pktvisor')\n policy.add_input(input_type, 'tap_selector', input_match=match_type, tags=tags)\n if handler.lower() == \"pcap\":\n policy.add_pcap_module(module_name)\n elif handler.lower() == \"dns\":\n policy.add_dns_module(module_name, settings)\n elif handler.lower() == \"net\":\n policy.add_net_module(module_name, settings)\n elif handler.lower() == \"dhcp\":\n policy.add_dhcp_module(module_name)\n elif handler.lower() == \"bgp\":\n policy.add_bgp_module(module_name)\n elif handler.lower() == \"flow\":\n policy.add_flow_module(module_name, settings)\n elif handler.lower() == \"netprobe\":\n policy.add_netprobe_module(module_name)\n else:\n raise ValueError(\"Invalid policy handler. It must be one of pcap, dns, net, dhcp, bpg or flow.\")\n if metric_groups_enabled.lower() != \"default\" and metric_groups_enabled.lower() != \"none\":\n policy.enable_metric_groups(module_name, metric_groups_enabled.split(\", \"))\n if metric_groups_disabled.lower() != \"default\" and metric_groups_disabled.lower() != \"none\":\n policy.disable_metric_groups(module_name, metric_groups_disabled.split(\", \"))\n json_for_create_policy = remove_empty_from_json(policy.policy)\n context.policy = create_policy(context.token, json_for_create_policy)\n check_policies(context)\n create_new_dataset(context, 1, 'last', 1, 'sink')\n context.metric_groups_enabled = metric_groups_enabled\n context.metric_groups_disabled = metric_groups_disabled\n\n\n@step(\"the policy application error details must show that {message}\")\ndef check_policy_error_detail(context, message):\n error_message = context.agent['last_hb_data']['policy_state'][context.policy['id']]['error']\n assert_that(message, equal_to(error_message), f\"Unexpected error message. Agent: {context.agent}\")\n\n\n@step(\"a new policy is created using: {kwargs}\")\ndef create_new_policy(context, kwargs):\n if kwargs.split(\", \")[-1].split(\"=\")[-1] == \"flow\":\n kwargs_dict = parse_flow_policy_params(kwargs)\n elif kwargs.split(\", \")[-1].split(\"=\")[-1] == \"netprobe\":\n kwargs_dict = parse_netprobe_policy_params(kwargs)\n else:\n kwargs_dict = parse_policy_params(kwargs)\n if kwargs_dict[\"handler\"] == \"flow\":\n policy_json = make_policy_flow_json(kwargs_dict['name'], kwargs_dict['handle_label'], kwargs_dict['handler'],\n kwargs_dict['description'],\n kwargs_dict['tap'], kwargs_dict['input_type'], kwargs_dict['port'],\n kwargs_dict['bind'], kwargs_dict['flow_type'],\n kwargs_dict['sample_rate_scaling'], kwargs_dict['only_devices'],\n kwargs_dict['only_ips'], kwargs_dict['only_ports'],\n kwargs_dict['only_interfaces'], kwargs_dict['geoloc_notfound'],\n kwargs_dict['asn_notfound'], kwargs_dict['backend_type'])\n elif kwargs_dict[\"handler\"] == \"netprobe\":\n policy_json = make_policy_netprobe_json(kwargs_dict[\"name\"], kwargs_dict['handle_label'],\n kwargs_dict[\"handler\"], kwargs_dict[\"description\"], kwargs_dict[\"tap\"],\n kwargs_dict[\"input_type\"], kwargs_dict[\"test_type\"],\n kwargs_dict[\"interval_msec\"], kwargs_dict[\"timeout_msec\"],\n kwargs_dict[\"packets_per_test\"], kwargs_dict[\"packets_interval_msec\"],\n kwargs_dict[\"packet_payload_size\"], kwargs_dict[\"targets\"],\n kwargs_dict[\"backend_type\"])\n else:\n policy_json = make_policy_json(kwargs_dict[\"name\"], kwargs_dict['handle_label'],\n kwargs_dict[\"handler\"], kwargs_dict[\"description\"], kwargs_dict[\"tap\"],\n kwargs_dict[\"input_type\"], kwargs_dict[\"host_specification\"],\n kwargs_dict[\"bpf_filter_expression\"], kwargs_dict[\"pcap_source\"],\n kwargs_dict[\"only_qname_suffix\"], kwargs_dict[\"only_rcode\"],\n kwargs_dict[\"exclude_noerror\"], kwargs_dict[\"backend_type\"])\n\n context.policy = create_policy(context.token, policy_json)\n\n assert_that(context.policy['name'], equal_to(kwargs_dict[\"name\"]), f\"Policy name failed: {context.policy}\")\n if 'policies_created' in context:\n context.policies_created[context.policy['id']] = context.policy['name']\n else:\n context.policies_created = dict()\n context.policies_created[context.policy['id']] = context.policy['name']\n\n\n@step(\"editing a policy using {kwargs}\")\ndef policy_editing(context, kwargs):\n acceptable_keys = ['name', 'handler_label', 'handler', 'description', 'tap', 'input_type',\n 'host_specification', 'bpf_filter_expression', 'pcap_source', 'only_qname_suffix',\n 'only_rcode', 'exclude_noerror', 'backend_type']\n\n handler_label = list(context.policy[\"policy\"][\"handlers\"][\"modules\"].keys())[0]\n\n edited_attributes = {\n 'host_specification': return_policy_attribute(context.policy, 'host_specification'),\n 'bpf_filter_expression': return_policy_attribute(context.policy, 'bpf_filter_expression'),\n 'pcap_source': return_policy_attribute(context.policy, 'pcap_source'),\n 'only_qname_suffix': return_policy_attribute(context.policy, 'only_qname_suffix'),\n 'only_rcode': return_policy_attribute(context.policy, 'only_rcode'),\n 'description': return_policy_attribute(context.policy, 'description'),\n \"name\": return_policy_attribute(context.policy, 'name'),\n \"handler\": return_policy_attribute(context.policy, 'handler'),\n \"backend_type\": return_policy_attribute(context.policy, 'backend'),\n \"tap\": return_policy_attribute(context.policy, 'tap'),\n \"input_type\": return_policy_attribute(context.policy, 'input_type'),\n \"handler_label\": return_policy_attribute(context.policy, 'handler_label'),\n \"exclude_noerror\": return_policy_attribute(context.policy, \"exclude_noerror\")}\n\n if \"host_spec\" in context.policy[\"policy\"][\"input\"][\"config\"].keys():\n edited_attributes[\"host_specification\"] = context.policy[\"policy\"][\"input\"][\"config\"][\"host_spec\"]\n if \"pcap_source\" in context.policy[\"policy\"][\"input\"][\"config\"].keys():\n edited_attributes[\"pcap_source\"] = context.policy[\"policy\"][\"input\"][\"config\"][\"pcap_source\"]\n if \"bpf\" in context.policy[\"policy\"][\"input\"][\"filter\"].keys():\n edited_attributes[\"bpf_filter_expression\"] = context.policy[\"policy\"][\"input\"][\"filter\"][\"bpf\"]\n if \"description\" in context.policy.keys():\n edited_attributes[\"description\"] = context.policy['description']\n if \"only_qname_suffix\" in context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label]['filter'].keys():\n edited_attributes[\"only_qname_suffix\"] = \\\n context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\n \"only_qname_suffix\"]\n if \"only_rcode\" in context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label]['filter'].keys():\n edited_attributes[\"only_rcode\"] = context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\n \"only_rcode\"]\n if \"exclude_noerror\" in context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label]['filter'].keys():\n edited_attributes[\"exclude_noerror\"] = context.policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\n \"exclude_noerror\"]\n\n for i in kwargs.split(\", \"):\n assert_that(i, matches_regexp(\"^.+=.+$\"), f\"Unexpected format for param {i}\")\n item = i.split(\"=\")\n edited_attributes[item[0]] = item[1]\n if item[1].isdigit() is False and str(item[1]).lower() == \"none\":\n edited_attributes[item[0]] = None\n if item[0] == \"handler\":\n edited_attributes[\"handler_label\"] = f\"default_{edited_attributes['handler']}_{random_string(3)}\"\n\n for attribute in acceptable_keys:\n if attribute not in edited_attributes.keys():\n edited_attributes[attribute] = None\n\n assert_that(all(key in acceptable_keys for key, value in edited_attributes.items()), equal_to(True),\n f\"Unexpected parameters for policy. Options are {acceptable_keys}\")\n\n if edited_attributes[\"only_qname_suffix\"] is not None:\n edited_attributes[\"only_qname_suffix\"] = edited_attributes[\"only_qname_suffix\"].replace(\"[\", \"\")\n edited_attributes[\"only_qname_suffix\"] = edited_attributes[\"only_qname_suffix\"].replace(\"]\", \"\")\n edited_attributes[\"only_qname_suffix\"] = edited_attributes[\"only_qname_suffix\"].split(\"/ \")\n\n if edited_attributes[\"name\"] == 'conflict':\n policies_list = list_policies(context.token)\n policies_filtered_list = filter_list_by_parameter_start_with(policies_list, 'name', policy_name_prefix)\n policies_name = list()\n for policy in policies_filtered_list:\n policies_name.append(policy['name'])\n policies_name.remove(context.policy['name'])\n name_to_use = choice(policies_name)\n edited_attributes[\"name\"] = name_to_use\n expected_status_code = 409\n else:\n expected_status_code = 200\n if policy_name_prefix not in edited_attributes[\"name\"]:\n context.random_part_policy_name = f\"_{random_string(10)}\"\n edited_attributes[\"name\"] = policy_name_prefix + edited_attributes[\"name\"] + context.random_part_policy_name\n\n policy_json = make_policy_json(edited_attributes[\"name\"], edited_attributes[\"handler_label\"],\n edited_attributes[\"handler\"], edited_attributes[\"description\"],\n edited_attributes[\"tap\"],\n edited_attributes[\"input_type\"], edited_attributes[\"host_specification\"],\n edited_attributes[\"bpf_filter_expression\"], edited_attributes[\"pcap_source\"],\n edited_attributes[\"only_qname_suffix\"], edited_attributes[\"only_rcode\"],\n edited_attributes[\"exclude_noerror\"], edited_attributes[\"backend_type\"])\n context.considered_timestamp = datetime.now().timestamp()\n\n if expected_status_code == 200:\n\n context.policy = edit_policy(context.token, context.policy['id'], policy_json)\n\n assert_that(context.policy['name'], equal_to(edited_attributes[\"name\"]),\n f\"Policy name failed: {context.policy}\")\n else:\n context.error_message = edit_policy(context.token, context.policy['id'], policy_json,\n expected_status_code=expected_status_code)\n\n\n@step(\"policy {attribute} must be {value}\")\ndef check_policy_attribute(context, attribute, value):\n acceptable_attributes = ['name', 'handler_label', 'handler', 'description', 'tap', 'input_type',\n 'host_specification', 'bpf_filter_expression', 'pcap_source', 'only_qname_suffix',\n 'only_rcode', 'backend_type', 'version', 'exclude_noerror']\n if attribute in acceptable_attributes:\n if attribute == \"name\":\n value = policy_name_prefix + value + context.random_part_policy_name\n policy_value = return_policy_attribute(context.policy, attribute)\n assert_that(str(policy_value), equal_to(value), f\"Unexpected value for policy {attribute}\")\n else:\n raise Exception(f\"Attribute {attribute} not found on policy\")\n\n\n@then(\"referred policy {condition} be listed on the orb policies list\")\ndef check_policies(context, **condition):\n if len(condition) > 0:\n condition = condition[\"condition\"]\n else:\n condition = \"must\"\n policy_id = context.policy['id']\n all_existing_policies = list_policies(context.token)\n is_policy_listed = bool()\n for policy in all_existing_policies:\n if policy_id in policy.values():\n is_policy_listed = True\n break\n is_policy_listed = False\n if condition == 'must':\n assert_that(is_policy_listed, equal_to(True), f\"Policy {policy_id} not listed on policies list\")\n get_policy(context.token, policy_id)\n elif condition == 'must not':\n assert_that(is_policy_listed, equal_to(False), f\"Policy {policy_id} exists in the policies list\")\n policy = get_policy(context.token, policy_id, 404)\n assert_that(policy['error'], equal_to('non-existent entity'),\n \"Unexpected response for get policy request\")\n\n\n@step('one of applied policies is removed')\ndef remove_policy_applied(context):\n context.considered_timestamp = datetime.now().timestamp()\n policy_removed = choice(context.list_agent_policies_id)\n context.policy = get_policy(context.token, policy_removed)\n delete_policy(context.token, context.policy[\"id\"])\n if 'removed_policies_ids' in context:\n context.removed_policies_ids.append(context.policy[\"id\"])\n else:\n context.removed_policies_ids = list()\n context.removed_policies_ids.append(context.policy[\"id\"])\n context.list_agent_policies_id.remove(context.policy[\"id\"])\n context.policies_created.pop(context.policy[\"id\"])\n existing_datasets = list_datasets(context.token)\n context.id_of_datasets_related_to_removed_policy = list_datasets_for_a_policy(policy_removed, existing_datasets)\n\n\n@step('container logs should inform that removed policy was stopped and removed within {time_to_wait} seconds')\ndef check_test(context, time_to_wait):\n stop_log_info = f\"policy [{context.policy['name']}]: stopping\"\n remove_log_info = f\"DELETE /api/v1/policies/{context.policy['name']} 200\"\n policy_removed = policy_stopped_and_removed(context.container_id, stop_log_info, remove_log_info,\n context.considered_timestamp, timeout=time_to_wait)\n assert_that(policy_removed, equal_to(True), f\"Policy {context.policy} failed to be unapplied. \\n\"\n f\"Agent: {json.dumps(context.agent, indent=4)}\")\n\n\n@then('cleanup policies')\ndef clean_policies(context):\n \"\"\"\n Remove all policies starting with 'policy_name_prefix' from the orb\n\n :param context: Behave class that contains contextual information during the running of tests.\n \"\"\"\n token = context.token\n policies_list = list_policies(token)\n policies_filtered_list = filter_list_by_parameter_start_with(policies_list, 'name', policy_name_prefix)\n delete_policies(token, policies_filtered_list)\n\n\n@given(\"that a policy using: {kwargs} already exists\")\ndef new_policy(context, kwargs):\n create_new_policy(context, kwargs)\n check_policies(context)\n\n\n@step('the container logs that were output after {condition} does not contain the message \"{text_to_match}\" referred '\n 'to deleted policy anymore')\ndef check_agent_logs_for_deleted_policies_considering_timestamp(context, condition, text_to_match):\n policies_have_expected_message, logs = \\\n check_agent_log_for_policies(text_to_match, context.container_id, list(context.policy['id']),\n context.considered_timestamp)\n assert_that(len(policies_have_expected_message), equal_to(0),\n f\"Message '{text_to_match}' for policy \"\n f\"'{context.policy['id']}: {context.policy['name']}'\"\n f\" present on logs even after removing policy! \\n\"\n f\"Agent: {json.dumps(context.agent, indent=4)}. \\n\"\n f\"Agent Logs: {logs}\")\n\n\n@step('the container logs that were output after {condition} contain the message \"{'\n 'text_to_match}\" referred to each applied policy within {time_to_wait} seconds')\ndef check_agent_logs_for_policies_considering_timestamp(context, condition, text_to_match, time_to_wait):\n # todo improve the logic for timestamp\n if \"reset\" in condition:\n considered_timestamp = context.considered_timestamp_reset\n else:\n considered_timestamp = context.considered_timestamp\n policies_data = list()\n policies_have_expected_message, logs = \\\n check_agent_log_for_policies(text_to_match, context.container_id, context.list_agent_policies_id,\n considered_timestamp, timeout=time_to_wait)\n if len(set(context.list_agent_policies_id).difference(policies_have_expected_message)) > 0:\n policies_without_message = set(context.list_agent_policies_id).difference(policies_have_expected_message)\n for policy in policies_without_message:\n policies_data.append(get_policy(context.token, policy))\n\n assert_that(policies_have_expected_message, equal_to(set(context.list_agent_policies_id)),\n f\"Message '{text_to_match}' for policy \"\n f\"'{policies_data}'\"\n f\" was not found in the agent logs!\"\n f\"Agent: {json.dumps(context.agent, indent=4)}. \\n\"\n f\"Agent Logs: {logs}\")\n\n\n@step('the container logs contain the message \"{text_to_match}\" referred to each policy within {'\n 'time_to_wait} seconds')\ndef check_agent_logs_for_policies(context, text_to_match, time_to_wait):\n policies_have_expected_message, logs = \\\n check_agent_log_for_policies(text_to_match, context.container_id, context.list_agent_policies_id,\n timeout=time_to_wait)\n assert_that(policies_have_expected_message, equal_to(set(context.list_agent_policies_id)),\n f\"Message '{text_to_match}' for policy \"\n f\"'{set(context.list_agent_policies_id).difference(policies_have_expected_message)}'\"\n f\" was not found in the agent logs!. \\n\"\n f\"Agent: {json.dumps(context.agent, indent=4)}. \\n\"\n f\"Agent Logs: {logs}\")\n\n\n@step('{amount_of_policies} {type_of_policies} policies are applied to the group')\ndef apply_n_policies(context, amount_of_policies, type_of_policies):\n args_for_policies = return_policies_type(int(amount_of_policies), type_of_policies)\n for i in range(int(amount_of_policies)):\n create_new_policy(context, args_for_policies[i][1])\n check_policies(context)\n create_new_dataset(context, 1, 'last', 1, 'sink')\n\n\n@step('{amount_of_policies} {type_of_policies} policies {policies_input} are applied to the group')\ndef apply_n_policies(context, amount_of_policies, type_of_policies, policies_input):\n if \"same input_type as created via config file\" in policies_input:\n policies_input = list(context.tap.values())[0]['input_type']\n args_for_policies = return_policies_type(int(amount_of_policies), type_of_policies, policies_input)\n if \"tap\" in context:\n tap_name = list(context.tap.keys())[0]\n input_type = list(context.tap.values())[0]['input_type']\n else:\n context.tap_name = tap_name = f\"default_tap_before_provision_{random_string(10)}\"\n input_type = policies_input\n for i in range(int(amount_of_policies)):\n kwargs = f\"{args_for_policies[i][1]}, tap={tap_name}, input_type={input_type}\"\n create_new_policy(context, kwargs)\n check_policies(context)\n create_new_dataset(context, 1, 'last', 1, 'sink')\n\n\n@step('{amount_of_policies} {type_of_policies} policies are applied to the group by {amount_of_datasets} datasets each')\ndef apply_n_policies_x_times(context, amount_of_policies, type_of_policies, amount_of_datasets):\n for n in range(int(amount_of_policies)):\n args_for_policies = return_policies_type(int(amount_of_policies), type_of_policies)\n create_new_policy(context, args_for_policies[n][1])\n check_policies(context)\n for x in range(int(amount_of_datasets)):\n create_new_dataset(context, 1, 'last', 1, 'sink')\n\n\n@step(\"{amount_of_policies} duplicated policies is applied to the group\")\ndef apply_duplicate_policy(context, amount_of_policies):\n for i in range(int(amount_of_policies)):\n context.policy = create_duplicated_policy(context.token, context.policy[\"id\"],\n policy_name_prefix + random_string(10))\n check_policies(context)\n create_new_dataset(context, 1, 'last', 1, 'sink')\n\n\n@step(\"try to duplicate this policy {times} times without set new name\")\ndef duplicate_policy_with_same_name(context, times):\n # note that the context.policy is NOT changed, because we need to duplicate always the same policy to make the test\n # correctly\n context.duplicate_policies = list()\n for i in range(int(times)):\n if i <= 2:\n duplicated_policy = create_duplicated_policy(context.token, context.policy['id'])\n else:\n duplicated_policy = create_duplicated_policy(context.token, context.policy['id'], status_code=409)\n context.duplicate_policies.append(duplicated_policy)\n\n\n@step(\"try to duplicate this policy {times} times with a random new name\")\ndef duplicate_policy_with_new_name(context, times):\n # note that the context.policy is NOT changed, because we need to duplicate always the same policy to make the test\n # correctly\n\n context.duplicate_policies = list()\n for i in range(int(times)):\n policy_new_name = policy_name_prefix + random_string(10)\n duplicated_policy = create_duplicated_policy(context.token, context.policy['id'],\n new_policy_name=policy_new_name)\n context.duplicate_policies.append(duplicated_policy)\n\n\n@step(\"{amount_successfully_policies} policies must be successfully duplicated and {amount_error_policies}\"\n \"must return an error\")\ndef check_duplicated_policies_status(context, amount_successfully_policies, amount_error_policies):\n successfully_duplicated = list()\n wrongly_duplicated = 0\n for policy in context.duplicate_policies:\n if \"id\" in policy.keys():\n get_policy(context.token, policy['id'])\n successfully_duplicated.append(policy['id'])\n elif \"error\" in policy.keys():\n wrongly_duplicated += 1\n assert_that(len(successfully_duplicated),\n equal_to(int(amount_successfully_policies)), f\"Amount of policies successfully duplicated fails.\"\n f\"Policies duplicated: {successfully_duplicated}\")\n assert_that(wrongly_duplicated, equal_to(int(amount_error_policies)), f\"Amount of policies wrongly duplicated fails\"\n f\".\")\n\n\ndef create_duplicated_policy(token, policy_id, new_policy_name=None, status_code=201):\n \"\"\"\n\n :param (str) token: used for API authentication\n :param (str) policy_id: id of policy that will be duplicated\n :param (str) new_policy_name: name for the new policy created\n :param (int) status_code: status code that must return on response\n :return: (dict) new policy created\n \"\"\"\n json_request = {\"name\": new_policy_name}\n json_request = remove_empty_from_json(json_request)\n headers_request = {'Content-type': 'application/json', 'Accept': 'application/json',\n 'Authorization': f'Bearer {token}'}\n post_url = f\"{orb_url}/api/v1/policies/agent/{policy_id}/duplicate\"\n response = requests.post(post_url, json=json_request, headers=headers_request, verify=verify_ssl_bool)\n try:\n response_json = response.json()\n except ValueError:\n response_json = response.text\n assert_that(response.status_code, equal_to(status_code),\n 'Request to create duplicated policy failed with status=' + str(response.status_code) + ': '\n + str(response_json))\n if status_code == 201:\n compare_two_policies(token, policy_id, response.json()['id'])\n return response_json\n\n\ndef compare_two_policies(token, id_policy_one, id_policy_two):\n \"\"\"\n\n :param (str) token: used for API authentication\n :param (str) id_policy_one: id of first policy\n :param str() id_policy_two: id of second policy\n\n \"\"\"\n policy_one = get_policy(token, id_policy_one)\n policy_two = get_policy(token, id_policy_two)\n diff = DeepDiff(policy_one, policy_two, exclude_paths={\"root['name']\", \"root['id']\", \"root['ts_last_modified']\",\n \"root['ts_created']\"})\n assert_that(diff, equal_to({}), f\"Policy duplicated is not equal the one that generate it. Policy 1: {policy_one}\\n\"\n f\"Policy 2: {policy_two}\")\n\n\ndef create_policy(token, json_request, expected_status_code=201):\n \"\"\"\n\n Creates a new policy in Orb control plane\n\n :param (str) token: used for API authentication\n :param (dict) json_request: policy json\n :expected_status_code (int): code to be returned on response\n :return: response of policy creation\n\n \"\"\"\n\n headers_request = {'Content-type': 'application/json', 'Accept': '*/*', 'Authorization': f'Bearer {token}'}\n\n response = requests.post(orb_url + '/api/v1/policies/agent', json=json_request, headers=headers_request,\n verify=verify_ssl_bool)\n try:\n response_json = response.json()\n except ValueError:\n response_json = response.text\n assert_that(response.status_code, equal_to(expected_status_code),\n 'Request to create policy failed with status=' + str(response.status_code) + ': '\n + str(response_json))\n\n return response_json\n\n\ndef edit_policy(token, policy_id, json_request, expected_status_code=200):\n \"\"\"\n Editing a policy on Orb control plane\n\n :param (str) token: used for API authentication\n :param (str) policy_id: that identifies the policy to be edited\n :param (dict) json_request: policy json\n :param (int) expected_status_code: status to be returned on response\n :return: response of policy editing\n \"\"\"\n headers_request = {'Content-type': 'application/json', 'Accept': '*/*', 'Authorization': f'Bearer {token}'}\n\n response = requests.put(orb_url + f\"/api/v1/policies/agent/{policy_id}\", json=json_request,\n headers=headers_request, verify=verify_ssl_bool)\n try:\n response_json = response.json()\n except ValueError:\n response_json = response.text\n assert_that(response.status_code, equal_to(expected_status_code),\n 'Request to editing policy failed with status=' + str(response.status_code) + ': '\n + str(response_json))\n\n return response_json\n\n\ndef make_policy_json(name, handler_label, handler, description=None, tap=\"default_pcap\",\n input_type=\"pcap\", host_specification=None, bpf_filter_expression=None, pcap_source=None,\n only_qname_suffix=None, only_rcode=None, exclude_noerror=None, backend_type=\"pktvisor\"):\n \"\"\"\n\n Generate a policy json\n\n :param (str) name: of the policy to be created\n :param (str) handler_label: of the handler\n :param (str) handler: to be added\n :param (str) description: description of policy\n :param tap: named, host specific connection specifications for the raw input streams accessed by pktvisor\n :param input_type: this must reference a tap name, or application of the policy will fail\n :param (str) host_specification: Subnets (comma separated) which should be considered belonging to this host,\n in CIDR form. Used for ingress/egress determination, defaults to host attached to the network interface.\n :param (str) bpf_filter_expression: these decide exactly which data to summarize and expose for collection.\n Tcpdump compatible filter expression for limiting the traffic examined\n (with BPF). See https://www.tcpdump.org/manpages/tcpdump.1.html.\n :param (str) pcap_source: Packet capture engine to use. Defaults to best for platform.\n Options: af_packet (linux only) or libpcap.\n :param (str) only_qname_suffix: Filter out any queries whose QName does not end in a suffix on the list\n :param (int) only_rcode: Filter out any queries which are not the given RCODE. Options:\n \"NOERROR\": 0,\n \"NXDOMAIN\": 3,\n \"REFUSED\": 5,\n \"SERVFAIL\": 2\n :param exclude_noerror: Filter out any queries which are not error response\n :param backend_type: Agent backend this policy is for. Cannot change once created. Default: pktvisor\n :return: (dict) a dictionary containing the created policy data\n \"\"\"\n if only_rcode is not None: only_rcode = int(only_rcode)\n assert_that(pcap_source, any_of(equal_to(None), equal_to(\"af_packet\"), equal_to(\"libpcap\")),\n \"Unexpected type of pcap_source\")\n assert_that(only_rcode, any_of(equal_to(None), equal_to(0), equal_to(2), equal_to(3), equal_to(5)),\n \"Unexpected type of only_rcode\")\n if exclude_noerror is not None:\n assert_that(exclude_noerror.lower(), any_of(equal_to(\"false\"), equal_to(\"true\")),\n \"Unexpected value for exclude no error filter\")\n exclude_noerror = eval(exclude_noerror.title())\n assert_that(handler, any_of(equal_to(\"dns\"), equal_to(\"dhcp\"), equal_to(\"net\")), \"Unexpected handler for policy\")\n assert_that(name, not_none(), \"Unable to create policy without name\")\n\n if only_qname_suffix is not None and isinstance(only_qname_suffix, str):\n only_qname_suffix = only_qname_suffix.split(\",\")\n\n json_request = {\"name\": name,\n \"description\": description,\n \"backend\": backend_type,\n \"policy\": {\n \"kind\": \"collection\",\n \"input\": {\n \"tap\": tap,\n \"input_type\": input_type,\n \"config\": {\n \"host_spec\": host_specification,\n \"pcap_source\": pcap_source},\n \"filter\": {\"bpf\": bpf_filter_expression}},\n \"handlers\": {\n \"modules\": {\n handler_label: {\n \"type\": handler,\n \"filter\": {\n \"only_qname_suffix\": only_qname_suffix,\n \"only_rcode\": only_rcode,\n \"exclude_noerror\": exclude_noerror\n }\n }\n }\n }\n }\n }\n json_request = remove_empty_from_json(json_request.copy())\n return json_request\n\n\ndef make_policy_flow_json(name, handler_label, handler, description=None, tap=\"default_flow\",\n input_type=\"flow\", port=None, bind=None, flow_type=None, sample_rate_scaling=None,\n only_devices=None, only_ips=None, only_ports=None, only_interfaces=None, geoloc_notfound=None,\n asn_notfound=None, backend_type=\"pktvisor\"):\n \"\"\"\n\n Generate a policy json\n\n :param (str) name: of the policy to be created\n :param (str) handler_label: of the handler\n :param (str) handler: to be added\n :param (str) description: description of policy\n :param tap: named, host specific connection specifications for the raw input streams accessed by pktvisor\n :param input_type: this must reference a tap name, or application of the policy will fail\n :param backend_type: Agent backend this policy is for. Cannot change once created. Default: pktvisor\n :return: (dict) a dictionary containing the created policy data\n \"\"\"\n assert_that(handler, equal_to(\"flow\"), \"Unexpected handler for policy\")\n assert_that(name, not_none(), \"Unable to create policy without name\")\n\n json_request = {\"name\": name,\n \"description\": description,\n \"backend\": backend_type,\n \"policy\": {\n \"kind\": \"collection\",\n \"input\": {\n \"tap\": tap,\n \"input_type\": input_type,\n \"config\": {\"port\": port,\n \"bind\": bind,\n \"only_ports\": only_ports,\n \"flow_type\": flow_type}},\n \"handlers\": {\n \"modules\": {\n handler_label: {\n \"type\": handler,\n \"filter\": {\"only_devices\": only_devices,\n \"only_ips\": only_ips,\n \"only_ports\": only_ports,\n \"only_interfaces\": only_interfaces,\n \"geoloc_notfound\": geoloc_notfound,\n \"asn_notfound\": asn_notfound},\n \"config\": {\n \"sample_rate_scaling\": sample_rate_scaling}\n }\n }\n }\n }\n }\n json_request = remove_empty_from_json(json_request.copy())\n return json_request\n\n\ndef make_policy_netprobe_json(name, handler_label, handler, description=None, tap=\"default_netprobe\",\n input_type=\"flow\", test_type='ping', interval_msec=None, timeout_msec=None,\n packets_per_test=None, packets_interval_msec=None, packet_payload_size=None, targets=None,\n backend_type=\"pktvisor\"):\n \"\"\"\n\n Generate a policy json\n\n :param (str) name: of the policy to be created\n :param (str) handler_label: of the handler\n :param (str) handler: to be added\n :param (str) description: description of policy\n :param tap: named, host specific connection specifications for the raw input streams accessed by pktvisor\n :param input_type: this must reference a tap name, or application of the policy will fail\n :param backend_type: Agent backend this policy is for. Cannot change once created. Default: pktvisor\n :return: (dict) a dictionary containing the created policy data\n \"\"\"\n assert_that(handler, equal_to(\"netprobe\"), \"Unexpected handler for policy\")\n assert_that(name, not_none(), \"Unable to create policy without name\")\n\n #netprobe configs are on tap level\n json_request = {\"name\": name,\n \"description\": description,\n \"backend\": backend_type,\n \"policy\": {\n \"kind\": \"collection\",\n \"input\": {\n \"tap\": tap,\n \"input_type\": input_type,\n \"config\": {\"test_type\": test_type,\n \"interval_msec\": interval_msec,\n \"timeout_msec\": timeout_msec,\n \"packets_per_test\": packets_per_test,\n \"packets_interval_msec\": packets_interval_msec,\n \"packet_payload_size\": packet_payload_size,\n \"targets\": targets}},\n \"handlers\": {\n \"modules\": {\n handler_label: {\n \"type\": handler,\n \"config\": {},\n \"filter\": {}\n }\n }\n }\n }\n }\n json_request = remove_empty_from_json(json_request.copy())\n return json_request\n\n\ndef get_policy(token, policy_id, expected_status_code=200):\n \"\"\"\n Gets a policy from Orb control plane\n\n :param (str) token: used for API authentication\n :param (str) policy_id: that identifies policy to be fetched\n :param (int) expected_status_code: expected request's status code. Default:200.\n :returns: (dict) the fetched policy\n \"\"\"\n\n get_policy_response = requests.get(orb_url + '/api/v1/policies/agent/' + policy_id,\n headers={'Authorization': f'Bearer {token}'}, verify=verify_ssl_bool)\n try:\n response_json = get_policy_response.json()\n except ValueError:\n response_json = get_policy_response.text\n assert_that(get_policy_response.status_code, equal_to(expected_status_code),\n 'Request to get policy id=' + policy_id + ' failed with status= ' + str(get_policy_response.status_code)\n + \" response= \" + str(response_json))\n\n return response_json\n\n\ndef list_policies(token, limit=100, offset=0):\n \"\"\"\n Lists all policies from Orb control plane that belong to this user\n\n :param (str) token: used for API authentication\n :param (int) limit: Size of the subset to retrieve. (max 100). Default = 100\n :param (int) offset: Number of items to skip during retrieval. Default = 0.\n :returns: (list) a list of policies\n \"\"\"\n\n all_policies, total, offset = list_up_to_limit_policies(token, limit, offset)\n\n new_offset = limit + offset\n\n while new_offset < total:\n policies_from_offset, total, offset = list_up_to_limit_policies(token, limit, new_offset)\n all_policies = all_policies + policies_from_offset\n new_offset = limit + offset\n\n return all_policies\n\n\ndef list_up_to_limit_policies(token, limit=100, offset=0):\n \"\"\"\n Lists up to 100 policies from Orb control plane that belong to this user\n\n :param (str) token: used for API authentication\n :param (int) limit: Size of the subset to retrieve. (max 100). Default = 100\n :param (int) offset: Number of items to skip during retrieval. Default = 0.\n :returns: (list) a list of policies, (int) total policies on orb, (int) offset\n \"\"\"\n\n response = requests.get(orb_url + '/api/v1/policies/agent', headers={'Authorization': f'Bearer {token}'},\n params={'limit': limit, 'offset': offset}, verify=verify_ssl_bool)\n try:\n response_json = response.json()\n except ValueError:\n response_json = response.text\n\n assert_that(response.status_code, equal_to(200),\n 'Request to list policies failed with status=' + str(response.status_code) + ': '\n + str(response_json))\n\n policies_as_json = response_json\n return policies_as_json['data'], policies_as_json['total'], policies_as_json['offset']\n\n\ndef delete_policies(token, list_of_policies):\n \"\"\"\n Deletes from Orb control plane the policies specified on the given list\n\n :param (str) token: used for API authentication\n :param (list) list_of_policies: that will be deleted\n \"\"\"\n\n for policy in list_of_policies:\n delete_policy(token, policy['id'])\n\n\ndef delete_policy(token, policy_id):\n \"\"\"\n Deletes a policy from Orb control plane\n\n :param (str) token: used for API authentication\n :param (str) policy_id: that identifies the policy to be deleted\n \"\"\"\n\n response = requests.delete(orb_url + '/api/v1/policies/agent/' + policy_id,\n headers={'Authorization': f'Bearer {token}'}, verify=verify_ssl_bool)\n\n assert_that(response.status_code, equal_to(204), 'Request to delete policy id='\n + policy_id + ' failed with status=' + str(response.status_code))\n\n\ndef check_logs_contain_message_for_policies(logs, expected_message, list_agent_policies_id, considered_timestamp):\n \"\"\"\n Checks agent container logs for expected message for all applied policies and the log analysis loop is interrupted\n as soon as a log is found with the expected message for each applied policy.\n\n :param (list) logs: list of log lines\n :param (str) expected_message: message that we expect to find in the logs\n :param (list) list_agent_policies_id: list with all policy id applied to the agent\n :param (float) considered_timestamp: timestamp from which the log will be considered\n :returns: (set) set containing the ids of the policies for which the expected logs exist\n\n\n\n \"\"\"\n policies_have_expected_message = set()\n for log_line in logs:\n log_line = safe_load_json(log_line)\n if is_expected_msg_in_log_line(log_line, expected_message, list_agent_policies_id,\n considered_timestamp) is True:\n policies_have_expected_message.add(log_line['policy_id'])\n if set(list_agent_policies_id) == set(policies_have_expected_message):\n return policies_have_expected_message\n return policies_have_expected_message\n\n\n@threading_wait_until\ndef check_agent_log_for_policies(expected_message, container_id, list_agent_policies_id,\n considered_timestamp=datetime.now().timestamp(), event=None):\n \"\"\"\n Checks agent container logs for expected message for each applied policy over a period of time\n\n :param (str) expected_message: message that we expect to find in the logs\n :param (str) container_id: agent container id\n :param (list) list_agent_policies_id: list with all policy id applied to the agent\n :param (float) considered_timestamp: timestamp from which the log will be considered.\n Default: timestamp at which behave execution is started\n :param (obj) event: threading.event\n \"\"\"\n logs = get_orb_agent_logs(container_id)\n policies_have_expected_message = \\\n check_logs_contain_message_for_policies(logs, expected_message, list_agent_policies_id,\n considered_timestamp)\n if len(policies_have_expected_message) == len(list_agent_policies_id):\n event.set()\n return policies_have_expected_message, logs\n\n return policies_have_expected_message, logs\n\n\ndef is_expected_msg_in_log_line(log_line, expected_message, list_agent_policies_id, considered_timestamp):\n \"\"\"\n Test if log line has expected message\n - not be None\n - have a 'msg' property that matches the expected_message string.\n - have a 'ts' property whose value is greater than considered_timestamp\n - have a property 'policy_id' that is also contained in the list_agent_policies_id list\n\n :param (dict) log_line: agent container log line\n :param (str) expected_message: message that we expect to find in the logs\n :param (list) list_agent_policies_id: list with all policy id applied to the agent\n :param (float) considered_timestamp: timestamp from which the log will be considered.\n :return: (bool) whether expected message was found in the logs for expected policies\n\n \"\"\"\n if log_line is not None:\n if expected_message in log_line['msg'] and 'policy_id' in log_line.keys():\n if log_line['policy_id'] in list_agent_policies_id:\n if isinstance(log_line['ts'], int) and log_line['ts'] > considered_timestamp:\n return True\n elif isinstance(log_line['ts'], str) and datetime.timestamp(ciso8601.parse_datetime(log_line['ts'])) > \\\n considered_timestamp:\n return True\n return False\n\n\ndef is_expected_log_info_in_log_line(log_line, expected_log_info, considered_timestamp):\n \"\"\"\n Test if log line has expected log\n - not be None\n - have a 'log' property that contains the expected_log_info string.\n - have a 'ts' property whose value is greater than considered_timestamp\n\n :param (dict) log_line: agent container log line\n :param (str) expected_log_info: log info that we expect to find in the logs\n :param (float) considered_timestamp: timestamp from which the log will be considered.\n :return: (bool) whether expected log info was found in the logs\n\n \"\"\"\n if log_line is not None and 'log' in log_line.keys() and isinstance(log_line['ts'], int) and log_line['ts'] > \\\n considered_timestamp:\n if expected_log_info in log_line['log']:\n return True\n elif log_line is not None and 'log' in log_line.keys() and isinstance(log_line['ts'], str) and \\\n datetime.timestamp(ciso8601.parse_datetime(log_line['ts'])) > considered_timestamp:\n if expected_log_info in log_line['log']:\n return True\n return False\n\n\ndef list_datasets_for_a_policy(policy_id, datasets_list):\n \"\"\"\n\n :param (str) policy_id: that identifies the policy\n :param (list) datasets_list: list of datasets that will be filtered by policy\n :return: (list) list of ids of datasets related to referred policy\n \"\"\"\n id_of_related_datasets = list()\n for dataset in datasets_list:\n if dataset['agent_policy_id'] == policy_id:\n id_of_related_datasets.append(dataset['id'])\n return id_of_related_datasets\n\n\ndef return_policies_type(k, policies_type='mixed', input_type=\"pcap\"):\n assert_that(policies_type, any_of(equal_to('mixed'), any_of('simple'), any_of('advanced')),\n \"Unexpected value for policies type\")\n\n if input_type == \"flow\":\n advanced = {\n \"advanced_flow\": \"handler=flow, description='policy_flow', asn_notfound=true, sample_rate_scaling=true\"\n }\n simple = {\n 'simple_flow': \"handler=flow\"\n }\n elif input_type == \"netprobe\":\n advanced = {\n \"advanced_netprobe_1\": \"handler=netprobe, test_type=ping, interval_msec=3000, timeout_msec=1000, packets_per_test=2, packets_interval_msec=30, packet_payload_size=56\",\n \"advanced_netprobe_2\": \"handler=netprobe, test_type=ping, packet_payload_size=56\",\n \"advanced_netprobe_3\": \"handler=netprobe, test_type=ping, interval_msec=900, timeout_msec=500, packets_per_test=5, packets_interval_msec=45\"\n }\n simple = {\n 'simple_netprobe': \"handler=netprobe, test_type=ping\"\n }\n else:\n advanced = {\n 'advanced_dns_libpcap_0': \"handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=0\",\n 'advanced_dns_libpcap_2': \"handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=2\",\n 'advanced_dns_libpcap_3': \"handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=3\",\n 'advanced_dns_libpcap_5': \"handler=dns, description='policy_dns', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap, only_qname_suffix=[.orb.live/ .google.com], only_rcode=5\",\n\n 'advanced_net': \"handler=net, description='policy_net', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap\",\n }\n\n simple = {\n\n 'simple_dns': \"handler=dns\",\n 'simple_net': \"handler=net\"\n }\n\n if input_type != \"dnstap\":\n advanced['advanced_dhcp'] = \"handler=dhcp, description='policy_dhcp', host_specification=10.0.1.0/24,10.0.2.1/32,2001:db8::/64, bpf_filter_expression=udp port 53, pcap_source=libpcap\"\n simple['simple_dhcp'] = \"handler=dhcp\"\n\n mixed = dict()\n mixed.update(advanced)\n mixed.update(simple)\n\n master_dict = {'advanced': advanced, 'simple': simple, 'mixed': mixed}\n\n if k <= len(master_dict[policies_type]):\n return sample(list(master_dict[policies_type].items()), k=k)\n\n return choices(list(master_dict[policies_type].items()), k=k)\n\n\ndef return_policy_attribute(policy, attribute):\n \"\"\"\n\n :param (dict) policy: json of policy\n :param (str) attribute: policy attribute whose value is to be returned\n :return: (str, bool or None) value referring to policy attribute\n\n \"\"\"\n\n handler_label = list(policy[\"policy\"][\"handlers\"][\"modules\"].keys())[0]\n if attribute == \"name\":\n return policy['name']\n elif attribute == \"handler_label\":\n return handler_label\n elif attribute == \"handler\":\n return list(policy[\"policy\"][\"handlers\"][\"modules\"].values())[0][\"type\"]\n elif attribute == \"backend_type\":\n return policy[\"backend\"]\n elif attribute == \"tap\":\n return policy[\"policy\"][\"input\"][\"tap\"]\n elif attribute == \"input_type\":\n return policy[\"policy\"][\"input\"][\"input_type\"]\n elif attribute == \"version\" and \"version\" in policy.keys():\n return policy[\"version\"]\n elif attribute == \"description\" and \"description\" in policy.keys():\n return policy['description']\n elif attribute == \"host_specification\" and \"host_spec\" in policy[\"policy\"][\"input\"][\"config\"].keys():\n return policy[\"policy\"][\"input\"][\"config\"][\"host_spec\"]\n elif attribute == \"bpf_filter_expression\" and \"bpf\" in policy[\"policy\"][\"input\"][\"filter\"].keys():\n return policy[\"policy\"][\"input\"][\"filter\"][\"bpf\"]\n elif attribute == \"pcap_source\" and \"pcap_source\" in policy[\"policy\"][\"input\"][\"config\"].keys():\n return policy[\"policy\"][\"input\"][\"config\"][\"pcap_source\"]\n elif attribute == \"only_qname_suffix\" and \"only_qname_suffix\" in \\\n policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"].keys():\n return policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\"only_qname_suffix\"]\n elif attribute == \"exclude_noerror\" and \"exclude_noerror\" in \\\n policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"].keys():\n return policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\"exclude_noerror\"]\n elif attribute == \"only_rcode\" and \"only_rcode\" in policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\n \"filter\"].keys():\n return policy[\"policy\"][\"handlers\"][\"modules\"][handler_label][\"filter\"][\"only_rcode\"]\n else:\n return None\n\n\n@threading_wait_until\ndef policy_stopped_and_removed(container_id, stop_policy_info, remove_policy_info, start_considering_time, event=None):\n \"\"\"\n\n :param (str) container_id: agent container id\n :param (str) stop_policy_info: log info that confirms that the policy was stopped\n :param (str) remove_policy_info: log info that confirms that the policy was removed\n :param (str) start_considering_time: timestamp after which logs must be validated\n :param (obj) event: threading.event\n :return: (bool) if the expected message is found return True, if not, False\n \"\"\"\n found = {'stop': False, 'remove': False}\n logs = get_orb_agent_logs(container_id)\n for log_line in logs:\n log_line = safe_load_json(log_line)\n if found['stop'] is False:\n found['stop'] = is_expected_log_info_in_log_line(log_line, stop_policy_info, start_considering_time)\n\n if found['remove'] is False:\n found['remove'] = is_expected_log_info_in_log_line(log_line, remove_policy_info,\n start_considering_time)\n if found['stop'] is True and found['remove'] is True:\n event.set()\n return event.is_set()\n return event.is_set()\n\n\ndef parse_policy_params(kwargs):\n acceptable_keys = ['name', 'handler_label', 'handler', 'description', 'tap', 'input_type',\n 'host_specification', 'bpf_filter_expression', 'pcap_source', 'only_qname_suffix',\n 'only_rcode', 'exclude_noerror', 'backend_type']\n\n name = policy_name_prefix + random_string(10)\n\n kwargs_dict = {'name': name, 'handler': None, 'description': None, 'tap': \"default_pcap\",\n 'input_type': \"pcap\", 'host_specification': None, 'bpf_filter_expression': None,\n 'pcap_source': None, 'only_qname_suffix': None, 'only_rcode': None, 'exclude_noerror': None,\n 'backend_type': \"pktvisor\"}\n\n for i in kwargs.split(\", \"):\n assert_that(i, matches_regexp(\"^.+=.+$\"), f\"Unexpected format for param {i}\")\n item = i.split(\"=\")\n kwargs_dict[item[0]] = item[1]\n\n assert_that(all(key in acceptable_keys for key, value in kwargs_dict.items()), equal_to(True),\n f\"Unexpected parameters for policy. Options are {acceptable_keys}\")\n\n if kwargs_dict[\"only_qname_suffix\"] is not None:\n kwargs_dict[\"only_qname_suffix\"] = kwargs_dict[\"only_qname_suffix\"].replace(\"[\", \"\")\n kwargs_dict[\"only_qname_suffix\"] = kwargs_dict[\"only_qname_suffix\"].replace(\"]\", \"\")\n kwargs_dict[\"only_qname_suffix\"] = kwargs_dict[\"only_qname_suffix\"].split(\"/ \")\n\n if policy_name_prefix not in kwargs_dict[\"name\"]:\n kwargs_dict[\"name\"] + policy_name_prefix + kwargs_dict[\"name\"]\n\n assert_that(kwargs_dict[\"handler\"], any_of(equal_to(\"dns\"), equal_to(\"dhcp\"), equal_to(\"net\")),\n \"Unexpected handler for policy\")\n kwargs_dict['handle_label'] = f\"default_{kwargs_dict['handler']}_{random_string(3)}\"\n\n return kwargs_dict\n\n\ndef parse_flow_policy_params(kwargs):\n name = policy_name_prefix + random_string(10)\n\n kwargs_dict = {'name': name, 'handler': None, 'description': None, 'tap': \"default_flow\",\n 'input_type': \"flow\", 'port': None, 'bind': None, 'flow_type': None, 'sample_rate_scaling': None,\n 'only_devices': None, 'only_ips': None, 'only_ports': None, 'only_interfaces': None,\n 'geoloc_notfound': None,\n 'asn_notfound': None, 'backend_type': \"pktvisor\"}\n\n for i in kwargs.split(\", \"):\n assert_that(i, matches_regexp(\"^.+=.+$\"), f\"Unexpected format for param {i}\")\n item = i.split(\"=\")\n kwargs_dict[item[0]] = item[1]\n\n if policy_name_prefix not in kwargs_dict[\"name\"]:\n kwargs_dict[\"name\"] + policy_name_prefix + kwargs_dict[\"name\"]\n\n assert_that(kwargs_dict[\"handler\"], equal_to(\"flow\"), \"Unexpected handler for policy\")\n kwargs_dict['handle_label'] = f\"default_{kwargs_dict['handler']}_{random_string(3)}\"\n\n return kwargs_dict\n\n\ndef parse_netprobe_policy_params(kwargs):\n name = policy_name_prefix + random_string(10)\n\n kwargs_dict = {'name': name, 'handler': None, 'description': None, 'tap': \"default_netprobe\",\n 'input_type': \"netprobe\", 'test_type': 'ping', 'interval_msec': None, 'timeout_msec': None,\n 'packets_per_test': None, 'packets_interval_msec': None, 'packet_payload_size': None,\n 'targets': None, 'backend_type': \"pktvisor\"}\n\n for i in kwargs.split(\", \"):\n assert_that(i, matches_regexp(\"^.+=.+$\"), f\"Unexpected format for param {i}\")\n item = i.split(\"=\")\n kwargs_dict[item[0]] = item[1]\n\n if policy_name_prefix not in kwargs_dict[\"name\"]:\n kwargs_dict[\"name\"] + policy_name_prefix + kwargs_dict[\"name\"]\n\n assert_that(kwargs_dict[\"handler\"], equal_to(\"netprobe\"), \"Unexpected handler for policy\")\n kwargs_dict['handle_label'] = f\"default_{kwargs_dict['handler']}_{random_string(3)}\"\n\n return kwargs_dict\n\n\nclass HandlerConfigs(UtilsManager):\n def __init__(self):\n self.handler_configs = dict()\n\n def add_configs(self, **kwargs):\n self.handler_configs = UtilsManager.add_configs(self, self.handler_configs, **kwargs)\n\n return self.handler_configs\n\n def remove_configs(self, *args):\n self.handler_configs = UtilsManager.remove_configs(self, self.handler_configs, *args)\n\n return self.handler_configs\n\n def json(self):\n return json.dumps(self.handler_configs)\n\n\nclass HandlerModules(HandlerConfigs):\n def __init__(self):\n self.handler_modules = dict()\n\n def __build_module(self, name, module_type, configs_list, filters_list, require_version=None):\n module = {\n name: {\n \"type\": module_type,\n \"config\": {\n },\n\n \"filter\": {\n },\n \"metric_groups\": {\n }\n }\n }\n if require_version is not None:\n module[name][\"require_version\"] = require_version\n\n module = UtilsManager.update_object_with_filters_and_configs(self, module, name, configs_list, filters_list)\n\n self.handler_modules.update(module)\n\n def __parse_module_settings(self, settings):\n if settings is None or settings == \"default\":\n settings_json = {}\n else:\n settings_is_json, settings_json = is_json(settings)\n assert_that(settings_is_json, is_(True), f\"settings must be written in json format. Current settings: \"\n f\"{settings}\")\n settings_json = values_to_boolean(settings_json)\n return settings_json\n\n def add_dns_module(self, name, settings=None):\n\n settings_json = self.__parse_module_settings(settings)\n\n self.name = name\n self.public_suffix_list = {'public_suffix_list': settings_json.get('public_suffix_list', None)}\n self.only_rcode = {'only_rcode': settings_json.get(\"only_rcode\", None)}\n self.exclude_noerror = {'exclude_noerror': settings_json.get(\"exclude_noerror\", None)}\n self.only_dnssec_response = {'only_dnssec_response': settings_json.get(\"only_dnssec_response\", None)}\n self.answer_count = {'answer_count': settings_json.get(\"answer_count\", None)}\n self.only_qtype = {'only_qtype': settings_json.get(\"only_qtype\", None)}\n self.only_qname_suffix = {'only_qname_suffix': settings_json.get(\"only_qname_suffix\", None)}\n self.geoloc_notfound = {'geoloc_notfound': settings_json.get(\"geoloc_notfound\", None)}\n self.asn_notfound = {'asn_notfound': settings_json.get(\"asn_notfound\", None)}\n self.dnstap_msg_type = {'dnstap_msg_type': settings_json.get(\"dnstap_msg_type\", None)}\n self.require_version = settings_json.get(\"require_version\", None)\n\n dns_configs = [self.public_suffix_list]\n\n dns_filters = [self.only_rcode, self.exclude_noerror, self.only_dnssec_response, self.answer_count,\n self.only_qtype, self.only_qname_suffix,\n self.geoloc_notfound, self.asn_notfound, self.dnstap_msg_type]\n\n self.__build_module(self.name, \"dns\", dns_configs, dns_filters, self.require_version)\n return self.handler_modules\n\n def add_net_module(self, name, settings=None):\n\n settings_json = self.__parse_module_settings(settings)\n\n self.name = name\n self.geoloc_notfound = {'geoloc_notfound': settings_json.get('geoloc_notfound', None)}\n self.asn_notfound = {'asn_notfound': settings_json.get('asn_notfound', None)}\n self.only_geoloc_prefix = {'only_geoloc_prefix': settings_json.get('only_geoloc_prefix', None)}\n self.only_asn_number = {'only_asn_number': settings_json.get('only_asn_number', None)}\n self.require_version = settings_json.get(\"require_version\", None)\n\n net_configs = []\n\n net_filters = [self.geoloc_notfound, self.asn_notfound, self.only_geoloc_prefix, self.only_asn_number]\n\n self.__build_module(self.name, \"net\", net_configs, net_filters, self.require_version)\n return self.handler_modules\n\n def add_dhcp_module(self, name):\n self.name = name\n\n dhcp_configs = []\n\n dhcp_filters = []\n\n self.__build_module(self.name, \"dhcp\", dhcp_configs, dhcp_filters)\n return self.handler_modules\n\n def add_bgp_module(self, name):\n self.name = name\n\n bgp_configs = []\n\n bgp_filters = []\n\n self.__build_module(self.name, \"bgp\", bgp_configs, bgp_filters)\n return self.handler_modules\n\n def add_pcap_module(self, name):\n self.name = name\n\n pcap_configs = []\n\n pcap_filters = []\n\n self.__build_module(self.name, \"pcap\", pcap_configs, pcap_filters)\n return self.handler_modules\n\n def add_flow_module(self, name, settings=None):\n\n settings_json = self.__parse_module_settings(settings)\n\n self.name = name\n self.sample_rate_scaling = {'sample_rate_scaling': settings_json.get(\"sample_rate_scaling\", None)}\n self.recorded_stream = {'recorded_stream': settings_json.get(\"recorded_stream\", None)}\n self.only_devices = {'only_devices': settings_json.get(\"only_devices\", None)}\n self.only_ips = {'only_ips': settings_json.get(\"only_ips\", None)}\n self.only_ports = {'only_ports': settings_json.get(\"only_ports\", None)}\n self.only_interfaces = {'only_interfaces': settings_json.get(\"only_interfaces\", None)}\n self.geoloc_notfound = {'geoloc_notfound': settings_json.get(\"geoloc_notfound\", None)}\n self.asn_notfound = {'asn_notfound': settings_json.get(\"asn_notfound\", None)}\n\n flow_configs = [self.sample_rate_scaling, self.recorded_stream]\n\n flow_filters = [self.only_devices, self.only_ips, self.only_ports, self.only_interfaces, self.geoloc_notfound,\n self.asn_notfound]\n\n self.__build_module(self.name, \"flow\", flow_configs, flow_filters)\n return self.handler_modules\n\n def add_netprobe_module(self, name):\n self.name = name\n\n netprobe_configs = []\n\n netprobe_filters = []\n\n self.__build_module(self.name, \"netprobe\", netprobe_configs, netprobe_filters)\n return self.handler_modules\n\n def add_configs(self, name, **kwargs):\n self.handler_modules[name][\"config\"] = UtilsManager.add_configs(self, self.handler_modules[name][\"config\"],\n **kwargs)\n\n return self.handler_modules\n\n def add_filters(self, name, **kwargs):\n if \"filter\" not in self.handler_modules[name].keys():\n self.handler_modules[name].update({\"filter\": {}})\n\n self.handler_modules[name][\"filter\"] = UtilsManager.add_filters(self, self.handler_modules[name][\"filter\"],\n **kwargs)\n\n return self.handler_modules\n\n def enable_metric_groups(self, name, args):\n self.metric_groups = self.handler_modules[name][\"metric_groups\"]\n metrics_enable = list()\n if 'enable' not in self.metric_groups.keys():\n self.metric_groups.update({\"enable\": metrics_enable})\n\n for metric in args:\n metrics_enable.append(metric)\n if 'disable' in self.metric_groups.keys() and metric in self.metric_groups['disable']:\n self.metric_groups['disable'].remove(metric)\n\n self.metric_groups['enable'] = metrics_enable\n\n return self.handler_modules\n\n def disable_metric_groups(self, name, args):\n self.metric_groups = self.handler_modules[name][\"metric_groups\"]\n metrics_disable = list()\n if 'disable' not in self.metric_groups.keys():\n self.metric_groups.update({\"disable\": metrics_disable})\n\n for metric in args:\n metrics_disable.append(metric)\n if 'enable' in self.metric_groups.keys() and metric in self.metric_groups['enable']:\n self.metric_groups['enable'].remove(metric)\n\n self.metric_groups['disable'] = metrics_disable\n\n return self.handler_modules\n\n def remove_metric_groups(self, name, args):\n self.metric_groups = self.handler_modules[name][\"metric_groups\"]\n\n for metric in args:\n if 'enable' in self.metric_groups.keys() and metric in self.metric_groups['enable']:\n self.metric_groups['enable'].remove(metric)\n if 'disable' in self.metric_groups.keys() and metric in self.metric_groups['disable']:\n self.metric_groups['disable'].remove(metric)\n\n return self.handler_modules\n\n def remove_filters(self, name, *args):\n\n self.handler_modules[name][\"filter\"] = UtilsManager.remove_configs(self, self.handler_modules[name][\"filter\"],\n *args)\n\n return self.handler_modules\n\n def remove_configs(self, name, *args):\n\n self.handler_modules[name][\"config\"] = UtilsManager.remove_configs(self, self.handler_modules[name][\"config\"],\n *args)\n\n return self.handler_modules\n\n def remove_module(self, name):\n assert_that(name, is_in(list(self.handler_modules.keys())), \"Invalid module\")\n self.handler_modules.pop(name)\n return self.handler_modules\n\n def json(self):\n return json.dumps(self.handler_modules)\n\n\nclass Policy(HandlerModules, HandlerConfigs):\n def __init__(self, name, description, backend_type):\n\n self.policy = {\"name\": name,\n \"description\": description,\n \"backend\": backend_type,\n \"policy\": {\"handlers\": {\n \"config\": {},\n \"modules\": {}\n },\n \"input\": {},\n \"config\": {},\n \"kind\": \"collection\"\n }}\n self.config = self.policy['policy']['config']\n self.handler_configs = self.policy['policy'][\"handlers\"][\"config\"]\n self.handler_modules = self.policy['policy'][\"handlers\"][\"modules\"]\n\n def add_module_configs(self, name, **kwargs):\n self.handler_modules[name]['config'] = UtilsManager.add_configs(self, self.handler_modules[name]['config'],\n **kwargs)\n return self.policy\n\n def remove_module_configs(self, name, *args):\n self.handler_modules[name]['config'] = UtilsManager.remove_configs(self, self.handler_modules[name]['config'],\n *args)\n return self.policy\n\n def add_module_filters(self, name, **kwargs):\n self.handler_modules[name]['filter'] = UtilsManager.add_filters(self, self.handler_modules[name]['filter'],\n **kwargs)\n return self.policy\n\n def remove_module_filters(self, name, *args):\n self.handler_modules[name]['filter'] = UtilsManager.remove_filters(self, self.handler_modules[name]['filter'],\n *args)\n return self.policy\n\n def add_handler_configs(self, **kwargs):\n self.handler_configs = UtilsManager.add_configs(self, self.handler_configs, **kwargs)\n return self.policy\n\n def remove_handler_configs(self, *args):\n self.handler_configs = UtilsManager.remove_configs(self, self.handler_configs, *args)\n return self.policy\n\n def add_input_configs(self, **kwargs):\n assert_that('input_type', is_in(list(self.policy['policy']['input'].keys())),\n \"It is not possible to enter settings without defining the input. Use `add_input` first.\")\n if 'tap' not in self.policy['policy']['input'].keys() and 'tap_selector' not in self.policy['policy'][\n 'input'].keys():\n raise ValueError(\"It is not possible to enter settings without defining the input. Use `add_input` first\")\n if 'config' not in self.policy['policy']['input'].keys():\n self.policy['policy']['input'].update({'config': {}})\n self.policy['policy']['input']['config'] = UtilsManager.add_configs(self,\n self.policy['policy']['input']['config'],\n **kwargs)\n return self.policy\n\n def remove_input_configs(self, *args):\n self.policy['policy']['input']['config'] = UtilsManager.remove_configs(self, self.policy['input']['config'],\n *args)\n return self.policy\n\n def add_input_filters(self, **kwargs):\n assert_that('input_type', is_in(list(self.policy['policy']['input'].keys())),\n \"It is not possible to enter settings without defining the input. Use `add_input` first.\")\n if 'tap' not in self.policy['policy']['input'].keys() and 'tap_selector' not in self.policy['policy'][\n 'input'].keys():\n raise ValueError(\"It is not possible to enter settings without defining the input. Use `add_input` first\")\n if 'filter' not in self.policy['policy']['input'].keys():\n self.policy['policy']['input'].update({'filter': {}})\n self.policy['policy']['input']['filter'] = UtilsManager.add_filters(self,\n self.policy['policy']['input']['filter'],\n **kwargs)\n return self.policy\n\n def remove_input_filters(self, name, *args):\n self.policy['policy']['input']['filter'] = UtilsManager.remove_filters(self,\n self.policy['policy']['input']['filter'],\n *args)\n return self.policy\n\n def add_configs(self, **kwargs):\n self.config = UtilsManager.add_configs(self, self.config, **kwargs)\n return self.policy\n\n def remove_configs(self, *args):\n self.config = UtilsManager.remove_configs(self, self.config, *args)\n return self.policy\n\n def add_filters(self, **kwargs):\n raise ValueError(f\"Policy objects do not have filters. Try `add_module_filters` or `add_input_filters` instead\")\n\n def remove_filters(self, **kwargs):\n raise ValueError(\n f\"Policy objects do not have filters. Try `remove_module_filters` or `remove_input_filters` instead\")\n\n def __add_input_tap(self, input_type, name):\n assert_that('tap_selector', not_(is_in(list(self.policy['policy']['input'].keys()))),\n \"tap_selector is already defined. Use `remove_input` first.\")\n if 'tap' not in self.policy['input'].keys():\n self.policy['policy']['input'].update({'tap': {}})\n if 'input_type' not in self.policy['policy']['input'].keys():\n self.policy['policy']['input'].update({'input_type': {}})\n self.policy['policy']['input']['tap'] = name\n self.policy['policy']['input']['input_type'] = input_type\n return self.policy\n\n def __add_input_tap_selector(self, input_type, **kwargs):\n assert_that('tap', not_(is_in(list(self.policy['policy']['input'].keys()))),\n \"tap is already defined. Use `remove_input` first.\")\n assert_that('input_match', is_in(list(kwargs.keys())),\n \"`input_match` is a required parameter if selector is `tap_selector`\")\n assert_that('tags', is_in(list(kwargs.keys())),\n \"`tags` is a required parameter if selector is `tap_selector`\")\n assert_that(kwargs['input_match'], any_of(equal_to('any'), equal_to('all')), \"Invalid input_match\")\n input_match = kwargs['input_match']\n kwargs.pop('input_match')\n if 'tap_selector' not in self.policy['policy']['input'].keys():\n self.policy['policy']['input'].update({'tap_selector': {}})\n if 'input_type' not in self.policy['policy']['input'].keys():\n self.policy['policy']['input'].update({'input_type': {}})\n all_selectors = list()\n elif input_match in self.policy['policy']['input']['tap_selector'].keys():\n all_selectors = self.policy['policy']['input']['tap_selector'][input_match]\n else:\n all_selectors = list()\n\n for selector_key in kwargs['tags']:\n all_selectors.append({selector_key: kwargs['tags'][selector_key]})\n\n self.policy['policy']['input']['tap_selector'] = {input_match: all_selectors}\n self.policy['policy']['input']['input_type'] = input_type\n\n def add_input(self, input_type, selector, **kwargs):\n assert_that(selector, any_of('tap', 'tap_selector'), \"Invalid input selector\")\n\n if selector == 'tap':\n assert_that('name', is_in(list(kwargs.keys())),\n \"If `selector=tap`, you need to specify tap name. name=`the_name_you_want`.\")\n self.__add_input_tap(input_type, kwargs['name'])\n\n else:\n assert_that('input_match', is_in(list(kwargs.keys())),\n \"If `selector=tap`, you need to specify input_match. input_match=`any` or input_match=`all`.\")\n self.__add_input_tap_selector(input_type, **kwargs)\n\n def remove_input(self):\n self.policy['policy']['input'] = dict()\n\n def json(self):\n return json.dumps(self.policy)\n","repo_name":"orb-community/orb","sub_path":"python-test/features/steps/control_plane_policies.py","file_name":"control_plane_policies.py","file_ext":"py","file_size_in_byte":78162,"program_lang":"python","lang":"en","doc_type":"code","stars":498,"dataset":"github-code","pt":"22"} +{"seq_id":"16863712715","text":"import math\nimport numpy as np\nimport numpy.linalg as LA\nimport argparse\n\nfrom suriko.obs_geom import *\n\ndef ParseTestArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--debug\", help=\"debug level; {0: no debugging, 1: errors, 2: warnings, 3: debug, 4: interactive}\", type=int, default=2)\n parser.add_argument(\"--float\", help=\"[f32, f64, f128]\", type=str, default=\"f32\")\n args = parser.parse_args()\n\n el_type = np.float32\n if args.float == 'f32':\n el_type = np.float32\n elif args.float == 'f64':\n el_type = np.float64\n elif args.float == 'f128':\n el_type = np.float128\n else:\n raise ValueError(\"Unknown float type {}\".format(args.float))\n args.el_type = el_type\n return args\n\nclass CrystallGridDataSet:\n def __init__(self, el_type, img_width, img_height, provide_ground_truth = True):\n self.el_type = el_type\n self.img_width = img_width\n self.img_height = img_height\n self.provide_ground_truth = provide_ground_truth\n self.xs3D = []\n self.xs3D_virtual_ids = []\n self.ground_truth_R_per_frame = []\n self.ground_truth_T_per_frame = []\n self.debug = None\n self.cam_mat_pixel_from_meter = None\n self.cam_mat_changed = None # event that fires when the camera matrix is estimated\n\n def Generate(self):\n cx, cy, cz = 0.3, 0.3, 0.05 # cell size between atoms of the crystal\n Wx0, Wx, Wy0, Wy = 0.0, 5.0, -5.0, 0.0 # world size\n move_steps_count = 20\n move_step = min(Wx - Wx0, Wy - Wy0) / (\n move_steps_count) # world is crossed in at least N steps, 0.1 to jit coords if step=cell size\n dist_to_central_point = 3 * max(cx, cy, cz)\n num_visible_2dpoints = 6 * 6 * 2\n do_wobble = True # alters linear movement with rotation to prevent degenerate cases in 3D reconstruction\n wobble_freq = 1 / (move_steps_count / 7)\n wobble_amplitude = math.pi / 18 # max deviation of the wobble angle\n trample_on_the_spot = False\n trample_steps = 4 # max number of steps in one direction (after this number the direction is reversed)\n\n # create world's salient points\n next_virt_id = 10001\n inclusive_gap = 1.0e-8 # small value to make iteration inclusive\n for z in np.arange(0, cz + inclusive_gap, cz):\n for x in np.arange(Wx0, Wx + inclusive_gap, cx):\n # xjit = cz/10 # offset x to prevent overlapping of trajectories\n xjit = 0 # offset x to prevent overlapping of trajectories\n for y in np.arange(Wy0, Wy + inclusive_gap, cy):\n xtmp = x + xjit # offset changes for each change of Y\n xjit = -xjit\n pnt = np.array([xtmp, y, z], self.el_type)\n self.xs3D.append(pnt)\n self.xs3D_virtual_ids.append(next_virt_id)\n next_virt_id += 1\n # with cameras_lock: self.xs3d.append(pnt)\n self.xs3D = np.array(self.xs3D)\n\n pnt_ids = None\n cam_mat_pixel_from_meter = None\n R2 = None\n T2 = None\n cell_width = None\n period_right = True\n period_val = -1\n pad = 0 # eg: 0..2\n road = [(Wx0 + pad, Wy - pad), # bot-right=start\n (Wx0 + pad, Wy0 + pad), # bot-left\n (Wx - pad, Wy0 + pad), # top-left\n (Wx - pad, Wy - pad)] # top-right\n # add move_step to upper bound to make inclusive\n centralY_list_left = list(np.arange(road[0][1], road[1][1], -move_step))\n centralX_list_left = [road[0][0]] * len(centralY_list_left)\n centralX_list_up = list(np.arange(road[1][0], road[2][0], move_step))\n centralY_list_up = [road[1][1]] * len(centralX_list_up)\n centralY_list_right = list(np.arange(road[2][1], road[3][1], move_step))\n centralX_list_right = [road[2][0]] * len(centralY_list_right)\n centralX_list_down = list(np.arange(road[3][0], road[0][0], -move_step))\n centralY_list_down = [road[3][1]] * len(centralX_list_down)\n centralX_list = centralX_list_left + centralX_list_up + centralX_list_right + centralX_list_down\n centralY_list = centralY_list_left + centralY_list_up + centralY_list_right + centralY_list_down\n cam_poses_xy = list(enumerate(zip(centralX_list, centralY_list)))\n for i, (centralX, centralY) in cam_poses_xy:\n # for centralX, centralY in [(centralX_list[0],centralY_list[0]),(centralX_list[0],centralY_list[0])]:\n\n if trample_on_the_spot:\n if period_right:\n period_val += 1\n if period_val == trample_steps:\n period_val = trample_steps - 2\n period_right = False\n else:\n period_val -= 1\n if period_val == -1:\n period_val = -1 + 2\n period_right = True\n actual_index = period_val\n _, (centralX, centralY) = cam_poses_xy[actual_index]\n\n print(\"central-XY=({},{})\".format(centralX, centralY))\n\n # cam3\n cam3_from_world = np.eye(4, 4, dtype=self.el_type)\n # centralX,centralY = cx,-cy\n cam3_from_world = SE3Mat(None, np.array([-centralX, -centralY, 0]), dtype=self.el_type).dot(\n cam3_from_world) # stay on atom which will be in the center of the view\n # (handX,handY) = the distance to central atom in (X,Y) plane\n handX = dist_to_central_point / math.sqrt(3)\n handY = handX\n cam3_from_world = SE3Mat(None, np.array([handX, -handY, 0]), dtype=self.el_type).dot(\n cam3_from_world) # offset in (X,Y) plane\n handZ = handX # the altitude above the (X,Y) plane\n cam3_from_world = SE3Mat(None, np.array([0, 0, handZ]), dtype=self.el_type).dot(\n cam3_from_world) # offset in (X,Y) plane\n # point OZ in the direction of the central atom\n cam3_from_world = SE3Mat(rotMat([0, 1, 0], -math.pi / 2), None, dtype=self.el_type).dot(cam3_from_world)\n wobble_ang = 0\n if do_wobble:\n wobble_ang = math.sin(i * 2 * math.pi * wobble_freq) * wobble_amplitude\n cam3_from_world = SE3Mat(rotMat([1, 0, 0], -math.pi / 4 - wobble_ang), None, dtype=self.el_type).dot(cam3_from_world)\n cam3_from_world = SE3Mat(rotMat([0, 1, 0], math.radians(75)), None, dtype=self.el_type).dot(\n cam3_from_world) # rotate down OZ towards the central point\n cam3_from_world = SE3Mat(rotMat([0, 0, 1], -math.pi / 2), None, dtype=self.el_type).dot(\n cam3_from_world) # align axis in image (column,row) format, OX=right, OY=down\n R3 = cam3_from_world[0:3, 0:3].astype(self.el_type)\n T3 = cam3_from_world[0:3, 3].astype(self.el_type)\n\n if self.provide_ground_truth:\n self.ground_truth_R_per_frame.append(R3)\n self.ground_truth_T_per_frame.append(T3)\n\n xs3D_cam3 = np.dot(R3, self.xs3D.T).T + T3\n\n corrupt_with_noise = False\n if corrupt_with_noise:\n cell_width = max(cx, cy, cz)\n noise_perc = 0.01\n proj_err_pix = noise_perc * cell_width # 'radius' of an error\n print(\"proj_err_pix={0}\".format(proj_err_pix))\n n3 = np.random.rand(len(self.xs3D), 3) * 2 * proj_err_pix - proj_err_pix\n xs3D_cam3 += n3\n\n # perform general projection 3D->2D\n xs_img3 = xs3D_cam3.copy()\n for i in range(0, len(xs_img3)):\n xs_img3[i, :] /= xs_img3[i, -1]\n\n # set pixels formation matrix, so that specified number of projected 3D points is visible\n if cam_mat_pixel_from_meter is None:\n # example of pixel_from_meter camera matrix\n cam_mat_pixel_from_meter = np.array([\n [880, 0, self.img_width / 2],\n [0, 660, self.img_height / 2],\n [0., 0., 1.]], self.el_type)\n\n # project all 3D points in the image and look at closest N points\n # the maximum of (X,Y,Z) will determine the alphaX=focus_dist*sx\n p1_cam3 = cam3_from_world.dot([centralX, centralY, 0, 1])\n p1_cam3 = p1_cam3[0:3]\n dists = [LA.norm(p - p1_cam3) for p in xs_img3]\n closest_pnts = sorted(zip(xs3D_cam3, xs_img3, dists), key=lambda item: item[2])\n assert len(closest_pnts) > 0, \"Camera must observe at least one point\"\n far_point_ind = num_visible_2dpoints - 1\n if far_point_ind >= len(closest_pnts):\n far_point_ind = len(closest_pnts) - 1\n far_point_meter = closest_pnts[far_point_ind][0]\n max_rad_meter = max(abs(far_point_meter[0]), abs(far_point_meter[1]))\n max_z = abs(far_point_meter[2])\n\n # x_image_meter = focus_dist*X/Z, MASKS formula 3.4\n # x_image_pixel = x_image_meter * sx\n # => x_image_pixel = focus_dist*sx*X/Z\n # let alphaX = focus_dist*sx = x_image_pixel/X*Z\n alphaX = (self.img_width / 2) / max_rad_meter * max_z\n alphaY = (self.img_height / 2) / max_rad_meter * max_z\n\n # imageX (columns) is directed in the direction of OY of camera\n # imageY (rows) is directed in the direction of -OX of camera\n # xcol = x*alphaX+xcenter\n # yrow = y*alphaY+ycenter\n # where (xcenter,ycenter) is the principal point (the center) of the image in pixels\n cam_mat_pixel_from_meter = np.array([\n [alphaX, 0, self.img_width / 2],\n [0, alphaY, self.img_height / 2],\n [0.0, 0, 1]], self.el_type)\n print(\"cam_mat_pixel_from_meter=\\n{}\".format(cam_mat_pixel_from_meter))\n if not self.cam_mat_changed is None:\n self.cam_mat_pixel_from_meter = cam_mat_pixel_from_meter\n self.cam_mat_changed(cam_mat_pixel_from_meter)\n\n xs_pixel_all = cam_mat_pixel_from_meter.dot(xs_img3.T).T\n xs_objs_clipped = [(virt_id, (xpix, ypix)) for (virt_id, (xpix, ypix, w)) in zip(self.xs3D_virtual_ids, xs_pixel_all) if xpix < self.img_width and xpix >= 0 and ypix < self.img_height and ypix >= 0]\n frame_ind = i\n yield frame_ind, (R3,T3), xs_objs_clipped\n pass\n\n # returns [R,T], such that X2=[R,T]*X1\n def GroundTruthRelativeMotion(self, img_ind1, img_ind2):\n # ri from world\n r1_fromW = self.ground_truth_R_per_frame[img_ind1]\n t1_fromW = self.ground_truth_T_per_frame[img_ind1]\n r2_fromW = self.ground_truth_R_per_frame[img_ind2]\n t2_fromW = self.ground_truth_T_per_frame[img_ind2]\n\n # X1=M_1w*Xw, X2=M_2w*Xw => X2=M_2w*inv(M_1w)*X1\n r2_from1 = r2_fromW.dot(r1_fromW.T)\n t2_from1 = -r2_from1.dot(t1_fromW) + t2_fromW\n return (r2_from1, t2_from1)\n\n def GroundTruthMapPointPos(self, img_ind, map_point_id):\n pos_world = None\n for virt_id, pos in zip(self.xs3D_virtual_ids, self.xs3D):\n if virt_id == map_point_id:\n pos_world = pos\n break\n if not pos_world is None:\n # Xcam = M_camw*Xw\n cam_from_world_R = self.ground_truth_R_per_frame[img_ind]\n cam_from_world_T = self.ground_truth_T_per_frame[img_ind]\n pos_cam = SE3Apply((cam_from_world_R, cam_from_world_T), pos_world)\n return pos_cam\n\n return None\n\n def CamMatChanged(self, on_computed_cam_mat_fun):\n self.cam_mat_changed = on_computed_cam_mat_fun\n\nclass CircusGridDataSet:\n def __init__(self, el_type, img_width, img_height, world_range, cell_size, angles, rot_radius = None, provide_ground_truth=True):\n \"\"\":param cell_size cell size between atoms of the crystal\"\"\"\n self.el_type = el_type\n self.img_width = img_width\n self.img_height = img_height\n self.world_range = world_range\n self.cell_size = cell_size\n self.angles = angles\n if rot_radius is None:\n rot_radius = 5 * cell_size[0]\n self.rot_radius = rot_radius\n self.provide_ground_truth = provide_ground_truth\n self.xs3D = []\n self.xs3D_virtual_ids = []\n self.ground_truth_R_per_frame = []\n self.ground_truth_T_per_frame = []\n self.debug = None\n self.cam_mat_pixel_from_meter = None\n self.cam_mat_changed = None # event that fires when the camera matrix is estimated\n self.salient_points_created = None # event that fires when the world's salient 3D points are created\n\n def Generate(self):\n cx, cy, cz = self.cell_size\n Wx0, Wx, Wy0, Wy, Wz0, Wz = self.world_range\n\n # create world's salient points\n next_virt_id = 10001\n inclusive_gap = 1.0e-8 # small value to make iteration inclusive\n for z in np.arange(Wz0, Wz + inclusive_gap, cz):\n for x in np.arange(Wx0, Wx + inclusive_gap, cx):\n for y in np.arange(Wy0, Wy + inclusive_gap, cy):\n # x plus small offset to avoid centering on stable point\n z_curve = math.cos(x / Wx * math.pi/2)\n pnt = np.array([x+0.2, y, z_curve], self.el_type)\n self.xs3D.append(pnt)\n self.xs3D_virtual_ids.append(next_virt_id)\n next_virt_id += 1\n # with cameras_lock: self.xs3d.append(pnt)\n self.xs3D = np.array(self.xs3D)\n if not self.salient_points_created is None:\n self.salient_points_created(self.xs3D)\n\n frame_ind = 0\n # add move_step to upper bound to make inclusive\n for ang in self.angles:\n # cam3\n cam3_from_world = np.eye(4, 4, dtype=self.el_type)\n # angle=0 corresponds to OX (to the right) axis\n # -ang to move clockwise\n shiftX = cx*math.cos(ang)\n shiftY = cx*math.sin(ang)\n shiftZ = cx\n shift_scale = self.rot_radius / LA.norm([shiftX, shiftY, shiftZ]) # scale offset upto given radius of rotation\n shiftX, shiftY, shiftZ = shiftX * shift_scale, shiftY * shift_scale, shiftZ * shift_scale\n cam3_from_world = SE3Mat(None, np.array([-shiftX, -shiftY, -shiftZ]), dtype=self.el_type).dot(cam3_from_world)\n\n # move OY towards direction 'towards center'\n toCenterXOY = [-shiftX, -shiftY, 0] # the direction towards center O\n oy = [0, 1, 0]\n ang_yawOY = np.sign(np.cross(oy, toCenterXOY).dot([0,0,1])) * math.acos(np.dot(oy, toCenterXOY) / (LA.norm(oy)*LA.norm(toCenterXOY)))\n cam3_from_world = SE3Mat(rotMat([0, 0, 1], -ang_yawOY), None, dtype=self.el_type).dot(cam3_from_world)\n\n # look down towards the center\n look_down_ang = math.atan2(shiftZ, LA.norm([shiftX, shiftY]))\n cam3_from_world = SE3Mat(rotMat([1, 0, 0], look_down_ang + math.pi/2), None, dtype=self.el_type).dot(cam3_from_world)\n R3 = cam3_from_world[0:3, 0:3].astype(self.el_type)\n T3 = cam3_from_world[0:3, 3].astype(self.el_type)\n\n if self.provide_ground_truth:\n self.ground_truth_R_per_frame.append(R3)\n self.ground_truth_T_per_frame.append(T3)\n\n xs3D_cam3 = np.dot(R3, self.xs3D.T).T + T3\n\n corrupt_with_noise = False\n if corrupt_with_noise:\n cell_width = max(cx, cy, cz)\n noise_perc = 0.01\n proj_err_pix = noise_perc * cell_width # 'radius' of an error\n print(\"proj_err_pix={0}\".format(proj_err_pix))\n n3 = np.random.rand(len(self.xs3D), 3) * 2 * proj_err_pix - proj_err_pix\n xs3D_cam3 += n3\n\n # perform general projection 3D->2D\n xs_img3 = xs3D_cam3.copy()\n for i in range(0, len(xs_img3)):\n xs_img3[i, :] /= xs_img3[i, -1]\n\n # set pixels formation matrix, so that specified number of projected 3D points is visible\n if self.cam_mat_pixel_from_meter is None:\n # example of pixel_from_meter camera matrix\n self.cam_mat_pixel_from_meter = np.array([\n [880, 0, self.img_width / 2],\n [0, 660, self.img_height / 2],\n [0., 0., 1.]], self.el_type)\n if not self.cam_mat_changed is None:\n self.cam_mat_changed(self.cam_mat_pixel_from_meter)\n\n xs_pixel_all = self.cam_mat_pixel_from_meter.dot(xs_img3.T).T\n xs_objs_clipped = [(virt_id, (xpix, ypix)) for (virt_id, (xpix, ypix, w)) in\n zip(self.xs3D_virtual_ids, xs_pixel_all) if\n xpix < self.img_width and xpix >= 0 and ypix < self.img_height and ypix >= 0]\n yield frame_ind, (R3, T3), xs_objs_clipped\n frame_ind += 1\n pass\n\n # returns [R,T], such that X2=[R,T]*X1\n def GroundTruthRelativeMotion(self, img_ind1, img_ind2):\n # ri from world\n r1_fromW = self.ground_truth_R_per_frame[img_ind1]\n t1_fromW = self.ground_truth_T_per_frame[img_ind1]\n r2_fromW = self.ground_truth_R_per_frame[img_ind2]\n t2_fromW = self.ground_truth_T_per_frame[img_ind2]\n\n # X1=M_1w*Xw, X2=M_2w*Xw => X2=M_2w*inv(M_1w)*X1\n r2_from1 = r2_fromW.dot(r1_fromW.T)\n t2_from1 = -r2_from1.dot(t1_fromW) + t2_fromW\n return (r2_from1, t2_from1)\n\n def GroundTruthMapPointPos(self, img_ind, map_point_id):\n pos_world = None\n for virt_id, pos in zip(self.xs3D_virtual_ids, self.xs3D):\n if virt_id == map_point_id:\n pos_world = pos\n break\n if not pos_world is None:\n # Xcam = M_camw*Xw\n cam_from_world_R = self.ground_truth_R_per_frame[img_ind]\n cam_from_world_T = self.ground_truth_T_per_frame[img_ind]\n pos_cam = SE3Apply((cam_from_world_R, cam_from_world_T), pos_world)\n return pos_cam\n\n return None\n\n def CamMatChanged(self, on_computed_cam_mat_fun):\n self.cam_mat_changed = on_computed_cam_mat_fun\n\n def GetWorldSalientPoints(self):\n return self.xs3D\n\n def GetCamMat(self):\n return self.cam_mat_pixel_from_meter","repo_name":"whigg/surikatoko","sub_path":"py_proto/suriko/test_data_builder.py","file_name":"test_data_builder.py","file_ext":"py","file_size_in_byte":18699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35710603131","text":"import sys\nclass customer:\n '''customer class with bank operations'''\n bankname = 'State bank of india'\n address = 'Visakhapatnam'\n def __init__(self,name,balance=0):\n self.name = name\n self.balance = balance\n def deposit(self,amount):\n self.balance += amount\n print('Balance afer deposit : %s'%self.balance)\n def withdraw(self,amount):\n if amount>self.balance:\n print('Insufficient funds')\n sys.exit()\n self.balance -= amount\n print('Balance afer withdraw %s'%self.balance)\n #def Balance_enquiry(self):\n #print(\"your balance is:\",self.balance)\nprint('Welcome to',customer.bankname,customer.address,'branch')\nname = input('Enter your name: ')\nprint('Welcome', name)\nc = customer(name)\nwhile True:\n option = input(\"Please select operation you want to perform from menu\\nd-deposit\\nw-withdraw\\nb-balance\\ne-exit\\n: \")\n if option.casefold() == 'd':\n amount = float(input('Enter an amount: '))\n c.deposit(amount)\n elif option.casefold() == 'w':\n amount = float(input(\"Enter an amount: \"))\n c.withdraw(amount)\n elif option.casefold() =='b':\n print(\"your account balance is: \",c.balance)\n elif option.casefold() =='e':\n print(\"Thanks for banking at\",c.bankname)\n sys.exit()\n else:\n print('please choose correct option')\n","repo_name":"Nityaanand12/Firstrepository","sub_path":"Bankapplication.py","file_name":"Bankapplication.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35509652149","text":"from flask import url_for, current_app\nfrom models import Album, Sound\nimport json\n\n\ndef to_json_relationship(of_user, against_user):\n \"\"\"\n user relationship against_user\n of_user is the user \"point of view\"\n following = is of_user following against_user ?\n followed_by = is against_user following of_user ?\n etc.\n \"\"\"\n if not of_user:\n return None\n obj = dict(\n id=against_user.id,\n following=True if of_user.actor[0].is_following(against_user.actor[0]) else False,\n followed_by=True if against_user.actor[0].is_following(of_user.actor[0]) else False,\n blocking=False, # TODO handle that\n muting=False, # TODO maybe handle that\n muting_notifications=False,\n requested=False, # TODO handle that\n domain_blocking=False,\n showing_reblogs=True,\n endorsed=False, # not managed\n )\n return obj\n\n\ndef to_json_account(user, relationship=False):\n url_feed = url_for(\"bp_feeds.tracks\", user_id=user.flake_id, _external=True)\n if user.path_avatar():\n url_avatar = url_for(\"get_uploads_stuff\", thing=\"avatars\", stuff=user.path_avatar(), _external=True)\n else:\n url_avatar = f\"{current_app.config['REEL2BITS_URL']}/static/userpic_placeholder.svg\"\n\n obj = dict(\n id=user.id,\n flakeId=user.flake_id,\n username=user.name,\n acct=user.acct(),\n display_name=user.display_name,\n locked=False,\n created_at=user.created_at,\n followers_count=user.actor[0].followers.count(),\n following_count=user.actor[0].followings.count(),\n statuses_count=user.sounds.filter(\n Sound.private.is_(False), Sound.transcode_state == Sound.TRANSCODE_DONE\n ).count(),\n note=user.actor[0].summary,\n url=user.actor[0].url,\n avatar=url_avatar,\n avatar_static=url_avatar,\n header=\"\",\n header_static=\"\",\n emojis=[],\n moved=None,\n fields=[],\n bot=False,\n source={\n \"privacy\": \"unlisted\",\n \"sensitive\": False,\n \"language\": user.locale,\n \"note\": user.actor[0].summary,\n \"fields\": [],\n },\n pleroma={\"is_admin\": user.is_admin()},\n reel2bits={\n \"albums_count\": user.albums.filter(Album.private.is_(False)).count(),\n \"lang\": user.locale,\n \"quota_limit\": user.quota,\n \"quota_count\": user.quota_count,\n \"url_feed\": url_feed,\n },\n )\n if relationship:\n obj[\"pleroma\"][\"relationship\"] = relationship\n return obj\n\n\ndef to_json_track(track, account):\n si = track.sound_infos.first()\n url_orig = url_for(\"get_uploads_stuff\", thing=\"sounds\", stuff=track.path_sound(orig=True), _external=True)\n url_transcode = url_for(\"get_uploads_stuff\", thing=\"sounds\", stuff=track.path_sound(orig=False), _external=True)\n if track.path_artwork():\n url_artwork = url_for(\"get_uploads_stuff\", thing=\"artwork_sounds\", stuff=track.path_artwork(), _external=True)\n else:\n url_artwork = None\n\n obj = {\n \"id\": track.flake_id,\n \"uri\": None,\n \"url\": None,\n \"account\": account,\n \"in_reply_to_id\": None,\n \"in_reply_to_account_id\": None,\n \"reblog\": None,\n \"content\": track.description,\n \"created_at\": track.uploaded,\n \"emojis\": [],\n \"replies_count\": 0,\n \"reblogs_count\": 0,\n \"favourites_count\": 0,\n \"reblogged\": None,\n \"favorited\": None,\n \"muted\": None,\n \"sensitive\": None,\n \"spoiler_text\": None,\n \"visibility\": None,\n \"media_attachment\": [],\n \"mentions\": [],\n \"tags\": [],\n \"card\": None,\n \"application\": None,\n \"language\": None,\n \"pinned\": None,\n \"reel2bits\": {\n \"type\": \"track\",\n \"slug\": track.slug,\n \"local\": track.user.actor[0].is_local(),\n \"title\": track.title,\n \"picture_url\": url_artwork,\n \"media_orig\": url_orig,\n \"media_transcoded\": url_transcode,\n \"waveform\": (json.loads(si.waveform) if si else None),\n \"private\": track.private,\n \"uploaded_elapsed\": track.elapsed(),\n \"album_id\": (track.album.id if track.album else None),\n \"album_order\": (track.album_order if track.album else None),\n \"genre\": track.genre,\n \"tags\": [a.name for a in track.tags],\n \"processing\": {\n \"basic\": (si.done_basic if si else None),\n \"transcode_state\": track.transcode_state,\n \"transcode_needed\": track.transcode_needed,\n \"done\": track.processing_done(),\n },\n \"metadatas\": {\n \"licence\": track.licence_info(),\n \"duration\": (si.duration if si else None),\n \"type\": (si.type if si else None),\n \"codec\": (si.codec if si else None),\n \"format\": (si.format if si else None),\n \"channels\": (si.channels if si else None),\n \"rate\": (si.rate if si else None), # Hz\n \"file_size\": track.file_size,\n \"transcode_file_size\": track.transcode_file_size,\n },\n },\n }\n if si:\n if si.bitrate and si.bitrate_mode:\n obj[\"reel2bits\"][\"metadatas\"][\"bitrate\"] = si.bitrate\n obj[\"reel2bits\"][\"metadatas\"][\"bitrate_mode\"] = si.bitrate_mode\n return obj\n\n\ndef to_json_album(album, account):\n url_feed = url_for(\"bp_feeds.album\", user_id=album.user.flake_id, album_id=album.id, _external=True)\n if album.path_artwork():\n url_artwork = url_for(\"get_uploads_stuff\", thing=\"artwork_albums\", stuff=album.path_artwork(), _external=True)\n else:\n url_artwork = None\n\n obj = {\n \"id\": album.flake_id,\n \"uri\": None,\n \"url\": None,\n \"account\": account,\n \"in_reply_to_id\": None,\n \"in_reply_to_account_id\": None,\n \"reblog\": None,\n \"content\": album.description,\n \"created_at\": album.created,\n \"emojis\": [],\n \"replies_count\": 0,\n \"reblogs_count\": 0,\n \"favourites_count\": 0,\n \"reblogged\": None,\n \"favorited\": None,\n \"muted\": None,\n \"sensitive\": None,\n \"spoiler_text\": None,\n \"visibility\": None,\n \"media_attachment\": [],\n \"mentions\": [],\n \"tags\": [],\n \"card\": None,\n \"application\": None,\n \"language\": None,\n \"pinned\": None,\n \"reel2bits\": {\n \"type\": \"album\",\n \"slug\": album.slug,\n \"local\": True, # NOTE, albums doesn't federate (yet)\n \"title\": album.title,\n \"picture_url\": url_artwork,\n \"private\": album.private,\n \"uploaded_elapsed\": album.elapsed(),\n \"tracks_count\": album.sounds.count(),\n \"tracks\": [to_json_track(t, account) for t in album.sounds],\n \"genre\": album.genre,\n \"tags\": [a.name for a in album.tags],\n \"url_feed\": url_feed,\n },\n }\n return obj\n\n\ndef default_genres():\n return [\n \"acid house\",\n \"acid jazz\",\n \"acid techno\",\n \"acoustic blues\",\n \"acoustic rock\",\n \"afrobeat\",\n \"alternative country\",\n \"alternative dance\",\n \"alternative folk\",\n \"alternative hip hop\",\n \"alternative metal\",\n \"alternative pop\",\n \"alternative punk\",\n \"alternative rock\",\n \"ambient\",\n \"ambient house\",\n \"ambient techno\",\n \"americana\",\n \"anarcho-punk\",\n \"aor\",\n \"arena rock\",\n \"art rock\",\n \"atmospheric black metal\",\n \"audiobook\",\n \"avant-garde\",\n \"avant-garde jazz\",\n \"avant-garde metal\",\n \"avant-garde pop\",\n \"bachata\",\n \"ballad\",\n \"barbershop\",\n \"baroque\",\n \"bebop\",\n \"bhangra\",\n \"big band\",\n \"big beat\",\n \"black metal\",\n \"blackened death metal\",\n \"blackgaze\",\n \"blue-eyed soul\",\n \"bluegrass\",\n \"blues\",\n \"blues rock\",\n \"bolero\",\n \"bolero son\",\n \"boom bap\",\n \"bossa nova\",\n \"breakbeat\",\n \"breakcore\",\n \"breaks\",\n \"britpop\",\n \"broken beat\",\n \"brutal death metal\",\n \"bubblegum pop\",\n \"cajun\",\n \"calypso\",\n \"canterbury scene\",\n \"cantopop\",\n \"celtic\",\n \"celtic punk\",\n \"chamber pop\",\n \"champeta\",\n \"chanson\",\n \"chicago blues\",\n \"chillout\",\n \"chiptune\",\n \"christian rock\",\n \"christmas music\",\n \"city pop\",\n \"classic blues\",\n \"classic country\",\n \"classic jazz\",\n \"classic rock\",\n \"classical\",\n \"club\",\n \"comedy\",\n \"conscious hip hop\",\n \"contemporary christian\",\n \"contemporary classical\",\n \"contemporary folk\",\n \"contemporary gospel\",\n \"contemporary jazz\",\n \"contemporary r&b\",\n \"contra\",\n \"cool jazz\",\n \"country\",\n \"country blues\",\n \"country folk\",\n \"country pop\",\n \"country rock\",\n \"crossover prog\",\n \"crust punk\",\n \"cumbia\",\n \"d-beat\",\n \"dance\",\n \"dance-pop\",\n \"dance-punk\",\n \"dancehall\",\n \"dark ambient\",\n \"dark electro\",\n \"dark folk\",\n \"dark wave\",\n \"death metal\",\n \"death-doom metal\",\n \"deathcore\",\n \"deathgrind\",\n \"deathrock\",\n \"deep house\",\n \"delta blues\",\n \"desert rock\",\n \"digital hardcore\",\n \"disco\",\n \"doo-wop\",\n \"doom metal\",\n \"downtempo\",\n \"drill\",\n \"drone\",\n \"drum and bass\",\n \"dub\",\n \"dub techno\",\n \"dubstep\",\n \"dungeon synth\",\n \"east coast hip hop\",\n \"ebm\",\n \"electric blues\",\n \"electro\",\n \"electro house\",\n \"electro swing\",\n \"electro-funk\",\n \"electro-industrial\",\n \"electroclash\",\n \"electronic\",\n \"electronic rock\",\n \"electronica\",\n \"electronicore\",\n \"electropop\",\n \"electropunk\",\n \"emo\",\n \"emocore\",\n \"enka\",\n \"ethereal\",\n \"euro house\",\n \"eurodance\",\n \"europop\",\n \"experimental\",\n \"experimental rock\",\n \"fado\",\n \"filk\",\n \"flamenco\",\n \"folk\",\n \"folk metal\",\n \"folk pop\",\n \"folk punk\",\n \"folk rock\",\n \"freak folk\",\n \"free improvisation\",\n \"free jazz\",\n \"funk\",\n \"funk carioca\",\n \"funk metal\",\n \"funk rock\",\n \"funk soul\",\n \"funky house\",\n \"fusion\",\n \"future jazz\",\n \"futurepop\",\n \"g-funk\",\n \"gabber\",\n \"gangsta rap\",\n \"garage\",\n \"garage house\",\n \"garage punk\",\n \"garage rock\",\n \"glam\",\n \"glam metal\",\n \"glam rock\",\n \"glitch\",\n \"goa trance\",\n \"goregrind\",\n \"gospel\",\n \"gothic\",\n \"gothic metal\",\n \"gothic rock\",\n \"grebo\",\n \"grime\",\n \"grindcore\",\n \"groove metal\",\n \"grunge\",\n \"guaracha\",\n \"happy hardcore\",\n \"hard bop\",\n \"hard house\",\n \"hard rock\",\n \"hard trance\",\n \"hardcore punk\",\n \"hardcore techno\",\n \"hardstyle\",\n \"heavy metal\",\n \"hip hop\",\n \"honky tonk\",\n \"horror punk\",\n \"horrorcore\",\n \"house\",\n \"idm\",\n \"illbient\",\n \"indie\",\n \"indie folk\",\n \"indie pop\",\n \"indie rock\",\n \"indietronica\",\n \"indorock\",\n \"industrial\",\n \"industrial metal\",\n \"industrial rock\",\n \"instrumental\",\n \"instrumental jazz\",\n \"instrumental rock\",\n \"irish folk\",\n \"italo-disco\",\n \"j-pop\",\n \"j-rock\",\n \"jazz\",\n \"jazz blues\",\n \"jazz fusion\",\n \"jazz rap\",\n \"jazz rock\",\n \"jazz-funk\",\n \"jungle\",\n \"k-pop\",\n \"kayōkyoku\",\n \"kizomba\",\n \"klezmer\",\n \"krautrock\",\n \"latin\",\n \"latin jazz\",\n \"latin pop\",\n \"latin rock\",\n \"leftfield\",\n \"line dance\",\n \"lo-fi\",\n \"lounge\",\n \"lovers rock\",\n \"madchester\",\n \"mainstream rock\",\n \"mambo\",\n \"mandopop\",\n \"martial industrial\",\n \"math rock\",\n \"mathcore\",\n \"medieval\",\n \"melodic black metal\",\n \"melodic death metal\",\n \"melodic metalcore\",\n \"melodic rock\",\n \"melodic trance\",\n \"mento\",\n \"merengue\",\n \"metal\",\n \"metalcore\",\n \"microhouse\",\n \"milonga\",\n \"min'yō\",\n \"mincecore\",\n \"minimal\",\n \"modern blues\",\n \"modern classical\",\n \"modern country\",\n \"motown\",\n \"mpb\",\n \"musical\",\n \"neo soul\",\n \"neo-progressive rock\",\n \"neo-rockabilly\",\n \"neofolk\",\n \"nerdcore\",\n \"new age\",\n \"new jack swing\",\n \"new romantic\",\n \"new wave\",\n \"no wave\",\n \"noise\",\n \"noise pop\",\n \"noisecore\",\n \"non-music\",\n \"norteño\",\n \"northern soul\",\n \"nu jazz\",\n \"nu metal\",\n \"occult rock\",\n \"oi\",\n \"old school death metal\",\n \"old-time\",\n \"opera\",\n \"orchestral\",\n \"outlaw country\",\n \"p-funk\",\n \"pachanga\",\n \"pop\",\n \"pop metal\",\n \"pop punk\",\n \"pop rap\",\n \"pop rock\",\n \"pop soul\",\n \"pornogrind\",\n \"post-bop\",\n \"post-classical\",\n \"post-grunge\",\n \"post-hardcore\",\n \"post-metal\",\n \"post-punk\",\n \"post-rock\",\n \"power electronics\",\n \"power metal\",\n \"power pop\",\n \"powerviolence\",\n \"production music\",\n \"progressive\",\n \"progressive folk\",\n \"progressive house\",\n \"progressive metal\",\n \"progressive rock\",\n \"progressive trance\",\n \"psy-trance\",\n \"psychedelic\",\n \"psychedelic folk\",\n \"psychedelic pop\",\n \"psychedelic rock\",\n \"psychobilly\",\n \"psytrance\",\n \"punk\",\n \"punk rock\",\n \"queercore\",\n \"r&b\",\n \"ragga\",\n \"ragga hip-hop\",\n \"ragga jungle\",\n \"ragtime\",\n \"raï\",\n \"ranchera\",\n \"rap rock\",\n \"rapcore\",\n \"rave\",\n \"reggae\",\n \"reggaeton\",\n \"rhythmic noise\",\n \"rock\",\n \"rock and roll\",\n \"rockabilly\",\n \"rocksteady\",\n \"roots reggae\",\n \"rumba\",\n \"salsa\",\n \"samba\",\n \"schlager\",\n \"screamo\",\n \"shibuya-kei\",\n \"shoegaze\",\n \"singer-songwriter\",\n \"ska\",\n \"ska punk\",\n \"skacore\",\n \"slow waltz\",\n \"sludge metal\",\n \"smooth jazz\",\n \"smooth soul\",\n \"soca\",\n \"soft rock\",\n \"son cubano\",\n \"son montuno\",\n \"soul\",\n \"soul jazz\",\n \"southern rock\",\n \"southern soul\",\n \"space rock\",\n \"speed garage\",\n \"speed metal\",\n \"spoken word\",\n \"stoner metal\",\n \"stoner rock\",\n \"street punk\",\n \"surf rock\",\n \"swing\",\n \"symphonic black metal\",\n \"symphonic metal\",\n \"symphonic prog\",\n \"symphonic rock\",\n \"symphony\",\n \"synth-pop\",\n \"synthwave\",\n \"tango\",\n \"tech house\",\n \"technical death metal\",\n \"techno\",\n \"teen pop\",\n \"thrash metal\",\n \"thrashcore\",\n \"timba\",\n \"traditional country\",\n \"trance\",\n \"trap\",\n \"trap edm\",\n \"tribal house\",\n \"trip hop\",\n \"turntablism\",\n \"uk drill\",\n \"uk garage\",\n \"underground hip hop\",\n \"vallenato\",\n \"vaporwave\",\n \"viking metal\",\n \"visual kei\",\n \"vocal house\",\n \"vocal jazz\",\n \"vocal trance\",\n \"west coast hip hop\",\n \"west coast swing\",\n \"yé-yé\",\n \"zamrock\",\n \"zydeco\",\n ]\n","repo_name":"reel2bits/reel2bits","sub_path":"api/datas_helpers.py","file_name":"datas_helpers.py","file_ext":"py","file_size_in_byte":16478,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"22"} +{"seq_id":"12729021138","text":"import numpy as np \nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.model_selection import train_test_split\n\n\nclass RandomDataset(Dataset):\n\tdef __init__(self, data_tuple):\n\n\t\tself.X, self.y = data_tuple\n\n\tdef __len__(self):\n\t\treturn(len(self.y))\n\n\tdef __getitem__(self, index):\n\t\treturn (self.X[index, :], self.y[index])\n\n\nclass CreateRandomDataset():\n\t\"\"\" create random dataset \n\t\"\"\"\n\tdef __init__(self, datatype=\"random\", feat_size=300,\n\t\t\t\t n_samples=1000, n_classes=3, val_ratio=0.2,\n\t\t\t\t test_ratio=0.2, batch_size=32, labels_per_sample=1):\n\t\tself.type = datatype\n\t\tself.feat_size = feat_size\n\t\tself.samples = n_samples\n\t\tself.val_ratio = val_ratio\n\t\tself.test_ratio = test_ratio\n\t\tself.bs = batch_size\n\t\tself.labels = labels_per_sample\n\n\t\tif isinstance(n_classes, list):\n\t\t\tself.classes = n_classes\n\t\telse:\n\t\t\tself.classes = [n_classes \n\t\t\t\t\t\t\tfor _ in range(labels_per_sample)]\n\n\t\tself.data_dict = self.generate_dataset()\n\t\t\n\tdef get_dataloaders(self):\n\t\ttrain_set = self.data_dict[\"train\"]\n\t\tval_set = self.data_dict[\"val\"]\n\t\ttest_set = self.data_dict[\"test\"]\n\n\t\ttrain_loader = DataLoader(RandomDataset(train_set),\n\t\t\t\t\t\t\t\t batch_size=self.bs,\n\t\t\t\t\t\t\t\t shuffle=True)\n\t\tval_loader = DataLoader(RandomDataset(val_set),\n\t\t\t\t\t\t\t\tbatch_size=self.bs,\n\t\t\t\t\t\t\t\tshuffle=False)\n\t\ttest_loader = DataLoader(RandomDataset(test_set),\n\t\t\t\t\t\t\t\t batch_size=self.bs,\n\t\t\t\t\t\t\t\t shuffle=False)\n\n\t\treturn train_loader, val_loader, test_loader \n\n\tdef generate_dataset(self):\n\t\tif self.type == 'random':\n\t\t\tX, y = self._create_random_dataset()\n\t\telif self.type == 'pseudo':\n\t\t\tX, y = self._create_pseudo_dataset()\n\t\telif self.type == 'multilabel':\n\t\t\tX, y = self._create_multi_dataset()\n\t\telif self.type == 'inv hierlabel':\n\t\t\tX, y = self._create_inv_hier_multi_dataset()\n\t\telif self.type == 'hierlabel':\n\t\t\tX, y = self._create_hier_multi_dataset()\n\t\telif self.type == 'sum_hierlabel':\n\t\t\tX, y = self._create_sum_multi_dataset()\n\t\telse:\n\t\t\traise ValueError('Not an implemented dataset')\n\t\t\n\t\tX_temp, X_test, y_temp, y_test = \\\n\t\t\ttrain_test_split(X, y, test_size=self.test_ratio, random_state=1)\n\t\tX_train, X_val, y_train, y_val = \\\n\t\t\ttrain_test_split(X_temp, y_temp, test_size=self.val_ratio, random_state=1)\n\n\t\treturn ({\"train\":(X_train,y_train),\"val\": (X_val,y_val),\"test\": (X_test,y_test)})\n\n\n\tdef _create_random_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\t# todo\n\t\t# fix for self.class is list case\n\t\tfor _ in range(self.samples):\n\t\t\ty.append(np.random.randint(0,self.classes))\n\t\treturn X, y\n\n\tdef _create_pseudo_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\t# TODO \n\t\t# fix for self.class is list case\n\t\tfor i in range(self.samples):\n\t\t\tlabel = np.random.randint(0,self.classes)\n\t\t\ty.append(label)\n\t\t\t# 3 is the label's position in the feature vector\n\t\t\tX[i,3] = label\n\t\treturn X, y\n\t\n\tdef _create_multi_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\tfor i in range(self.samples):\n\t\t\tlabel = []\n\t\t\tfor l in range(self.labels):\n\t\t\t\tpseudo_label = np.random.randint(0, self.classes[l])\n\t\t\t\tlabel.append(pseudo_label)\n\t\t\t\tX[i,l] = pseudo_label\n\t\t\ty.append(label)\t\n\n\t\treturn X, y\n\n\tdef _create_inv_hier_multi_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\tfor i in range(self.samples):\n\t\t\tlabel = []\n\t\t\tfor l in range(self.labels):\n\t\t\t\tpseudo_label = np.random.randint(0, self.classes[l])\n\t\t\t\tlabel.append(pseudo_label)\n\n\t\t\tnew_label = []\n\t\t\tfor l in range(self.labels):\n\t\t\t\tif l == 0:\n\t\t\t\t\tnew_label.append(sum(label[1:]))\n\t\t\t\telse:\n\t\t\t\t\tnew_label.append(label[l]) \n\t\t\t\tX[i,l] = new_label[l]\n\t\t\ty.append(new_label)\t\n\n\t\treturn X, y\n\n\tdef _create_hier_multi_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\tfor i in range(self.samples):\n\t\t\tlabel = []\n\t\t\tfor l in range(self.labels-1):\n\t\t\t\tpseudo_label = np.random.randint(0, self.classes[l])\n\t\t\t\tlabel.append(pseudo_label)\n\t\t\t\tX[i,l] = pseudo_label\n\t\t\t# last label is the sum of all previous\n\t\t\tpseudo_label = sum(label)\n\t\t\tlabel.append(pseudo_label)\n\t\t\tX[i, self.labels-1] = pseudo_label\n\t\t\ty.append(label) \n\n\t\treturn X, y\n\n\tdef _create_sum_multi_dataset(self):\n\t\tX = np.random.rand(self.samples, self.feat_size)\n\t\ty = []\n\t\tfor i in range(self.samples):\n\t\t\tlabel = []\n\t\t\tsum = 0\n\t\t\tfor l in range(self.labels):\n\t\t\t\tpseudo_label = np.random.randint(0, self.labels)\n\t\t\t\tsum += pseudo_label\n\t\t\t\tlabel.append(sum)\n\t\t\t\tX[i,l] = pseudo_label\n\t\t\ty.append(label) \n\n\t\treturn X, y\n\n\tdef dataset_statistics(self):\n\t\t#TODO\n\t\tpass\n\n","repo_name":"efthymisgeo/pruner","sub_path":"dataloaders/random_data.py","file_name":"random_data.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19802887320","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @title: \n# @author: luowen\n# @website: https://loovien.github.com\n# @time: 9/5/2020 12:30 AM\n\n\nclass VideoItem(object):\n def __init__(self, title: str, img_src: str, src: str, href: str):\n self.title = title\n self.img_src = img_src\n self.src = src\n self.href = href\n","repo_name":"loovien/mediaxz","sub_path":"src/models/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"18446002994","text":"\"\"\"IETF usage guidelines plugin\nSee RFC 6087\n\"\"\"\n\nimport optparse\nimport sys\n\nfrom pyang import plugin\nfrom pyang import statements\nfrom pyang import error\nfrom pyang.error import err_add\n\ndef pyang_plugin_init():\n plugin.register_plugin(IETFPlugin())\n\nclass IETFPlugin(plugin.PyangPlugin):\n def add_opts(self, optparser):\n optlist = [\n optparse.make_option(\"--ietf\",\n dest=\"ietf\",\n action=\"store_true\",\n help=\"Validate the module(s) according to \" \\\n \"IETF rules.\"),\n ]\n optparser.add_options(optlist)\n\n def setup_ctx(self, ctx):\n if not ctx.opts.ietf:\n return\n\n ctx.canonical = True;\n ctx.max_line_len = 70\n ctx.max_identifier_len = 64\n ctx.implicit_errors = False\n\n # register our grammar validation funs\n\n statements.add_validation_var(\n '$chk_default',\n lambda keyword: keyword in _keyword_with_default)\n statements.add_validation_var(\n '$chk_required',\n lambda keyword: keyword in _required_substatements)\n\n statements.add_validation_var(\n '$chk_recommended',\n lambda keyword: keyword in _recommended_substatements)\n \n statements.add_validation_fun(\n 'grammar', ['$chk_default'],\n lambda ctx, s: v_chk_default(ctx, s))\n statements.add_validation_fun(\n 'grammar', ['$chk_required'],\n lambda ctx, s: v_chk_required_substmt(ctx, s))\n statements.add_validation_fun(\n 'grammar', ['$chk_recommended'],\n lambda ctx, s: v_chk_recommended_substmt(ctx, s))\n\n statements.add_validation_fun(\n 'grammar', ['namespace'],\n lambda ctx, s: v_chk_namespace(ctx, s))\n\n statements.add_validation_fun(\n 'grammar', ['module', 'submodule'],\n lambda ctx, s: v_chk_module_name(ctx, s)) \n\n statements.add_validation_fun(\n 'unique_name', ['module'],\n lambda ctx, s: v_chk_top_level_nodes(ctx, s))\n\n # register our error codes\n error.add_error_code(\n 'IETF_EXPLICIT_DEFAULT', 4,\n 'IETF rule: statement \"%s\" is given with its default value \"%s\"')\n error.add_error_code(\n 'IETF_MISSING_REQUIRED_SUBSTMT', 3,\n 'IETF rule: statement \"%s\" must have a \"%s\" substatement')\n error.add_error_code(\n 'IETF_MISSING_RECOMMENDED_SUBSTMT', 4,\n 'IETF rule: statement \"%s\" should have a \"%s\" substatement')\n error.add_error_code(\n 'IETF_BAD_NAMESPACE_VALUE', 4,\n 'IETF rule: namespace value should be \"%s\"')\n error.add_error_code(\n 'IETF_TOO_MANY_TOP_LEVEL_NODES', 4,\n 'IETF rule: too many top-level data nodes: %s')\n error.add_error_code(\n 'IETF_NO_MODULE_PREFIX', 4,\n 'IETF rule: no module name prefix used, suggest ietf-%s')\n\n # override std error string\n error.add_error_code(\n 'LONG_LINE', 4,\n 'IETF rule: line length %s exceeds %s characters')\n error.add_error_code(\n 'LONG_IDENTIFIER', 3,\n 'IETF rule: identifier %s exceeds %s characters')\n \n \n_keyword_with_default = {\n 'status': 'current',\n 'mandatory': 'false',\n 'min-elements': '0',\n 'max-elements': 'unbounded',\n 'config': 'true',\n 'yin-element': 'false',\n }\n\n_required_substatements = {\n 'module': ('contact', 'organization', 'description', 'revision'),\n 'submodule': ('contact', 'organization', 'description', 'revision'),\n 'revision':('reference',),\n 'extension':('description',),\n 'feature':('description',),\n 'identity':('description',),\n 'typedef':('description',),\n 'grouping':('description',),\n 'grouping':('description',),\n 'augment':('description',),\n 'rpc':('description',),\n 'notification':('description',),\n 'container':('description',),\n 'leaf':('description',),\n 'leaf-list':('description',),\n 'list':('description',),\n 'choice':('description',),\n 'anyxml':('description',),\n }\n\n_recommended_substatements = {\n 'must':('description',),\n 'when':('description',),\n 'enum':('description',),\n 'bit':('description',),\n }\n\n\n_ietf_namespace_prefix = 'urn:ietf:params:xml:ns:yang:'\n\ndef v_chk_default(ctx, stmt):\n if (stmt.arg == _keyword_with_default[stmt.keyword] and\n stmt.parent.keyword != 'refine'):\n err_add(ctx.errors, stmt.pos, 'IETF_EXPLICIT_DEFAULT',\n (stmt.keyword, stmt.arg))\n\ndef v_chk_required_substmt(ctx, stmt):\n if stmt.keyword in _required_substatements:\n for r in _required_substatements[stmt.keyword]:\n if stmt.search_one(r) is None:\n err_add(ctx.errors, stmt.pos,\n 'IETF_MISSING_REQUIRED_SUBSTMT',\n (stmt.keyword, r))\n\ndef v_chk_recommended_substmt(ctx, stmt):\n if stmt.keyword in _recommended_substatements:\n for r in _recommended_substatements[stmt.keyword]:\n if stmt.search_one(r) is None:\n err_add(ctx.errors, stmt.pos,\n 'IETF_MISSING_RECOMMENDED_SUBSTMT',\n (stmt.keyword, r))\n\ndef v_chk_namespace(ctx, stmt):\n if not stmt.arg == _ietf_namespace_prefix + stmt.i_module.arg:\n err_add(ctx.errors, stmt.pos, 'IETF_BAD_NAMESPACE_VALUE',\n _ietf_namespace_prefix + stmt.i_module.arg)\n \ndef v_chk_top_level_nodes(ctx, stmt):\n top = [x for x in stmt.i_children if x.keyword not in ['rpc','notification']]\n if len(top) > 1:\n err_add(ctx.errors, stmt.pos, 'IETF_TOO_MANY_TOP_LEVEL_NODES',\n \", \".join([x.arg for x in top]))\n\ndef v_chk_module_name(ctx, stmt):\n # can't check much, but we can check that a prefix is used\n if stmt.arg.find('-') == -1:\n err_add(ctx.errors, stmt.pos, 'IETF_NO_MODULE_PREFIX', stmt.arg)\n","repo_name":"OpenNetworkingFoundation/configuration","sub_path":"pyang-onf/pyang/plugins/ietf.py","file_name":"ietf.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"22"} +{"seq_id":"18732926462","text":"from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer\n\nfrom CMGTools.VVResonances.tools.Pair import Pair\nfrom PhysicsTools.HeppyCore.utils.deltar import *\nfrom CMGTools.VVResonances.tools.VectorBosonToolBox import VectorBosonToolBox\nimport itertools\nimport ROOT\n\nclass Substructure(object):\n def __init__(self):\n pass\n\n\nclass VVBuilder(Analyzer):\n def __init__(self, cfg_ana, cfg_comp, looperName):\n super(VVBuilder,self).__init__(cfg_ana, cfg_comp, looperName)\n self.vbTool = VectorBosonToolBox()\n self.smearing=ROOT.TRandom(10101982)\n\n def copyLV(self,LV):\n out=[]\n for i in LV:\n out.append(ROOT.math.XYZTLorentzVector(i.px(),i.py(),i.pz(),i.energy()))\n return out \n\n def substructure(self,jet):\n #if we already filled it exit\n if hasattr(jet,'substructure'):\n return\n \n constituents=[]\n LVs = ROOT.std.vector(\"math::XYZTLorentzVector\")()\n\n for i in range(0,jet.numberOfDaughters()):\n if jet.daughter(i).numberOfDaughters()==0:\n if jet.daughter(i).pt()>13000 or jet.daughter(i).pt()==float('Inf'):\n continue\n if hasattr(self.cfg_ana,\"doPUPPI\") and self.cfg_ana.doPUPPI and jet.daughter(i).puppiWeight()>0.0:\n \n LVs.push_back(jet.daughter(i).p4()*jet.daughter(i).puppiWeight())\n else:\n LVs.push_back(jet.daughter(i).p4())\n else:\n for j in range(0,jet.daughter(i).numberOfDaughters()):\n if jet.daughter(i).daughter(j).pt()>13000 or jet.daughter(i).daughter(j).pt()==float('Inf'):\n continue\n if jet.daughter(i).daughter(j).numberOfDaughters()==0:\n if hasattr(self.cfg_ana,\"doPUPPI\") and self.cfg_ana.doPUPPI and jet.daughter(i).daughter(j).puppiWeight()>0.0:\n LVs.push_back(jet.daughter(i).daughter(j).p4()*jet.daughter(i).daughter(j).puppiWeight())\n else:\n LVs.push_back(jet.daughter(i).daughter(j).p4())\n \n interface = ROOT.cmg.FastJetInterface(LVs,-1.0,0.8,1,0.01,5.0,4.4)\n #make jets\n interface.makeInclusiveJets(150.0)\n \n outputJets = interface.get(True)\n if len(outputJets)==0:\n return\n \n jet.substructure=Substructure()\n #OK!Now save the area\n jet.substructure.area=interface.getArea(1,0)\n\n #For the pruned sub jets calculate the correction\n #without L1\n corrNoL1 = jet.corr/jet.CorrFactor_L1\n\n\n #Get pruned lorentzVector and subjets\n interface.prune(True,0,0.1,0.5)\n\n \n jet.substructure.prunedJet = self.copyLV(interface.get(False))[0]*corrNoL1\n jet.substructure.prunedJetUp = 1.05*jet.substructure.prunedJet.mass()\n jet.substructure.prunedJetDown = 0.95*jet.substructure.prunedJet.mass()\n jet.substructure.prunedJetSmear = jet.substructure.prunedJet.mass()*self.smearing.Gaus(1.0,1.1)\n\n\n interface.makeSubJets(False,0,2)\n jet.substructure.prunedSubjets = self.copyLV(interface.get(False)) \n\n #getv the btag of the pruned subjets\n jet.subJetTags=[-1.0,-1.0]\n jet.subJetCTagL=[-1.0,-1.0]\n jet.subJetCTagB=[-1.0,-1.0]\n\n for i,s in enumerate(jet.substructure.prunedSubjets):\n for o in jet.subjets(\"SoftDrop\"):\n dr=deltaR(s.eta(),s.phi(),o.eta(),o.phi())\n if dr<0.1:\n found=True\n jet.subJetTags[i] = o.bDiscriminator(self.cfg_ana.bDiscriminator)\n jet.subJetCTagL[i] = o.bDiscriminator(self.cfg_ana.cDiscriminatorL)\n jet.subJetCTagB[i] = o.bDiscriminator(self.cfg_ana.cDiscriminatorB)\n break;\n\n\n #Get soft Drop lorentzVector and subjets\n\n\n interface.softDrop(True,0,0.0,0.1,0.8)\n jet.substructure.softDropJet = self.copyLV(interface.get(False))[0]*corrNoL1\n jet.substructure.softDropMassUp = 1.05*jet.substructure.softDropJet.mass()\n jet.substructure.softDropJetDown = 0.95*jet.substructure.softDropJet.mass()\n jet.substructure.softDropJetSmear = jet.substructure.softDropJet.mass()*self.smearing.Gaus(1.0,0.1)\n\n interface.makeSubJets(False,0,2)\n jet.substructure.softDropSubjets = self.copyLV(interface.get(False)) \n\n #get NTau\n jet.substructure.ntau = interface.nSubJettiness(0,4,0,6,1.0,0.8,999.0,999.0,999)\n\n \n #recluster with CA and do massdrop\n\n interface = ROOT.cmg.FastJetInterface(LVs,0.0,1.5,1,0.01,5.0,4.4)\n interface.makeInclusiveJets(150.0)\n\n mu= ROOT.Double(0.667)\n y= ROOT.Double(0.08)\n jet.substructure.massDropTag = interface.massDropTag(0,mu,y)\n jet.substructure.massDrop = (mu,y)\n\n\n def cleanOverlap(self,collection,toRemove):\n after=list(set(collection)-set(toRemove))\n return after\n\n\n def topology(self,VV,jets,leptons):\n VV.otherLeptons=leptons\n VV.satteliteJets=jets\n #VBF Tag\n if len(jets)>1:\n VV.vbfDEta = abs(jets[0].eta()-jets[1].eta())\n VV.vbfMass = (jets[0].p4()+jets[1].p4()).M()\n else: \n VV.vbfDEta = -999\n VV.vbfMass = -999\n\n #Btags\n jetsCentral = filter(lambda x: abs(x.eta())<2.4,jets)\n VV.satteliteCentralJets=jetsCentral\n VV.nLooseBTags = len(filter(lambda x: x.bDiscriminator(self.cfg_ana.bDiscriminator)>0.605,jetsCentral))\n VV.nMediumBTags = len(filter(lambda x: x.bDiscriminator(self.cfg_ana.bDiscriminator)>0.89,jetsCentral))\n VV.nTightBTags = len(filter(lambda x: x.bDiscriminator(self.cfg_ana.bDiscriminator)>0.97,jetsCentral))\n VV.nOtherLeptons = len(leptons)\n \n def selectJets(self,jets,func,otherObjects,DR,otherObjects2=None,DR2=0.0):\n output=[]\n for j in jets:\n if not func(j):\n continue\n overlap=False\n for o in otherObjects:\n dr=deltaR(j.eta(),j.phi(),o.eta(),o.phi())\n if dr120) or (abs(x.pdgId())==13 and x.highPtIDIso and x.pt()>53 and abs(x.eta())<2.1),event.selectedLeptons)\n\n\n\n if len(tightLeptonsForW)==0:\n return output\n \n #make leptonic W\n W = self.vbTool.makeW(tightLeptonsForW,event.met)\n if len(W)==0:\n return output\n\n\n bestW = max(W,key = lambda x: x.leg1.pt()) \n #now the jets\n fatJets=self.selectJets(event.jetsAK8,lambda x: x.pt()>200.0 and abs(x.eta())<2.4 and x.jetID('POG_PFID_Loose') ,tightLeptonsForW,1.0)\n if len(fatJets)==0:\n return output\n bestJet = max(fatJets,key=lambda x: x.pt())\n\n VV=Pair(bestW,bestJet)\n if deltaR(bestW.leg1.eta(),bestW.leg1.phi(),bestJet.eta(),bestJet.phi())30.0 and x.jetID('POG_PFID_Loose') ,tightLeptonsForW,0.3,[bestJet],0.8)\n otherLeptons = self.cleanOverlap(looseLeptonsForW,[bestW.leg1])\n self.topology(VV,satteliteJets,otherLeptons) \n\n\n\n output.append(VV)\n return output\n\n\n\n def makeTOPCR(self,event):\n output=[]\n\n #loop on the leptons\n looseLeptonsForW = filter(lambda x: (abs(x.pdgId())==11 and x.heepID) or (abs(x.pdgId())==13 and x.highPtIDIso ),event.selectedLeptons)\n tightLeptonsForW = filter(lambda x: (abs(x.pdgId())==11 and x.heepID and x.pt()>120) or (abs(x.pdgId())==13 and x.highPtIDIso and x.pt()>53 and abs(x.eta())<2.1),event.selectedLeptons)\n\n\n if len(tightLeptonsForW)==0:\n return output\n \n #make leptonic W\n W = self.vbTool.makeW(tightLeptonsForW,event.met)\n if len(W)==0:\n return output\n\n\n bestW = max(W,key = lambda x: x.leg1.pt()) \n #now the jets\n fatJets=self.selectJets(event.jetsAK8,lambda x: x.pt()>200.0 and abs(x.eta())<2.4 and x.jetID('POG_PFID_Loose') ,tightLeptonsForW,1.0)\n fatJets=filter(lambda x: abs(deltaPhi(bestW.leg1.phi(),x.phi()))>ROOT.TMath.Pi()/2.0,fatJets)\n\n if len(fatJets)==0:\n return output\n\n bestJet = max(fatJets,key=lambda x: x.mass())\n \n VV=Pair(bestW,bestJet)\n if deltaR(bestW.leg1.eta(),bestW.leg1.phi(),bestJet.eta(),bestJet.phi())30.0 and x.jetID('POG_PFID_Loose') ,tightLeptonsForW,0.3,[bestJet],0.8)\n otherLeptons = self.cleanOverlap(looseLeptonsForW,[bestW.leg1])\n self.topology(VV,satteliteJets,otherLeptons) \n\n\n\n output.append(VV)\n return output\n\n\n\n\n\n def makeZV(self,event):\n output=[]\n\n #loop on the leptons\n\n\n leptonsForZ = filter(lambda x: (abs(x.pdgId())==11 and x.heepIDNoIso) or (abs(x.pdgId())==13 and (x.highPtID or x.highPtTrackID)),event.selectedLeptons)\n\n\n\n if len(leptonsForZ)<2:\n return output\n \n #make leptonic Z\n Z = self.vbTool.makeZ(leptonsForZ)\n if len(Z)==0:\n return output\n bestZ = max(Z,key = lambda x: x.pt()) \n\n\n #other higbn pt isolated letpons in the event \n otherGoodLeptons=self.cleanOverlap(leptonsForZ,[bestZ.leg1,bestZ.leg2])\n otherTightLeptons = filter(lambda x: (abs(x.pdgId())==11 and x.heepID) or (abs(x.pdgId())==13 and (x.highPtIDIso)),otherGoodLeptons)\n #now the jets\n fatJets=self.selectJets(event.jetsAK8,lambda x: x.pt()>200.0 and abs(x.eta())<2.4 and x.jetID('POG_PFID_Loose') ,[bestZ.leg1,bestZ.leg2],1.0)\n if len(fatJets)==0:\n return output\n bestJet = max(fatJets,key=lambda x: x.pt())\n\n VV=Pair(bestZ,bestJet)\n \n #substructure\n self.substructure(VV.leg2)\n\n if not hasattr(VV.leg2,\"substructure\"):\n return output\n\n\n #check if there are subjets\n\n # if len(VV.leg2.substructure.prunedSubjets)<2:\n # print 'No substructure',len(VV.leg2.substructure.prunedSubjets)\n # return output\n\n #topology \n satteliteJets = self.selectJets(event.jets,lambda x: x.pt()>30.0 and x.jetID('POG_PFID_Loose') ,otherTightLeptons,0.3,[bestJet],0.8)\n self.topology(VV,satteliteJets,otherTightLeptons) \n output.append(VV)\n return output\n\n\n\n def makeJJ(self,event):\n output=[]\n\n #loop on the leptons\n leptons= filter(lambda x: (abs(x.pdgId())==11 and x.heepID) or (abs(x.pdgId())==13 and x.highPtIDIso ),event.selectedLeptons)\n fatJets=self.selectJets(event.jetsAK8,lambda x: x.pt()>200.0 and abs(x.eta())<2.4 and x.jetID('POG_PFID_Tight') ,leptons,1.0)\n\n if len(fatJets)<2:\n return output\n\n VV=Pair(fatJets[0],fatJets[1])\n\n #kinematics\n if abs(VV.leg1.eta()-VV.leg2.eta())>1.3 or VV.mass()<1000:\n return output\n\n self.substructure(VV.leg1)\n self.substructure(VV.leg2)\n\n\n if not hasattr(VV.leg1,\"substructure\"):\n return output\n\n if not hasattr(VV.leg2,\"substructure\"):\n return output\n\n #check if there are subjets\n\n # if len(VV.leg2.substructure.prunedSubjets)<2 or len(VV.leg1.substructure.prunedSubjets)<2:\n # print 'No substructure'\n # return output\n \n\n\n #topology \n satteliteJets = self.selectJets(event.jets,lambda x: x.pt()>30.0 and x.jetID('POG_PFID_Loose') ,leptons,0.3,[VV.leg1,VV.leg2],0.8)\n self.topology(VV,satteliteJets,leptons) \n output.append(VV)\n return output\n\n\n def makeMETV(self,event):\n output=[]\n\n #loop on the leptons\n leptons= filter(lambda x: (abs(x.pdgId())==11 and x.heepID) or (abs(x.pdgId())==13 and x.highPtIDIso ),event.selectedLeptons)\n fatJets=self.selectJets(event.jetsAK8,lambda x: x.pt()>200.0 and abs(x.eta())<2.4 and x.jetID('POG_PFID_Loose') ,leptons,1.0)\n\n if len(fatJets)<1:\n return output\n\n VV=Pair(event.met,fatJets[0])\n \n #kinematics\n if VV.deltaPhi()<2.0 or VV.leg1.pt()<200:\n return output\n\n self.substructure(VV.leg2)\n\n if not hasattr(VV.leg2,\"substructure\"):\n return output\n\n\n #check if there are subjets\n\n# if len(VV.leg2.substructure.prunedSubjets)<2:\n# print 'No substructure'\n# return output\n \n\n #topology \n satteliteJets = self.selectJets(event.jets,lambda x: x.pt()>30.0 and x.jetID('POG_PFID_Loose') ,leptons,0.3,[VV.leg2],0.8)\n self.topology(VV,satteliteJets,leptons) \n output.append(VV)\n return output\n\n\n \n\n\n\n def process(self, event):\n\n LNuJJ=self.makeWV(event)\n LLJJ =self.makeZV(event)\n JJ=self.makeJJ(event)\n JJNuNu=self.makeMETV(event)\n TopCR=self.makeTOPCR(event)\n\n setattr(event,'LNuJJ'+self.cfg_ana.suffix,LNuJJ)\n setattr(event,'JJ'+self.cfg_ana.suffix,JJ)\n setattr(event,'LLJJ'+self.cfg_ana.suffix,LLJJ)\n setattr(event,'JJNuNu'+self.cfg_ana.suffix,JJNuNu)\n setattr(event,'TopCR'+self.cfg_ana.suffix,TopCR)\n\n\n","repo_name":"clseitz/cmgtools-lite","sub_path":"VVResonances/python/analyzers/VVBuilder.py","file_name":"VVBuilder.py","file_ext":"py","file_size_in_byte":15225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"22584107832","text":"from qingcloud.cli.iaas_client.actions.base import BaseAction\nfrom qingcloud.cli.misc.utils import explode_array\n\nclass CreateNicsAction(BaseAction):\n\n action = 'CreateNics'\n command = 'create-nics'\n usage = '%(prog)s --vxnet [options] [-f ]'\n\n @classmethod\n def add_ext_arguments(cls, parser):\n\n parser.add_argument('-x', '--vxnet', dest='vxnet',\n action='store', type=str, default=None,\n help='the ID of vxnet.')\n\n parser.add_argument('-N', '--nic-name', dest='nic_name',\n action='store', type=str, default=None,\n help='the name of nic.')\n\n parser.add_argument('-p', '--private-ips', dest='private_ips',\n action='store', type=str, default=None,\n help='''the private ip of nics. ''')\n\n parser.add_argument('-c', '--count', dest='count',\n action='store', type=int, default=1,\n help='the number of nics to create.')\n\n @classmethod\n def build_directive(cls, options):\n required_params = {'vxnet': options.vxnet}\n for param in required_params:\n if required_params[param] is None or required_params[param] == '':\n print('error: [%s] should be specified' % param)\n return None\n\n return {\n 'vxnet': options.vxnet,\n 'count' : options.count,\n 'nic_name' : options.nic_name,\n 'private_ips': explode_array(options.private_ips),\n }\n","repo_name":"yunify/qingcloud-cli","sub_path":"qingcloud/cli/iaas_client/actions/nic/create_nics.py","file_name":"create_nics.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"22"} +{"seq_id":"28055836828","text":"\nUSE_LMDB = False\nIMG_LMDB_PATH = '/data/data/liuhuawei/data_lmdb_backup_for_ssd/data_lmdb_for_image_copy_and_mark_data'\n\n# Snapshot iteration \nMETADATA_JSON = './data/objectid_to_metadata.json'\n\n## json path to triplelt file, key:(a_objectid, p_objectid) val:[n1_ob, n2_ob, ....]\nTRIPLET_JSON = './data/test.json'\n\n## image config\nTARGET_SIZE = 224\nPIXEL_MEANS = [104.0, 117.0, 123.0]\n\n## The number of samples in each minibatch\nBATCH_SIZE = 39\n\n## prefetch process for data layer (must be false here)\nUSE_PREFETCH = False\nRNG_SEED = 8\n\n","repo_name":"fighting-liu/python_tripletloss","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"19083589391","text":"import json\nimport requests as req\nimport sys\nimport getopt\n\ncat_url = 'https://cat-fact.herokuapp.com/facts'\nheaders = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n\n\ndef get_json():\n # assume internet connectivity ok for the moment\n resp = req.get(cat_url, headers)\n if resp.status_code != 200:\n print(\"Error: HTTP response code = {0}\".format(resp.status_code), file=sys.stderr)\n sys.exit(255)\n return json.loads(resp.content)\n\n\ndef make_users():\n facts = get_json()['all']\n users = dict()\n for fact in facts:\n try:\n user = fact['user']\n except KeyError:\n # empty user is possible but probably not intended so report error\n print(\"No user defined for fact with id: {0}\".format(fact['_id']), file=sys.stderr)\n continue\n uid = user['_id']\n if uid in users:\n users[uid] = (users[uid][0] + fact['upvotes'], users[uid][1])\n else:\n first = user['name']['first']\n last = user['name']['last']\n users[uid] = (fact['upvotes'], '{0} {1}'.format(first, last))\n sorted_users = sorted(users.values(), reverse=True)\n return sorted_users\n\n\ndef write_file(path, reporters):\n try:\n f = open(path, \"w\")\n except FileNotFoundError:\n print('Bad path spec: {0}'.format(path), file=sys.stderr)\n sys.exit(255)\n f.write(\"user, totalVotes\\n\")\n for reporter in reporters:\n f.write(\"{0}, {1}\\n\".format(reporter[1], reporter[0]))\n f.close()\n\n\ndef main(argv):\n try:\n opt, arg = getopt.getopt(argv, \"-f\", [])\n except getopt.GetoptError:\n print(\"usage: python main.py -f \", file=sys.stderr)\n sys.exit(255)\n if len(opt) == 0 or len(arg) == 0 or opt[0][0] not in [\"-f\"]:\n print(\"usage: python main.py -f \", file=sys.stderr)\n sys.exit(255)\n reporters = make_users()\n write_file(arg[0], reporters)\n\n\nmain(sys.argv[1:])\n","repo_name":"MattUrsnott/elabor8","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22359100170","text":"#!/usr/bin/python3\nimport os\n\nlttng_provider_cnt_fp = open(\"lttng_provider_cnt\",\"r\")\nlttngMaxProvider=int(lttng_provider_cnt_fp.read())\nlttng_provider_cnt_fp.close()\n\nif __name__==\"__main__\":\n index = 0;\n while (index < lttngMaxProvider):\n lttng_cmd_str_1 = \"lttng create tp_session_%s > /dev/null\"%(str(index))\n os.system(lttng_cmd_str_1)\n lttng_cmd_str_2 = \"lttng enable-event -u -s tp_session_{0} 'tp_{0}:*'\".format(str(index))\n os.system(lttng_cmd_str_2)\n lttng_cmd_str_3 = \"lttng start tp_session_%s\"%(str(index))\n os.system(lttng_cmd_str_3)\n index += 1\n\n","repo_name":"vjanandr/sampleP","sub_path":"lttng/lttng_session_create.py","file_name":"lttng_session_create.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34000250109","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\nimport os\nfrom typing import List, Tuple\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport py_scripts.transform\nimport pickle\n\nMODELSDIR = r'../models'\nDATASET = r'../data/sim_ts_limpo.csv'\n\nclass modelo_produtos:\n \"\"\" encapsulador para os modelos de cada produto \"\"\"\n\n def __init__(self, dataset: str = DATASET, modelsdir: str = MODELSDIR):\n self.dataset = dataset\n self.modelsdir = modelsdir\n\n # importanto dados limpos\n ts_raw = pd.read_csv(self.dataset)\n tsd, self.tswide = py_scripts.transform.pipeline(ts_raw)\n\n # produtos\n self.produtos = self.tswide.columns\n\n # importar modelos\n self.modelo, self.serie_treino = self.importar_modelos()\n\n self.fat_total = self.tswide.sum(axis = 'columns')\n\n def importar_modelos(self):\n\n modelo = {}\n serie_treino = {}\n\n for produto in self.produtos:\n produto_ = produto.split('_')[0]\n\n picklefile = fr'produto_{produto_}.model'\n picklefn = os.path.join(os.path.abspath(self.modelsdir), picklefile)\n\n with open(picklefn, 'rb') as modelo_arq:\n unpickler = pickle.Unpickler(modelo_arq)\n modelo_dict = unpickler.load()\n modelo[produto] = modelo_dict['modelo']\n serie_treino[produto] = modelo_dict['serie_treino']\n \n return modelo, serie_treino \n\n def get_models(self):\n return self.modelo, self.serie_treino\n \n def get_test_begin(self, produtos: List or None = None):\n \n if produtos is None:\n produtos = self.produtos\n\n serie_treino_prods = { p: s for p, s in self.serie_treino.items() if p in produtos }\n\n train_end = pd.Series(\n [ v.index[-1] for v in serie_treino_prods.values() ],\n index = produtos\n )\n\n test_start = train_end + pd.offsets.MonthBegin(1)\n\n return test_start\n\n def get_all_test_begin(self):\n \n train_end = max([ v.index[-1] for v in self.serie_treino.values() ])\n\n test_start = train_end + pd.offsets.MonthBegin(1)\n\n return test_start\n\n def predict(self, n_periods: int, return_conf_int: bool = False, \n predict_array: bool = True,\n *args, **kwards\n ) -> pd.Series or pd.DataFrame or np.array or Tuple[np.array, np.array]:\n\n if n_periods <= 0:\n raise ValueError('Can only predict forward!')\n\n # construimos um dataframe onde ficarão as predições individuais\n preds = pd.DataFrame([], columns = self.produtos)\n\n if return_conf_int:\n colsmult = pd.MultiIndex.from_product((self.produtos, ['lb', 'ub']))\n preds_ci = pd.DataFrame([], columns = colsmult)\n\n # obtemos a maior data em todos os conjuntos de treino\n\n max_train_right_bound = max([ v.index[-1] for v in self.serie_treino.values() ])\n\n for produto in self.produtos:\n # maior data de cada conjunto de treino\n train_right_bound = self.serie_treino[produto].index[-1]\n\n # construimos o índice de datas do conjunto de teste de cada produto: \n # range entre mês após o último contido no conjunto de treino e \n idx_test = pd.date_range(\n start = train_right_bound + dt.timedelta(days = 1), \n end = max_train_right_bound + pd.offsets.MonthBegin(n_periods), freq = 'MS')\n\n # geramos a predição para cada produto. Essa predição vem em um np.array\n # como queremos o intervalo de confiança, o resultado da função é uma tupla com\n # - o array da predição média\n # - um array com duas colunas contendo o lower bound e o upper bound\n arr_pred_all = self.modelo[produto].predict(n_periods = idx_test.shape[0], return_conf_int = return_conf_int)\n\n # primeiro trataremos das médias\n if return_conf_int:\n arr_pred = arr_pred_all[0]\n else:\n arr_pred = arr_pred_all\n \n # convertemos o array para Series\n pred = pd.Series(arr_pred, index = idx_test)\n pred.name = 'predicted_mean'\n\n # adicionamos a Series ao DataFrame `preds`\n preds[produto] = pred\n\n # agora trabalharemos nos bounds\n if return_conf_int:\n arr_pred_ci = arr_pred_all[1]\n\n pred_ci = pd.DataFrame(\n arr_pred_ci, \n columns = pd.MultiIndex.from_product(((produto, ), ('lb', 'ub'))), \n index = idx_test\n )\n\n\n preds_ci[pred_ci.columns] = pred_ci\n\n preds_series = preds.dropna().sum(axis = 'columns')\n preds_series.name = 'predicted_mean'\n\n if return_conf_int:\n fat_test = pd.DataFrame([])\n\n fat_test['predicted_mean'] = preds_series\n\n fat_test['lb'] = preds_ci.loc[:, (slice(None), 'lb')].dropna().sum(axis = 'columns')\n fat_test['ub'] = preds_ci.loc[:, (slice(None), 'ub')].dropna().sum(axis = 'columns')\n \n if predict_array:\n return (\n fat_test['predicted_mean'].values,\n fat_test[['lb', 'ub']].values\n )\n else:\n return fat_test\n \n else:\n if predict_array:\n return preds_series.values\n else:\n return preds_series\n\n def __str__(self):\n totalstr = 'Modelos:'\n\n tamanho_campo = max([ len(produto) for produto in self.produtos ]) + 4\n\n for produto, modelo in self.modelo.items():\n totalstr += f\"\\n{produto:>{tamanho_campo}s}: {modelo}\"\n \n return totalstr\n \n def __repr__(self):\n return str(self.modelo)","repo_name":"flimao/case-previsao-faturamento","sub_path":"py_scripts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30702716215","text":"import argparse\nimport json\nimport re\n\nimport constants\n\nfrom entities import BodyField, Header, Request\nfrom enumClasses import BodyFieldType, ContentType, HeaderFieldType, Method\n\nclass CommandParser:\n\tdef __init__(self):\n\t\tparser = argparse.ArgumentParser(\n\t\t\tprog = constants.APP_NAME,\n\t\t\tdescription=_(\"HTTPまたはHTTPSによる通信を行います。コマンドは本家curlに近いですが、完全に再現されているわけではないことに注意してください。\"),\n\t\t)\n\n\t\t# 結果や表示関係の処理\n\t\t#parser.add_argument(\"-o\", \"--output\", help=_(\"出力を指定した名前のファイルに保存します。カレントディレクトリに指定した名前のファイルが既に存在した場合、上書きされます。\"))\n\t\t#parser.add_argument(\"-O\", \"--remote-name\", action=\"store_true\", help=_(\"指定したURLのファイル名部分(パラメータを含む)をと同じ名前で出力を保存します。カレントディレクトリに指定した名前のファイルが既に存在した場合、上書きされます。URLデコードは行われないことに注意してください。\"))\n\t\t#parser.add_argument(\"-J\", \"--remote-header\", help=_(\"URLからファイル名を抽出する代わりに、レスポンス中のContent-Dispositionヘッダの内容を-O、--remote-nameオプションに指定します。指定のファイルが既に存在する場合、上書きはされずにこのオプションが無視されます。URLデコードは行われないこと、DLLなどソフトウェアから自動で読み込まれるファイルの名前を返されること等に注意してください。\"))\n\t\t#parser.add_argument(\"-s\", \"--silent\", action=\"store_true\", help=_(\"サイレント実行。進行状況やエラーを表示しません。\"))\n\t\t#parser.add_argument(\"-S\", \"--show-error\", action=\"store_true\", help=_(\"-s、--silentと併せて使用すると、失敗した場合にエラーメッセージが表示されます。\"))\n\t\t#parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n\t\t#parser.add_argument(\"--trace-ascii\", action=\"store_true\")\n\t\t#parser.add_argument(\"-w\", \"--write-out\")\n\t\t#roup = parser.add_mutually_exclusive_group()\n\n\n\t\t# 制御設定\n\t\t#リダイレクトの追跡\n\t\t#parser.add_argument(\"-l\", \"--location\", action=\"store_true\")\n\t\t#parser.add_argument(\"--location-trusted\", action=\"store_true\", help=_(\"-l、--locationと同じですが、リダイレクト先にも-u、--userで指定した内容を送信します。HTTPサイトにリダイレクトする場合、セキュリティ上の問題が生じる場合があります。\"))\n\t\t#parser.add_argument(\"-k\", \"--insecure\", action=\"store_true\")\n\n\n\t\t# 内容の指定\n\t\tparser.add_argument(\"-d\", \"--data\", \"--data-raw\", \"--data-ascii\",\n\t\t\thelp=_(\"リクエストの本文を設定します。URLエンコードなどの前処理は行われません。\")\n\t\t)\n\t\t#parser.add_argument(\"--data-urlencode\")\n\t\tparser.add_argument(\"-H\", \"--header\", action=\"append\", default=[], \n\t\t\thelp=_(\"リクエストに追加で含めるヘッダを設定します。内部で生成されるものを上書きすることができますが、このような指定は推奨されません。\" +\n\t\t\t\t\"ヘッダ名だけを指定することで、設定済みのヘッダを削除できます。\" +\n\t\t\t\t\"値のないヘッダを送信する場合、:の代わりに;を指定します。この場合、:に置き換えて送信されます。\" +\n\t\t\t\t\"改行コードは自動的に挿入去れるため、引数に含めないでください。\" +\n\t\t\t\t\"@を使用したファイルの指定には対応していません。\" +\n\t\t\t\t#\"-L,--locationと併せて指定し���場合、リダイレクト先にも送信されるため、セキュアな情報の指定をする際には注意してください。\"\n\t\t\t\t\"このオプションは、複数回指定することで複数のヘッダを指定可能です。\")\n\t\t)\n\t\t#parser.add_argument(\"-f\", \"--file\")\n\t\tparser.add_argument(\"-X\", \"--request\", choices=[item.name for item in Method], default=\"\", help=_(\"送信するメソッド名を設定します。この指定を行っても、送信するメソッド名が変わるのみであり、プログラムの動作は変更されません。-L、--locationと併せて指定した場合、リダイレクト時のステータスコードにかかわらず、すべてのリクエストにここで指定したメソッドを使用するため、意図しない動作となる場合があります。\"))\n\t\t#parser.add_argument(\"-u\", \"--user\", help=_(\"認証に使用するユーザ名とパスワードを送信します。パスワードの省略、Windows環境で利用できる高度な機能等には対応していません。\"))\n\t\t#parser.add_argument(\"--digest\", action=\"store_true\", help=_(\"-u、--userで指定した情報を用いてダイジェスト認証を行います。\"))\n\t\tparser.add_argument(\"URLs\", default=\"\", help=\"通信先URLを指定します。複数指定や{}・[]を用いた指定には対応していません。\")\n\n\t\t# セッションの保存と利用\n\t\t#parser.add_argument(\"-c\", \"-cookiejar\")\t\t# 書き込み\n\t\t#parser.add_argument(\"-b\", \"--cookie\")\t\t# 読み込み\n\n\n\n\t\tinvalid_options = {\n\t\t\t\"--fail-early\":_(\"複数のURLを指定して実行し、途中の通信でエラーになった場合、そこで実行を終了し、エラーを返します。終了コードによってエラーを確実に検出できるようにすることが目的のオプションですが、CADは複数のURLの指定をサポートしていないため、この指定はできません。\"),\n\t\t\t\"-f\":_(\"ステータスコードが200以外の場合に、結果を出力せず終了コード22等で終了するオプションですが、CADでは終了コードによる結果の返却やCUIのみでの利用に対応していないため、この指定はできません。\"),\n\t\t\t\"--fail\":_(\"ステータスコードが200以外の場合に、結果を出力せず終了コード22等で終了するオプションですが、CADでは終了コードによる結果の返却やCUIのみでの利用に対応していないため、この指定はできません。\"),\n\t\t\t\"--remote-name-all\":_(\"複数のURLを指定した際、すべてのURLに対して-O、--remote-nameを指定するオプションですが、CADは複数のURLの指定に対応していないため、この指定はできません。\"),\n\t\t\t\"--basic\": _(\"-u、--userと併せて指定することでベーシック認証を使用することを使用するオプションですが、この動作はデフォルトであり、CADでは対応していない複数URLの指定をしない限り使い道がないため、この指定はできません。\"),\n\t\t\t\"--negotiate\": _(\"ネゴシエート(SPNEGO)認証を使用するオプションですが、CADは対応していません。\"),\n\t\t\t\"--abstract-unix-socket\": _(\"Windows環境に対応していないオプションのため、使用できません。\"),\n\t\t\t\"-K\":_(\"外部ファイルから設定を読み込んでプログラムを実行するオプションですが、CADは対応していません。\"),\n\t\t\t\"--config\":_(\"外部ファイルから設定を読み込んでプログラムを実行するオプションですが、CADは対応していません。\"),\n\t\t\t\"-q\":_(\"設定ファイルの読み込みを抑制するオプションですが、CADはcurl設定ファイルに対応していないため、指定できません。\"),\n\t\t\t\"--disable\":_(\"設定ファイルの読み込みを抑制するオプションですが、CADはcurl設定ファイルに対応していないため、指定できません。\"),\n\t\t\t\"--interface\":_(\"通信に用いるネットワークカードを指定するオプションですが、CADは対応していません。\"),\n\n\t\t}\n\t\t#parser.add_argument()\n\t\t#parser.add_argument()\n\t\t#parser.add_argument()\n\t\t#parser.add_argument()\n\n\t\tself.parser = parser\n\n\tdef parse_args(self):\n\t\targs = self.parser.parse_args()\n\n\t\t# ヘッダ\n\t\theaders = parseHeaders(args.header)\n\n\t\t# メソッド\n\t\tif args.request:\n\t\t\tmethod=Method[args.request]\n\t\telse:\t# 他のコマンドから推測\n\t\t\t# 何もなければGET\n\t\t\tmethod = Method.GET\n\t\t\t# -d などがあればPOST\n\t\t\tif args.data:\n\t\t\t\tmethod = Method.POST\n\n\t\t# ContentType\n\t\t# 基本はFORM\n\t\tcontentType=ContentType.FORM\n\t\t# ヘッダでJSON指定していればJSONにする\n\t\tfor item in headers:\n\t\t\tif item.getName().lower() == \"content-type\" and item.getValue().lower().startswith(\"application/json\"):\n\t\t\t\tcontentType=ContentType.JSON\n\n\t\t# body\n\t\tbody = parseBody(args.data, contentType)\n\n\t\treturn Request.Request(\"commandline request\", contentType, method, args.URLs, headers, body)\n\ndef parseHeaders(headers):\n\tpattern = re.compile(r'^[\\041-\\071\\073-\\176]*:')\t# 072=0x3A=:はダメ\n\tresult = []\n\tfor item in headers:\n\t\t# キーのみのヘッダ\n\t\tif re.match(r'^[\\041-\\071\\073-\\176]*;$', item):\n\t\t\tresult.append(Header.Header(item[:-1], HeaderFieldType.CONST, \"\"))\n\t\t\tcontinue\n\t\t# 条件を満たさない\n\t\telif not pattern.match(item):\n\t\t\traise ValueError(_(\"ヘッダの指定が不正です。\"))\n\n\t\ti = item.find(':')\n\t\tv = item[i+1:].lstrip()\n\t\tif v:\n\t\t\tresult.append(Header.Header(item[:i], HeaderFieldType.CONST, v))\n\t\telse:\n\t\t\tresult.append(Header.Header(item[:i], HeaderFieldType.REMOVE, \"\"))\n\treturn result\n\n\n\ndef parseBody(data, contentType):\n\tbody = []\n\n\t# 何も考えずにJSONパース\n\ttry:\n\t\titems = json.loads(data)\n\t\tfor k,v in items.items():\n\t\t\tif type(k) != str or type(v) not in (bool,float,int, type(None), str):\n\t\t\t\traise ValueError(_(\"現在、JSONリクエストでのリストや辞書の利用はサポートしていません。\"))\n\t\t\tbody.append(BodyField.BodyField(k, BodyFieldType.CONST, v))\n\t\treturn body\n\texcept:\n\t\tif contentType == ContentType.JSON:\n\t\t\traise ValueError(_(\"JSONデータのパースに失敗しました。\"))\n\n\tif data and contentType == ContentType.FORM:\n\t\tfor arg in data.split(\"&\"):\n\t\t\tif not arg:\n\t\t\t\tcontinue\n\t\t\tnv = arg.split('=', 1)\n\t\t\tif len(nv) != 2:\n\t\t\t\tnv.append(\"\")\n\t\tbody.append(BodyField.BodyField(nv[0], BodyFieldType.ENCORDED, nv[1]))\n\treturn body\n","repo_name":"actlaboratory/CAD","sub_path":"commandParser.py","file_name":"commandParser.py","file_ext":"py","file_size_in_byte":10217,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"22564699477","text":"import unittest\r\nimport numpy as np\r\nimport sys, os\r\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\r\nimport channel as Channel\r\nfrom waveform import get_random\r\nclass test_channel(unittest.TestCase):\r\n def setUp(self): \r\n self.ch = Channel.Channel()\r\n \r\n def test_awgn(self): \r\n data_in = get_random(1024*1000)\r\n data_out = self.ch.awgn(data_in, snr_db = 0)\r\n self.assertEqual(len(data_in),len(data_out))\r\n self.assertAlmostEqual(np.var(data_in),np.var(data_out)/2.0, places=2)\r\n\r\n def test_multipath(self):\r\n data_in = np.zeros(10, dtype = complex)\r\n data_in[2] = 1.0 + 0.0j\r\n self.ch.impulse_response = np.arange(10)+1j*np.arange(10)\r\n data_out = self.ch.multipath(data_in)\r\n np.testing.assert_array_almost_equal(data_out[2:12], self.ch.last_impulse_response)\r\n #self.assertAlmostEqual(np.linalg.norm(data_in), np.linalg.norm(data_out))\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n ","repo_name":"Barkhausen-Institut/GFDM-PHY-Reference","sub_path":"sdr_utils/unittest/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20230702320","text":"import sqlite3\nimport csv\n\nimport sqlite3 as sq\nimport pandas as pd\nfrom io import StringIO\n\ndef sql_to_csv(database, table_name):\n \n try:\n conn = sqlite3.connect(database)\n print(\"database is opened\")\n except Exception as e:\n print(\"error during connection\", str(e))\n results = conn.execute(\"SELECT * FROM \" + table_name)\n \n #header of the sql table\n names = [description[0] for description in results.description]\n\n #selecting header - column names\n CSV = \"\"\n header = \"\"\n for j in names:\n header += j\n header += \",\"\n\n header_1 = header[0:-1] + \"\\n\"\n\n CSV = header_1\n\n #creating a CSV string\n \n for row in results:\n #converting all the types of data (int etc) of the list into string type by map\n my_string = ','.join(map(str, row))\n CSV += my_string\n CSV += '\\n'\n \n # deleting the last '\\n' sign \n CSV_f = CSV[0:-1]\n return CSV_f\n \n # implementing & closing connection to SQLite database\n conn.execute()\n conn.close()\n\n#to SEE the RESULTS - delete # sign in front of \"print(sql_to_csv()\"\n#print(sql_to_csv('all_fault_line.db','fault_lines'))\n\ncsv_content = open(\"list_volcano.csv\")\n\ndef csv_to_sql(csv_content, database, table_name):\n \n # creating a connection object\n connection = sq.connect(database)\n # creating a cursor object\n curs = connection.cursor()\n \n #reading csv file\n data = csv_content.read()\n #print(data)\n \n #creating header for the sql table\n data_h = \"\"\n for i in data:\n if i != '\\n':\n data_h += i\n else:\n break\n \n res = data_h.split(',')\n res_f = []\n \n for i in res:\n i = i.replace(\" \", \"_\")\n i = i.replace(\")\", \"\")\n i = i.replace(\"(\", \"\")\n res_f.append(i)\n \n res_f_s = \"\"\n for i in res_f:\n res_f_s += i\n res_f_s += \", \"\n \n res_f_s_1 = res_f_s[0:-2]\n \n # running and creation of table sql query\n \n curs.execute(\"CREATE TABLE if not Exists \" + table_name +\n\n \"(\" + res_f_s_1 + \")\") \n \n \n #loading CSV data into Pandas DataFrame\n TESTDATA = StringIO(data)\n \n df = pd.read_csv(TESTDATA, sep=\",\")\n \n # writing the data to a sqlite db table\n df.to_sql(table_name, connection, if_exists='replace', index=False)\n\n # running and selecting sql query\n curs.execute('select * from ' + table_name)\n \n #Displaying the results - DELETE # sign in front of \"for row in records\" & \"print(row)\" - TO SEE THE RESULTS \n #for row in records:\n # show row\n #print(row)\n \n # closing CSV file & implementing & closing connection to SQLite database\n csv_content.close()\n connection.commit()\n connection.close()\n\n# to SEE the RESULTS delete # sign in front of \"csv_to_sql()\" \n#csv_to_sql(csv_content, 'list_volcano.db','volcanos') ","repo_name":"Nadir-Alpeiss-1/SQL_to_CSV_CSV_to_SQL_converter","sub_path":"my_ds_babel.py","file_name":"my_ds_babel.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"242955823","text":"from django.core.cache import cache\nfrom django_celery_beat.models import IntervalSchedule, PeriodicTask\nfrom sentry_sdk import capture_message\n\nfrom aggregator.models import DataSource\nfrom articles.models import Article, Picture\nfrom jobs.celery import app\nfrom jobs.utils import download_image\n\n\nclass AggregateContent(app.Task):\n name = 'aggregator.aggregate_content'\n\n @staticmethod\n def get_data(datasource):\n data = []\n try:\n data = datasource.get_data()\n except ConnectionError as e:\n capture_message(e, level='debug')\n\n task = PeriodicTask.objects.filter(\n kwargs__contains=f'\"datasource_id\": {datasource.id}').first()\n if task and task.interval.every < 60 and task.interval.period == 'minutes':\n task.interval, _ = IntervalSchedule.objects.get_or_create(\n every=task.interval.every + 1,\n period='minutes'\n )\n task.save()\n return data\n\n @staticmethod\n def save_data(data, datasource):\n counter = 0\n for d in data:\n icon_url = d.pop('icon_url', str(datasource.icon))\n picture = None\n if Picture.objects.filter(url=icon_url).exists():\n picture = Picture.objects.get(url=icon_url)\n elif not Picture.objects.filter(url=icon_url).exists():\n picture = Picture.objects.create(image=datasource.icon, url=icon_url)\n elif icon_url.startswith('http'):\n path = download_image(icon_url, Picture.image.field.upload_to)\n picture = Picture.objects.create(\n image=path,\n url=icon_url\n )\n\n Article.objects.clean_create(\n source=datasource,\n active=True,\n picture=picture,\n **d\n )\n counter += 1\n return counter\n\n def run(self, datasource_id=None, *args, **kwargs):\n if datasource_id:\n datasource = DataSource.objects.get(id=datasource_id)\n else:\n datasource = DataSource.objects.all().order_by('last_use_time').first()\n\n data = self.get_data(datasource)\n save_count = self.save_data(data, datasource)\n if save_count:\n cache.delete('stats_view')\n\n datasource.save()\n\n\napp.tasks.register(AggregateContent())\n","repo_name":"q-user/django-jobs","sub_path":"src/aggregator/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70440852216","text":"import os\nimport glob\nfrom .utils import preprocess_text\n\nbase_path = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__), \"..\",\n )\n)\n\ncurated_path = os.path.join(base_path, \"data\", \"contracts\", \"curated\")\n\ndownloaded_contracts = os.path.join(\n base_path, \"data\", \"contracts\",\n \"downloaded\", \"txt\",\n)\n\ncurated_contracts = {\n \"ADMA\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"ADMA Biomanufacturing - Services Agreement.txt\"),\n **kwargs\n ),\n \"Biogen Credit Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Biogen Credit Agreement - 2020.txt\"),\n **kwargs\n ),\n\n \"Bright Horizons - Credit Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Bright Horizons - Credit Agreement.txt\"),\n **kwargs\n ),\n \"Datawatch\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"DataWatch Corp.txt\"),\n **kwargs\n ),\n\n # Strange contract, ask Will\n # \"DnB\": lambda **kwargs: yield_lines(\n # os.path.join(curated_path, \"DnB - Services Agreement.txt\"),\n # chunk_flextronics, **kwargs\n # ),\n\n \"Flextronics\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Flextronics - Data Processing Service Agreement.txt\"),\n **kwargs\n ),\n\n # Awkward structure\n # \"General Atlantic\": lambda **kwargs: yield_lines(\n # os.path.join(curated_path, \"General Atlantic - Merger Agreement.txt\"),\n # chunk_general_atlantic, **kwargs\n # ),\n\n \"GA - Purchase Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"GA - Purchase Agreement.txt\"),\n **kwargs\n ),\n\n # Quite ok, it has a veeeeeeery long exhibit clause at the end of it\n \"IMA between Black Rock and the Fed\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"IMA between Black Rock and the Fed.txt\"),\n **kwargs\n ),\n\n \"Jagged Peak Energy - Assignment Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Jagged Peak Energy - Assignment Agreement.txt\"),\n **kwargs\n ),\n\n \"Oasis Petroleum - Credit Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Oasis Petroleum - Credit Agreement.txt\"),\n **kwargs\n ),\n\n \"Quality Technology Services - Service Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Quality Technology Services - Service Agreement.txt\"),\n **kwargs\n ),\n\n \"RenovoRx - Service Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"RenovoRx - Service Agreement.txt\"),\n **kwargs\n ),\n\n \"Sample Asset Purchase Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Sample Asset Purchase Agreement.txt\"),\n **kwargs\n ),\n\n\n \"Sample DIP Loan Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Sample DIP Loan Agreement.txt\"),\n **kwargs\n ),\n\n \"Veritone - Merger Agreement\": lambda **kwargs: yield_lines(\n os.path.join(curated_path, \"Veritone - Merger Agreement.txt\"),\n **kwargs\n )\n}\n\n\ncontracts = curated_contracts.copy()\n\ncontract_paths = glob.glob(os.path.join(downloaded_contracts, \"*.txt\"))\n\ndef create_contract_generator(path):\n # This is to avoid lambda in loop issue :-)\n return lambda **kwargs: yield_lines(path, **kwargs)\n\nfor path in contract_paths:\n basename = os.path.basename(path)\n name, ext = os.path.splitext(basename)\n\n contracts[name] = create_contract_generator(path)\n\n\ndef yield_lines(path, **kwargs):\n \"\"\"\n Helper function for pre-chunked contracts\n \"\"\"\n with open(path, \"r\") as f:\n for paragraph in preprocess_text(f, **kwargs):\n yield paragraph\n","repo_name":"finiteautomata/text-representations","sub_path":"representations/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"3468322405","text":"from spinn_utilities.overrides import overrides\nfrom spinnman.messages.scp import SCPRequestHeader\nfrom spinnman.messages.scp.abstract_messages import AbstractSCPRequest\nfrom spinnman.messages.scp.enums import SCPCommand\nfrom spinnman.messages.sdp import SDPFlag, SDPHeader\nfrom .get_chip_info_response import GetChipInfoResponse\n\n\nclass GetChipInfo(AbstractSCPRequest[GetChipInfoResponse]):\n \"\"\"\n An SCP request to read the chip information from a core.\n \"\"\"\n __slots__ = ()\n\n def __init__(self, x: int, y: int, with_size: bool = False):\n \"\"\"\n :param int x:\n The x-coordinate of the chip to read from, between 0 and 255\n :param int y:\n The y-coordinate of the chip to read from, between 0 and 255\n :param bool with_size:\n Whether the size should be included in the response\n \"\"\"\n # Bits 0-4 + bit 6 = all information except size\n argument_1 = 0x5F\n if with_size:\n\n # Bits 0-6 = all information including size\n argument_1 = 0x7F\n\n super().__init__(\n SDPHeader(\n flags=SDPFlag.REPLY_EXPECTED, destination_port=0,\n destination_cpu=0, destination_chip_x=x,\n destination_chip_y=y),\n SCPRequestHeader(command=SCPCommand.CMD_INFO),\n argument_1=argument_1)\n\n @overrides(AbstractSCPRequest.get_scp_response)\n def get_scp_response(self) -> GetChipInfoResponse:\n return GetChipInfoResponse()\n","repo_name":"SpiNNakerManchester/SpiNNMan","sub_path":"spinnman/messages/scp/impl/get_chip_info.py","file_name":"get_chip_info.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"20653691285","text":"import sys\nimport time\nfrom typing import Tuple, Set, Dict, Any\n\nimport numpy\nfrom deap import creator\n\nimport settings\nfrom coverage.emma.emma_coverage_fetcher import EmmaCoverageFetcher\nfrom dependency_injection.required_feature import RequiredFeature\nfrom devices import adb\nfrom devices.device import Device\nfrom generation.individual import Individual\nfrom generation.individual_generator import IndividualGenerator\nfrom test_runner.test_event import TestCase, TestSuite\nfrom test_runner.test_runner import TestRunner\n\n\nclass IndividualWithCoverageFetcherGenerator(IndividualGenerator, EmmaCoverageFetcher):\n\n def __init__(self) -> None:\n super(IndividualWithCoverageFetcherGenerator, self).__init__()\n\n def gen_individual(self, device: Device, individual_index: int, generation: int) -> Any:\n start_time = time.time()\n device.mark_work_start()\n suite, fitness = self.get_suite_with_fitness(device, generation, individual_index)\n device.mark_work_stop()\n\n individual: Individual = getattr(creator, Individual.get_name())(suite)\n individual.fitness.values = fitness\n\n finish_time = time.time()\n elapsed_time = finish_time - start_time\n individual.creation_finish_timestamp = finish_time\n individual.creation_elapsed_time = elapsed_time\n\n individual.evaluation_finish_timestamp = finish_time\n # the following will indicate that generation and evaluation occurred at the same time\n individual.evaluation_elapsed_time = 0\n\n individual.index_in_generation = individual_index\n individual.generation = generation\n\n return individual\n\n def get_suite_with_fitness(self, device: Device, generation: int, individual_index: int) -> Tuple[TestSuite, Tuple[float, float, int]]:\n self.package_name: str = RequiredFeature('compiled_package_name').request()\n self.result_dir: str = RequiredFeature('result_dir').request()\n\n test_suite = []\n lengths = []\n unique_crashes: Set[str] = set()\n scripts_crash_status: Dict[str, bool] = {}\n\n self.there_is_coverage = False\n self.set_coverage_paths(device, generation, individual_index)\n adb.shell_command(device, f\"am force-stop {self.package_name}\")\n\n # run scripts\n for test_case_index in range(0, settings.SUITE_SIZE):\n script_path = self.get_path_for_test_case(generation, individual_index, test_case_index)\n test_content = self.generate_test_and_coverage(device, script_path, generation, individual_index,\n test_case_index, unique_crashes, scripts_crash_status)\n\n test_suite.append(test_content)\n if scripts_crash_status[script_path]:\n lengths.append(len(test_content))\n\n # collect fitness data\n coverage = 0\n if self.there_is_coverage:\n coverage = self.get_coverage(device)\n\n crashes = len(unique_crashes)\n\n length = sys.maxsize\n if len(lengths) > 0:\n length = numpy.mean(lengths)\n\n return test_suite, (coverage, length, crashes)\n\n def generate_test_and_coverage(self,\n device: Device,\n script_path: str,\n generation: int,\n individual_index: int,\n test_case_index: int,\n unique_crashes: Set[str],\n scripts_crash_status: Dict[str, bool]\n ) -> TestCase:\n\n # clear app's data and state\n output, errors, result_code = adb.shell_command(device, f\"pm clear {self.package_name}\")\n if result_code != 0:\n adb.log_evaluation_result(device, self.result_dir, script_path, False)\n raise Exception(f\"Unable to clear package for script_path {script_path} in device: {device.name}\")\n\n # generate test case\n test_runner: TestRunner = RequiredFeature('test_runner').request()\n test_content = test_runner.generate(device, self.package_name, script_path)\n\n self.dump_script_coverage(device, script_path, generation, individual_index, test_case_index, unique_crashes,\n scripts_crash_status)\n\n return test_content\n","repo_name":"FlyingPumba/evolutiz","sub_path":"generation/individual_with_coverage_generator.py","file_name":"individual_with_coverage_generator.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"72788858296","text":"import sys\nsys.stdin=open('input.txt', 'r')\n\nfor test_case in range(int(input())):\n arr = list(map(int, input().split()))\n s = 0\n for x in arr:\n if x%2:\n s += x\n print(f'#{test_case+1} {s}')","repo_name":"helloddkd/TIL","sub_path":"algorithm/00input/view4.py","file_name":"view4.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28295924705","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef superReducedString(s):\n changed=True\n while(changed and len(s)!=0):\n changed=False\n for i in range(len(s)-1):\n if(s[i]==s[i+1]):\n changed=True \n s=s[:i]+s[i+2:]\n break\n if(s==\"\"):\n return \"Empty String\"\n return s\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n s = input()\n\n result = superReducedString(s)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"swathichatrathi/ELITE-DAY-TO-DAY-WORK","sub_path":"11-02-23/HACKERRANK REGULAR108 CONTEST/SUPER REDUCED STRING.py","file_name":"SUPER REDUCED STRING.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3683764500","text":"from itertools import count\nfrom django.views.generic import CreateView\nfrom django.urls import reverse\nfrom .models import Result\nfrom .forms import ResultModelForm\nfrom django.shortcuts import render\nfrom django.db.models import Q\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport matplotlib.font_manager as fm\n\n#폰트 이름 출력\n# font_location = './static/fonts/NanumSquareRoundB.ttf' #font 경로 설정\n# font_name = fm.FontProperties(fname=font_location).get_name()\n# print(\"==========================\")\n# print(font_name)\n# print(\"==========================\")\n\n#폰트 매니저에 새로운 폰트 추가\nfont_dirs = ['./static/fonts/']\nfont_files = fm.findSystemFonts(fontpaths=font_dirs)\nfor font_file in font_files:\n fm.fontManager.addfont(font_file)\n\n#폰트 변경\nplt.rcParams['font.family'] = 'NanumSquareRound'\n\n\nclass ResultCreateView(CreateView):\n model = Result\n template_name = 'index.html'\n form_class = ResultModelForm\n\n def get_success_url(self):\n return reverse('result') #두 번째 html 파일 이름으로 수정\n\n\ndef result(request):\n\n start = 14\n student_number_count = {}\n for i in range(9):\n tempSet = Result.objects.filter(student_number=str(start))\n student_number_count[start] = tempSet.count()\n start += 1\n\n # value 값의 역순으로 정렬 후 (key, value)튜플을 원소로 갖는 리스트 생성\n tuple_list = sorted(student_number_count.items(),\n key=lambda x: x[1], reverse=True)\n print(tuple_list)\n\n top5_tuple_list = [] # 최상위 5개 아이템 선정\n for i in range(5):\n top5_tuple_list.append(tuple_list[i])\n\n top5_dict = dict(top5_tuple_list)\n ratio = top5_dict.values() # 비율에 value 리스트 입력\n temp = []\n for t in top5_dict.keys():\n temp.append(str(t)+\"학번\")\n labels = temp # label에 ['key + 학번'] 형태의 리스트 입력\n colors = ['#ff9999', '#ffc000', '#8fd9b6', '#d395d0', '#7AD1FF']\n wedgeprops = {'width': 0.7, 'edgecolor': 'w', 'linewidth': 5}\n\n plt.pie(ratio, labels=labels, autopct='%.1f%%', startangle=260,\n counterclock=False, colors=colors, wedgeprops=wedgeprops)\n plt.savefig('./static/img/top5_studentnumber.png', transparent=True)\n plt.clf()\n ###############################################################################################################################\n\n # 가장 많이 참여한 학과 도출\n major_count = []\n # 공대\n major_count.append(['computer', Result.objects.filter(Q(major=\"컴퓨터공학부\") | Q(\n major=\"컴공\") | Q(major=\"컴퓨터전자시스템공학부\") | Q(major=\"컴전\") | Q(major=\"컴퓨터공학\")).count()])\n major_count.append(['information', Result.objects.filter(\n Q(major=\"정보통신공학과\") | Q(major=\"정통\") | Q(major=\"정보통신공학\")).count()])\n major_count.append(['electronic', Result.objects.filter(\n Q(major=\"전자공학과\") | Q(major=\"전자\") | Q(major=\"전자공학\")).count()])\n major_count.append(['industry', Result.objects.filter(Q(major=\"산업경영공학과\") | Q(\n major=\"산업경영공학\") | Q(major=\"산업경영\") | Q(major=\"산경공\")).count()])\n # 글스산\n major_count.append(['global_sport', Result.objects.filter(Q(major=\"글로벌스포츠산업\") | Q(major=\"글로벌스포츠산업학과\") | Q(\n major=\"글로벌스포츠산업학부\") | Q(major=\"글스산\") | Q(major=\"국제스포츠레저학과\") | Q(major=\"국스레\") | Q(major=\"국제스포츠레저\")).count()])\n # 통번역\n major_count.append(['english', Result.objects.filter(\n Q(major=\"영어통번역학부\") | Q(major=\"영어통번역\") | Q(major=\"영통\")).count()])\n major_count.append(['germany', Result.objects.filter(\n Q(major=\"독일어통번역학과\") | Q(major=\"독일어통번역\") | Q(major=\"독통\")).count()])\n major_count.append(['spain', Result.objects.filter(\n Q(major=\"스페인어통번역학과\") | Q(major=\"스페인어통번역\") | Q(major=\"영통\")).count()])\n major_count.append(['italy', Result.objects.filter(\n Q(major=\"이탈리아어통번역학과\") | Q(major=\"이탈리아어통번역\") | Q(major=\"이통\")).count()])\n major_count.append(['china', Result.objects.filter(\n Q(major=\"중국어통번역학과\") | Q(major=\"중국어통번역\") | Q(major=\"중통\")).count()])\n major_count.append(['japan', Result.objects.filter(\n Q(major=\"일본어통번역학과\") | Q(major=\"일본어통번역\") | Q(major=\"일통\")).count()])\n major_count.append(['arab', Result.objects.filter(\n Q(major=\"아랍어통번역학과\") | Q(major=\"아랍어통번역\") | Q(major=\"아통\")).count()])\n major_count.append(['indonesia', Result.objects.filter(\n Q(major=\"말레이·인도네시아어통번역학과\") | Q(major=\"말레이·인도네시아어통번역학\") | Q(major=\"마통\")).count()])\n major_count.append(['thai', Result.objects.filter(\n Q(major=\"태국어통번역학과\") | Q(major=\"태국어통번역\") | Q(major=\"태통\")).count()])\n # 인문대\n major_count.append(['philosophy', Result.objects.filter(\n Q(major=\"쳘학과\") | Q(major=\"철학\")).count()])\n major_count.append(['history', Result.objects.filter(\n Q(major=\"사학과\") | Q(major=\"사학\")).count()])\n major_count.append(['language', Result.objects.filter(\n Q(major=\"언어인지과학과\") | Q(major=\"언어인지과학\")).count()])\n major_count.append(['knowledge', Result.objects.filter(\n Q(major=\"지식콘텐츠학부\") | Q(major=\"지식콘텐츠\") | Q(major=\"지콘\")).count()])\n # 동유럽\n major_count.append(['poland', Result.objects.filter(\n Q(major=\"폴란드어과\") | Q(major=\"폴란드\")).count()])\n major_count.append(['rumania', Result.objects.filter(\n Q(major=\"루마니아어과\") | Q(major=\"루마니아\")).count()])\n major_count.append(['cheko', Result.objects.filter(Q(major=\"체코슬로바키아어과\") | Q(\n major=\"체코슬로바키아어과\") | Q(major=\"체코어과\"), Q(major=\"체코\")).count()])\n major_count.append(['secro', Result.objects.filter(\n Q(major=\"세르비아크로아티아어과\") | Q(major=\"세르비아크로아티아어\") | Q(major=\"세크\")).count()])\n major_count.append(['ukraine', Result.objects.filter(\n Q(major=\"우크라이나어과\") | Q(major=\"우크라이나어\")).count()])\n # 국지대\n major_count.append(['france', Result.objects.filter(\n Q(major=\"프랑스\") | Q(major=\"프랑스학과\")).count()])\n major_count.append(['brazil', Result.objects.filter(\n Q(major=\"브라질\") | Q(major=\"브라질학과\")).count()])\n major_count.append(['greece', Result.objects.filter(Q(major=\"그리스불가리아학과\") | Q(\n major=\"그리스불가리아\") | Q(major=\"그불\") | Q(major=\"그불과\")).count()])\n major_count.append(['indo', Result.objects.filter(\n Q(major=\"인도\") | Q(major=\"인도학과\")).count()])\n major_count.append(['asia', Result.objects.filter(\n Q(major=\"중앙아시아\") | Q(major=\"중앙아시아학과\") | Q(major=\"앙과\")).count()])\n major_count.append(['africa', Result.objects.filter(Q(major=\"아프리카학부\") | Q(major=\"아프리카학과\") | Q(major='아카') | Q(\n major=\"동아프리카\") | Q(major=\"서아프리카\") | Q(major=\"남아프리카\") | Q(major=\"동아프리카학과\") | Q(major=\"서아프리카학과\") | Q(major=\"남아프리카학과\")).count()])\n major_count.append(['russia', Result.objects.filter(\n Q(major=\"러시아\") | Q(major=\"러시아학과\")).count()])\n major_count.append(['korea', Result.objects.filter(\n Q(major=\"한국\") | Q(major=\"한국학과\")).count()])\n # 경상대\n major_count.append(['gukgum', Result.objects.filter(\n Q(major=\"국제금융학과\") | Q(major=\"국제금융\") | Q(major=\"국금\")).count()])\n major_count.append(['gbt', Result.objects.filter(\n Q(major=\"GBT학부\") | Q(major=\"쥐비티\") | Q(major=\"지비티\")).count()])\n # 자연대\n major_count.append(['math', Result.objects.filter(\n Q(major=\"수학과\") | Q(major=\"수학\")).count()])\n major_count.append(['statistic', Result.objects.filter(\n Q(major=\"통계학과\") | Q(major=\"통계\") | Q(major=\"통계학\")).count()])\n major_count.append(['elec_physic', Result.objects.filter(\n Q(major=\"전자물리학과\") | Q(major=\"전물\") | Q(major=\"전자물리\")).count()])\n major_count.append(['envi', Result.objects.filter(\n Q(major=\"환경학과\") | Q(major=\"환경\")).count()])\n major_count.append(['bio_engineer', Result.objects.filter(\n Q(major=\"생명공학과\") | Q(major=\"생공\") | Q(major=\"생명공학\")).count()])\n major_count.append(['chemical', Result.objects.filter(\n Q(major=\"화학과\") | Q(major=\"화학\")).count()])\n # 융인대\n major_count.append(['yoong_in', Result.objects.filter(\n Q(major=\"융합인재대학\") | Q(major=\"융인대\") | Q(major=\"융합인재\") | Q(major=\"융인\")).count()])\n # 바메공\n major_count.append(['bamegong', Result.objects.filter(Q(major=\"바이오메디컬공학부\") | Q(\n major=\"바이오메디컬공학과\") | Q(major=\"바메공\") | Q(major=\"바메공학과\") | Q(major=\"바메공학부\")).count()])\n\n major_count.sort(key=lambda x: -x[1])\n for k in major_count:\n print(k)\n ratio_2 = []\n labels_2 = []\n for i in range(0, 5):\n ratio_2.append(major_count[i][1])\n temp = decide_label(major_count[i][0])\n labels_2.append(temp)\n colors = ['#ff9999', '#ffc000', '#8fd9b6', '#d395d0', '#7AD1FF']\n wedgeprops = {'width': 0.7, 'edgecolor': 'w', 'linewidth': 5}\n plt.pie(ratio_2, labels=labels_2, autopct='%.1f%%', startangle=260,\n counterclock=False, colors=colors, wedgeprops=wedgeprops)\n plt.savefig('./static/img/top5_major.png', transparent=True)\n plt.clf()\n return render(request, 'result.html')\n\n\ndef decide_label(name):\n if name == 'computer':\n return \"컴퓨터공학과\"\n elif name == 'information':\n return \"정보통신공학과\"\n elif name == 'electronic':\n return \"전자공학과\"\n elif name == 'industry':\n return \"산업경영공학과\"\n elif name == 'global_sport':\n return \"글로벌스포츠산업학부\"\n elif name == 'english':\n return \"영어통번역학과\"\n elif name == 'germany':\n return \"독일어통번역학과\"\n elif name == 'italy':\n return \"이탈리아어통번역학과\"\n elif name == 'china':\n return \"중국어통번역학과\"\n elif name == 'japan':\n return \"일본어통번역학과\"\n elif name == 'arab':\n return \"아랍어통번역학과\"\n elif name == 'indonesia':\n return \"말레이인도네시아어통번역학과\"\n elif name == 'thai':\n return \"태국어통번역학과\"\n elif name == 'philosophy':\n return \"철학과\"\n elif name == 'history':\n return \"사학과\"\n elif name == 'language':\n return \"언어인지과학과\"\n elif name == 'knowledge':\n return \"지식콘텐츠학부\"\n elif name == 'poland':\n return \"폴란드어과\"\n elif name == 'rumania':\n return \"루마니아어과\"\n elif name == 'cheko':\n return \"체코슬로바키아어과\"\n elif name == 'secro':\n return \"세르비아크로아티아어과\"\n elif name == 'france':\n return \"프랑스학과\"\n elif name == 'brazil':\n return \"브라질학과\"\n elif name == 'greece':\n return \"그리스·불가리아학과\"\n elif name == 'indo':\n return \"인도학과\"\n elif name == 'asia':\n return \"중앙아시아학과\"\n elif name == 'africa':\n return \"아프리카학부\"\n elif name == 'russia':\n return \"러시아학과\"\n elif name == 'korea':\n return \"한국학과\"\n elif name == 'gukgum':\n return \"국제금융학과\"\n elif name == 'gbt':\n return \"GBT학부\"\n elif name == 'math':\n return \"수학과\"\n elif name == 'statistic':\n return \"통계학과\"\n elif name == 'elec_physic':\n return \"전자물리학과\"\n elif name == 'envi':\n return \"환경학과\"\n elif name == 'bio_engineer':\n return \"생명공학과\"\n elif name == 'chemical':\n return \"화학과\"\n elif name == 'yoong_in':\n return \"융합인재대학\"\n elif name == 'bamegong':\n return \"바이오메디컬공학부\"\n","repo_name":"hufslion10th/team4_miniproject","sub_path":"global_forest/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13783226928","text":"import datetime\nfrom datetime import date\nimport IEXClass as IEX\n\n\ndef key():\n api_key = input('Paste your API from IEX, there is no user input validation, please copy your API from IEX to the terminal: ')\n return api_key.strip()\n\ndef ticker():\n\n\n while True:\n ticker = input('Enter a ticker (Stock Symbol - Example TSLA is Tesla) between 1 to 5 letters: ')\n if len(ticker) > 0 and len(ticker) <= 6:\n break\n if ticker.isalpha() == False:\n print('Please use letters ')\n else:\n print('ticker is usually 1 to 4 letter name i.e TSLA ')\n\n return ticker.upper()\n\ndef user_input_date():\n \"\"\"\n :return: The Date that will be input in the class\n \"\"\"\n print(\"Please enter a DATE for your analysis, a time series will be created starting with that date\")\n print()\n print(\"My recommendation is that you don't use more than 5 years of data, and that you use more than 1 year of data\")\n\n ##Ask for user input, please note that you should only use 10 years, but feel free to modify\n current_year = date.today().year\n\n while True:\n try:\n year = int(input('Enter a year (4 digits, i.e 2015): '))\n if year >= current_year - 10 and year <= current_year - 2:\n break\n else:\n print(f\"Honestly, you should not be using SMA for more than 10 years,also maximum year is the current {current_year} \")\n except ValueError:\n print(\"Please ensure that you type a number\")\n\n while True:\n try:\n month = int(input('Enter a month - Remember that the year has 12 months: '))\n if month <= 12 and month > 0:\n break\n else:\n print(f\"Please use a number between 1 and 12\")\n except ValueError:\n print(\"Please ensure that you type a number\")\n\n while True:\n try:\n day = int(input('Enter a day: '))\n if day<32 and day>0:\n break\n else:\n print(f\"Really, a month has a minimum of 28 days and maximum 31 days\")\n except ValueError:\n print(\"Please ensure that you type a number\")\n\n date_value = datetime.date(year,month,day)\n\n return date_value\n\ndef menu():\n print(\"\\n### Options Menu for Backtesting SMA Strategy ####\")\n print(\"1.- Would you like to Plot the Backtesting strategy for 2 SMA's (42 & 252), 4 plots will be displayed?\")\n print(\"2.- Would you like to save your file to a HDF5 file?\\n\")\n\n\n while True:\n try:\n menu_option = int(input(\"Please enter a number as per the menu above: \"))\n if menu_option > 0 and menu_option < 3:\n break\n if menu_option.isalpha():\n print(\"please enter a number\")\n\n except:\n print(\"please enter a number\")\n\n return menu_option\n\n\ndef user_info(api_key ,ticker,date_value, menu_option):\n\n class_list = [attribute for attribute in dir(IEX.IEXfin(api_key,date_value,ticker)) if callable(getattr(IEX.IEXfin(api_key,date_value,ticker),attribute)) and attribute.startswith('__') is False]\n\n\n option_dict = {}\n count = 0\n for classes in class_list:\n option_dict[count] = classes\n count += 1\n\n init_method = IEX.IEXfin(api_key,date_value,ticker)\n methods = getattr(init_method, option_dict.get(menu_option))\n return methods()\n\n\nif __name__ == \"__main__\":\n\n api_key = key()\n ticker = ticker()\n date_value = user_input_date()\n menu_option = menu()\n user_info(api_key ,ticker,date_value,menu_option)\n\n\n\n\n\n\n","repo_name":"colina83/IEX_Class","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14845419162","text":"print('Fundo A: valor mínimo: 50 reais, sem tempo mínimo, rende 10% ao ano.')\nprint('Fundo B: valor mínimo: 100 reais, tempo mínimo: 1 ano, rende 12% ao ano.')\nprint('Fundo C: valor mínimo: 500 reais, tempo mínimo: 2 anos, rende 13% ao ano.')\nprint('Fundo D: valor mínimo: 1000 reais, tempo mínimo: 3 anos, rende 15% ao ano.')\nprint('Fundo E: valor mínimo: 3000 reais, tempo mínimo: 5 anos, rende 18% ao ano.')\n\naplicacao = input('Escolha sua aplicação: ')\nvalor = float(input('Digite o valor para investir: '))\ntempo = int(input('Digite a duração em anos da aplicação: '))\n\n# Caso uma aplicação válida e com as regras atendidas seja selecionada, ajustamos seus juros\nif aplicacao == 'A' and valor >= 50:\n juros = 1.1\nelif aplicacao == 'B' and valor >= 100 and tempo >= 1:\n juros = 1.2\nelif aplicacao == 'C' and valor >= 500 and tempo >= 2:\n juros = 1.3\nelif aplicacao == 'D' and valor >= 1000 and tempo >= 3:\n juros = 1.5\nelif aplicacao == 'E' and valor >= 3000 and tempo >= 5:\n juros = 1.8\n# aplicação inválida ou regras desrespeitadas, zeramos o juro\nelse:\n juros = 0\n\n# juros = 0 representa falha, > 0 representa sucesso e podemos fazer o cálculo\nif juros > 0:\n montante = valor*(juros)**tempo\n print(f'Valor a sacar: R$ {montante:.2f}')\nelse:\n print('Não foi possível realizar a aplicação.')\n","repo_name":"gabriela-gnsales/coding-tank-python","sub_path":"resolucoes-professor/d7f7b6b5-7fc9-42c7-85a6-f60a0daf6c71.py","file_name":"d7f7b6b5-7fc9-42c7-85a6-f60a0daf6c71.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42451466061","text":"import os, sys\n\n\n#path = os.path.dirname(__file__)\n#sys.path.append(path + '/../model')\n#sys.path.append(path + '/../algos')\n#sys.path.append(path + '/../extractions')\n#sys.path.append(path + '/../methods')\n#sys.path.append(path + '/../utils')\n\n\ndef filter_patterns(patterns, min_token, max_token, min_slot, max_slot):\n \"\"\"\n\n :param patterns:\n :param min_token:\n :param max_token:\n :param min_slot:\n :param max_slot:\n :return:\n \"\"\"\n filtered = {}\n for i, pats in patterns.items():\n filtered[i] = []\n for p in pats:\n toks = len(p.split())\n slots = p.count('$')\n if toks > max_token or toks < min_token:\n continue\n if slots > max_slot or slots < min_slot:\n continue\n filtered[i].append(p)\n\n return filtered\n\n\ndef filter_mentions(mentions, min_token, max_token):\n \"\"\"\n\n :param mentions:\n :param min_token:\n :param max_token:\n :return:\n \"\"\"\n filtered = {}\n for i, ments in mentions.items():\n filtered[i] = []\n for m in ments:\n toks = len(m.split())\n if toks > max_token or toks < min_token:\n continue\n filtered[i].append(f'{m}\\n')\n return filtered\n\n\n","repo_name":"HugoBoulanger/Pattern-Filling-Generation","sub_path":"src/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"21866999557","text":"#!/usr/bin/env python3\n# Raphaël Teysseyre, 2022\n\nmost = [0, 0, 0]\ncurr = 0\n\nwith open('1_input') as fd:\n for line in fd:\n try:\n curr = curr + int(line)\n except ValueError:\n if curr > most[0]:\n most[2] = most[1]\n most[1] = most[0]\n most[0] = curr\n elif curr > most[1]:\n most[2] = most[1]\n most[1] = curr\n elif curr > most[0]:\n most[0] = curr\n \n curr = 0\n\n# Part 1\nprint(most[0])\n\n# Part 2\nprint(sum(most))\n","repo_name":"rteysseyre/aoc","sub_path":"2022/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34852138057","text":"import pickle\ndef write():\n f=open(\"studentDetails.dat\",\"wb\")\n while True:\n roll=int(input(\"enter the roll no:-\"))\n name=input(\"enter the name:-\")\n data=[roll,name]\n pickle.dump(data,f)\n choice=input(\"more?(Y/N)\")\n if choice in \"Nn\":\n break\n f.close()\ndef read():\n f=open(\"studentDetails.dat\",\"rb\")\n try:\n while True:\n r=pickle.load(f)\n print(r)\n except EOFError:\n f,close()\ndef search():\n found=0\n rollno=int(input(\"enter the rollno whose name you want to display:-\"))\n f=open(\"studentDetails.dat\",\"rb\")\n try:\n while True:\n r=pickle.load(f)\n if r[0]==rollno:\n print(r[i])\n found=1\n break\n except EOFError:\n f.close()\n if found==0:\n print(\"sorry record not found\")\nwrite()\nsearch()","repo_name":"JianreiliuThaimei/binaryfile","sub_path":"create a student details.py","file_name":"create a student details.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27650101611","text":"from .worker import *\n\n\nasync def up(event):\n if not event.is_private:\n return\n stt = dt.now()\n ed = dt.now()\n v = ts(int((ed - uptime).seconds) * 1000)\n ms = (ed - stt).microseconds / 1000\n p = f\"📊Pɪɴɢ = {ms}ms\"\n await event.reply(v + \"\\n\" + p)\n\n\nasync def start(event):\n await event.reply(\n f\"Hi `{event.sender.first_name}`\\nThis is A Compressor Bot Which Can Encode Videos.\\nReduce Size of Videos With Negligible Quality Change\\nAlso you can Generate Screenshots too.\",\n buttons=[\n [Button.inline(\"Checkout Help Menu 📑\", data=\"ihelp\")],\n [\n Button.url(\"Aɴιмє Grσυρ 💬\", url=\"t.me/AnimeListChat\"),\n Button.url(\"Anime Channel 🔥\", url=\"t.me/AnimeListUp\"),\n ],\n ],\n )\n\n\nasync def help(event):\n await event.reply(\n \"**🤖 A Quality Compressor Bot**\\n\\n • This Bot Compress Videos With Negligible Quality Change.\\n • Generate Sample Compressed Video\\n • Easy to Use\\n • Due to Quality Settings Bot Takes Time To Compress.\\n • So Be patience Nd Send videos One By One After Completing.\\n • Dont Spam Bot.\\n\\nJust Forward Video To Get Options\"\n )\n\n\nasync def ihelp(event):\n await event.edit(\n \"**🤖 A Quality Compressor Bot**\\n\\n • This Bot Compress Videos With Negligible Quality Change.\\n • Generate Sample Compressed Video\\n • Screenshots Too\\n • Easy to Use\\n • Due to Quality Settings Bot Takes Time To Compress.\\n • So Be patience Nd Send videos One By One After Completing.\\n • Dont Spam Bot.\\n\\n • Just Forward Video To Get Options\",\n buttons=[Button.inline(\"BACK\", data=\"beck\")],\n )\n\n\nasync def beck(event):\n await event.edit(\n f\"Hi `{event.sender.first_name}`\\n • This is A CompressorQueue Which Can Encode Videos.\\n • Reduce Size of Videos With Negligible Quality Change\\n • You can Generate Screenshots too.\",\n buttons=[\n [Button.inline(\"Checkout Help Menu 📑\", data=\"ihelp\")],\n [\n Button.url(\"Aɴιмє Grσυρ 💬\", url=\"t.me/AnimeListChat\"),\n Button.url(\"Anime Channel 🔥\", url=\"t.me/AnimeListUp\"),\n ],\n ],\n )\n","repo_name":"AliAryanTech/Encoding-Bot","sub_path":"bot/stuff.py","file_name":"stuff.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"10038165891","text":"from bs4 import BeautifulSoup as soup\nfrom copy import copy\nfrom collections import Counter\nimport requests\nimport json\nimport sys\n\nfrom models import Set, Part, Listing\nfrom setlist import create_setlist\n\n\ndef get_part_listings(qty, part_id, element_id):\n if element_id == -1:\n return [Listing(part_id, element_id, qty, 0, '', '')]\n listings = []\n url = 'http://www.bricklink.com/search.asp'\n params = {\n 'viewFrom': 'sa',\n 'qMin': qty,\n 'shipCountryID': 'US',\n 'sellerCountryID': 'US',\n 'moneyTypeID': 1,\n 'q': element_id,\n 'sellerLoc': 'C',\n 'searchSort': 'P',\n 'sz': 10\n }\n html = requests.get(url, params=params).text\n results = soup(html, 'html.parser').findAll('td', {'valign' : 'TOP'})\n if len(results) == 0:\n listings.append(Listing(part_id, element_id, qty, 0, '', ''))\n for r in results:\n link = r.find('a')\n price = r.findAll('b')[1].text\n price = float(price.replace('US $', ''))\n listing = Listing(part_id, element_id, qty, price,\n link.text, link['href'])\n listings.append(listing)\n return listings\n\n\ndef optimize_bricklink(lego_set):\n stores = []\n pieces = []\n purchase = []\n for part in lego_set.parts:\n listings = get_part_listings(int(part.qty), part.part_id,\n part.element_id)\n if len(listings) > 0:\n print(part.element_id)\n stores = stores + [o.name for o in listings]\n pieces.append(listings)\n best_stores = Counter(stores)\n for store, val in best_stores.most_common():\n temp_pieces = copy(pieces)\n for piece in temp_pieces:\n listing = [x for x in piece if x.name == store]\n if len(listing) > 0:\n purchase.append(listing[0])\n pieces.remove(piece)\n return purchase\n\n\ndef output_purchase_to_csv(lego_set, purchase, set_id):\n with open(lego_set.bricklink_file, 'w+') as f:\n f.write('part_id,element_id,qty,price,name,link\\n')\n for p in purchase:\n f.write(str(p))\n\n\nif __name__ == '__main__':\n try:\n set_id = sys.argv[1]\n except:\n set_id = '75102-1'\n lego_set = create_setlist(set_id)\n to_buy = optimize_bricklink(lego_set)\n output_purchase_to_csv(lego_set, to_buy, set_id)\n ","repo_name":"Brobin/bricklink-pro","sub_path":"bricklink/bricklink.py","file_name":"bricklink.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"22"} +{"seq_id":"33669867005","text":"import os\nimport zmq\nimport logging\n\nclass MutexPolicy:\n def __init__(self, address=\"127.0.0.1\"):\n \"Connects to the mutex policy daemon running at the specified address\"\n logging.info(\"Connecting to mutex policy daemon...\")\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.REQ)\n self.socket.connect(f\"tcp://{address}:5555\")\n logging.info(\"Connected to mutex policy daemon\")\n\n def open(self, mutex_name):\n \"\"\"Opens a mutex identified by an unique name.\n The caller must eventually call `close()` on the returned mutex to\n free up the resources used for it by the daemon.\n \"\"\"\n logging.info(\"Opening mutex...\")\n\n msg = f\"{os.getpid()} O {mutex_name}\"\n self.socket.send_string(msg)\n\n msg_rec = self.socket.recv_string()\n\n if msg_rec != \"Ok\":\n raise Exception(f\"Error: {msg_rec}\")\n\n logging.info(\"Mutex successfully opened\")\n return Mutex(mutex_name, self.socket)\n\n def lst(self):\n \"\"\"Returns a list representing all the mutexes\n currently open in the system.\"\"\"\n logging.info(\"Returning mutex list...\")\n self.socket.send_string(\"list\")\n logging.info(\"Returned mutex list successfully\")\n return self.socket.recv_string()\n\nclass Mutex:\n def __init__(self, name, socket):\n self.name = name\n self.socket = socket\n\n def close(self):\n \"Close the given mutex \"\n logging.info(\"Closing mutex...\")\n\n msg = f\"{os.getpid()} C {self.name}\"\n self.socket.send_string(msg)\n\n msg_rec = self.socket.recv_string()\n\n if msg_rec != \"Ok\":\n raise Exception(f\"Error: {msg_rec}\")\n logging.info(\"Mutex successfully closed\")\n\n def lock(self):\n \"Lock the mutex or blocks until we are able to lock it.\"\n logging.info(\"Locking mutex...\")\n\n msg = f\"{os.getpid()} L {self.name}\"\n\n self.socket.send_string(msg)\n msg_rec = self.socket.recv_string()\n\n if msg_rec != \"Ok\":\n raise Exception(f\"Error: {msg_rec}\")\n logging.info(\"Mutex successfully locked\")\n\n def unlock(self):\n \"Unlocks the mutex and allows the next process to take it.\"\n logging.info(\"Unlocking mutex...\")\n\n msg = f\"{os.getpid()} U {self.name}\"\n\n self.socket.send_string(msg)\n msg_rec = self.socket.recv_string()\n\n if msg_rec != \"Ok\":\n raise Exception(f\"Error: {msg_rec}\")\n logging.info(\"Mutex successfully unlocked\")","repo_name":"GabrielMajeri/MutexPolicy","sub_path":"demo/mpolicy.py","file_name":"mpolicy.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"16223498005","text":"import time\n\nimport requests\nimport datetime\n\nAPI_URL = \"http://localhost:9292/\"\nSEARCH_TERM = \"ych\"\n\n####\n# This is an experiment to observe FA search. This script checks the FA search for SEARCH_TERM once every minute, and\n# records which submissions are added, and deleted, each minute. It logs this information to a file, along with whether\n# the length of the results list changes.\n###\n# Results\n# So, using this script, I have found that:\n# - FA search updates its index and adds new results every 5 minutes.\n# - The length of the returned list can drop between these re-indexes, if submissions are removed.\n# - - List will return to 72 elements at the next re-index\n# - Between ~08:15 and 08:45 (BST), the search results become erratic.\n# - - The number of results on a page drops dramatically at about 08:15, from specified 72 to about 20-30\n# - - During this time, the results are all from 24 hours ago\n# - - During this time, the number of results steadily increases, reaching maybe 30-40 before springing back to 72\n###\n\n\ndef log(line):\n line = f\"{datetime.datetime.now().isoformat()}: {line}\"\n with open(\"log.txt\", \"a+\") as f:\n f.write(line+\"\\n\")\n print(line)\n\n\nlast_set = None\nwhile True:\n time.sleep(60)\n resp = requests.get(f\"{API_URL}/search.json?q={SEARCH_TERM}&perpage=72\")\n set_ids = set(resp.json())\n if last_set is None:\n log(f\"Starting watcher, first list: {set_ids}\")\n last_set = set_ids\n continue\n new = set_ids - last_set\n lost = last_set - set_ids\n if len(set_ids) != len(last_set):\n log(f\"Results length changed. Was {len(last_set)}, now {len(set_ids)}\")\n if len(new) != 0:\n log(f\"New results: {new}\")\n if len(lost) != 0:\n log(f\"Lost results: {lost}\")\n log(\"---\")\n last_set = set_ids\n","repo_name":"Deer-Spangle/FA-search-bot","sub_path":"experiments/fa-search-data-logger.py","file_name":"fa-search-data-logger.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"42272793485","text":"\n\nli={\n\n '2':'ABC',\n '3':'DEF',\n '4':'GHI',\n '5':'JKL',\n '6':'MNO',\n '7':'PQRS',\n '8':'TUV',\n '9':'WXYZ'\n}\nT=input()\ntotal=0\nfor i in T:\n for j,k in li.items():\n if i in k:\n total+=int(j)+1\nprint(total)","repo_name":"gkgg123/TIL","sub_path":"baekjoon/5622_call_dial.py","file_name":"5622_call_dial.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10043155671","text":"#problem with json data\n\nimport json\nwith open('/home/atul/simple.json' ,'r') as persondata:\n employee=json.load(persondata)\n print(employee)\n\n\n#The error is occured because of Your Json data in the form of like\n#\n# [\n# {\n# 'id': \"A001\",\n# 'name': \"Tom\",\n# 'math': 60,\n# 'physics': 66,\n# 'chemistry': 61\n# }\n# ]\n#\n# # Your Data is in the form of as like in double quates\n#\n# [\n# {\n# \"id\": \"A001\",\n# \"name\": \"Tom\",\n# \"math\": 60,\n# \"physics\": 66,\n# \"chemistry\": 61\n# }\n# ]","repo_name":"atulmane01/pythontraining","sub_path":"Day12/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73543858935","text":"import time\nimport paho.mqtt.client as mqtt\nimport ssl\nimport json\nimport _thread\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to AWS IoT: \" + str(rc))\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.tls_set(ca_certs='./rootCA.pem', certfile='./certificate.pem.crt', keyfile='./private.pem.key', tls_version=ssl.PROTOCOL_SSLv23)\nclient.tls_insecure_set(True)\nclient.connect(\"YOUR_IoT_ENDPOINT\", 8883, 60)\n\ndef publishData(txt):\n print(txt)\n ctr = 1\n while (True):\n msg = \"Testing\" + str(ctr)\n print(msg)\n client.publish(\"raspi/data\", payload=json.dumps({\"msg\": msg}), qos=0, retain=False)\n ctr = ctr + 1\n\n time.sleep(5)\n \n_thread.start_new_thread(publishData,(\"Spin-up new Thread...\",))\n\nclient.loop_forever()","repo_name":"CumulusCycles/AWS_IoT_demo","sub_path":"Pi_IoT/iot-test.py","file_name":"iot-test.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"72715871096","text":"# -*- coding: utf-8 -*-\n'''\n@author: acer4560g\n@file: remember_me.py\n@time: 2020/2/3 11:03\n@contact:python初学者(微信公众号)\n@vision:3.7.3 \n--------------------- \n'''\nimport sys\n\nprint('本程序在python3.7.3编译,运行时请注意python版本')\nprint('python当前版本:\\n' + sys.version)\nprint('--------------------------\\n')\nimport json\n\n# username=input('What is your name?')\n#\n# filename='username.json'\n# with open(filename,'w') as f_obj:\n# json.dump(username,f_obj)\n# print('We\\'ll remember you when you come back,'+username+'!')\n\n# def greet_user():\n# '''问候用户,并指出其姓名'''\n# #如果以前存储了用户名,就加载它\n# #否则,就表示用户输入用户名并存储它\n# filename='username.json'\n# try:\n# with open(filename)as f_obj:\n# username=json.load(f_obj)\n# except FileNotFoundError:\n# username=input('What is your name?')\n# with open(filename,'w') as f_obj:\n# json.dump(username,f_obj)\n# print('We\\'ll remember you when you come back,'+username+'!')\n# else:\n# print('Welcome back,'+username+'!')\n# greet_user()\n\ndef get_stored_username():\n '''如果存储了用户名,就获取它'''\n filename='username.json'\n try:\n with open(filename) as f_obj:\n username=json.load(f_obj)\n except FileNotFoundError:\n return None\n else:\n return username\ndef greet_user():\n '''问候用户,并指出其姓名'''\n username=get_stored_username()\n if username:\n print('Welcome back,'+username+'!')\n else:\n username=input('What is your name?')\n filename='username.json'\n with open(filename,'w') as f_obj:\n json.dump(username,f_obj)\n print('We\\'ll remember youo when you come back,'+ username+'!')\ngreet_user()","repo_name":"yue008/python-code","sub_path":"chapter10/remember_me.py","file_name":"remember_me.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15939140885","text":"from kivy.uix.screenmanager import Screen\nfrom kivy.uix.button import Button\nfrom functools import partial\n\nclass sc_BoatPlacer(Screen):\n\n ButtonGrid = []\n\n def onLoad(self):\n #GridLayout\n GL = self.ids.GL\n\n #clear all data\n \n self.ButtonGrid = []\n GL.children.clear()\n\n #generation of the buttons grid\n for i in range(0,10):\n ButtonRow = []\n for j in range(0,10):\n b = Button()\n b.name = str(i)+\":\"+str(j)\n b.font_size=\"20sp\"\n b.bind(on_release=self.b_onclick)\n # b.on_release= self.b_onclick()\n ButtonRow.append(b)\n GL.add_widget(b)\n self.ButtonGrid.append(ButtonRow)\n\n\n def b_onclick(self,*args):\n B = args[0]\n B.text = \"O\"\n\n def convertion(self,Fichier):\n file= open(Fichier, 'r')\n a=[]\n for line in file:\n a.append(line)\n\n for i in range(len(a)):\n a[i]=a[i].strip()\n print(a)\n\n file.close() #convertir le fichier txt en python, on obtient tout le tableau avec les * et les lettres\n\n map=[]\n\n for i in range(1,11):\n grenier=[]\n for j in range (1,11):\n if a[i][j] in 'tscp':\n grenier.append(1)\n self.ButtonGrid[i-1][j-1].text = \"O\"\n else:\n grenier.append(0) \n map.append(grenier)\n print(map)\n return map\n","repo_name":"FlorianLebecque/BattleShip","sub_path":"screens/Mclass/sc_BoatPlacer.py","file_name":"sc_BoatPlacer.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12343928898","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = tf.keras.datasets.mnist.load_data()\n(X_train, Y_train), (X_test, Y_test) = data\nX_train = X_train / 255\nX_test = X_test / 255\n\nmodel = tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')])\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nr = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=10)\n\nplt.plot(r.history['loss'], label='loss')\nplt.plot(r.history['val_loss'], label='val_loss')\nplt.legend()\nplt.show()\n\nplt.plot(r.history['accuracy'], label='acc')\nplt.plot(r.history['val_accuracy'], label='val_acc')\nplt.legend()\nplt.show()\n\nprint(model.evaluate(X_test, Y_test))\nmodel.save('mnist_ann.h5')\n","repo_name":"CommissarSilver/Udemy-s-Tensorflow-2.0-Course","sub_path":"MNIST ANN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23877949580","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nstudents = pd.read_excel('./excel/Students-012.xlsx', index_col='From')\nprint(students)\n\n# lable = students['From']\n# print(lable)\n# 由于数字比较特殊,此处只能使用方括号(虽然在表格中类型为字符串),字符则用students.Field\nstudents['2017'].plot.pie(fontsize=8)\n# plt.pie(students['2017'], labels=students['From']) # 使用这个时需要去掉读表时的索引\n\n# 方法一、使用排序再startangle进行按顺时针旋转\n# students['2017'].sort_values(ascending=True).plot.pie(fontsize=8, startangle=-270)\n\n# 方法二、不排序,调整一个counterclock参数\n# students['2017'].plot.pie(fontsize=8, counterclock=False, startangle=-270)\n\n# 优化饼图\n# plt.title('Source of International Students', fontsize=16, fontweight='bold')\nplt.title('Source of International Students', fontsize=16)\n# plt.ylabel('2017', fontsize=12, fontweight='bold')\nplt.ylabel('2017', fontsize=12)\nplt.show()\n","repo_name":"python-yc/pycharm_script","sub_path":"Pandas_study/p012.py","file_name":"p012.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73991620537","text":"import asyncio\nimport random\nimport json\nimport discord\n\nfrom jshbot import utilities, configurations, plugins, logger, data\nfrom jshbot.exceptions import BotException, ConfiguredBotException\nfrom jshbot.commands import (\n Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)\n\n__version__ = '0.1.0'\nCBException = ConfiguredBotException('Tag remote')\nuses_configuration = False\n\nDATA_VERSION = 1\nWEBHOOK_SET = set()\nTAG_CONVERTER = None\n\n@plugins.command_spawner\ndef get_commands(bot):\n return [Command(\n 'tagremote', subcommands=[\n SubCommand(doc='Gets the current remote session.', function=tagremote),\n SubCommand(\n Opt('start'),\n doc='Starts a sound tag remote session.',\n function=tagremote_start),\n SubCommand(\n Opt('stop'),\n doc='Stops the current sound tag remote session.',\n function=tagremote_stop),\n SubCommand(\n Opt('update'),\n doc='Provides a refreshed tag list. Updates can be '\n 'applied in the settings menu of the tag remote app.',\n function=tagremote_update)\n ],\n description='Call sound tags through your phone.',\n allow_direct=False\n )]\n\n\nasync def tagremote(bot, context):\n \"\"\"Gets the current session data as a link.\"\"\"\n session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)\n if not session_data:\n raise CBException(\n \"No session available.\\nStart one with `{}tagremote start`\".format(\n utilities.get_invoker(bot, guild=context.guild)))\n\n channel_id, session_code = session_data['channel'], session_data['session']\n voice_channel_id = session_data['voice_channel']\n channel_mention = data.get_channel(bot, channel_id, guild=context.guild).mention\n voice_channel_mention = data.get_channel(bot, voice_channel_id, guild=context.guild).mention\n description = 'The session code is:\\n`{}`\\nThe session is attached to {} and {}'.format(\n session_code, channel_mention, voice_channel_mention)\n return Response(embed=discord.Embed(\n title='Tap here on your phone to use the tag remote',\n url='https://jkchen2.github.io/tag-remote/#{}'.format(session_code),\n description=description))\n\n\ndef _get_tag_dictionary(bot, guild):\n \"\"\"Retrieves the tag dictionary of the server.\"\"\"\n if configurations.get(bot, 'tags.py', 'global_tags'):\n table_suffix = 'global'\n else:\n table_suffix = str(guild.id)\n tags_plugin = bot.plugins['tags.py']\n sound_bit = tags_plugin._get_flag_bits(['sound'])\n private_bit = tags_plugin._get_flag_bits(['private'])\n cursor = data.db_select(\n bot, from_arg='tags', table_suffix=table_suffix,\n where_arg='flags & %s = %s AND flags & %s = 0',\n input_args=[sound_bit, sound_bit, private_bit])\n raw_tag_list = cursor.fetchall() if cursor else []\n if not raw_tag_list:\n raise CBException(\"No sound tags available.\")\n tag_dictionary = {}\n for tag in raw_tag_list:\n tag_dictionary[tag.key] = {'name': tag.name, 'hits': tag.hits}\n return tag_dictionary\n\n\nasync def _upload_session_data(bot, channel, voice_channel, webhook, tag_dictionary):\n \"\"\"Uploads the tag dictionary and returns the session code.\"\"\"\n tag_data = utilities.get_text_as_file(json.dumps({\n 'version': DATA_VERSION,\n 'bot_id': str(bot.user.id),\n 'guild': str(channel.guild.id),\n 'guild_name': channel.guild.name,\n 'channel': str(channel.id),\n 'channel_name': channel.name,\n 'voice_channel': str(voice_channel.id),\n 'voice_channel_name': voice_channel.name,\n 'webhook': [str(webhook.id), webhook.token],\n 'tags': tag_dictionary\n }))\n url = await utilities.upload_to_discord(bot, tag_data, filename='remote_data', close=True)\n url_segments = [it[::-1] for it in url[::-1].split('/')[2:0:-1]]\n return '{}:{}'.format(*url_segments)\n\n\nasync def tagremote_start(bot, context):\n \"\"\"Starts a tag remote session.\"\"\"\n\n # Check for an existing session\n session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)\n if session_data:\n raise CBException(\"Session already exists.\")\n if not context.channel.permissions_for(context.guild.me).manage_webhooks:\n raise CBException(\"Missing the `Manage Webhooks` permission.\")\n\n # Retrieve and format tag data\n tag_dictionary = _get_tag_dictionary(bot, context.guild)\n\n # Check that the user is in an unblocked voice channel\n if not context.author.voice:\n raise CBException(\"You must be in a voice channel.\")\n voice_channel = context.author.voice.channel\n await utilities.join_and_ready(bot, voice_channel, is_mod=context.elevation >= 1)\n\n # Create webhook\n webhook = await context.channel.create_webhook(name='Tag Remote []')\n\n # Upload session data\n session_code = await _upload_session_data(\n bot, context.channel, voice_channel, webhook, tag_dictionary)\n\n # Track session data\n session_data = {\n 'webhook': webhook.id,\n 'channel': context.channel.id,\n 'voice_channel': voice_channel.id,\n 'session': session_code\n }\n data.add(bot, __name__, 'data', session_data, guild_id=context.guild.id)\n data.list_data_append(bot, __name__, 'webhooks', webhook.id, duplicates=False)\n WEBHOOK_SET.add(webhook.id)\n\n return await tagremote(bot, context)\n\n\nasync def tagremote_stop(bot, context):\n await _delete_session(bot, context.guild)\n return Response(content=\"The session has been stopped.\")\n\n\nasync def tagremote_update(bot, context):\n \"\"\"Renames the webhook with an updated tag list file.\"\"\"\n\n # Check for an existing session\n session_data = data.get(bot, __name__, 'data', guild_id=context.guild.id)\n if not session_data:\n raise CBException(\"No session available.\")\n channel = data.get_channel(bot, session_data['channel'])\n if not channel:\n await _delete_session(bot, context.guild)\n raise CBException(\"Failed to get the channel.\")\n voice_channel = data.get_channel(bot, session_data['voice_channel'])\n if not voice_channel:\n await _delete_session(bot, context.guild)\n raise CBException(\"Failed to get the voice channel.\")\n webhooks = await channel.webhooks()\n if not webhooks:\n await _delete_session(bot, context.guild)\n raise CBException(\"No webhooks available.\")\n for webhook in webhooks:\n if webhook.id == session_data['webhook']:\n break\n else:\n await _delete_session(bot, context.guild)\n raise CBException(\"Webhook not found.\")\n\n tag_dictionary = _get_tag_dictionary(bot, context.guild)\n session_code = await _upload_session_data(bot, channel, voice_channel, webhook, tag_dictionary)\n\n updated_code = session_code.split(':')[1]\n await webhook.edit(name='Tag Remote [{}]'.format(updated_code))\n\n return Response(\n content=\"Tag data refreshed. Update the remote on your phone via the options menu.\")\n\n\nasync def _delete_session(bot, guild):\n \"\"\"Deletes the session for the given guild.\"\"\"\n session_data = data.remove(bot, __name__, 'data', guild_id=guild.id, safe=True)\n if not session_data:\n raise CBException(\"Session does not exist.\")\n channel_id, webhook_id = session_data['channel'], session_data['webhook']\n channel = data.get_channel(bot, channel_id, safe=True)\n webhooks = await channel.webhooks()\n for webhook in webhooks:\n if webhook.id == webhook_id:\n await webhook.delete()\n break\n else:\n logger.warn('Webhook to delete (%s) not found!', webhook_id)\n try:\n WEBHOOK_SET.remove(webhook_id)\n except KeyError:\n logger.warn(\"Webhook not found in WEBHOOK_SET\")\n data.list_data_remove(bot, __name__, 'webhooks', value=webhook_id, safe=True)\n\n if guild.voice_client and guild.voice_client.channel.id == session_data['voice_channel']:\n await utilities.stop_audio(bot, guild)\n\n\n@plugins.permissions_spawner\ndef setup_permissions(bot):\n return { 'manage_webhooks': \"Allows tags to be called by webhook.\" }\n\n\n@plugins.listen_for('bot_on_ready_boot')\nasync def setup_globals(bot):\n global WEBHOOK_SET, TAG_CONVERTER\n TAG_CONVERTER = bot.plugins['tags.py'].TagConverter(\n apply_checks=True, voice_channel_bypass=True)\n WEBHOOK_SET = set(data.get(bot, __name__, 'webhooks', default=[]))\n\n\n@plugins.listen_for('on_message')\nasync def check_webhook_messages(bot, message):\n \"\"\"Reads webhook messages and calls tags if necessary.\"\"\"\n if message.author.id in WEBHOOK_SET:\n session_data = data.get(bot, __name__, 'data', guild_id=message.guild.id)\n voice_channel = data.get_channel(bot, session_data['voice_channel'], guild=message.guild)\n\n # Ignore if nobody is in the channel\n if not [it for it in voice_channel.members if not it.bot]:\n pass\n\n # Retrieve tag\n elif message.content.startswith('[Retrieve]'):\n tag_name = message.content[10:].strip()\n try:\n tag = TAG_CONVERTER(bot, message, tag_name, channel_bypass=voice_channel)\n except BotException as e:\n logger.warn(\"Failed to retrieve tag: %s\", e)\n else:\n tags_plugin = bot.plugins['tags.py']\n url = random.choice(tag.value)\n try:\n await tags_plugin._play_sound_tag(bot, tag, url, voice_channel, delay=-1)\n except BotException as e:\n logger.warn(\"Failed to play tag: %s\", e)\n else:\n tags_plugin._update_hits(bot, tag.key, message.author.id, message.guild.id)\n\n # Stop audio\n elif message.content == '[Stop audio]':\n voice_client = message.guild.voice_client\n if (voice_client and\n voice_client.channel == voice_channel and\n voice_client.is_playing()):\n voice_client.stop()\n\n # Always remove messages\n await asyncio.sleep(3)\n try:\n await message.delete()\n except:\n pass\n","repo_name":"jkchen2/JshBot-plugins","sub_path":"tag_remote/tag_remote.py","file_name":"tag_remote.py","file_ext":"py","file_size_in_byte":10289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12093277300","text":"import requests\nfrom datetime import datetime\n\nclass RadarrClient:\n def __init__(self, api_key, url):\n self.api_key = api_key\n self.url = url\n\n def get_movies(self):\n response = requests.get(f\"{self.url}/api/movie\", params={\"apikey\": self.api_key})\n response.raise_for_status()\n return response.json()\n\n def update_movie(self, movie, add_archive=False, remove_archive=False):\n if add_archive:\n # If the movie doesn't already have the 'Archive' label, add it\n if 'Archive' not in movie['labels']:\n movie['labels'].append('Archive')\n movie['archive_date'] = datetime.now()\n elif remove_archive:\n # If the movie has the 'Archive' label, remove it\n if 'Archive' in movie['labels']:\n movie['labels'].remove('Archive')\n movie['archive_date'] = None\n\n response = requests.put(\n f\"{self.url}/api/movie/{movie['id']}\",\n params={\"apikey\": self.api_key},\n json=movie\n )\n response.raise_for_status()\n\n def delete_movie(self, movie):\n response = requests.delete(\n f\"{self.url}/api/movie/{movie['id']}\",\n params={\"apikey\": self.api_key}\n )\n response.raise_for_status()\n","repo_name":"dazrave/purgarr","sub_path":"app/radaar.py","file_name":"radaar.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24837114638","text":"import yaml\nfrom pyfsm import FSM\n\nclass yamlFSM(FSM):\n def __init__(self):\n descr = yaml.load(self.__doc__)\n descr[\"handlers\"] = dict(\n (i,getattr(self.__class__,i))\n for i in dir(self.__class__) if not i.startswith(\"_\"))\n FSM.__init__(self,descr)\n\n","repo_name":"FxIII/pyfsm","sub_path":"pyfsm/fsm_yaml.py","file_name":"fsm_yaml.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2453317982","text":"import torch\nimport torch.nn as nn\nfrom torch.optim.adam import Adam\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch import save\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport math\nfrom collections import defaultdict\nimport pkg_resources\n\n\nclass PytorchModel(object):\n _gpu_available = torch.cuda.is_available()\n _gpus = torch.device(\"cuda\")\n # _gpu_2 = torch.device(\"cuda:1\")\n _cpu = torch.device(\"cpu\")\n _batch_size = 100000\n _output_size = 1\n _model = None\n layer_count = 6\n hidden_layers = None\n default_hidden_layer_size = 800\n activation = nn.PReLU\n final_activation = nn.Sigmoid\n loss_func = torch.nn.SmoothL1Loss()\n allow_negative_predictions = True\n train_time = 3000\n training_curve = defaultdict(list)\n best_loss = 10000000000000000.0\n best_params_dict = {}\n use_cpu = False\n load_cached_model = None\n save_cached_model = 'model_v12'\n x_tensor = None\n y_tensor = None\n t_x = None\n x_y = None\n optimizer = None\n outputs = None\n batch_counter = 0\n counter = 0\n running_loss = 0.0\n fit_model = True\n\n def __init__(self):\n\n self.activation = nn.SELU\n self.layer_count = 20\n self.hidden_layers = [40] * self.layer_count\n self.train_layers = [True] * (self.layer_count * 2)\n # TODO Add in an update for layer count when the hidden layer size list is updated\n\n def fit(self, x, y, test_x=None, test_y=None):\n self.outputs, size = self._prep_input(x, y)\n if self.load_cached_model is not None:\n self.load_model(x, y, self.load_cached_model)\n if self.fit_model:\n self._fit(x, y)\n\n def load_model(self, x, y, path):\n device = self.get_device()\n self._model = torch.load(path)\n self._model.to(device)\n\n def _prep_tensors(self, x, y):\n x, y = self.handle_pandas(x, y)\n y = torch.tensor(y)\n x = torch.tensor(x, requires_grad=True)\n x = x.float()\n y = self.set_y_data_type(y)\n if self._gpu_available and not self.use_cpu:\n x.cuda().to(self._gpus)\n y.cuda().to(self._gpus)\n else:\n x.to(self._cpu)\n y.to(self._cpu)\n return x, y\n\n def _prep_input(self, x, y):\n size = y.shape[0]\n if y.ndim > 1:\n outputs = y.shape[1]\n else:\n outputs = 1\n if size < self._batch_size:\n self._batch_size = size\n if self._model is None:\n self._setup_model(x, y)\n return outputs, size\n\n def predict(self, x):\n if x.shape[0] > 50000:\n predictions = []\n split_size = int(x.shape[0] / 50000) + 1\n list_of_outputs = np.array_split(x, split_size)\n for output in list_of_outputs:\n predictions.append(self._predict(output))\n predictions = np.concatenate(predictions)\n else:\n predictions = self._predict(x)\n return predictions\n\n def _fit(self, x, y, test_x=None, test_y=None):\n for idx, param in enumerate(self._model.parameters()):\n param.requires_grad = self.train_layers[idx]\n self.optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, self._model.parameters()),\n lr=0.005, amsgrad=True)\n # scheduler = ReduceLROnPlateau(optimizer)\n\n self.x_tensor, self.y_tensor = self._prep_tensors(x, y)\n if test_x is not None:\n self.t_x, self.t_y = self._prep_tensors(test_x, test_y)\n permutation = torch.randperm(self.x_tensor.size()[0])\n last_loss = 100000000\n done = False\n path = self.get_path('start_test')\n torch.save(self._model, path)\n t = 0\n while True:\n t += 1\n last_loss = self.run_epoch(t, permutation, last_loss)\n if done:\n break\n if t > self.train_time:\n break\n self._model.load_state_dict(self.best_params_dict)\n # if self._gpu_available:\n # self._model.to(self._gpus)\n print(((self.best_loss) ** (0.5)) / x.shape[0])\n path = self.get_path(self.save_cached_model)\n torch.save(self._model, path)\n self.load_cached_model = path\n\n def _predict(self, x):\n x, _ = self.handle_pandas(x)\n x = torch.tensor(x)\n x = x.float()\n if self._gpu_available:\n x.cuda().to(self._gpus)\n try:\n predictions = self._model(x.cuda())\n predictions = predictions.cpu()\n except Exception as ex:\n print(ex)\n self.load_model(None, None, self.load_cached_model)\n predictions = self._model(x)\n\n else:\n predictions = self._model(x)\n # predictions[predictions < 0] = 0\n return predictions.detach().numpy()\n\n def _setup_model(self, x, y):\n if y.ndim == 2:\n if y.shape[1] > 1:\n self.hidden_layers[len(self.hidden_layers)-1] = y.shape[1]\n modules = []\n previous_layer_size = x.shape[1]\n for x in range(self.layer_count):\n if self.hidden_layers is not None:\n layer_size = self.hidden_layers[x]\n else:\n layer_size = self.default_hidden_layer_size\n modules.append(nn.Linear(previous_layer_size, layer_size))\n if x == self.layer_count - 1:\n # modules.append(self.final_activation())\n pass\n else:\n # modules.append(nn.Dropout(p=0.001))\n modules.append(self.activation())\n previous_layer_size = layer_size\n self._model = nn.Sequential(*modules)\n if self._gpu_available:\n self._model = self._model.cuda().to(self._gpus)\n\n def run_epoch(self, t, permutation, last_loss):\n self.counter = 0\n self.running_loss = 0.0\n self.batch_counter = 0\n for i in range(0, self.x_tensor.size()[0], self._batch_size):\n self.run_training_iteration(i, permutation)\n if self.running_loss < self.best_loss:\n self.best_params_dict = self._model.state_dict()\n self.best_loss = self.running_loss\n print(t, self.running_loss)\n if self.running_loss - last_loss > -0.000001:\n if last_loss < 100000:\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.97\n # elif abs((self.running_loss / self.counter) - last_loss) < 0.00000001:\n # for param_group in self.optimizer.param_groups:\n # param_group['lr'] = param_group['lr'] * 1.0001\n # last_loss = self.running_loss / self.counter\n self.training_curve['iter'].append(t)\n self.training_curve['loss'].append(self.running_loss)\n last_loss = self.running_loss\n return last_loss\n\n def run_training_iteration(self, i, permutation):\n if i + self._batch_size > self.x_tensor.size()[0]:\n current_size = self.x_tensor.size()[0] - i\n else:\n current_size = self._batch_size\n self.batch_counter += self._batch_size\n\n indices = permutation[i:i + current_size]\n batch_x, batch_y = self.x_tensor[indices], self.y_tensor[indices, :]\n if self._gpu_available:\n predictions = self._model(batch_x.cuda())\n else:\n predictions = self._model(batch_x)\n if not self.allow_negative_predictions:\n predictions[predictions < 0] = 0\n if self._gpu_available:\n loss = self.loss_func(predictions,\n batch_y.cuda().view(current_size, self.outputs))\n else:\n loss = self.loss_func(predictions,\n batch_y.view(current_size,\n self.outputs))\n if self.counter % 10 == 0:\n print(loss.item())\n self._model.zero_grad()\n self.optimizer.zero_grad()\n loss.backward()\n self.running_loss += loss.item()\n self.optimizer.step()\n\n self.counter += 1\n\n def get_params(self):\n return {}\n\n def _get_param_names(self):\n return {}\n\n def set_params(self, **kwargs):\n return self\n\n def set_y_data_type(self, y):\n return y.float()\n\n def get_device(self):\n if self.use_cpu:\n device = self._cpu\n elif self._gpu_available:\n device = self._gpus\n else:\n device = self._cpu\n return device\n\n def handle_pandas(self, x, y=None):\n if not isinstance(x, pd.DataFrame) and not isinstance(x, pd.Series):\n if y is not None:\n if len(y.shape) == 1:\n if isinstance(y, pd.Series):\n y = y.values\n y = y.reshape([y.shape[0], 1])\n else:\n y = y.values\n return x, y\n if isinstance(x, pd.DataFrame):\n x = x.values\n if y is not None:\n y = y.values\n return x, y\n\n def get_path(self, modifier):\n # path = pkg_resources.resource_filename('crcdal', '/cache/'+modifier)\n return modifier\n\n","repo_name":"nathangeology/cyclist_dataset","sub_path":"data_science_layer/machine_learning/not_sk_learn_ml_models/pytorch_base.py","file_name":"pytorch_base.py","file_ext":"py","file_size_in_byte":9349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11647049338","text":"import pystray\nimport datetime\nimport time\nfrom PIL import Image, ImageDraw, ImageFont\n\nwidth = 32\nheight = 32\ncolor1 = \"white\"\ncolor2 = \"black\"\n\ndef run_loop(icon, count=0):\n if count % 30 == 0:\n icon.visible = True\n icon.icon = create_image(icon)\n count = 0\n time.sleep(5)\n run_loop(icon, count+5)\n\ndef create_image(icon=None):\n # Generate an image and draw a pattern\n week = datetime.datetime.now().isocalendar()[1]\n wwtxt = '{week:02d}'.format(week=week)\n# wwtxt = '{}'.format(int(time.time())%52)\n if icon == None:\n image = Image.new('RGB', (width, height), color2)\n else:\n image = icon.icon\n fnt = ImageFont.truetype('consolab.ttf', 18)\n dc = ImageDraw.Draw(image)\n dc.rectangle([(0,0),(width,height)], fill=color2)\n dc.text((0,0), \"WW\", font=fnt, fill=color1)\n dc.text((0,16), wwtxt, font=fnt, fill=color1)\n return image\n\nicon = pystray.Icon('systray-workweek', run_loop)\nicon.icon = create_image()\n\ntry:\n icon.run(run_loop)\nexcept KeyboardInterrupt:\n sys.exit(0)\nexcept Exception as e:\n print(\"Exception Occured \\n\" + str(e))\n sys.exit(1)\nfinally:\n icon.stop()\n sys.exit(0)\n","repo_name":"netjunki/pystray-workweek","sub_path":"WW.py","file_name":"WW.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31225706014","text":"import tensorflow as tf\n\n\ndef mmd(x,\n y,\n kernel='rbf',\n **kernel_params):\n \"\"\"\n\n Args:\n x (tf.Tensor): shape (bs, N)\n y (tf.Tensor): shape (bs, N)\n kernel (str): kernel function\n bias (bool): biased or unbiased\n **kernel_params: parameters of kernel\n\n Returns:\n mmd_loss\n\n \"\"\"\n bs = x.get_shape().as_list()[0]\n half_bs = bs*(bs-1)//2\n norm_x = tf.reduce_sum(tf.square(x), axis=1, keepdims=True)\n dot_xx = tf.matmul(x, x, transpose_b=True)\n dis_xx = norm_x + tf.transpose(norm_x) - 2*dot_xx\n\n norm_y = tf.reduce_sum(tf.square(y), axis=1, keepdims=True)\n dot_yy = tf.matmul(y, y, transpose_b=True)\n dis_yy = norm_y + tf.transpose(norm_y) - 2*dot_yy\n\n dot_xy = tf.matmul(x, y, transpose_b=True)\n dis_xy = norm_x + tf.transpose(norm_y) - 2*dot_xy\n\n if kernel in ['gaussian', 'rbf', 'RBF']:\n sigma2_k = tf.nn.top_k(\n tf.reshape(dis_xy, [-1]), half_bs).values[half_bs - 1]\n sigma2_k += tf.nn.top_k(\n tf.reshape(dis_xx, [-1]), half_bs).values[half_bs - 1]\n\n res1 = tf.exp(- dis_xx / 2. / sigma2_k)\n res1 += tf.exp(- dis_yy / 2. / sigma2_k)\n res1 = tf.multiply(res1, 1. - tf.eye(bs))\n res1 = tf.reduce_sum(res1) / (bs * (bs - 1))\n res2 = tf.exp(- dis_xy / 2. / sigma2_k)\n res2 = tf.reduce_sum(res2) * 2. / (bs * bs)\n stat = res1 - res2\n elif kernel in ['IMQ']:\n raise NotImplementedError\n else:\n raise ValueError\n return stat\n","repo_name":"salty-vanilla/tf-gans","sub_path":"ops/losses/mmd.py","file_name":"mmd.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"20430937767","text":"# importing the dataset\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\n\n\n\n# Reading the data\ndataset = pd.read_csv('Chess games stats.csv')\n\n\n\n# Collecting the data\nX =dataset['Number of Blunders'].values # independent variable\nY=dataset['White Rating'].values # dependent variable\n\n# Mean X and Y\nmean_x= np.mean(X)\nmean_y =np.mean(Y)\n\n\n# Total number of values\nn = len(X)\n\n# Using the formula to calculate B1 and B2\nnumer = 0\ndenom =0\nfor i in range(n):\n numer+= (X[i]-mean_x)*(Y[i]-mean_y)\n denom +=(X[i]-mean_x)**2\nb1 = numer /denom\nb0 = mean_y - (b1 * mean_x)\n\nprint(\"The value of B1 :\" + str(b1) +\"The value of B0\"+ str(b0))\n\n\n# Plotting values and regression line\nmax_x = np.max(X)\nmin_x=np.min(X)\n\n# Calculating Line values\nx=np.linspace(min_x,max_x,1000)\ny= b0 + b1 * x\n\n# Plotting the line\nplt.scatter(x,y,color='#58b970',label ='regression line')\n\nplt.scatter(X,Y,c='#ef5423',label ='Scatter Plot')\n\nplt.xlabel('Number of Blunders')\nplt.ylabel('players rating')\n\nplt.legend()\nplt.show()\n\nss_t =0\nss_r = 0\nfor i in range (n):\n y_pred = b0 + b1 * X[i]\n ss_t+=(Y[i]-mean_y)** 2\n ss_r+=(Y[i]-y_pred)**2\nr2 = 1 -(ss_r/ss_t)\nprint(\"The R^2 Value is :\"+str(r2))\n","repo_name":"Muhhammeddadell/Selected-topics","sub_path":"Simple_Linear_Regression.py","file_name":"Simple_Linear_Regression.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25420260912","text":"import sys\n\nN = int(input())\n\ndata = [0]*10001\n\nfor _ in range(N):\n i = int(sys.stdin.readline())\n data[i-1] = data[i-1]+1\n\nfor i in range(10001): \n if data[i] != 0:\n for j in range(data[i]):\n print(i+1) \n \n\n","repo_name":"lhs961021/python_algorithm","sub_path":"practice/12_정렬/10989.py","file_name":"10989.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70560953657","text":"from datetime import datetime\nfrom unittest import mock\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom oauth2_provider.models import Application\nfrom rest_framework.fields import DateField, DateTimeField\n\nfrom mtp_auth.constants import (\n CASHBOOK_OAUTH_CLIENT_ID, BANK_ADMIN_OAUTH_CLIENT_ID,\n NOMS_OPS_OAUTH_CLIENT_ID, SEND_MONEY_CLIENT_ID,\n)\nfrom mtp_auth.models import Role, ApplicationUserMapping, PrisonUserMapping\nfrom mtp_auth.tests.mommy_recipes import (\n create_bank_admin,\n create_disbursement_bank_admin,\n create_prison_clerk,\n create_prisoner_location_admin,\n create_refund_bank_admin,\n create_security_fiu_user,\n create_security_staff_user,\n create_send_money_shared_user,\n create_user_admin,\n)\nfrom prison.models import Prison\n\nUser = get_user_model()\n\nFLAKY_TEST_WARNING = (\n 'WARNING: This test has been flaky in the past. '\n 'It may fail even when nothing is broken. '\n 'Rerun the tests if that happens. '\n 'See: https://dsdmoj.atlassian.net/browse/MTP-1370'\n)\n\n\nclass MockModelTimestamps:\n \"\"\"\n Context manager to allow specifying the created and modified\n datetimes when saving models extending TimeStampedModel\n \"\"\"\n\n def __init__(self, created=None, modified=None):\n self.patches = []\n if created:\n self.patches.append(\n mock.patch('model_utils.fields.AutoCreatedField.get_default',\n return_value=created)\n )\n if modified:\n self.patches.append(\n mock.patch('model_utils.fields.now',\n return_value=modified)\n )\n\n def __enter__(self):\n for patch in self.patches:\n patch.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n for patch in self.patches:\n patch.stop()\n\n\ndef make_applications():\n owner = get_user_model().objects.first()\n\n def make_application_and_roles(client_id, name, *roles):\n app = Application.objects.filter(\n client_id=client_id\n ).first()\n if not app:\n app = Application.objects.create(\n client_id=client_id,\n client_type='confidential',\n authorization_grant_type='password',\n client_secret=client_id,\n name=name,\n user=owner,\n )\n for role in roles:\n groups = [Group.objects.get_or_create(name=group)[0] for group in role['groups']]\n key_group, groups = groups[0], groups[1:]\n role, _ = Role.objects.get_or_create(\n name=role['name'],\n application=app,\n key_group=key_group,\n login_url='http://localhost/%s/' % client_id,\n )\n role.other_groups.set(groups)\n\n make_application_and_roles(\n CASHBOOK_OAUTH_CLIENT_ID, 'Digital cashbook',\n {'name': 'prison-clerk', 'groups': ['PrisonClerk']},\n )\n make_application_and_roles(\n NOMS_OPS_OAUTH_CLIENT_ID, 'Prisoner money intelligence',\n {'name': 'prisoner-location-admin', 'groups': ['PrisonerLocationAdmin']},\n {'name': 'security', 'groups': ['Security']},\n )\n make_application_and_roles(\n BANK_ADMIN_OAUTH_CLIENT_ID, 'Bank admin',\n {'name': 'bank-admin', 'groups': ['RefundBankAdmin', 'BankAdmin']},\n {'name': 'disbursement-admin', 'groups': ['DisbursementBankAdmin']},\n )\n make_application_and_roles(\n SEND_MONEY_CLIENT_ID, 'Send money to someone in prison',\n )\n\n\ndef give_superusers_full_access():\n super_admins = get_user_model().objects.filter(is_superuser=True)\n for super_admin in super_admins:\n super_admin.flags.get_or_create(name='hmpps-employee')\n PrisonUserMapping.objects.assign_prisons_to_user(super_admin, Prison.objects.all())\n for application in Application.objects.all():\n ApplicationUserMapping.objects.get_or_create(\n user=super_admin,\n application=application,\n )\n\n\ndef make_test_users(clerks_per_prison=2, num_security_fiu_users=1):\n # prison clerks\n prison_clerks = []\n for prison in Prison.objects.all():\n for _ in range(clerks_per_prison):\n prison_clerks.append(create_prison_clerk(prisons=[prison]))\n\n # noms-ops users\n prisoner_location_admins = [create_prisoner_location_admin()]\n security_fiu_users = [\n create_security_fiu_user(name_and_password=f'security-fiu-{number}')\n for number in range(num_security_fiu_users)\n ]\n security_users = [\n create_security_staff_user(),\n create_security_staff_user(name_and_password='prison-security', prisons=[Prison.objects.first()]),\n *security_fiu_users,\n ]\n\n # bank admin\n bank_admins = [create_bank_admin()]\n refund_bank_admins = [create_refund_bank_admin()]\n disbursement_bank_admins = [create_disbursement_bank_admin()]\n\n # send money shared user\n send_money_users = [create_send_money_shared_user()]\n\n # create test oauth applications\n make_applications()\n\n def link_users_with_client(users, client_id):\n for user in users:\n ApplicationUserMapping.objects.get_or_create(\n user=user,\n application=Application.objects.get(client_id=client_id)\n )\n\n link_users_with_client(prison_clerks, CASHBOOK_OAUTH_CLIENT_ID)\n link_users_with_client(prisoner_location_admins, NOMS_OPS_OAUTH_CLIENT_ID)\n link_users_with_client(bank_admins, BANK_ADMIN_OAUTH_CLIENT_ID)\n link_users_with_client(refund_bank_admins, BANK_ADMIN_OAUTH_CLIENT_ID)\n link_users_with_client(disbursement_bank_admins, BANK_ADMIN_OAUTH_CLIENT_ID)\n link_users_with_client(send_money_users, SEND_MONEY_CLIENT_ID)\n link_users_with_client(security_users, NOMS_OPS_OAUTH_CLIENT_ID)\n link_users_with_client(security_fiu_users, NOMS_OPS_OAUTH_CLIENT_ID)\n\n return {\n 'prison_clerks': prison_clerks,\n 'prisoner_location_admins': prisoner_location_admins,\n 'bank_admins': bank_admins,\n 'refund_bank_admins': refund_bank_admins,\n 'disbursement_bank_admins': disbursement_bank_admins,\n 'send_money_users': send_money_users,\n 'security_staff': security_users,\n 'security_fiu_users': security_fiu_users,\n }\n\n\ndef make_test_user_admins():\n # prison user admins\n prison_clerks = []\n for prison in Prison.objects.all():\n prison_clerks.append(create_user_admin(\n create_prison_clerk, prisons=[prison], name_and_password='ua')\n )\n\n # The only Security user admins should be FIU\n security_fiu_users = [\n create_user_admin(create_security_fiu_user, name_and_password='security-fiu-100'),\n create_user_admin(\n create_security_fiu_user,\n name_and_password='security-fiu-101',\n prisons=[Prison.objects.first()]\n ),\n ]\n\n # prisoner location user admins\n prisoner_location_admins = [\n create_user_admin(create_prisoner_location_admin, name_and_password='pla-user-admin'),\n ]\n\n # bank admin user admins\n refund_bank_admins = [\n create_user_admin(create_refund_bank_admin, name_and_password='rba-user-admin-1'),\n create_user_admin(create_refund_bank_admin, name_and_password='rba-user-admin-2'),\n ]\n\n # create test oauth applications\n make_applications()\n\n def link_users_with_client(users, client_id):\n for user in users:\n ApplicationUserMapping.objects.get_or_create(\n user=user,\n application=Application.objects.get(client_id=client_id)\n )\n\n link_users_with_client(prison_clerks, CASHBOOK_OAUTH_CLIENT_ID)\n link_users_with_client(prisoner_location_admins, NOMS_OPS_OAUTH_CLIENT_ID)\n link_users_with_client(refund_bank_admins, BANK_ADMIN_OAUTH_CLIENT_ID)\n link_users_with_client(security_fiu_users, NOMS_OPS_OAUTH_CLIENT_ID)\n\n return {\n 'prison_clerk_uas': prison_clerks,\n 'prisoner_location_uas': prisoner_location_admins,\n 'bank_admin_uas': refund_bank_admins,\n 'security_fiu_uas': security_fiu_users,\n }\n\n\ndef format_date_or_datetime(value):\n \"\"\"\n Formats a date or datetime using DRF fields.\n\n This is for use in tests when comparing dates and datetimes with JSON-formatted values.\n \"\"\"\n if not value:\n return value\n\n if isinstance(value, datetime):\n return DateTimeField().to_representation(value)\n return DateField().to_representation(value)\n\n\ndef create_super_admin(stdout=None, style_success=None):\n try:\n admin_user = User.objects.get(username='admin')\n except User.DoesNotExist:\n admin_user = User.objects.create_superuser(\n username='admin',\n email='admin@mtp.local',\n password='adminadmin',\n first_name='Admin',\n last_name='User',\n )\n for group in Group.objects.all():\n admin_user.groups.add(group)\n\n if stdout and style_success:\n stdout.write(style_success('Model creation finished'))\n\n\ndef delete_non_related_nullable_fields(queryset, null_fields_to_leave_populated=None):\n \"\"\"\n This is intended for testing the minimum amount of data needed to be populated on an\n object for a codeflow, whilst also using the test data setup fixtures of the happy path\n \"\"\"\n blankable_fields = set()\n sample_instance = queryset.first()\n for field in sample_instance._meta.get_fields():\n # We don't want to blank any related objects\n if (\n getattr(field, 'null', False)\n and not getattr(field, 'related_model', False)\n ):\n blankable_fields.add(field.name)\n if null_fields_to_leave_populated:\n to_be_blanked_fields = blankable_fields - null_fields_to_leave_populated\n else:\n to_be_blanked_fields = blankable_fields\n\n for instance in queryset:\n for field in to_be_blanked_fields:\n setattr(instance, field, None)\n instance.save()\n instance.refresh_from_db()\n assert all([\n getattr(instance, field_name) is None\n for field_name in to_be_blanked_fields\n ])\n","repo_name":"ministryofjustice/money-to-prisoners-api","sub_path":"mtp_api/apps/core/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"38600147246","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"从Thread类中派生出一个子例,创建一个这个子类的实例\"\"\"\n\nimport threading\nfrom time import sleep, ctime\n\nloops = (4, 2)\n\n\nclass MyThread(threading.Thread):\n \"\"\"\n 1.子类化Thread类\n 2.要先调用基类的构造器,进行显式覆盖\n 3.重新定义run()函数\n \"\"\"\n def __init__(self, func, args, name=''):\n super(MyThread, self).__init__()\n self.name = name\n self.func = func\n self.args = args\n\n def run(self):\n self.func(*self.args)\n\n\ndef loop(nloop, nsec):\n print('start loop', nloop, 'at:', ctime())\n sleep(nsec)\n print('loop', nloop, 'done at:', ctime())\n\n\ndef main():\n print('starting at:', ctime())\n threads = []\n nloops = range(len(loops))\n\n for i in nloops:\n t = MyThread(loop, (i, loops[i]), loop.__name__) # 创建子类的实例\n threads.append(t)\n\n for i in nloops:\n threads[i].start()\n\n for i in nloops:\n threads[i].join()\n\n print('all DONE at:', ctime())\n\nif __name__ == '__main__':\n main()","repo_name":"moranguo/python3playground","sub_path":"multiple_thread/multhread5.py","file_name":"multhread5.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7705900835","text":"import json\r\nimport re\r\nimport webbrowser\r\n\r\n\r\nlista_nombres=[]\r\nlista_edad=[]\r\nlista_activo=[]\r\nlista_promedio=[]\r\ncampos=[\"nombre\",\"edad\",\"activo\",\"promedio\"]\r\n\r\ndef main():\r\n opcion=\"\"\r\n while(\"salir\" not in opcion.lower()):\r\n print(\">>>\", end='')\r\n opcion = input()\r\n x=opcion.split(\" \")\r\n opcionP=x[0].lower()\r\n if (\"cargar\" == opcionP):\r\n try:\r\n cadena = opcion[0:6]\r\n opcion = opcion.replace(cadena, \"\")\r\n opcion = opcion.replace(\" \", \"\")\r\n arreglo2 = opcion.split(\",\")\r\n for i in arreglo2:\r\n rutaa = i\r\n Cargar(rutaa)\r\n print(\"Archivo: \", i, \" cargado\")\r\n except:\r\n print(\"Algun archivo no encontrado\")\r\n\r\n elif (\"seleccionar\" in opcionP):\r\n\r\n atributos=[]\r\n cadena=opcion[0:11]\r\n opcion=opcion.replace(cadena,\"\")\r\n arreglo=opcion.split(\",\")\r\n tamanio=len(arreglo)-1\r\n aste=opcion.replace(\" \",\"\")\r\n if(aste==\"*\"):\r\n print(\"\")\r\n print(aste)\r\n asterisco()\r\n\r\n elif(\"*\" in opcion and len(opcion)>5):\r\n opcion=opcion.replace(\"*\",\"\")\r\n opcion=pruebas(opcion)\r\n opcion=opcion[5:]\r\n opcion=pruebas(opcion)\r\n datos=opcion.split(\"=\")\r\n atributoo=datos[0].replace(\" \",\"\").lower()\r\n condi=pruebas(datos[1])\r\n if atributoo in campos:\r\n condi=condi.replace(\"'\",\"\")\r\n condi=condi.replace(\"“\",\"\")\r\n data=info(condi,campos)\r\n print(data)\r\n else:\r\n print(\"Error en Atributo\")\r\n\r\n else:\r\n\r\n for i in range(tamanio):\r\n local = arreglo[i].replace(\" \", \"\")\r\n local = local.lower()\r\n atributos.append(local)\r\n\r\n elimardonde = arreglo[len(arreglo) - 1].split(\"=\")\r\n x = re.findall(\"\\A\\s\", elimardonde[0])\r\n c = elimardonde[0]\r\n z=\"\"\r\n if x:\r\n z = pruebas(c)\r\n else:\r\n print(\"\")\r\n condicion = elimardonde[1]\r\n condicion = condicion.replace(\" \", \"\")\r\n condicion = condicion.replace(\"“\", \"\")\r\n condicion = condicion.replace(\"”\", \"\")\r\n condicion=condicion.replace(\"'\",\"\")\r\n\r\n valiodonde = z.split(\" \", 1)\r\n ultimo_atributo = valiodonde[0]\r\n atributos.append(ultimo_atributo.lower())\r\n donde = valiodonde[1].split(\" \", 1)\r\n atributo_condicion = donde[1].replace(\" \", \"\")\r\n if ((len(atributo_condicion) > 8)):\r\n atributo_condicion = atributo_condicion[5:]\r\n\r\n atributo_condicion = atributo_condicion.lower()\r\n bandera = Validar(atributos)\r\n if (bandera):\r\n bandera2 = Validar2(atributo_condicion, campos)\r\n if (bandera2):\r\n if (atributo_condicion == \"nombre\"):\r\n data = info(condicion, atributos)\r\n print(data)\r\n elif (atributo_condicion == \"edad\"):\r\n data = info(condicion, atributos)\r\n print(data)\r\n elif (atributo_condicion == \"activo\"):\r\n data = info(condicion, atributos)\r\n print(data)\r\n elif (atributo_condicion == \"promedio\"):\r\n data = info(condicion, atributos)\r\n print(data)\r\n else:\r\n print(\"Error campo de condicion\")\r\n else:\r\n print(\"Error Campos\")\r\n\r\n elif (\"maximo\" in opcionP):\r\n cadena=opcion[0:6]\r\n opcion=opcion.replace(cadena,\"\")\r\n opcion=opcion.replace(\" \",\"\")\r\n\r\n if(opcion.lower()==\"edad\"):\r\n print(max(lista_edad))\r\n\r\n elif(opcion.lower()==\"promedio\"):\r\n print(max(lista_promedio))\r\n else:\r\n print(\"atributo no valido\")\r\n\r\n elif (\"minimo\" in opcion.lower()):\r\n cadena = opcion[0:6]\r\n opcion = opcion.replace(cadena, \"\")\r\n opcion = opcion.replace(\" \", \"\")\r\n\r\n if(opcion.lower()==\"edad\"):\r\n print(min(lista_edad))\r\n elif(opcion.lower()==\"promedio\"):\r\n print(min(lista_promedio))\r\n else:\r\n print(\"atributo no valido\")\r\n\r\n elif (\"cuenta\" in opcionP):\r\n valor= sumaCuenta(lista_nombres)\r\n print(valor)\r\n\r\n elif(opcionP == \"suma\"):\r\n cadena = opcion[0:4]\r\n opcion=opcion.replace(cadena,\"\")\r\n opcion=opcion.replace(\" \",\"\")\r\n if(opcion.lower()==\"edad\"):\r\n valor=sumalista(lista_edad)\r\n print(valor)\r\n elif(opcion.lower()==\"promedio\"):\r\n valor=sumalista(lista_promedio)\r\n print(valor)\r\n else:\r\n print(\"Atributo no valido\")\r\n\r\n elif (\"reportar\" in opcionP):\r\n\r\n opcion=opcion[8:]\r\n valorr=opcion.replace(\" \",\"\")\r\n n=int(valorr)\r\n\r\n if(n<=len(lista_nombres)):\r\n\r\n encabezado = '\\n' + '\\n' + '\\n' + '\\n' + 'Reporte\\n' + '\\n'\r\n encabezado = encabezado + '\\n' + '\\n' + '
\\n' + '\\n' + '\\n' + '\\n'\r\n\r\n for element in campos:\r\n temp = ''\r\n encabezado = encabezado + temp\r\n encabezado = encabezado + '\\n' + '\\n\\n'\r\n\r\n for i in range(n):\r\n etiqueta = '\\n'\r\n etiqueta = etiqueta + ''\r\n\r\n etiqueta = etiqueta + '\\n\\n'\r\n encabezado = encabezado + etiqueta\r\n encabezado = encabezado + '
' + element + '
' + lista_nombres[i] + '' + lista_edad[i] + '' + \\\r\n lista_activo[i] + '' + lista_promedio[i] + '
\\n' + '
\\n' + '\\n'+''\r\n\r\n\r\n doc = open(\"index.html\", \"w\")\r\n doc.write(encabezado)\r\n doc.close()\r\n\r\n webbrowser.open_new_tab('index.html')\r\n\r\n\r\n else:\r\n print(\"Error \",n,\" mayor a los datos registrados\")\r\n\r\n elif (opcionP == \"salir\"):\r\n print(\"Adios!\")\r\n else:\r\n print(\"comando no reconocido\")\r\n\r\ndef Cargar(rutaa):\r\n archivo = open(rutaa)\r\n info = json.load(archivo)\r\n archivo.close()\r\n\r\n for element in info:\r\n aux = str(element)\r\n aux = aux.replace(\"'\", \"\")\r\n aux = aux.replace(\"{\", \"\")\r\n aux = aux.replace(\"}\", \"\")\r\n aux = aux.replace(\":\", \"\")\r\n aux = aux.replace(\" \", \"\")\r\n arreglo = aux.split(\",\")\r\n arreglo[0]=arreglo[0].replace(\"nombre\",\"\")\r\n arreglo[1]=arreglo[1].replace(\"edad\",\"\")\r\n arreglo[2]=arreglo[2].replace(\"activo\",\"\")\r\n arreglo[3]=arreglo[3].replace(\"promedio\",\"\")\r\n lista_nombres.append(arreglo[0])\r\n lista_edad.append(arreglo[1])\r\n lista_activo.append(arreglo[2])\r\n lista_promedio.append(arreglo[3])\r\ndef sumalista(listaNumeros):\r\n laSuma = 0\r\n for i in listaNumeros:\r\n laSuma = laSuma + float(i)\r\n return laSuma\r\ndef sumaCuenta(listaNumeros):\r\n laSuma = 0\r\n for i in listaNumeros:\r\n laSuma = laSuma + 1\r\n return laSuma\r\ndef pruebas(valor):\r\n txt = valor\r\n txt2=txt\r\n x = re.search(\"\\A\" + \" \", txt)\r\n contador = 0\r\n while (x):\r\n\r\n x = re.search(\"\\A\"+\" \", txt)\r\n txt = txt.replace(\" \", \"\", 1)\r\n contador = contador + 1\r\n txt2=txt2.replace(\" \",\"\",(contador-1))\r\n return txt2\r\ndef Validar(lista_atributos):\r\n bandera = False\r\n for element in lista_atributos:\r\n if(element in campos):\r\n bandera=True\r\n else:\r\n bandera=False\r\n return bandera\r\n return bandera\r\ndef Validar2(atributo, campos):\r\n if(atributo in campos):\r\n bandera=True\r\n return bandera\r\n else:\r\n\r\n bandera=False\r\n return bandera\r\ndef info(condicion, atributos):\r\n data=\"\"\r\n for i in range(len(lista_nombres)):\r\n if (condicion == lista_nombres[i]):\r\n index = i\r\n for element in atributos:\r\n if (element == \"nombre\"):\r\n data = data + \"Nombre: \" + lista_nombres[index] + \"\\n\"\r\n elif (element == \"edad\"):\r\n data = data + \"Edad: \" + lista_edad[index] + \"\\n\"\r\n elif (element == \"activo\"):\r\n data = data + \"Activo: \" + lista_activo[index] + \"\\n\"\r\n elif (element == \"promedio\"):\r\n data = data + \"Promedio: \" + lista_promedio[index] + \"\\n\"\r\n return data\r\n\r\n elif (condicion == lista_edad[i]):\r\n index = i\r\n for element in atributos:\r\n if (element == \"nombre\"):\r\n data = data + \"Nombre: \" + lista_nombres[index] + \"\\n\"\r\n elif (element == \"edad\"):\r\n data = data + \"Edad: \" + lista_edad[index] + \"\\n\"\r\n elif (element == \"activo\"):\r\n data = data + \"activo: \" + lista_activo[index] + \"\\n\"\r\n elif (element == \"promedio\"):\r\n data = data + \"Promedio: \" + lista_promedio[index] + \"\\n\"\r\n return data\r\n elif (condicion == lista_promedio[i]):\r\n index = i\r\n for element in atributos:\r\n if (element == \"nombre\"):\r\n data = data + \"Nombre: \" + lista_nombres[index] + \"\\n\"\r\n elif (element == \"edad\"):\r\n data = data + \"Edad: \" + lista_edad[index] + \"\\n\"\r\n elif (element == \"activo\"):\r\n data = data + \"Activo: \" + lista_activo[index] + \"\\n\"\r\n elif (element == \"promedio\"):\r\n data = data + \"Promedio: \" + lista_promedio[index] + \"\\n\"\r\n return data\r\n elif (condicion == lista_activo[i]):\r\n index = i\r\n for element in atributos:\r\n if (element == \"nombre \"):\r\n data = data + \"Nombre: \" + lista_nombres[index] + \"\\n\"\r\n elif (element == \"edad\"):\r\n data = data + \"Edad: \" + lista_edad[index] + \"\\n\"\r\n elif (element == \"activo\"):\r\n data = data + \"Activo: \" + lista_activo[index] + \"\\n\"\r\n elif (element == \"promedio\"):\r\n data = data + \"Promedio: \" + lista_promedio[index] + \"\\n\"\r\n return data\r\ndef asterisco():\r\n\r\n for i in range(len(lista_nombres)):\r\n index=i+1\r\n print(\"\")\r\n print(index,\".--------------------\")\r\n print(\"nombre: \",lista_nombres[i])\r\n print(\"edad: \",lista_edad[i])\r\n print(\"activo: \",lista_activo[i])\r\n print(\"promedio: \",lista_promedio[i])\r\n print(\"----------------------\")\r\n\r\nmain()","repo_name":"Edwinhndz/Practica-LF-","sub_path":"practica/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11702,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69947478778","text":"from kaggle.competitions import twosigmanews\n\nimport gc\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set(font_scale=1)\n\nimport warnings\nimport missingno as msno\n\npd.set_option('display.max_columns', 200)\npd.set_option('display.max_rows', 100)\npd.options.mode.chained_assignment = None\n# dir(pd.options.display)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=UserWarning)\n\nplt.style.use('ggplot')\n\nimport plotly.offline as py\npy.init_notebook_mode(connected=True)\nimport plotly.graph_objs as go\nimport plotly.tools as tls\nenv = twosigmanews.make_env()\n(market_train, news_train) = env.get_training_data()\ndel news_train\ngc.enable()\ngc.collect()\nret = market_train.returnsOpenNextMktres10\nuniv = market_train.universe\nlabel = (ret > 0).astype(int)\ndef ir(label, window):\n global market_train, ret, univ\n time_idx = market_train.time.factorize()[0]\n # (label * 2 - 1) : perfect confidence value\n x_t = (label * 2 - 1) * ret * univ\n x_t_sum = x_t.groupby(time_idx).sum()\n x_t_sum = x_t_sum[window:]\n score = x_t_sum.mean() / x_t_sum.std()\n return score\nir_l = [ir(label, t) for t in range(0, market_train.time.nunique(), 10)]\ntrace = go.Scatter(\n x = np.arange(0, market_train.time.nunique(), 10),\n y = ir_l,\n mode = 'lines+markers',\n marker = dict(\n size = 4,\n color = 'lightblue'\n ),\n line = dict(\n width = 1\n )\n)\ndata = [trace]\nlayout = go.Layout(dict(\n title = 'Eval Metric trend',\n xaxis = dict(title = 'operational days passed ( window start point )'),\n yaxis = dict(title = 'Evaluation metric'),\n height = 400,\n width = 750\n))\npy.iplot(dict(data=data, layout=layout), filename='IR trend')\nop = ['mean', 'std']\ndf = market_train[['time', 'returnsOpenPrevRaw1']].groupby('time').agg({\n 'returnsOpenPrevRaw1' : op,\n}).reset_index()\ndf.columns = ['time'] + [o + '_returnsOpenPrevRaw1' for o in op]\ntrace = go.Scatter(\n x = df.time,\n y = df.std_returnsOpenPrevRaw1,\n mode = 'lines+markers',\n marker = dict(\n size = 4,\n color = 'pink'\n ),\n line = dict(\n width = 1\n )\n)\ndata = [trace]\nlayout = go.Layout(dict(\n title = 'std of returnsOpenPrevRaw1',\n xaxis = dict(title = 'date'),\n yaxis = dict(title = 'std of returnsOpenPrevRaw1'),\n height = 400,\n width = 750\n))\npy.iplot(dict(data=data, layout=layout), filename='.')","repo_name":"aorursy/new-nb-5","sub_path":"maxwell110_naive-experiment-on-evaluation-metric.py","file_name":"maxwell110_naive-experiment-on-evaluation-metric.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28566689344","text":"\"\"\"Commands for operating on bands of datasets.\"\"\"\nimport collections\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\nfrom rasterio.compat import zip_longest\n\n\n# Stack command.\n@click.command(short_help=\"Stack a number of bands into a multiband dataset.\")\n@files_inout_arg\n@options.output_opt\n@format_opt\n@options.bidx_mult_opt\n@options.rgb_opt\n@options.force_overwrite_opt\n@options.creation_options\n@click.pass_context\ndef stack(ctx, files, output, driver, bidx, photometric, force_overwrite,\n creation_options):\n \"\"\"Stack a number of bands from one or more input files into a\n multiband dataset.\n\n Input datasets must be of a kind: same data type, dimensions, etc. The\n output is cloned from the first input.\n\n By default, rio-stack will take all bands from each input and write them\n in same order to the output. Optionally, bands for each input may be\n specified using a simple syntax:\n\n --bidx N takes the Nth band from the input (first band is 1).\n\n --bidx M,N,0 takes bands M, N, and O.\n\n --bidx M..O takes bands M-O, inclusive.\n\n --bidx ..N takes all bands up to and including N.\n\n --bidx N.. takes all bands from N to the end.\n\n Examples, using the Rasterio testing dataset, which produce a copy.\n\n rio stack RGB.byte.tif -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif\n\n \"\"\"\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2\n logger = logging.getLogger('rio')\n try:\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n output, files = resolve_inout(files=files, output=output,\n force_overwrite=force_overwrite)\n output_count = 0\n indexes = []\n for path, item in zip_longest(files, bidx, fillvalue=None):\n with rasterio.open(path) as src:\n src_indexes = src.indexes\n if item is None:\n indexes.append(src_indexes)\n output_count += len(src_indexes)\n elif '..' in item:\n start, stop = map(\n lambda x: int(x) if x else None, item.split('..'))\n if start is None:\n start = 1\n indexes.append(src_indexes[slice(start - 1, stop)])\n output_count += len(src_indexes[slice(start - 1, stop)])\n else:\n parts = list(map(int, item.split(',')))\n if len(parts) == 1:\n indexes.append(parts[0])\n output_count += 1\n else:\n parts = list(parts)\n indexes.append(parts)\n output_count += len(parts)\n\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs.update(**creation_options)\n kwargs['transform'] = kwargs.pop('affine')\n\n kwargs.update(\n driver=driver,\n count=output_count)\n\n if photometric:\n kwargs['photometric'] = photometric\n\n with rasterio.open(output, 'w', **kwargs) as dst:\n dst_idx = 1\n for path, index in zip(files, indexes):\n with rasterio.open(path) as src:\n if isinstance(index, int):\n data = src.read(index)\n dst.write(data, dst_idx)\n dst_idx += 1\n elif isinstance(index, collections.Iterable):\n data = src.read(index)\n dst.write(data, range(dst_idx, dst_idx + len(index)))\n dst_idx += len(index)\n\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()\n","repo_name":"ryfeus/lambda-packs","sub_path":"Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/rio/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"30755445596","text":"import logging\n\nfrom sparkling_snakes import consts\nfrom sparkling_snakes.processor.types import Config\n\n\nclass AppLoggingHelper:\n \"\"\"Logging management class.\"\"\"\n\n level_mapper: dict[str, int] = {\n 'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR\n }\n\n @staticmethod\n def configure_logging(config: Config) -> None:\n \"\"\"Configure logging using project consts.\n\n :return: None\n \"\"\"\n config_level: str = config.get('project', {}).get('logging_level', 'INFO')\n\n logging.basicConfig(format=consts.LOGGING_MAIN_FORMAT, datefmt=consts.LOGGING_DATE_FORMAT,\n level=AppLoggingHelper.level_mapper[config_level])\n","repo_name":"pakunek/SparklingSnakes","sub_path":"sparkling_snakes/helpers/app_logging.py","file_name":"app_logging.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44287431331","text":"#\n# データ取得スレッド\n#\n\nimport asyncio\nfrom bleak import discover\n\n\nfrom head_nod_analysis import setup_variable\n\neSense_name = setup_variable.eSense_name\n\n# ============================ eSenseのアドレスを取得 ============================== #\neSense_address = 0\nasync def search_eSense(eSense_number):\n global eSense_address\n eSense_flg = True\n while eSense_flg:\n devices = await discover()\n for d in devices:\n if eSense_name[eSense_number-1] in str(d):\n eSense_flg = False\n print(d)\n eSense_address = str(d).rsplit(':', 1)\n\n\n# ============================ アドレ��取得スレッド ============================== #\ndef Get(eSense_number):\n loop1 = asyncio.get_event_loop()\n loop1.run_until_complete(search_eSense(eSense_number))\n return eSense_address[0]\n","repo_name":"zeroSms/RealTime_System_for_distribution_M2","sub_path":"head_nod_analysis/get_address.py","file_name":"get_address.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73545292537","text":"from parser_classes.metadata import ConstraintDetail\nfrom parser_classes.metadata import IndexDetail\nfrom parser_classes.metadata import Constraint\nfrom parser_classes.metadata import DbdSchema\nfrom parser_classes.metadata import Domain\nfrom parser_classes.metadata import Field\nfrom parser_classes.metadata import Index\nfrom parser_classes.metadata import Table\nfrom database_classes.query import SQLiteQuery\nfrom parser_classes.ram_to_xml import Writer\nimport pprint\n\n\nclass SQLiteToRAM:\n def __init__(self):\n self.query = SQLiteQuery()\n self.tree = {}\n\n @staticmethod\n def get_object_by_name(obj_name):\n try:\n if obj_name == 'schema':\n return DbdSchema()\n elif obj_name == 'domain':\n return Domain()\n elif obj_name == 'table':\n return Table()\n elif obj_name == 'field':\n return Field()\n elif obj_name == 'constraint':\n return Constraint()\n elif obj_name == 'constraint_detail':\n return ConstraintDetail()\n elif obj_name == 'index':\n return Index()\n elif obj_name == 'index_detail':\n return IndexDetail()\n except Exception as e:\n raise Exception(e)\n\n def select_func(self, query):\n self.query.execute(query)\n result = self.query.fetchall()\n result_list = []\n for elem in [list(elem) for elem in result]:\n new_list = [x if not x == 'True' else True for x in elem]\n result_list.append([x if not x == 'False' else False for x in new_list])\n return result_list\n\n @staticmethod\n def _create_object(obj, args):\n obj.set_list_attributes(args)\n if obj.is_valid():\n return obj\n else:\n raise Exception\n\n @staticmethod\n def get_query(table, additional=None):\n if table == 'schema':\n return \"\"\"select name, fulltext_engine, version, description from dbd$schemas\"\"\"\n elif table == 'domain':\n return \"\"\"select name, description, data_type_id, length, char_length, precision, scale, width, align, \n show_null, show_lead_nulls, thousands_separator, summable, case_sensitive\n from dbd$view_domains\"\"\"\n elif table == 'table':\n return \"\"\"select schema_id, name, description, can_add, can_edit, can_delete, temporal_mode, means \n from dbd$view_tables\"\"\"\n elif table == 'field':\n return \"\"\"select table_id, name, russian_short_name, description, domain_id, can_input, can_edit, \n show_in_grid, show_in_details, is_mean, autocalculated, required\n from dbd$view_fields\n where table_id = '{}'\"\"\".format(additional)\n elif table == 'index':\n return \"\"\"select table_id, name, local, kind, field_name, expression, descend\n from dbd$view_indices\n where table_id = '{}'\"\"\".format(additional)\n elif table == 'constraint':\n return \"\"\"select table_id, name, constraint_type, reference, unique_key_id, has_value_edit, cascading_delete, field_name\n from dbd$view_constraints\n where table_id = '{}'\"\"\".format(additional)\n\n def create_objects(self):\n for schema in self.select_func(self.get_query('schema')):\n self.tree['dbd_schema'] = {self._create_object(self.get_object_by_name('schema'), schema): {'domain': [], 'table': {}}}\n db_schema = list(self.tree['dbd_schema'].values())[0]\n for domain in self.select_func(self.get_query('domain')):\n db_schema['domain'].append(self._create_object(self.get_object_by_name('domain'), domain))\n for table in self.select_func(self.get_query('table')):\n table_obj = self._create_object(self.get_object_by_name('table'), table)\n db_schema['table'][table_obj] = []\n for field in self.select_func(self.get_query('field', table_obj.name)):\n field_obj = self._create_object(self.get_object_by_name('field'), field)\n db_schema['table'][table_obj].append(field_obj)\n for index in self.select_func(self.get_query('index', table_obj.name)):\n index_obj = self._create_object(self.get_object_by_name('index'), index[:4])\n index_detail_obj = self._create_object(self.get_object_by_name('index_detail'), [index_obj]+index[-3:])\n db_schema['table'][table_obj].append(index_detail_obj)\n for const in self.select_func(self.get_query('constraint', table_obj.name)):\n index_obj = self._create_object(self.get_object_by_name('constraint'), const[:-1])\n index_detail_obj = self._create_object(self.get_object_by_name('constraint_detail'), [index_obj]+const[-1:])\n db_schema['table'][table_obj].append(index_detail_obj)\n\n def write_to_concole(self):\n pp = pprint.PrettyPrinter(depth=6)\n pp.pprint(self.tree)\n\n def get_schema(self):\n return self.tree\n\n\n# ram = SQLiteToRAM()\n# ram.create_objects()\n# ram.write_to_concole()\n# writer = Writer(ram.get_schema())\n# writer.ram_to_xml()\n# writer.write_to_file()\n","repo_name":"kseniaryabinova/metadata","sub_path":"database_classes/sqlite_to_ram.py","file_name":"sqlite_to_ram.py","file_ext":"py","file_size_in_byte":5401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23694475272","text":"from topN import topN\nimport unittest\nimport os\n\n\nclass TopNTestCases(unittest.TestCase):\n # test top_n with nothing\n def test_top_n_w_empty_list(self):\n self.assertEqual([], topN([], 5))\n\n # test top_n with negative int\n def test_top_n_w_neg_n(self):\n with self.assertRaises(TypeError):\n topN([1, 2, 3, 4, 5], -10)\n\n # test top_n with a few different lists\n def test_top_n_from_lists(self):\n self.assertEqual([19, 18, 17, 16], topN(range(20), 4))\n self.assertEqual([88, 9, 7], topN([2, 4, 7, 2, 88, 9], 3))\n self.assertEqual([3, 2, 1], topN([1, 2, 3], 5))\n\n # test top_n from a file\n def test_top_n_from_file(self):\n real_top_n = [99999] * 5\n # create an input file\n with open('test_input.txt', 'w') as in_file:\n # put some junk in\n in_file.writelines('\\n'.join([str(i) for i in range(1000)]))\n in_file.write('\\n')\n # now write the winners\n in_file.writelines('\\n'.join([str(i) for i in real_top_n]))\n\n # read from input file, call topN and write results to file\n with open('test_input.txt', 'r') as in_file:\n with open('test_output.txt', 'w') as out_file:\n top_n = topN(in_file, 5)\n out_file.write('\\n'.join([str(i) for i in top_n]))\n\n # now make sure we've written the correct output\n with open('test_output.txt', 'r') as out_file:\n test_output = [int(line) for line in out_file]\n self.assertEqual(real_top_n, test_output)\n\n # now remove the files\n for file in ['test_input.txt', 'test_output.txt']:\n if os.path.exists(file):\n os.remove(file)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jtmurphy89/topN","sub_path":"topN_tests.py","file_name":"topN_tests.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70026790137","text":"n, l = map(int, input().split())\nif l > n:\n print(1)\n exit()\n\nMOD = 10 ** 9 + 7\ndp = [0] * (n + 1)\nfor i in range(l):\n dp[i] = 1\nfor i in range(l, n + 1):\n dp[i] = dp[i - l] + dp[i - 1]\nprint(dp[-1] % MOD)\n","repo_name":"ck-ksst/AtCoder","sub_path":"typical90/50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25126609834","text":"# 최대 상금 (그리디 + 완전 탐색)\n\n# 중복 허용 조합\ndef f(n, cnt, N): # n : 이전까지의 교환 횟수, cnt : 총 교환 횟수, N : 숫자판 갯수\n global maxV\n if n == cnt:\n tmp = int(''.join(num))\n if maxV < tmp:\n maxV = tmp\n else:\n for i in range(N-1): # 교환할 두 위치 i, j를 고르는 조합 i < j\n for j in range(i+1, N):\n num[i], num[j] = num[j], num[i]\n tmp = (''.join(num))\n if tmp not in u[n]:\n u[n] += [tmp]\n f(n+1, cnt, N)\n num[i], num[j] = num[j], num[i]\n\n\nfor tc in range(1, int(input()) + 1):\n num, cnt = input().split()\n num = list(num)\n cnt = int(cnt)\n\n N = len(num)\n maxV = 0\n u = [[] for _ in '_'*cnt]\n f(0, cnt, N)\n\n print(f'#{tc} {maxV}')","repo_name":"wolfy916/Algorithm","sub_path":"Algorithm_Solution/swea/swea_lesson/greedy/swea_maximum_reward.py","file_name":"swea_maximum_reward.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30524823912","text":"# 4. Simulate more explanatory variables (*)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# a)\n\n\ndef simulate_x_variables(samples):\n np.random.seed(42)\n # simulate 10 0000 of each x1,x2,x3\n x1 = np.abs(np.random.normal(loc=100, scale=100, size=samples))\n x2 = np.abs(np.random.uniform(0, 50, samples))\n x3 = np.abs(np.random.normal(loc=0, scale=2, size=samples))\n epsilon = np.random.normal(loc=0, scale=50, size=samples)\n df = pd.DataFrame({\"x1\": x1, \"x2\": x2, \"x3\": x3})\n df[\"y\"] = 25 + 2 * x1 + 0.5 * x2 + 50 * x3 + epsilon\n df[\"ones\"] = 1\n return df\n\n\ndef plot_histograms(data, head_title):\n fig, ax = plt.subplots(2, 2, dpi=100, figsize=(16, 8))\n\n ax[0,0].hist(data[\"x1\"])\n ax[0,1].hist(data[\"x2\"])\n ax[1,0].hist(data[\"x3\"])\n ax[1,1].hist(data[\"y\"])\n fig.suptitle(head_title, size=18)\n ax[0,0].set(ylabel=\"Frequency\")\n ax[0,0].set_title(\"Minutes\")\n ax[0,1].set(ylabel=\"Frequency\")\n ax[0,1].set_title(\"SMS\")\n ax[1,0].set(ylabel=\"Frequency\")\n ax[1,0].set_title(\"Surf (GB)\")\n ax[1,1].set( ylabel=\"Frequency\")\n ax[1,1].set_title(\"Cost\")\n plt.show()\n\n\ndef start_script():\n df = simulate_x_variables(10000)\n plot_histograms(df, \"Histogram with constraint line\")\n\n df_outliers_rem = df[(df[\"x1\"] < 300) & (df[\"x3\"] < 4) & (df[\"y\"] > 0)]\n plot_histograms(df_outliers_rem, \"Histogram with outliers removed\")\n\n\n#start_script() # remove # if run start_script which make exercise 4 to run\n\n\n","repo_name":"jonssonmarie/Maskininlarning1_Marie_jonsson","sub_path":"Exercises/E00_Linear_regression/Exercise_4.py","file_name":"Exercise_4.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21918445807","text":"import pandas as pd\n\nfrom influxdb import DataFrameClient\n\n\ndef load():\n df = pd.read_csv('../data/GHI_DHI_Temp_Wind_20130101.csv.gz', compression='gzip',skiprows=1)\n df.index = pd.to_datetime(df['DATE (MM/DD/YYYY)'] + ' ' + df['MST'], format='%m/%d/%Y %H:%M')\n df.columns = [u'DATE (MM/DD/YYYY)', u'MST', u'AtmosphericAnalogKind.irradanceGlobalHorizontal',\n u'AtmosphericAnalogKind.irradanceDirectNormal',\n u'AtmosphericAnalogKind.irradanceDiffuseHorizontal',\n u'AtmosphericAnalogKind.ambientTemperature', u'AtmosphericAnalogKind.humidity',\n u'AtmosphericAnalogKind.speed', u'AtmosphericAnalogKind.bearing']\n dbname = 'weather'\n\n protocol = 'json'\n\n client = DataFrameClient(host='localhost', port=8086)\n\n print(\"Delete database: \" + dbname)\n # client.drop_database(dbname)\n\n print(\"Create pandas DataFrame\")\n\n print(\"Create database: \" + dbname)\n client.create_database(dbname)\n\n client.switch_database(dbname)\n\n # print(\"Write DataFrame\")\n client.write_points(df.loc['2013-7-1':'2013-7-31'], 'measurements', protocol=protocol)\n client.write_points(df.loc['2013-8-1':'2013-8-31'], 'measurements', protocol=protocol)\n client.write_points(df.loc['2013-9-1':'2013-9-30'], 'measurements', protocol=protocol)\n\n print(\"Write DataFrame with Tags\")\n # client.write_points(df, 'demo',\n # {'k1': 'v1', 'k2': 'v2'}, protocol=protocol)\n\n print(\"Read DataFrame\")\n # client.query(\"select * from weather\")\n\nif __name__ == '__main__':\n load()","repo_name":"NREL/Solar-Forecasting","sub_path":"solar_forecasting/util/load_ghi.py","file_name":"load_ghi.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"22"} +{"seq_id":"26651242744","text":"import urllib.request\n\nurl = 'http://image.bitautoimg.com/bt/car/default/images/logo/masterbrand/png/55/m_9_55.png'\nweb = urllib.request.urlopen(url)\ndata = web.read()\n#f = open('f:/b.png',\"wb\")\nprint(1222)\nc='sdf'\nb='f:/'+c+'.png'\nprint(b)\nf = open(b,\"wb\")\nf.write(data)\nf.close()\n\n","repo_name":"zhaohuiren/guest","sub_path":"xiazai.py","file_name":"xiazai.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26410880304","text":"import copy\nclass person:\n def __init__(self,name,age,hobies):\n self.name=name\n self.age=age\n self.hobies=hobies\n d=dict\n d={\n \"name\":name,\n \"age\":age,\n \"hobies\" : hobies\n }\n self.d=d\n def __repr__(self):\n return f\"name={self.name} | age={self.age} | hobies={self.hobies} | dictionnaries={self.d}\"\np=person(\"bs\",20,[\"music\",\"sport\"]) \np1=copy.deepcopy(p)\n(p1.hobies).append(\"bs\")\nprint(p1.d[\"hobies\"][2][0] + \"__4XX__83\")\n","repo_name":"HelmiDev03/Algorithms","sub_path":"OOP/Shallow Vs Deep copy.py","file_name":"Shallow Vs Deep copy.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"32493346906","text":"# Reto 3\n# David Monroy\n\n\ndef main() -> None:\n print(consultar_registro(auto_partes([(2001,'rosca', 'PT29872',2,45,'Luis Molero',3456,'12/06/2020'),\n (2010,'bujía', 'MS9512',4,15,'Carlos Rondon',1256,'12/06/2020'),\n (2010,'bujía', 'ER6523',9,36,'Pedro Montes',1243,'12/06/2020'),\n (3578,'tijera', 'QW8523',1,128,'Pedro Faria',1456,'12/06/2020'),\n (9251,'piñón', 'EN5698',2,8,'Juan Peña',565,'12/06/2020')]), 2010))\n\n print(consultar_registro(auto_partes([(5489,'tornillo', 'RS8512',2,33,'Julio Perez',3654213,'13/06/2020'),\n (3215,'zocalo', 'UM8587',2,125,'Laura Macias',1256321,'13/06/2020'),\n (3698,'biela', 'PT3218',1,78,'Luis Peña',14565487,'13/06/2020'),\n (8795,'cilindro', 'AZ8794',2,96,'Carlos Casio',5612405,'13/06/2020')]), 2001))\n\n print(consultar_registro(auto_partes([(9852,'Culata', 'XC9875',2,165,'Luis Molero',3455846,'14/06/2020'),\n (9852,'Culata', 'XC9875',2,165,'Jose Mejia',1355846,'14/06/2020'),\n (2564,'Cárter', 'PT29872',2,32,'Peter Cerezo',8545436,'14/06/2020'),\n (5412,'válvula', 'AZ8798',2,11,'Juan Peña',568975,'14/06/2020')]), 9852))\n\n\ndef auto_partes(ventas: list) -> list:\n \"\"\"\n Function that returns a list of dictionaries with the data of the car parts and its owners\n params:\n ventas[list]: This is the records list to store values\n returns:\n registro[list]: The new list with the respective data dictionary of each owner \n \"\"\"\n caracteristicas: list = ['IdProducto', 'dProducto', 'pnProducto',\n 'cvProducto', 'sProducto', 'nComprador', 'cComprador', 'fVenta']\n \n registro: list = []\n\n for elem in ventas:\n datos: dict = dict(zip(caracteristicas, elem)) # getting the data\n registro.append(datos)\n\n return registro\n \n\ndef consultar_registro(ventas: list, id_producto: int) -> str:\n \"\"\"\n Function that returns whether a record exists within the passed list\n params:\n ventas[list]: This is the records list to store values\n id_producto[int]: Id of the product to lookup\n returns:\n salida[str]: The human-readable information with the customer information if found\n \"\"\"\n registro_encontrado = None\n\n for item in ventas:\n if item['IdProducto'] == id_producto:\n registro_encontrado = item\n \n if registro_encontrado:\n salida: str = f\"Producto consultado : {registro_encontrado['IdProducto']} \"\\\n +f\"Descripción {registro_encontrado['dProducto']} \"\\\n +f\"#Parte {registro_encontrado['pnProducto']} \"\\\n +f\"Cantidad vendida {registro_encontrado['cvProducto']} \"\\\n +f\"Stock {registro_encontrado['sProducto']} \"\\\n +f\"Comprador {registro_encontrado['nComprador']} \"\\\n +f\"Documento {registro_encontrado['cComprador']} \"\\\n +f\"Fecha Venta {registro_encontrado['fVenta']}\"\n else:\n salida: str = 'No hay registro de venta de ese producto'\n\n return salida\n\n\nif __name__ == '__main__':\n main()","repo_name":"davidzaaan/retos-misiontic","sub_path":"ciclo-1/reto-3.py","file_name":"reto-3.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74705947894","text":"class TrieNode:\n def __init__(self):\n self.isEnd = False\n self.children = [None for _ in range(26)]\n self.value = 0\n \nclass MapSum:\n\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, key: str, val: int) -> None:\n cur = self.root\n for c in key:\n ind = ord(c.lower()) - ord(\"a\")\n if not cur.children[ind]:\n cur.children[ind] = TrieNode()\n cur = cur.children[ind]\n \n cur.isEnd = True\n cur.value = val\n\n def sum(self, prefix: str) -> int:\n cur = self.root\n \n for c in prefix:\n ind = ord(c.lower()) - ord(\"a\")\n if not cur.children[ind]:\n return 0\n cur = cur.children[ind]\n \n summ = 0\n def trav(node):\n nonlocal summ\n if not node:\n return\n \n if node.isEnd:\n summ += node.value\n \n for ind in range(26):\n trav(node.children[ind])\n \n trav(cur)\n return summ\n \n \n \n\n\n# Your MapSum object will be instantiated and called as such:\n# obj = MapSum()\n# obj.insert(key,val)\n# param_2 = obj.sum(prefix)","repo_name":"AnaniyaT/ananas","sub_path":"0677-map-sum-pairs/0677-map-sum-pairs.py","file_name":"0677-map-sum-pairs.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20796532885","text":"from typing import Union, List\n\nimport hydra.utils\nfrom omegaconf import DictConfig\n\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\n\nfrom src.distributed_sampler import DistributedBatchSampler\nfrom src.sampler import MaxTokensBatchSampler\n\n\nclass DMLMPLDataModule(pl.LightningDataModule):\n def __init__(self, conf: DictConfig):\n super().__init__()\n self.conf = conf\n self.inventories = (\n hydra.utils.instantiate(self.conf.data.inventories)\n if \"inventories\" in self.conf.data\n else None\n )\n self.train_dataset, self.validation_dataset = None, None\n\n def train_dataloader(self, *args, **kwargs) -> DataLoader:\n if self.train_dataset is None:\n self.train_dataset = hydra.utils.instantiate(\n self.conf.data.train_dataset, inventories=self.inventories\n )\n else:\n self.train_dataset.init_final_dataset()\n\n lengths = self.train_dataset[\"length\"]\n sampler = MaxTokensBatchSampler(lengths, self.conf.data.train_max_tokens, self.conf.data.max_batch_size)\n\n if self.conf.train.pl_trainer.gpus > 1:\n sampler = DistributedBatchSampler(sampler, dump_batches=True)\n\n return DataLoader(\n self.train_dataset,\n batch_sampler=sampler,\n num_workers=self.conf.data.num_workers,\n collate_fn=self.train_dataset.collate_function,\n pin_memory=self.conf.data.get(\"pin_memory\", False),\n )\n\n def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:\n if self.validation_dataset is None:\n self.validation_dataset = [\n hydra.utils.instantiate(val_data_conf, inventories=self.inventories)\n for val_data_conf in self.conf.data.validation_dataset\n ]\n else:\n for val_dataset in self.validation_dataset:\n val_dataset.init_final_dataset()\n\n validation_dataloaders = [\n DataLoader(\n dataset=vd,\n batch_size=self.conf.data.validation_batch_size,\n collate_fn=vd.collate_function,\n shuffle=False,\n num_workers=self.conf.data.num_workers,\n )\n for vd in self.validation_dataset\n ]\n return validation_dataloaders\n\n def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:\n raise NotImplementedError\n","repo_name":"edobobo/dmlm","sub_path":"src/pl_data_modules.py","file_name":"pl_data_modules.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24485937833","text":"from psycopg2._psycopg import connection as Connection\n\nfrom spatialyze.database import CAMERA_COLUMNS, TRAJECTORY_COLUMNS\n\n\ndef export_tables(conn: Connection, data_path: str):\n # create a query to specify which values we want from the database.\n s = \"SELECT * FROM \"\n s_trajectory = (\n f\"SELECT {','.join([c for c, _ in TRAJECTORY_COLUMNS])} FROM Item_General_Trajectory\"\n )\n s_bbox = s + \"General_Bbox\"\n s_camera = f\"SELECT {','.join([c for c, _ in CAMERA_COLUMNS])} FROM Cameras\"\n\n # set up our database connection.\n db_cursor = conn.cursor()\n\n # Use the COPY function on the SQL we created above.\n SQL_trajectory_output = \"COPY ({0}) TO STDOUT WITH CSV HEADER\".format(s_trajectory)\n SQL_bbox_output = \"COPY ({0}) TO STDOUT WITH CSV HEADER\".format(s_bbox)\n SQL_camera_output = \"COPY ({0}) TO STDOUT WITH CSV HEADER\".format(s_camera)\n\n # Set up a variable to store our file path and name.\n trajectory_file = data_path + \"item_general_trajectory.csv\"\n with open(trajectory_file, \"w\") as trajectory_output:\n db_cursor.copy_expert(SQL_trajectory_output, trajectory_output)\n\n bbox_file = data_path + \"general_bbox.csv\"\n with open(bbox_file, \"w\") as bbox_output:\n db_cursor.copy_expert(SQL_bbox_output, bbox_output)\n\n camera_file = data_path + \"cameras.csv\"\n with open(camera_file, \"w\") as camera_output:\n db_cursor.copy_expert(SQL_camera_output, camera_output)\n","repo_name":"apperception-db/spatialyze","sub_path":"spatialyze/utils/export_tables.py","file_name":"export_tables.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"22242617705","text":"import datetime\n\n\ndef calculate_remaining(gui):\n\n # TODO: make inputs robust for invalid inputs like characters, etc\n # TODO: adjusting for inflation, and taxes!\n\n # first calculate remaining fields and/or update others accordingly\n income = int(gui.input_netto_inc.text())\n spendings = int(gui.input_yrly_spending.text())\n\n # calculate savings/year\n savings = income - spendings\n gui.input_yrly_savings.setText(str(savings))\n\n # calculate savings rate\n savings_rate = float(savings / income)\n gui.input_saving_rate.setText(\"{:.2%}\".format(savings_rate))\n\n # if not value was found, assume the following:\n if gui.input_cur_net_worth.text() is \"\":\n cur_net_worth = 0\n gui.input_cur_net_worth.setText(\"{}\".format(cur_net_worth))\n else:\n cur_net_worth = float(gui.input_cur_net_worth.text())\n\n if gui.input_interest_rate.text() is \"\":\n interest_rate = 0.05\n gui.input_interest_rate.setText(\"{:.2}\".format(interest_rate*100))\n else:\n if float(gui.input_interest_rate.text()) > 1:\n interest_rate = float(gui.input_interest_rate.text()) / 100\n else:\n interest_rate = float(gui.input_interest_rate.text())\n\n if gui.input_swr.text() is \"\":\n swr = 0.04\n gui.input_swr.setText(\"{:.2}\".format(swr*100))\n else:\n if float(gui.input_swr.text()) > 1:\n swr = float(gui.input_swr.text()) / 100\n else:\n swr = float(gui.input_swr.text())\n\n print(\"income: {}\".format(income))\n print(\"spendings: {}\".format(spendings))\n print(\"savings: {}\".format(savings))\n print(\"savings rate: {:.2%}\".format(savings_rate))\n print(\"current net worth: {}\".format(cur_net_worth))\n print(\"interest rate: {:.2%}\".format(interest_rate))\n print(\"swr: {:.2%}\".format(swr))\n\n return calculate_fire(savings=savings,\n spendings=spendings,\n cur_net_worth=cur_net_worth,\n interest_rate=interest_rate,\n swr=swr)\n\n\ndef calculate_fire(savings, spendings, cur_net_worth, interest_rate, swr):\n\n net_worth_over_time = []\n now = datetime.datetime.now()\n cur_year = int(now.year)\n year = 0\n net_interests = 0\n savings_without_interests = 0\n\n while (cur_net_worth*swr) < spendings:\n # calculate...\n if year is 0:\n # adding current net worth to first year of saving (b/c it will generate interests)\n savings_without_interests += cur_net_worth\n else:\n # adding savings of previous to current net worth\n cur_net_worth += savings\n savings_without_interests += savings\n\n # interests per year\n interests = cur_net_worth*interest_rate\n net_interests += interests\n # adding interests to current net worth\n cur_net_worth += interests\n\n # saving everything into list\n date = datetime.date(cur_year, 1, 1)\n net_worth_over_time.append((date,\n round(cur_net_worth, 2),\n round(savings_without_interests, 2),\n round(net_interests, 2)))\n\n year += 1\n cur_year += 1\n\n print(\"\\nnet worth after {} years: {}.\".format(year, round(cur_net_worth, 2)))\n print(\"savings alone: {}, interests generated: {}\".format(round(savings_without_interests, 2),\n round(net_interests, 2)))\n print(\"\\ncongratulations, you have reached financial independence!\")\n\n return net_worth_over_time\n\n\ndef years_to_fire_based_on_savings_rate(savings_rate):\n\n sr_to_years = []\n\n return sr_to_years\n\n\n\n\n\n\n\n\n","repo_name":"kastenfrosch/fire_calculator","sub_path":"src/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"45294995127","text":"import cv2\nimport numpy as np\nimport math\n\ndef xyz_to_cube_uv(x, y, z):\n absX = abs(x)\n absY = abs(y)\n absZ = abs(z)\n\n x_positive = x > 0 \n y_positive = y > 0 \n z_positive = z > 0 \n\n # Positive X\n if(x_positive and max(absX, absY, absZ) == absX):\n # u from +z to -z\n # v from -y to +y\n maxAxis = absX\n uc = -z\n vc = y\n index = 0\n # Negative X\n elif(not x_positive and max(absX, absY, absZ) == absX):\n maxAxis = absX\n uc = z\n vc = y\n index = 1\n # Positive Y\n elif(y_positive and max(absX, absY, absZ) == absY):\n maxAxis = absY\n uc = x\n vc = -z\n index = 2\n # Negative Y\n elif(not y_positive and max(absX, absY, absZ) == absY):\n maxAxis = absY\n uc = x\n vc = z\n index = 3\n # Positive Z\n elif(z_positive and max(absX, absY, absZ) == absZ):\n maxAxis = absZ\n uc = x\n vc = y\n index = 4\n elif(not z_positive and max(absX, absY, absZ) == absZ):\n maxAxis = absZ\n uc = -x\n vc = y\n index = 5\n\n # Shift from [-1; 1] to [0; 1]\n u = 0.5 * (uc / maxAxis + 1.0)\n v = 0.5 * (vc / maxAxis + 1.0)\n return u, v, index\n\ndef convert_cube_uv_to_xyz(index, u, v):\n # Shift [0; 1] to [-1; 1]\n uc = 2.0 * u - 1.0\n vc = 2.0 * v - 1.0\n\n if index == 0:\n x = 1.0 \n y= vc\n z= -uc\n elif index == 1:\n x = -1.0 \n y= vc\n z= uc\n elif index == 2:\n x = uc \n y= 1.0\n z= -vc\n elif index == 3:\n x = uc\n y = -1.0\n z = vc\n elif index == 4:\n x = uc\n y = vc\n z = 1.0\n elif index == 5:\n x = -uc\n y = vc\n z = -1.0\n return x, y, z\n\n\ndef sample_environment():\n res = 128\n cube_map = [np.zeros((res, res, 3)), np.zeros((res, res, 3)), np.zeros((res, res, 3)), np.zeros((res, res, 3)), np.zeros((res, res, 3)), np.zeros((res, res, 3))]\n\n for i in range(6):\n for x in range(res):\n for y in range(res):\n u = float(x) / float(res)\n v = float(y) / float(res)\n dir_x, dir_y, dir_z = convert_cube_uv_to_xyz(i, u, v)\n radius = math.sqrt(dir_x ** 2 + dir_y ** 2 + dir_z ** 2)\n\n theta = np.arccos(dir_z / radius)\n phi = np.arctan2(dir_y, dir_x)\n\n if theta > (np.pi / 2.0):\n color = np.array([0, 0, 255])\n else:\n color = np.array([255, 255, 255])\n\n # if i == 0:\n # color = np.array([0, 0, 255])\n # elif i == 1:\n # color = np.array([255, 0, 0])\n # elif i == 2:\n # color = np.array([0, 255, 0])\n # elif i == 3:\n # color = np.array([255, 255, 0])\n # elif i == 4:\n # color = np.array([255, 0, 255])\n # elif i == 5:\n # color = np.array([255, 255, 255])\n\n cube_map[i][x, y] = color\n\n for i in range(6):\n cv2.imshow(f\"Cube face {i}\", cube_map[i])\n cv2.waitKey(0)\n\n\n\ndef make_sphere_map():\n sphere_map = np.zeros(512, 512, 3)\n counter = np.zeros((1024, 1024))\n\n thetas = np.linspace(0, np.pi, 180)\n phis = np.linspace(0, 2 * np.pi, 360)\n\n\n for theta in thetas:\n for phi in phis:\n x = (np.sin(theta) * np.cos(phi)) + 1 # x,y normally in [-1;1] --> shift to [0;2]\n y = (np.sin(theta) * np.sin(phi)) + 1 \n\n if theta > (np.pi / 2):\n color = np.array([255, 0, 0])\n else:\n color = np.array([0, 0, 255])\n \n \n sphere_map[min(math.floor(x * 512), 1023), min(math.floor(y * 512), 1023)] += color\n counter[min(math.floor(x * 512), 1023), min(math.floor(y * 512), 1023)] += 1\n \n cv2.imshow(\"Sphere map\", sphere_map)\n cv2.waitKey(0)\n\n\ndef main():\n sample_environment()\n\nmain()","repo_name":"Jentuuh/scalable-coherent-path-tracing","sub_path":"mcrt-experiments/scripts/sphere_mapping.py","file_name":"sphere_mapping.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9328070361","text":"from __future__ import absolute_import, division, print_function\nfrom __future__ import annotations\n\n__metaclass__ = type\n\nfrom ..module_utils.utils import PayloadMapper\nfrom ..module_utils.rest_client import RestClient\nfrom ..module_utils.typed_classes import TypedRoleToAnsible\n\nfrom typing import Any, Optional\n\n\nclass Role(PayloadMapper):\n def __init__(self, uuid: str, name: str):\n self.uuid = uuid\n self.name = name\n\n @classmethod\n def from_ansible(cls, ansible_data: Any) -> None:\n pass\n\n @classmethod\n def from_hypercore(cls, hypercore_data: Optional[dict[Any, Any]]) -> Optional[Role]:\n if not hypercore_data:\n # In case for get_record, return None if no result is found\n return None\n return cls(\n uuid=hypercore_data[\"uuid\"],\n name=hypercore_data[\"name\"],\n )\n\n def to_hypercore(self) -> None:\n pass\n\n def to_ansible(self) -> TypedRoleToAnsible:\n return dict(\n uuid=self.uuid,\n name=self.name,\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"\n One User is equal to another if it has ALL attributes exactly the same.\n This method is used only in tests.\n \"\"\"\n if not isinstance(other, Role):\n return NotImplemented\n return all(\n (\n self.uuid == other.uuid,\n self.name == other.name,\n )\n )\n\n @classmethod\n def get_role_from_uuid(\n cls, role_uuid: str, rest_client: RestClient, must_exist: bool = False\n ) -> Optional[Role]:\n hypercore_dict = rest_client.get_record(\n \"/rest/v1/Role/{0}\".format(role_uuid), must_exist=must_exist\n )\n role = cls.from_hypercore(hypercore_dict)\n return role\n\n @classmethod\n def get_role_from_name(\n cls, role_name: str, rest_client: RestClient, must_exist: bool = False\n ) -> Optional[Role]:\n hypercore_dict = rest_client.get_record(\n \"/rest/v1/Role\", {\"name\": role_name}, must_exist=must_exist\n )\n role = cls.from_hypercore(hypercore_dict)\n return role\n","repo_name":"ScaleComputing/HyperCoreAnsibleCollection","sub_path":"plugins/module_utils/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"3471456255","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy import signals\nfrom multiprocessing import Process, Queue\nimport time\nfrom spiders import AraSpider\n\nclass AraCrawlBot(object):\n def __init__(self):\n self.results = []\n\n def addItem(self,item):\n self.results.append(item)\n\n def run(self):\n process = CrawlerProcess({\n 'FEED_FORMAT': 'json',\n 'FEED_URI': 'crawler/result.json'\n })\n process.crawl(AraSpider)\n for crawler in process.crawlers:\n crawler.signals.connect(self.addItem, signals.item_passed)\n process.start()\n for res in self.results:\n print(res)\n\ndef run_spiders():\n def f(q):\n try:\n bot = AraCrawlBot()\n bot.run()\n q.put(None)\n except Exception as e:\n q.put(e)\n\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n result = q.get()\n p.join()\n\n if result is not None:\n raise result\n\n\nwhile True:\n run_spiders()\n time.sleep(1200)\n","repo_name":"sparcs-kaist/Neobjugi","sub_path":"chatbot/crawler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11969104292","text":"# Convert a trait name into a nicer looking version\ndef pretty_name(S):\n return \" \".join(S.split(\"_\")).title()\n\ndef show_dict(D,superdict=\"\"):\n \"\"\"\n Recursively show the contents of a dictionary or iterable that make contain\n other dictionaries or iterables\n \"\"\"\n if type(D) == dict:\n if len(D) == 0:\n print(f\"{superdict}: {D}\")\n for key,val in D.items():\n show_dict(val,superdict=f\"{superdict}['{key}']\")\n \n elif type(D) in (list,tuple):\n if len(D) == 0:\n print(f\"{superdict}: {D}\")\n for n,i in enumerate(D):\n show_dict(i,superdict=f\"{superdict}[{n}]\")\n \n else:\n print(f\"{superdict}: {D}\")\n\n\n# System for Removing Duplicate Characters That Share a Lore of Magic ###\n# This should only be used in JSONtoDataframe if it is needed elsewhere the\n# unitsDF_clean dataframe should just be loaded\nlores = [\" (Beasts)\",\n \" (Death)\",\n \" (Fire)\",\n \" (Heavens)\",\n \" (High)\",\n \" (Life)\",\n \" (Light)\",\n \" (Metal)\",\n \" (Shadows)\",\n \" (Dark)\",\n \" (Vampires)\",\n \" (Deep)\",\n \" (Plague)\",\n \" (Ruin)\"]\n\ndef remove_lore(name):\n for lore in lores:\n if lore in name:\n name = name.replace(lore,\" \")\n while \" \" in name:\n name = name.replace(\" \",\" \")\n return name\n return name\n\ndef deduplicate_lore(units):\n\n names = units[\"name\"]\n reduced_names = []\n for name in names:\n reduced_names.append(remove_lore(name))\n \n units_no_dupe_lores = units.replace(list(units[\"name\"]),reduced_names)\n units_no_dupe_lores.drop_duplicates(subset=\"name\",inplace=True)\n \n return units_no_dupe_lores\n\n\n\n\n\n# I believe this is correct based on the description by the developers\n# \"Armour = Max damage reduction percentage. Min is always 50% of armour value.\n# To be more precise, any time base damage is dealt, the target rolls for \n# armour. This armour roll is a random value between 50% and 100% of the \n# armour stat. The armour roll is then applied as percentage damage \n# reduction.\"\n# Legacy calculation method by numerical simulation\n#def average_armor_reduction_old(armor):\n# \"\"\"Returns the proportion of base damage blocked by the given armor value\"\"\"\n# ar = np.linspace(armor/2,armor,1000)\n# ar = [min(x,100) for x in ar]\n# return np.mean(ar)/100\n\n# Credit to u/Panthera__Tigris on reddit for the formula\n# (100*(armor-100)+(100-armor*0.5)*((armor*0.5+100)*0.5))/((armor-100)+(100-armor*0.5))/100\n# Credit to u/tilerkiwi for pointing out the simplification used below\ndef average_armor_reduction(armor):\n \"\"\"Returns the proportion of base damage blocked by the given armor value\"\"\"\n if armor < 0:\n raise Exception(\"Armor cannot be less than 0\")\n elif armor <= 100:\n return (armor+armor/2)/2/100\n elif armor <= 200:\n return 2-.0025*armor - 100/armor\n else:\n raise Exception(\"Armor cannot be more than 200\")\n\ndef average_damage_with_armor_raw(base_damage,ap_damage,armor):\n \"\"\"\n Returns the average damage done by an attack with given base and ap damage\n against a target with given armor\n \"\"\"\n armor_reduction = average_armor_reduction(armor)\n adjusted_base_damage = (1-armor_reduction)*base_damage\n return adjusted_base_damage+ap_damage\n\ndef average_damage_with_armor_ratio(total_damage,ap_ratio,armor):\n \"\"\"\n Returns the average damage done by an attack with given total damage and \n ap ratio against a target with given armor\n \"\"\"\n ap_damage = total_damage*ap_ratio\n base_damage = total_damage-ap_damage\n return average_damage_with_armor_raw(base_damage,ap_damage,armor)\n\n\n\n## Probability of hitting with a melee attack\ndef melee_hit_prob(melee_attack,melee_defense):\n r = 35+melee_attack-melee_defense\n h = min(max(r,8),90)\n return h/100\n\n\n\n## Most functions below accept the argument \"units\" which should be a pandas\n## DataFrame where each row is a unit description like the one created by\n## JSONtoDataFrame\n\ndef select_unit(unitsDF,name):\n \"\"\"\n Look for a unit in the unitsDF with a name exactly equal to name\n If there is exacty one then return that row using transpose and squeeze\n Otherwise get every unit with a naame that contains name\n If there are none those check if the input was a key instead and raise\n and error if it is not\n If there is exactly one result give that\n If there is more than one result go through the partial matches and return\n both their name and key\n \"\"\"\n # Look for a unit with a name that matches exactly\n # If we get exactly one match move on\n # Otherwise\n # look for every unit that includes that name\n # if there is exactly one move on\n # if there are zero matches then\n # check if there is an exact match as a key value\n # if not the input is invalid\n # if there is then move on\n # if there is more then one match print out all the possibilities along with their key\n \n unit = unitsDF[unitsDF[\"name\"] == name]\n if len(unit) != 1: \n unit = unitsDF[unitsDF[\"name\"].str.contains(name)]\n if len(unit) == 0:\n unit = unitsDF[unitsDF[\"key\"] == name]\n if len(unit) == 0:\n raise Exception(f\"{name} is not a unit name or key\")\n \n if len(unit) == 1:\n return unit.T.squeeze()\n \n if len(unit) > 1:\n helper = unit[[\"name\",\"key\"]]\n S = \"\"\n for line in helper.values:\n S += f\"{line[0]:<50} {line[1]}\\n\"\n raise Exception(f\"The name '{name}' is ambiguous. Please use one of these names or key values:\\n{S}\")\n \n return unit.T.squeeze()\n\n\n\n# Attributes, abilitiesm and spells are all stored as lists this extracts all\n# the unique attributes, abilities, or spells in a given units dataframe\ndef all_attributes(units):\n attributes = set([])\n for unit_atts in units[\"attributes\"]:\n for att in unit_atts:\n attributes.add(att)\n return sorted(attributes)\n \ndef all_abilities(units):\n abilities = set([])\n for unit_abs in units[\"abilities\"]:\n for ab in unit_abs:\n abilities.add(ab)\n return sorted(abilities)\n\ndef all_spells(units):\n spells = set([])\n for unit_spells in units[\"spells\"]:\n for spell in unit_spells:\n spells.add(spell)\n return sorted(spells)\n\n\n\n\n# Version of a unitsDF that has no single entities\ndef no_single_entity(units):\n is_not_single_entity = units[\"unit_size\"] != 1\n return units[is_not_single_entity]\n\n# Version of a unitsDF that has no special units. Meaning these kinds are removed:\n# 'blessed_spawning', 'crafted', 'elector_counts', 'mistwalker', 'renown', 'tech_lab'\ndef no_special_category(units):\n is_not_special_category = units[\"special_category\"] == \"\"\n return units[is_not_special_category]\n\n# Version of unitsDF without any summoned unitys\ndef no_summoned(units):\n # Tilde is the pandas NOT operator\n unbinding = ~units[\"key\"].str.contains(\"summoned\")\n return units[unbinding]\n\n# Remove summon units, units with a special category like RoR, Mistwalker, etc\n# Then also remove specific campaign only units\ndef no_nonstandard(units):\n units = no_summoned(units)\n units = no_special_category(units)\n \n nonstandard_keys = [\"wh_dlc07_brt_cha_damsel_beasts_2\",\n \"wh_dlc07_brt_cha_damsel_life_2\",\n \"wh_main_brt_cha_damsel_2\",\n \"wh_dlc05_brt_cha_armand_aquitaine_0\",\n \"wh_dlc05_brt_cha_armand_aquitaine_1\", \n \"wh_dlc05_brt_cha_armand_aquitaine_2\",\n \"wh_dlc05_brt_cha_armand_aquitaine_3\",\n \"wh_dlc07_brt_cha_damsel_beasts_2\",\n \"wh_dlc03_bst_cha_graktar_0\",\n \"wh2_dlc14_def_cha_malus_darkblade_tzarkan_0_final_battle\",\n \"wh2_dlc13_emp_cav_knights_blazing_sun_0_imperial_supply\",\n \"wh_dlc05_grn_cha_snorko_one_finger_0\",\n \"wh_dlc05_grn_cha_snorko_one_finger_1\",\n \"wh_dlc05_grn_cha_snorko_one_finger_2\",\n \"wh2_dlc15_grn_cha_night_goblin_warboss_0_big\",\n \"wh2_dlc15_hef_mon_forest_dragon_0\",\n \"wh2_main_lzd_cha_slann_mage_priest_campaign_0\",\n \"wh2_main_lzd_inf_temple_guards_nakai\",\n \"wh2_main_lzd_cav_horned_ones_0_nakai\",\n \"wh2_dlc13_lzd_mon_sacred_kroxigors_0_nakai\",\n \"wh2_main_lzd_mon_kroxigors_nakai\",\n \"wh2_dlc12_lzd_cav_terradon_riders_0_tlaqua\",\n \"wh2_dlc12_lzd_cav_terradon_riders_1_tlaqua\",\n \"wh2_dlc12_lzd_mon_ancient_stegadon_1_nakai\",\n \"wh2_dlc12_lzd_mon_bastiladon_3_nakai\",\n \"wh_dlc01_nor_cha_chaos_sorcerer_lord_0\",\n \"wh_dlc01_nor_cha_chaos_sorcerer_lord_1\",\n \"wh_main_nor_cha_chaos_sorcerer_0\",\n \"wh_main_nor_cha_chaos_sorcerer_1\",\n \"wh_main_nor_mon_chaos_warhounds_1\",\n \"wh2_dlc14_skv_cha_deathmaster_snikch_tzarkan_0\",\n \"wh2_main_skv_inf_stormvermin_0_quest\",\n \"wh_pro03_vmp_cha_krell_campaign_0\",\n \"wh_pro03_vmp_cha_krell_campaign_1\",\n \"wh_pro03_vmp_cha_krell_campaign_2\",\n \"wh_pro03_vmp_cha_krell_campaign_3\",\n \"wh_pro03_vmp_cha_krell_0\",\n \"wh2_dlc11_vmp_inf_crossbowmen\",\n \"wh2_dlc11_vmp_inf_handgunners\"\n ]\n \n for unwanted in nonstandard_keys:\n units = units[~units[\"key\"].str.contains(unwanted)]\n \n return units\n\ndef all_with_ability(units,ability):\n has_ability = []\n for L in units[\"abilities\"]:\n if ability in L:\n has_ability.append(True)\n else:\n has_ability.append(False)\n return units[has_ability]\n\ndef all_with_attribute(units,attribute):\n has_attribute = []\n for L in units[\"attributes\"]:\n if attribute in L:\n has_attribute.append(True)\n else:\n has_attribute.append(False)\n return units[has_attribute]\n\ndef all_from_faction(units,faction_group):\n faction = units[\"faction_group\"] == faction_group\n return units[faction]","repo_name":"SymmetricChaos/WarhammerStats","sub_path":"UtilityFunctions.py","file_name":"UtilityFunctions.py","file_ext":"py","file_size_in_byte":10704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"39756527226","text":"# Data classes make the class definition more concise since python 3.7\n# it automates the creation of __init__ with the attributes passed to the\n# creation of the object.\nfrom dataclasses import dataclass\n\n@dataclass\nclass Book:\n title: str\n author: str\n pages: int\n price: float\n\nb1 = Book(\"A Mao e a Luva\", \"Machado de Assis\", 356, 29.99)\nb2 = Book(\"Dom Casmurro\", \"Machado de Assis\", 230, 24.50)\nb3 = Book(\"Capitaes da Areia\", \"Jorge Amado\", 178, 14.50)\n\n# The data class also provides implementations for the __repr__ and __eq__ magic functions\nprint(b1.title)\nprint(b2.author)\n\nprint(b1 == b2)","repo_name":"j-hmd/daily-python","sub_path":"Object-Oriented-Python/dataclasses_intro.py","file_name":"dataclasses_intro.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17530622721","text":"import json\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom prowler.lib.logger import logger\nfrom prowler.lib.scan_filters.scan_filters import is_resource_filtered\nfrom prowler.providers.aws.lib.service.service import AWSService\n\n\n################## KMS\nclass KMS(AWSService):\n def __init__(self, audit_info):\n # Call AWSService's __init__\n super().__init__(__class__.__name__, audit_info)\n self.keys = []\n self.__threading_call__(self.__list_keys__)\n if self.keys:\n self.__describe_key__()\n self.__get_key_rotation_status__()\n self.__get_key_policy__()\n self.__list_resource_tags__()\n\n def __list_keys__(self, regional_client):\n logger.info(\"KMS - Listing Keys...\")\n try:\n list_keys_paginator = regional_client.get_paginator(\"list_keys\")\n for page in list_keys_paginator.paginate():\n for key in page[\"Keys\"]:\n if not self.audit_resources or (\n is_resource_filtered(key[\"KeyArn\"], self.audit_resources)\n ):\n self.keys.append(\n Key(\n id=key[\"KeyId\"],\n arn=key[\"KeyArn\"],\n region=regional_client.region,\n )\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}\"\n )\n\n def __describe_key__(self):\n logger.info(\"KMS - Describing Key...\")\n try:\n for key in self.keys:\n regional_client = self.regional_clients[key.region]\n response = regional_client.describe_key(KeyId=key.id)\n key.state = response[\"KeyMetadata\"][\"KeyState\"]\n key.origin = response[\"KeyMetadata\"][\"Origin\"]\n key.manager = response[\"KeyMetadata\"][\"KeyManager\"]\n key.spec = response[\"KeyMetadata\"][\"CustomerMasterKeySpec\"]\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}\"\n )\n\n def __get_key_rotation_status__(self):\n logger.info(\"KMS - Get Key Rotation Status...\")\n try:\n for key in self.keys:\n if (\n key.origin\n and key.manager\n and \"EXTERNAL\" not in key.origin\n and \"AWS\" not in key.manager\n ):\n regional_client = self.regional_clients[key.region]\n key.rotation_enabled = regional_client.get_key_rotation_status(\n KeyId=key.id\n )[\"KeyRotationEnabled\"]\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}\"\n )\n\n def __get_key_policy__(self):\n logger.info(\"KMS - Get Key Policy...\")\n try:\n for key in self.keys:\n if (\n key.manager and key.manager == \"CUSTOMER\"\n ): # only customer KMS have policies\n regional_client = self.regional_clients[key.region]\n key.policy = json.loads(\n regional_client.get_key_policy(\n KeyId=key.id, PolicyName=\"default\"\n )[\"Policy\"]\n )\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}\"\n )\n\n def __list_resource_tags__(self):\n logger.info(\"KMS - List Tags...\")\n for key in self.keys:\n if (\n key.manager and key.manager == \"CUSTOMER\"\n ): # only check customer KMS keys\n try:\n regional_client = self.regional_clients[key.region]\n response = regional_client.list_resource_tags(\n KeyId=key.id,\n )[\"Tags\"]\n key.tags = response\n except Exception as error:\n logger.error(\n f\"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\nclass Key(BaseModel):\n id: str\n arn: str\n state: Optional[str]\n origin: Optional[str]\n manager: Optional[str]\n rotation_enabled: Optional[bool]\n policy: Optional[dict]\n spec: Optional[str]\n region: str\n tags: Optional[list] = []\n","repo_name":"prowler-cloud/prowler","sub_path":"prowler/providers/aws/services/kms/kms_service.py","file_name":"kms_service.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","stars":8822,"dataset":"github-code","pt":"22"} +{"seq_id":"70538857337","text":"\"\"\"\nGiven an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nYour goal is to reach the last index in the minimum number of jumps.\n\"\"\"\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def jump(self, A):\n # write your code here\n if not A or len(A) == 0:\n return 0\n result, last, cur = 0, 0, 0\n for i in range(len(A)):\n if i > last:\n # if we still not reach the last one, return False\n if cur == last and last < len(A) - 1:\n return float('inf')\n last = cur\n result += 1\n cur = max(cur, i + A[i])\n return result\n","repo_name":"AnthonyNeu/LintCode","sub_path":"Python/Jump Game II.py","file_name":"Jump Game II.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"13212621439","text":"fruits = [\"mangoes\", \"oranges\", \"pear\"]\n\nstudent1 = {\n \"name\": \"lama\", \"age\": 17, \"math_grade\": 98\n}\n\nstudent2 = {\n \"name\": \"precious\", \"age\": 16, \"math_grade\": 97\n}\n\nstudent3 = {\n \"name\": \"michele\", \"age\": 15, \"math_grade\": 96\n}\n#iterate a list\n#for variable_name in listname:\n # do something\n\n\n#print every fruit in the fruits list\nfor fruit in fruits:\n print(fruit)\n\n #print the age student 1\nprint(student1[\"age\"])\n\n #print the name student 2\nprint(student2[\"name\"])\n\n #print the math grade student 3\nprint(student3[\"math_grade\"])\n\n\nstudents = [student1, student2, student3]\n#print list of students\nprint(students)\n\n\n#print each student dicitionary in students list\nfor student in students:\n print(student)\n\n#print each student's math_grade\nfor math_grade in students:\n print()\n","repo_name":"katherineanlinc/tweet_analysis","sub_path":"list_exp.py","file_name":"list_exp.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21758528001","text":"def solution(N, number):\n answer = -1\n # N개의 수로 표현 가능한 값을 담을 DP\n dp = []\n\n for i in range(1, 9):\n numbers = set()\n # 단순 반복되는 수를 담을 값 5, 55, 555,...\n numbers.add(int(str(N) * i))\n\n print(\"───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\")\n print(f'i-1: {i - 1}')\n print(f'dp : ', dp)\n\n # 0부터 답을 찾을 때(i-1) 까지, 최대 8번째 까지 모든 경우를 검색해본다.\n for j in range(0, i - 1):\n print(\"dp[j] : \", dp[j])\n # set(N이 j개 쓰일 때) 나올 수 있는 값 계산\n # j=1208 1개 ,j=1 2개 ...\n # 경우의 수 구하기\n for x in dp[j]:\n for y in dp[-j - 1]:\n print(f'j:{j} x:{x} y:{y}')\n numbers.add(x + y)\n numbers.add(x - y)\n numbers.add(x * y)\n\n if y != 0:\n numbers.add(x // y)\n\n # 발견하면 리턴\n if number in numbers:\n answer = i\n break\n\n dp.append(numbers)\n\n return answer\n\n\nprint(solution(5, 12))\n","repo_name":"wlwlsus/algorithm-study","sub_path":"Programmers/42896.py","file_name":"42896.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31958055736","text":"import cv2\nimport bitstring\nimport numpy as np\nimport zigzag_matrix_scan as zz\nimport image_preprocessing as preprocess\nimport dct as dct\nimport multiprocessing\nfrom joblib import Parallel, delayed\n\n\nnum_cores = multiprocessing.cpu_count()\n\nstego_file = \"./stego_image.png\"\ncover_file = \"./lenna.jpg\"\nmessage = \"i want to tell you something i am here and i know things that would hurt you\"\n\n\ndef hide_message(message_bits, dct_blocks):\n data_complete = False\n message_bits.pos = 0\n encoded_data_len = bitstring.pack('uint:32', len(message_bits))\n encoded_blocks = []\n for dct_block in dct_blocks:\n # obilaze se svi AC koeficijenti, DC preskacemo\n for i in range(1, len(dct_block)):\n acc_coeff = np.int32(dct_block[i])\n if acc_coeff > 1:\n acc_coeff = np.uint8(dct_block[i])\n if message_bits.pos == (len(message_bits) - 1):\n data_complete = True\n break\n packed_coeff = bitstring.pack('uint:8', acc_coeff)\n if encoded_data_len.pos <= len(encoded_data_len) - 1:\n packed_coeff[-1] = encoded_data_len.read(1)\n else:\n packed_coeff[-1] = message_bits.read(1)\n dct_block[i] = np.float32(packed_coeff.read('uint:8'))\n encoded_blocks.append(dct_block)\n\n if not data_complete:\n raise ValueError(\"Message is too long!\")\n\n return encoded_blocks\n\n\ndef stego(cover_img, message):\n num_channels = 3\n cover_image_path = cover_img\n secret_message = message\n\n raw_cover_image = cv2.imread(cover_image_path, flags=cv2.IMREAD_COLOR)\n height, width = raw_cover_image.shape[:2]\n # ako dimenzije slike nisu deljive sa 8, povecavamo ih tako da budu\n while height % 8:\n height += 1\n while width % 8:\n width += 1\n valid_dim = (width, height)\n padded_image = cv2.resize(raw_cover_image, valid_dim)\n cover_image_f32 = np.float32(padded_image)\n # konvertujemo sliku u YCbCr format\n cover_image_YCC = preprocess.YCrCb(cv2.cvtColor(cover_image_f32, cv2.COLOR_BGR2YCrCb))\n stego_image = np.empty_like(cover_image_f32)\n\n for channel in range(num_channels):\n # primenjujemo dct nad blokovima\n\n dct_blocks = Parallel(n_jobs=num_cores)(delayed(dct.dct2)(block) for block in cover_image_YCC.channels[channel])\n # kvantizacija blokova\n dct_quants = [np.around(np.divide(item, preprocess.luminance_quant_table)) for item in dct_blocks]\n\n # koeficijenti u bloku se obilaze cik-cak i sortiraju po energiji\n sorted_coefficients = [zz.zigzag(block) for block in dct_quants]\n\n # podatke sakrivamo u luminance sloju jer su tu promene najmanje primetne\n if channel == 0:\n secret_data = \"\"\n for char in secret_message.encode('ascii'):\n secret_data += bitstring.pack('uint:8', char)\n embedded_dct_blocks = hide_message(secret_data, sorted_coefficients)\n desorted_coefficients = [zz.inverse_zigzag(block, max_width=8, max_height=8) for block in\n embedded_dct_blocks]\n else:\n # koeficijenti se vracaju u originalni raspored\n desorted_coefficients = [zz.inverse_zigzag(block, max_width=8, max_height=8) for block in\n sorted_coefficients]\n\n # dekvantizacija blokova\n dct_dequants = [np.multiply(data, preprocess.luminance_quant_table) for data in desorted_coefficients]\n\n # inverzni dct\n\n idct_blocks = Parallel(n_jobs=num_cores)(delayed(dct.idct2)(block) for block in dct_dequants)\n\n # spajanje blokova u sliku\n stego_image[:, :, channel] = np.asarray(preprocess.connect_8x8_blocks(cover_image_YCC.width, idct_blocks))\n\n return stego_image\n\n\n# stego_image = stego(cover_file, message)\n# # slika se konvertuje nazad u RGB format\n# stego_image_BGR = cv2.cvtColor(stego_image, cv2.COLOR_YCR_CB2BGR)\n#\n# # pikseli na vrednost od 0 do 255\n# final_stego_image = np.uint8(np.clip(stego_image_BGR, 0, 255))\n#\n# # cuvanje stego-slike\n# cv2.imwrite(stego_file, final_stego_image)\n#\n# original = cv2.imread(cover_file)\n# cv2.imshow(\"Cover image\", original)\n# stego = cv2.imread(stego_file)\n# cv2.imshow(\"Stego\", stego)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n","repo_name":"vakip3/digitalna_forenzika","sub_path":"dct_steganography/create_stego_image.py","file_name":"create_stego_image.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3709959553","text":"\"\"\"\n\nTask: Given 2 timestamps, print absolute difference. Timestamps\nare in the format:\nDay dd Mon yyyy hh:mm:ss +xxxx\n\nInput: per lines:\nn number of test cases\nnext lines are the timestamps\n\nOutput: absolute difference in seconds\n\n\"\"\"\n\nfrom datetime import datetime\n\nif __name__ == '__main__':\n n = int(input())\n\n for i in range(n):\n t1 = datetime.strptime(input(), '%a %d %b %Y %H:%M:%S %z')\n t2 = datetime.strptime(input(), '%a %d %b %Y %H:%M:%S %z')\n delta = int(abs((t1 - t2).total_seconds()))\n # print(t1, t2)\n print(delta)","repo_name":"alothings/python_challenges","sub_path":"hacker_rank/python/date_time/time_delta.py","file_name":"time_delta.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5183528101","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.button import Button\nfrom kivy.core.window import Window\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.codeinput import CodeInput\nfrom kivy.clock import Clock\nimport random\n\nkivy.require(\"1.11.1\")\n\nlista = (1, 2, 3, 4, 5)\n\nname = \"mr_itolk\"\n\nclass my_sclow(ScrollView):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.layout = GridLayout(cols=1, size_hint_y=None)\n self.add_widget(self.layout)\n\n self.chat = Label(size_hint_y=None, markup=True)\n self.scrobar = Label()\n\n self.layout.add_widget(self.chat)\n self.layout.add_widget(self.scrobar)\n\n def update_chat(self, message, *_):\n self.chat.text += \"\\n\" + message\n\n self.layout.height = self.chat.texture_size[1] + 15\n self.chat.height = self.chat.texture_size[1]\n self.chat.text_size = self.chat.width * 0.98, None\n self.scroll_to(self.scrobar)\n\n\nclass myPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.cols = 1\n self.rows = 3\n\n self.mini = Label(text=\"pls what is you lucky number for 1 up to 5\", height=Window.size[0] * 0.1, size_hint_y=None)\n\n self.top_up = GridLayout(cols=1)\n self.top_up.add_widget(self.mini)\n self.add_widget(self.top_up)\n\n\n self.display = my_sclow(height=Window.size[1] * 0.8, size_hint_y=None)\n\n self.add_widget(self.display)\n\n # self.one(Label(text=\" Hello is me!!...\"))\n # self.add_widget(Label(text=\"Hello is me!!!!...\"))\n self.new_input = TextInput(width=Window.size[0] * 0.8, size_hint_x=None, multiline=False)\n self.one = Button(text=\"OK\")\n self.one.bind(on_press=self.button)\n\n last_line = GridLayout(cols=2)\n last_line.add_widget(self.new_input)\n last_line.add_widget(self.one)\n self.add_widget(last_line)\n\n self.bind(size=self.adjust_fields)\n\n Window.bind(on_key_dwon=self.on_key_down)\n\n Clock.schedule_once(self.focus_text_input, 0.1)\n\n def on_key_down(self, instance, keyboard, keycode, text, modifiers):\n if keycode == 40:\n self.button(None)\n\n def mr_italk(self):\n #info = \"hey am here!!!....\"\n\n #self.display.update_chat(info)\n #self.display.update_chat(lista)\n\n while self.new_input.text:\n self.n2 = random.choice(lista)\n self.n = int(self.new_input.text)\n if self.n == self.n2:\n go = str(self.n2)\n na = self.new_input.text\n self.display.update_chat(f\"it is in........ computer: {go} you: {na}\")\n self.new_input.text = ''\n Clock.schedule_once(self.focus_text_input, 0.1)\n break\n else:\n go = str(self.n2)\n na = self.new_input.text\n self.display.update_chat(f\"it is not in......computer: {go} you: {na}\")\n self.new_input.text = ''\n Clock.schedule_once(self.focus_text_input, 0.1)\n break\n\n def button(self, _):\n message = self.new_input.text\n self.remove_widget(self.top_up)\n Clock.schedule_once(self.focus_text_input, 0.1)\n #message.bind(active=self.mr_italk)\n\n if message:\n self.display.update_chat(f'{name} > {message}')\n n = self.new_input.text\n Clock.schedule_once(self.focus_text_input, 0.1)\n return self.mr_italk()\n\n Clock.schedule_once(self.focus_text_input, 0.1)\n\n def focus_text_input(self, *_):\n self.new_input.focus = True\n\n\n\n # self.add_widget(self.one)\n def adjust_fields(self, *_):\n\n if Window.size[1] * 0.1 < 50:\n new_height = Window.size[1] - 50\n else:\n new_height = Window.size[1] * 0.9\n self.display.height = new_height\n\n if Window.size[0] * 0.2 < 160:\n new_width = Window.size[0] - 160\n else:\n new_width = Window.size[0] * 0.8\n self.new_input.width = new_width\n\nclass pkApp(App):\n def build(self):\n return myPage()\n\n\nif __name__ == \"__main__\":\n RunApp = pkApp()\n RunApp.run()\n","repo_name":"prinako/python_semple_text_game","sub_path":"italk_graphic_interface/pk.py","file_name":"pk.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26303218609","text":"print('Введите N, N>0')\r\n\r\nn = int(input())\r\na=[]\r\n\r\nif (n>0):\r\n for i in range(1,n+1):\r\n a.append(i*2-1)\r\n print('ответ =',a, sep = ' ')\r\nelse:\r\n print('Ввеите значение, удовлетворяющее условию')","repo_name":"irishaoreshek/laboratornie","sub_path":"16 лабораторная/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43018953932","text":"class Stack():\n def __init__(self):\n self.top=[]\n def peek(self):\n if len(self.top)!=0:\n return self.top[-1]\n def pop(self):\n if len(self.top)!=0:\n last=self.top.pop(-1)\n return last\n def push(self,item):\n self.top.append(item)\n def isEmpty(self):\n if len(self.top)!=0:\n return False\n else:\n return True\n\nclass Solution():\n def Basic_calculator(self, string):\n s_ope=Stack()\n new=\"\"\n for w in string:\n if w is \")\":\n k=s_ope.pop()\n new+=k\n elif w in \"(+-*/\":\n s_ope.push(s)\n else:\n new+=w\n while not s_ope.isEmpty():\n new+=s_ope.pop()\n\n for w in new:\n if w not in \"+-*/\":\n s_ope.push(w)\n else:\n a=s_ope.pop()\n b=s_ope.pop()\n if w==\"+\":\n now=a+b\n elif w==\"-\":\n now=a-b\n elif w==\"*\":\n now=a*b\n else:\n now=a/b\n s_ope.push(now)\n\n return now\n\n\n\ns=Solution()\nprint(s.Basic_calculator(\"2*(5+5*2)/3+(6/2+8)\")) #21\nprint(s.Basic_calculator(\"(2+6*3+5-(3*14/7+2)*5)+3\")) #-12","repo_name":"RyangHa/selfstudy","sub_path":"2022 python/leetcode 872.py","file_name":"leetcode 872.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37765284229","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\nclass Browser:\n def __init__(self, link):\n self.driver = webdriver.Chrome('/bin/chromedriver')\n self.driver.get(link)\n self.driver.maximize_window()\n\n def search_for_a_job(self, job):\n search_box = self.driver.find_element(By.XPATH, '/html/body/div[1]/header/nav/section/section[2]/form/section[1]/input')\n search_box.send_keys(job)\n search_box.send_keys(Keys.RETURN)\n print('Busca concluida.')\n\n def get_jobs_list(self):\n jobs_list = self.driver.find_elements(By.XPATH, '//*[@id=\"main-content\"]/section/ul/li')\n return jobs_list\n\n def get_jobs_information(self, jobs_list):\n def wait_for_for_title():\n WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.XPATH, '/html/body/div[1]/div/section/div[2]/section/div/div[1]/div/a/h2')))\n\n jobs_information = []\n\n for i, job in enumerate(jobs_list):\n time.sleep(1)\n job.find_element(By.TAG_NAME, 'a').click()\n try:\n wait_for_for_title()\n except TimeoutException as ex:\n print(ex)\n jobs_list[i-1].find_element(By.TAG_NAME, 'a').click()\n time.sleep(1)\n job.find_element(By.TAG_NAME, 'a').click()\n time.sleep(1)\n\n try:\n job_information = {\n 'job_title': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/section/div/div[1]/div/a/h2').text,\n 'long_description': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/div/section[1]/div/div/section/div').text,\n 'experience_level': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/div/section[1]/div/ul/li[1]/span').text,\n 'job_type': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/div/section[1]/div/ul/li[2]/span').text,\n 'role': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/div/section[1]/div/ul/li[3]/span').text,\n 'sector': self.driver.find_element(By.XPATH, '/html/body/div[1]/div/section/div[2]/div/section[1]/div/ul/li[4]/span').text\n }\n except:\n job_information = {\n 'job_title': '-',\n 'long_description': '-',\n 'experience_level': '-',\n 'job_type': '-',\n 'role': '-',\n 'sector': '-'\n }\n jobs_information.append(job_information)\n\n print('Coleta de informações concluida.')\n return jobs_information\n","repo_name":"danielgaio/minicurso-rpa-ifsul","sub_path":"src/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43051153140","text":"\"\"\"This module contains the general information for DiagSrvCtrl ManagedObject.\"\"\"\n\nfrom ...ucsmo import ManagedObject\nfrom ...ucscoremeta import MoPropertyMeta, MoMeta\nfrom ...ucsmeta import VersionMeta\n\n\nclass DiagSrvCtrlConsts:\n ADMIN_STATE_CANCEL = \"cancel\"\n ADMIN_STATE_READY = \"ready\"\n ADMIN_STATE_TRIGGER = \"trigger\"\n END_TS_NEVER = \"never\"\n END_TS_M_NEVER = \"never\"\n OPER_STATE_CANCELLED = \"cancelled\"\n OPER_STATE_COMPLETED = \"completed\"\n OPER_STATE_FAILED = \"failed\"\n OPER_STATE_IDLE = \"idle\"\n OPER_STATE_IN_PROGRESS = \"in-progress\"\n OPER_STATE_UNKNOWN = \"unknown\"\n START_TS_NEVER = \"never\"\n START_TS_M_NEVER = \"never\"\n\n\nclass DiagSrvCtrl(ManagedObject):\n \"\"\"This is DiagSrvCtrl class.\"\"\"\n\n consts = DiagSrvCtrlConsts()\n naming_props = set([])\n\n mo_meta = MoMeta(\"DiagSrvCtrl\", \"diagSrvCtrl\", \"diag\", VersionMeta.Version111j, \"InputOutput\", 0x7f, [], [\"admin\", \"pn-equipment\", \"pn-maintenance\"], ['computeBlade', 'computeRackUnit', 'computeServerUnit'], ['diagRslt', 'diagRunPolicy', 'etherServerIntFIo'], [\"Get\"])\n\n prop_meta = {\n \"admin_state\": MoPropertyMeta(\"admin_state\", \"adminState\", \"string\", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, [\"cancel\", \"ready\", \"trigger\"], []),\n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x4, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []),\n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),\n \"end_ts\": MoPropertyMeta(\"end_ts\", \"endTs\", \"string\", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\\.([0-9]){3})){0,1}\"\"\", [\"never\"], []),\n \"end_ts_m\": MoPropertyMeta(\"end_ts_m\", \"endTsM\", \"string\", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"never\"], [\"0-18446744073709551615\"]),\n \"error_descr\": MoPropertyMeta(\"error_descr\", \"errorDescr\", \"string\", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),\n \"oper_qualifier\": MoPropertyMeta(\"oper_qualifier\", \"operQualifier\", \"string\", VersionMeta.Version131c, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"((defaultValue|not-applicable|stage-failed|test-failure|run-cancelled|component-error|stages-completed),){0,6}(defaultValue|not-applicable|stage-failed|test-failure|run-cancelled|component-error|stages-completed){0,1}\"\"\", [], []),\n \"oper_state\": MoPropertyMeta(\"oper_state\", \"operState\", \"string\", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"cancelled\", \"completed\", \"failed\", \"idle\", \"in-progress\", \"unknown\"], []),\n \"overall_progress\": MoPropertyMeta(\"overall_progress\", \"overallProgress\", \"byte\", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, [], [\"0-100\"]),\n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),\n \"run_policy_name\": MoPropertyMeta(\"run_policy_name\", \"runPolicyName\", \"string\", VersionMeta.Version131c, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),\n \"sacl\": MoPropertyMeta(\"sacl\", \"sacl\", \"string\", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}\"\"\", [], []),\n \"start_ts\": MoPropertyMeta(\"start_ts\", \"startTs\", \"string\", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\\.([0-9]){3})){0,1}\"\"\", [\"never\"], []),\n \"start_ts_m\": MoPropertyMeta(\"start_ts_m\", \"startTsM\", \"string\", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"never\"], [\"0-18446744073709551615\"]),\n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x40, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [], []),\n }\n\n prop_map = {\n \"adminState\": \"admin_state\", \n \"childAction\": \"child_action\", \n \"dn\": \"dn\", \n \"endTs\": \"end_ts\", \n \"endTsM\": \"end_ts_m\", \n \"errorDescr\": \"error_descr\", \n \"operQualifier\": \"oper_qualifier\", \n \"operState\": \"oper_state\", \n \"overallProgress\": \"overall_progress\", \n \"rn\": \"rn\", \n \"runPolicyName\": \"run_policy_name\", \n \"sacl\": \"sacl\", \n \"startTs\": \"start_ts\", \n \"startTsM\": \"start_ts_m\", \n \"status\": \"status\", \n }\n\n def __init__(self, parent_mo_or_dn, **kwargs):\n self._dirty_mask = 0\n self.admin_state = None\n self.child_action = None\n self.end_ts = None\n self.end_ts_m = None\n self.error_descr = None\n self.oper_qualifier = None\n self.oper_state = None\n self.overall_progress = None\n self.run_policy_name = None\n self.sacl = None\n self.start_ts = None\n self.start_ts_m = None\n self.status = None\n\n ManagedObject.__init__(self, \"DiagSrvCtrl\", parent_mo_or_dn, **kwargs)\n","repo_name":"CiscoUcs/ucsmsdk","sub_path":"ucsmsdk/mometa/diag/DiagSrvCtrl.py","file_name":"DiagSrvCtrl.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"22"} +{"seq_id":"31686026886","text":"from django.core.management.base import BaseCommand, CommandError\nfrom arches.app.models.models import Concept as modelConcept\nfrom arches.app.models.concept import Concept\n\n\nclass Command(BaseCommand):\n \"\"\"\n Commands for returning preflabel and uuid of concepts in a thesauri\n \"\"\"\n\n def handle(self, *args, **options):\n\n source_thesauri_id = \"117cddf0-8403-4e16-b325-43327efc9e1f\"\n target_thesauri_id = \"06cf74db-f2b8-46a9-8c2f-565bedaa6424\"\n\n for conceptid in [source_thesauri_id, target_thesauri_id]:\n c = Concept().get(\n id=conceptid,\n include_subconcepts=True,\n include_parentconcepts=False,\n include_relatedconcepts=True,\n depth_limit=None,\n up_depth_limit=None,\n )\n print({c.values[0].value: conceptid})\n for subc in c.subconcepts:\n print(vars(subc.values[0]))\n print({subc.values[0].value: subc.values[0].id})\n print(\"------------------------------------\")","repo_name":"KacperSzyf/arches_commands","sub_path":"get_concept_id.py","file_name":"get_concept_id.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"986181306","text":"# 6.1.2.2 A short journey from procedural to object approach\n\nstack = []\n\ndef push(val):\n stack.append(val)\n\n\ndef pop(i):\n val = stack[-1]\n del stack[-1]\n text = 'keluar ke-'+str(i)+' : '+val\n return text\n\nfor i in range(1,6):\n text = 'masuk ke-'+str(i)\n push(text)\n\nfor i in range(1,6):\n print(pop(i))","repo_name":"apriantoa917/Python-Latihan-DTS-2019","sub_path":"OOP/OOP - import class object/stack - examples 1.py","file_name":"stack - examples 1.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"5776955722","text":"#1. Write a Python program to calculate the length of a string\n\ndef str1(x):\n p=0\n for i in x:\n p=p+1\n print(p) \n #print(len(x))\nx='mahi' \nstr1(x) \n","repo_name":"Susama91/Project","sub_path":"WResource/String/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14604620947","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom codecs import open\nfrom base64 import b64decode\nimport argparse\nimport lsc\nimport lessc\n\ndef depender(dependencias, dir):\n\tdep_cod = ''\n\n\tif dependencias:\n\t\tmap_sufijo_dep = {\n\t\t\t'.js' : '-min.js',\n\t\t\t'.css' : '.css'\n\t\t}\n\n\t\tsufijo_dep = map_sufijo_dep[sufijo]\n\n\t\tfor dep in dependencias:\n\t\t\tdep_f = os.path.join(dir, dep + sufijo_dep)\n\n\t\t\twith open(dep_f, 'r', 'utf-8') as f_dep:\n\t\t\t\tdep_cod += f_dep.read()\n\n\treturn dep_cod\n\nparser = argparse.ArgumentParser(description='''\nCompilador del proyecto\n''')\n\nparser.add_argument('fichero')\nparser.add_argument('--salida', dest='salida')\nparser.add_argument('-s' , dest='salida')\n\nparser.add_argument('--sufijo')\n\nparser.add_argument('-O', action='store_true', dest='optimizar')\n\nparser.add_argument('--externas')\n\nparser.add_argument('--internas')\n\nparser.add_argument('--compresor_js')\nparser.add_argument('--compresor_css')\n\nparser.add_argument('--3rdparty', dest='dir_dependencias')\n\nclass AccionDepurar(argparse.Action):\n\tdef __call__(self, parser, contexto, vals, option_string=None):\n\t\tsetattr(contexto, self.dest, '1' if vals is None else vals)\n\nparser.add_argument('--depurar', nargs='?', action=AccionDepurar)\n\nargs = parser.parse_args()\n\nfichero = args.fichero\nsufijo = args.sufijo\nsalida = args.salida\ndir_dependencias = args.dir_dependencias\n\nif args.externas: args.externas = args.externas.split(',')\nif args.internas: args.internas = args.internas.split(',')\n\nif not sufijo:\n\tf_suf = os.path.splitext(fichero)[1]\n\n\tmap_s = {\n\t\t'.ls' : '.js',\n\t\t'.less' : '.css'\n\t}\n\n\tsufijo = map_s[f_suf]\n\n\n# (-o-) decidir cómo compilar\nmap_compilador = {\n\t'.js' : lsc.compilar,\n\t'.css' : lessc.compilar\n}\n\ncompilar = map_compilador[sufijo]\n\ntry:\n\tcompilado = compilar(args.fichero)\nexcept Exception as ex:\n\tprint(ex)\n\tsys.exit(-1)\n\n\nif not salida:\n\tprint(compilado)\nelse:\n\tf_nom = os.path.basename(fichero)\n\tf_nombase = os.path.splitext(f_nom)[0]\n\n\tsalida_f = os.path.join(salida, f_nombase) + sufijo\n\n\ttmp_b = '/dev/shm' if 'posix' == os.name else os.path.dirname(os.path.dirname(os.path.abspath(args.fichero)))\n\n\tsalida_tmp_f = os.path.join(tmp_b, f_nombase) + sufijo\n\n\tif args.optimizar:\n\n\t\t# -------------------------------------\n\t\t#map_compresor = {\n\t\t\t#'.js' : 'java -jar %(compresor)s --language_in ECMASCRIPT5_STRICT --compilation_level %(nivel)s --process_closure_primitives --js renaming_map.js --js %(entrada)s --js_output_file %(salida)s',\n\n\t\t\t#'.css' : 'java -jar %(compresor)s --output-file %(salida)s --output-renaming-map-format CLOSURE_COMPILED --rename %(nivel)s --output-renaming-map renaming_map.js --allow-unrecognized-functions --allow-unrecognized-properties %(entrada)s'\n\t\t#}\n\n\t\tmap_compresor = {\n\t\t\t'.js' : b64decode('amF2YSAtamFyICUoY29tcHJlc29yKXMgLS1sYW5ndWFnZV9pbiBFQ01BU0NSSVBUNV9TVFJJQ1QgLS1jb21waWxhdGlvbl9sZXZlbCAlKG5pdmVsKXMgLS1wcm9jZXNzX2Nsb3N1cmVfcHJpbWl0aXZlcyAtLWpzIHJlbmFtaW5nX21hcC5qcyAtLWpzICUoZW50cmFkYSlzIC0tanNfb3V0cHV0X2ZpbGUgJShzYWxpZGEpcw=='),\n\n\t\t\t'.css' : b64decode('amF2YSAtamFyICUoY29tcHJlc29yKXMgLS1vdXRwdXQtZmlsZSAlKHNhbGlkYSlzIC0tb3V0cHV0LXJlbmFtaW5nLW1hcC1mb3JtYXQgQ0xPU1VSRV9DT01QSUxFRCAtLXJlbmFtZSAlKG5pdmVsKXMgLS1vdXRwdXQtcmVuYW1pbmctbWFwIHJlbmFtaW5nX21hcC5qcyAtLWFsbG93LXVucmVjb2duaXplZC1mdW5jdGlvbnMgLS1hbGxvdy11bnJlY29nbml6ZWQtcHJvcGVydGllcyAlKGVudHJhZGEpcw==')\n\t\t}\n\n\t\t#if args.depurar:\n\t\t\t#map_nivel_compresor = {\n\t\t\t\t## '.js' : 'WHITESPACE_ONLY --formatting PRETTY_PRINT --debug',\n\t\t\t\t#'.js' : b64decode('V0hJVEVTUEFDRV9PTkxZIC0tZm9ybWF0dGluZyBQUkVUVFlfUFJJTlQgLS1kZWJ1Zw=='),\n\t\t\t\t##'.css' : 'DEBUG --pretty-print'\n\t\t\t\t#'.css' : b64decode('REVCVUcgLS1wcmV0dHktcHJpbnQ=')\n\t\t\t#}\n\t\t#else:\n\t\tmap_nivel_compresor = {\n\t\t\t#'.js' : 'ADVANCED_OPTIMIZATIONS',\n\t\t\t'.js' : b64decode('QURWQU5DRURfT1BUSU1JWkFUSU9OUw=='),\n\t\t\t#'.css' : 'CLOSURE'\n\t\t\t'.css' : b64decode('Q0xPU1VSRQ==')\n\t\t}\n\n\t\tmap_ruta_compresor = {\n\t\t\t'.js' : args.compresor_js,\n\t\t\t'.css' : args.compresor_css\n\t\t}\n\n\t\tcompresor = map_compresor [sufijo]\n\t\tnivel_compresor = map_nivel_compresor [sufijo]\n\t\truta_compresor = map_ruta_compresor [sufijo]\n\t\t# -------------------------------------\n\n\t\tentrada_tmp_f = os.path.join(tmp_b, f_nom)\n\n\t\twith open(entrada_tmp_f, 'w', 'utf-8') as f_entrada:\n\t\t\tint_cod = depender(args.internas, dir_dependencias)\n\t\t\t# getCssName\n\t\t\trenam = b64decode('Z29vZy5nZXRDc3NOYW1l')\n\t\t\t# Css\n\t\t\trenom = b64decode('Z3ouQ3Nz')\n\t\t\tf_entrada.write(int_cod +\n\t\t\t\t\tcompilado.replace(renom, renam))\n\n\t\tif '.js' == sufijo:\n\t\t\tif args.depurar:\n\t\t\t\t# '--formatting PRETTY_PRINT'\n\t\t\t\tdepurar = ' ' + b64decode('LS1mb3JtYXR0aW5nIFBSRVRUWV9QUklOVA==')\n\t\t\t\tif '2' == args.depurar:\n\t\t\t\t\t# '--debug'\n\t\t\t\t\t#depurar += ' ' + b64decode('LS1kZWJ1Zw==')\n\t\t\t\t\t# 'SIMPLE_OPTIMIZATIONS'\n\t\t\t\t\tnivel_compresor = b64decode('U0lNUExFX09QVElNSVpBVElPTlM=')\n\n\t\t\t\tcompresor += depurar\n\n\t\t\tif args.externas:\n\t\t\t\tfor ext in args.externas:\n\t\t\t\t\tcompresor += \\\n\t\t\t\t\t\t' --externs %s' % os.path.join(\n\t\t\t\t\t\t\tdir_dependencias,\n\t\t\t\t\t\t\text + sufijo)\n\t\tos.system(compresor % {\n\t\t\t'compresor' : ruta_compresor,\n\t\t\t'entrada' : entrada_tmp_f,\n\t\t\t'salida' : salida_tmp_f,\n\t\t\t'nivel' : nivel_compresor\n\t\t})\n\n\t\tos.remove(entrada_tmp_f)\n\n\t\t#with open(salida_tmp_f, 'r', 'utf-8') as f_salida_tmp:\n\t\t\t#salida_limpia = f_salida_tmp.read().replace('\\n', ' ')\n\n\t\t#with open(salida_tmp_f, 'w', 'utf-8') as f_salida_tmp:\n\t\t\t#f_salida_tmp.write(salida_limpia)\n\n\telse:\n\t\twith open(salida_tmp_f, 'w') as f_sal:\n\t\t\tf_sal.write(compilado)\n\n\text_cod = depender(\n\t\t\targs.externas,\n\t\t\tdir_dependencias) if args.externas else ''\n\n\tderechos = b64decode('LyoqCiAqIENhdmFTb2Z0IFNBQyBodHRwOi8vY2F2YXNvZnRzYWMuY29tCiAqIGNyaXN0SGlhbiBHei4gKGdjY2EpIC0gaHR0cDovL2djY2EudGsKICovCg==')\n\n\twith open(salida_f, 'w', 'utf-8') as f_salida:\n\t\twith open(salida_tmp_f, 'r', 'utf-8') as f_salida_tmp:\n\t\t\tsalida = f_salida_tmp.read()\n\t\t\tcompilado = (ext_cod.replace('\\n', ' ') + salida) if args.depurar else (ext_cod + salida).replace('\\n', ' ')\n\t\t\tf_salida.write(derechos + compilado)\n\n\tos.remove(salida_tmp_f)\n","repo_name":"gcca/libres","sub_path":"lcs/setup1.py","file_name":"setup1.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39375537229","text":"import sys\nimport time\nimport socket\nimport logging\nfrom threading import Thread, Event\nfrom queue import Queue\n\nADDRESS, PORT = '127.0.0.1', 9000\nBUF_SIZE = 4096\n\nlogger = logging.getLogger('django.server')\n\nclass EngineConnector:\n def __init__(self):\n self.receiver_thread = None\n self.sender_thread = None\n self.abr_engine_socket = None\n self.connected_to_engine = Event()\n\n logger.info('Initializing ABR engine socket')\n self.abr_engine_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._try_engine_connect()\n\n self.subscribers = set()\n self.to_engine_queue = Queue()\n\n self.receiver_thread = Thread(target=self._receiver)\n self.sender_thread = Thread(target=self._sender)\n self.receiver_thread.start()\n self.sender_thread.start()\n\n def subscribe(self, callback):\n logger.info('New client subscribed to engine messages')\n self.subscribers.add(callback)\n \n def unsubscribe(self, callback):\n logger.info('A client disconnected from engine messages')\n self.subscribers.remove(callback)\n\n def send(self, message):\n '''\n `message` is a bytestring of the JSON message\n '''\n self.to_engine_queue.put(message)\n\n def _try_engine_connect(self):\n try:\n self.abr_engine_socket.connect((ADDRESS, PORT))\n self.abr_engine_socket.setblocking(False)\n self.connected_to_engine.set()\n return True\n except OSError:\n err = 'Unable to connect to ABR Engine'\n logger.error(err)\n return False\n\n def _receiver(self):\n # Keep going until the server is killed\n # Ping every 1s. The sender will set self.connected_to_engine to true\n # if it successfully sent bytes to the engine.\n while True:\n if self.connected_to_engine.is_set():\n logger.info('Started receiving from ABR engine')\n\n while self.connected_to_engine.is_set():\n # Wait for messages from the ABR engine, then forward them to the\n # composition client\n try:\n # Receive the length of the next message (an Int32, assumed to be\n # little endian)\n length = int.from_bytes(self.abr_engine_socket.recv(4), 'little')\n\n # Construct the whole message from the socket\n bytes_read = 0\n message = bytes()\n while bytes_read < length:\n received_bytes = self.abr_engine_socket.recv(min(length - bytes_read, BUF_SIZE))\n if received_bytes:\n bytes_read += len(received_bytes)\n message += received_bytes\n\n # Send message to all subscribing clients\n for callback in self.subscribers:\n callback(message)\n\n except BlockingIOError:\n pass\n except:\n self.connected_to_engine.clear()\n logger.info('Stopped receiving from ABR engine')\n\n time.sleep(1)\n\n\n def _sender(self):\n # Keep going until the server is killed\n # Ping every 1s.\n while True:\n # Try to a zero int to the engine. If success, set self.connected_to_engine\n if self._try_engine_connect() and self.abr_engine_socket.send(int.to_bytes(0, 4, 'little')):\n self.connected_to_engine.set()\n logger.info('Started sending to ABR engine')\n\n while self.connected_to_engine.is_set():\n # Send messages from the design client to the ABR engine\n while not self.to_engine_queue.empty():\n try:\n message = self.to_engine_queue.get()\n\n # Send the message length to the engine\n length = len(message)\n total_bytes = 0\n while total_bytes < 4:\n total_bytes += self.abr_engine_socket.send(int.to_bytes(length, 4, 'little'))\n\n total_bytes = 0\n while total_bytes < length:\n total_bytes += self.abr_engine_socket.send(message)\n\n except BlockingIOError:\n pass\n except:\n self.connected_to_engine.clear()\n logger.info('Stopped sending to ABR engine')\n\n time.sleep(1)\nif 'runserver' in sys.argv:\n engine = EngineConnector()\n","repo_name":"cweissman001/ABR_Legends","sub_path":"ABRDesignInterface-legend/composition/engine_connector.py","file_name":"engine_connector.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73841521016","text":"import json\nimport os\nimport pickle\nimport random\nimport re\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\n\ndef brier_multi(targets, probs):\n # https://stats.stackexchange.com/questions/403544/how-to-compute-the-brier-score-for-more-than-two-classes\n targets, probs = np.array(targets), np.array(probs)\n return np.mean(np.sum((probs - targets) ** 2, axis=1))\n\n\ndef corrupt_context_wordlevel_for_auxilary(\n ids,\n mask,\n use_attn: bool,\n corrupt_ratio: float,\n sep_id,\n skip_token_ids,\n device=None,\n model=None,\n):\n numpy_ids = np.array(ids)\n numpy_mask = np.array(mask)\n\n bs, max_len = numpy_ids.shape\n context_end_indices = np.where(numpy_ids == sep_id)[1].reshape(bs, 2)[:, 0]\n\n if use_attn:\n with torch.no_grad():\n _, attentions = model.get_attention(ids.to(device), mask.to(device))\n # 12, [bs,12,300,300]\n attention_output = [el.cpu().numpy() for el in attentions]\n\n for seq_idx, seq in enumerate(numpy_ids):\n this_seq_attention_output = sum(\n [sum(sum(el[seq_idx])) for el in attention_output]\n )\n attn_score = [\n tmp_score\n if seq[tmp_idx] not in skip_token_ids\n and tmp_idx > context_end_indices[seq_idx]\n else 0.0\n for tmp_idx, tmp_score in enumerate(this_seq_attention_output)\n ]\n\n sorted_score_indices = np.argsort(attn_score)[::-1]\n selected_indices = sorted(\n sorted_score_indices[\n : int((context_end_indices[seq_idx] - 1) * corrupt_ratio)\n ]\n )\n modified_ids = numpy_ids[seq_idx].copy().tolist()\n modified_mask = numpy_mask[seq_idx].copy().tolist()\n for deleted_order, deleted_index in enumerate(selected_indices):\n modified_ids.pop(deleted_index - deleted_order)\n modified_ids.append(0)\n modified_mask.pop(0)\n modified_mask.append(0)\n assert (\n len(modified_ids) == len(numpy_ids[seq_idx]) == len(modified_mask)\n )\n numpy_ids[seq_idx], numpy_mask[seq_idx] = modified_ids, modified_mask\n return torch.tensor(numpy_ids), torch.tensor(numpy_mask)\n else:\n for seq_idx, seq in enumerate(numpy_ids):\n selected_indices = [i + 1 for i in range(context_end_indices[seq_idx] - 1)]\n selected_indices = sorted(\n random.sample(\n selected_indices, int(len(selected_indices) * corrupt_ratio)\n )\n )\n modified_ids = numpy_ids[seq_idx].copy().tolist()\n modified_mask = numpy_mask[seq_idx].copy().tolist()\n for deleted_order, deleted_index in enumerate(selected_indices):\n modified_ids.pop(deleted_index - deleted_order)\n modified_ids.append(0)\n modified_mask.pop(0)\n modified_mask.append(0)\n assert len(modified_ids) == len(numpy_ids[seq_idx]) == len(modified_mask)\n numpy_ids[seq_idx], numpy_mask[seq_idx] = modified_ids, modified_mask\n return torch.tensor(numpy_ids), torch.tensor(numpy_mask)\n\n\ndef make_corrupted_select_dataset(\n uw_data,\n dd_dataset,\n retrieval_candidate_num,\n save_fname,\n tokenizer,\n max_seq_len,\n replace_golden_to_nota,\n):\n assert not replace_golden_to_nota\n if os.path.exists(save_fname):\n print(\"{} exist!\".format(save_fname))\n with open(save_fname, \"rb\") as f:\n return pickle.load(f)\n nota_token = get_nota_token()\n assert isinstance(uw_data, list) and all([len(el) == 2 for el in uw_data])\n responses = [uttr for conv in dd_dataset for uttr in conv[1:]]\n assert all([isinstance(el, str) for el in responses])\n for idx, hist in enumerate(uw_data):\n assert len(hist) == 2 and all([isinstance(el, str) for el in hist])\n assert hist[1] == nota_token or not replace_golden_to_nota\n candidates = random.sample(responses, retrieval_candidate_num - 1)\n uw_data[idx].extend(candidates)\n\n ids_list = [[] for _ in range(retrieval_candidate_num)]\n masks_list = [[] for _ in range(retrieval_candidate_num)]\n labels = []\n print(\"Tensorize...\")\n for sample_idx, sample in enumerate(tqdm(uw_data)):\n assert len(sample) == 1 + retrieval_candidate_num\n assert all([isinstance(el, str) for el in sample])\n context, candidates = sample[0], sample[1:]\n assert len(candidates) == retrieval_candidate_num\n encoded = tokenizer(\n [context] * retrieval_candidate_num,\n text_pair=candidates,\n max_length=max_seq_len,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n encoded_ids, encoded_mask = encoded[\"input_ids\"], encoded[\"attention_mask\"]\n assert len(encoded_ids) == len(encoded_mask) == retrieval_candidate_num\n for candi_idx in range(retrieval_candidate_num):\n ids_list[candi_idx].append(encoded_ids[candi_idx])\n masks_list[candi_idx].append(encoded_mask[candi_idx])\n labels.append(0)\n assert len(list(set([len(el) for el in ids_list]))) == 1\n assert len(list(set([len(el) for el in masks_list]))) == 1\n ids_list = [torch.stack(el) for el in ids_list]\n masks_list = [torch.stack(el) for el in masks_list]\n labels = torch.tensor(labels)\n data = ids_list + masks_list + [labels]\n assert len(data) == 1 + 2 * retrieval_candidate_num\n with open(save_fname, \"wb\") as f:\n pickle.dump(data, f)\n return data\n\n\ndef make_tuple(exp):\n assert \"(\" in exp and \")\" in exp and exp.count(\",\") == 1\n exp = [el.strip() for el in exp.strip()[1:-1].split(\",\")]\n return exp\n\n\ndef get_ic_annotation(fname, change_ic_to_original: bool):\n with open(fname, \"r\") as f:\n ls = [el.strip() for el in f.readlines()]\n\n item_list, item = [], {}\n uttr_token = get_uttr_token()\n\n for line in ls:\n if line == \"\":\n assert len(item) != 0\n item_list.append(item)\n item = {}\n continue\n if len(item) == 0:\n tmp = [int(el) for el in line.strip().split()]\n assert len(tmp) == 2\n item[\"remain_context_num\"] = tmp[1]\n # item[\"removed_context_num\"] = tmp[1]\n # item[\"remain_context_num\"] = tmp[2]\n continue\n if \"uttrs\" not in item:\n item[\"uttrs\"] = []\n item[\"uttrs\"].append(line)\n if len(item) != 0:\n item_list.append(item)\n final_output = []\n for item_idx, item in enumerate(item_list):\n # removed_num, remain_num = item[\"removed_context_num\"], item[\"remain_context_num\"]\n remain_num = item[\"remain_context_num\"]\n uttrs = item[\"uttrs\"]\n # assert len(uttrs) in [removed_num + remain_num + 1, removed_num + remain_num]\n assert len(uttrs) == 1 + remain_num\n context = uttrs[:-1]\n response = uttrs[-1]\n\n # assert len(context) in [remain_num + removed_num, remain_num + removed_num - 1]\n assert len(context) == remain_num\n if not change_ic_to_original:\n context = context[-remain_num:]\n assert len(context) == remain_num\n else:\n raise ValueError\n context = uttr_token.join(context)\n context = context.replace(\" ##\", \"\")\n response = response.replace(\" ##\", \"\")\n assert \"##\" not in context\n assert \"##\" not in response\n final_output.append([context, response])\n\n return final_output\n\n\ndef get_uw_annotation(fname, change_uw_to_original: bool):\n with open(fname, \"r\") as f:\n ls = [el.strip() for el in f.readlines()]\n item_list, item = [], {}\n uttr_token = get_uttr_token()\n original_turn, changed_turn = False, False\n\n for line_idx, line in enumerate(ls):\n if line == \"\":\n if changed_turn:\n assert not original_turn\n assert len(item) != 0\n item_list.append(item)\n item = {}\n changed_turn = False\n continue\n elif original_turn:\n assert not changed_turn\n continue\n else:\n print(line_idx)\n print(original_turn, changed_turn)\n print(len(item_list))\n raise ValueError()\n\n # head\n if len(item) == 0:\n idx, change_num = [int(el) for el in line.split()]\n item[\"idx\"] = idx\n item[\"num_change\"] = change_num\n continue\n # original\n if len(item) == 2:\n original_words = line.split()\n item[\"original_words\"] = original_words\n continue\n # original\n if len(item) == 3:\n changed_words = line.split()\n item[\"changed_words\"] = changed_words\n continue\n\n if line == \"origin\":\n assert len(item) == 4\n assert not original_turn and not changed_turn\n original_turn = True\n item[\"original_uttrs\"] = []\n continue\n if line == \"changed\":\n assert len(item) == 5\n original_turn = False\n assert not original_turn and not changed_turn\n item[\"changed_uttrs\"] = []\n changed_turn = True\n continue\n if original_turn:\n item[\"original_uttrs\"].append(line.strip())\n continue\n if changed_turn:\n item[\"changed_uttrs\"].append(line.strip())\n continue\n if len(item) != 0:\n item_list.append(item)\n\n print(item_list[0][\"changed_uttrs\"])\n print(item_list[0][\"original_uttrs\"])\n print()\n final_output = []\n for itemIdx, item in enumerate(item_list):\n change_num, org_words, chd_words = (\n item[\"num_change\"],\n item[\"original_words\"],\n item[\"changed_words\"],\n )\n original_uttrs = item[\"original_uttrs\"]\n changed_uttrs = item[\"changed_uttrs\"]\n\n assert len(org_words) == len(chd_words) == change_num\n\n if change_uw_to_original:\n context, response = uttr_token.join(original_uttrs[:-1]), original_uttrs[-1]\n else:\n context, response = uttr_token.join(changed_uttrs[:-1]), changed_uttrs[-1]\n context = context.replace(\" ##\", \"\")\n response = response.replace(\" ##\", \"\")\n context = context.replace(\"##\", \"\")\n assert \"##\" not in context\n assert \"##\" not in response\n final_output.append([context, response])\n\n return final_output\n\n\ndef get_uw_annotation_legacy(\n fname, change_uw_to_original: bool, replace_golden_to_nota: bool, is_dev\n):\n with open(fname, \"r\") as f:\n ls = [el.strip() for el in f.readlines()]\n item_list, item = [], []\n uttr_token = get_uttr_token()\n nota_token = get_nota_token()\n for line in ls:\n if line == \"\":\n if len(item) != 0:\n item_list.append(item)\n item = []\n continue\n\n if \"(\" in line and \")\" in line:\n parsed_tuple = re.findall(r\"\\([^()]*\\)\", line)\n num_change = int(line.strip().split()[-1])\n change_map = [make_tuple(el) for el in parsed_tuple]\n assert len(parsed_tuple) == len(change_map)\n item.append(change_map)\n continue\n item.append(line)\n\n final_output = []\n for item_idx, item in enumerate(item_list):\n change_map, uttrs = item[0], item[1:]\n context = uttr_token.join(uttrs[:-1])\n response = uttrs[-1] if not replace_golden_to_nota else nota_token\n error_case = False\n if change_uw_to_original:\n for change_history in change_map:\n org, chd = change_history\n try:\n assert chd in context or chd[0].upper() + chd[1:] in context\n except:\n error_case = True\n break\n context = context.replace(chd, org).replace(\n chd[0].upper() + chd[1:], org\n )\n if not error_case:\n final_output.append([context, response])\n\n if is_dev:\n return final_output[: int(len(final_output) * 0.3)]\n else:\n return final_output[int(len(final_output) * 0.3) :]\n\n\ndef set_random_seed(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\ndef recall_x_at_k(score_list, x, k, answer_index):\n assert len(score_list) == x\n sorted_score_index = np.array(score_list).argsort()[::-1]\n assert answer_index in sorted_score_index\n return int(answer_index in sorted_score_index[:k])\n\n\nclass SelectionDataset(torch.utils.data.Dataset):\n def __init__(\n self,\n raw_dataset,\n tokenizer,\n setname: str,\n max_seq_len: int = 300,\n num_candidate: int = 10,\n uttr_token: str = \"[UTTR]\",\n txt_save_fname: str = None,\n tensor_save_fname: str = None,\n corrupted_context_dataset=None,\n # add_nota_in_every_candidate=False,\n ):\n\n self.tokenizer = tokenizer\n self.max_seq_len = max_seq_len\n self.uttr_token = uttr_token\n assert setname in [\"train\", \"dev\", \"test\"]\n txt_save_fname, tensor_save_fname = (\n txt_save_fname.format(setname),\n tensor_save_fname.format(setname),\n )\n # self.add_nota = add_nota_in_every_candidate\n selection_dataset = self._get_selection_dataset(\n raw_dataset, num_candidate, txt_save_fname, corrupted_context_dataset\n )\n # if self.add_nota:\n # for el in selection_dataset:\n # assert \"[NOTA]\" in el\n self.feature = self._tensorize_selection_dataset(\n selection_dataset, tensor_save_fname, num_candidate\n )\n\n def __len__(self):\n return len(self.feature[0])\n\n def __getitem__(self, idx):\n return tuple([el[idx] for el in self.feature])\n\n def _tensorize_selection_dataset(\n self, selection_dataset, tensor_save_fname, num_candidate\n ):\n if os.path.exists(tensor_save_fname):\n print(f\"{tensor_save_fname} exist!\")\n with open(tensor_save_fname, \"rb\") as f:\n return pickle.load(f)\n print(\"make {}\".format(tensor_save_fname))\n ids_list = [[] for _ in range(num_candidate)]\n masks_list = [[] for _ in range(num_candidate)]\n labels = []\n print(\"Tensorize...\")\n for sample_idx, sample in enumerate(tqdm(selection_dataset)):\n assert len(sample) == 1 + num_candidate and all(\n [isinstance(el, str) for el in sample]\n )\n context, candidates = sample[0], sample[1:]\n assert len(candidates) == num_candidate\n\n encoded = self.tokenizer(\n [context] * num_candidate,\n text_pair=candidates,\n max_length=self.max_seq_len,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n encoded_ids, encoded_mask = encoded[\"input_ids\"], encoded[\"attention_mask\"]\n assert len(encoded_ids) == len(encoded_mask) == num_candidate\n for candi_idx in range(num_candidate):\n ids_list[candi_idx].append(encoded_ids[candi_idx])\n masks_list[candi_idx].append(encoded_mask[candi_idx])\n labels.append(0)\n\n assert len(list(set([len(el) for el in ids_list]))) == 1\n assert len(list(set([len(el) for el in masks_list]))) == 1\n ids_list = [torch.stack(el) for el in ids_list]\n masks_list = [torch.stack(el) for el in masks_list]\n labels = torch.tensor(labels)\n data = ids_list + masks_list + [labels]\n assert len(data) == 1 + 2 * num_candidate\n with open(tensor_save_fname, \"wb\") as f:\n pickle.dump(data, f)\n return data\n\n def _get_selection_dataset(\n self, raw_dataset, num_candidate, txt_save_fname, corrupted_context_dataset\n ):\n print(\"Selection filename: {}\".format(txt_save_fname))\n if os.path.exists(txt_save_fname):\n print(f\"{txt_save_fname} exist!\")\n with open(txt_save_fname, \"rb\") as f:\n return pickle.load(f)\n\n selection_dataset = self._make_selection_dataset(\n raw_dataset, num_candidate, corrupted_context_dataset\n )\n os.makedirs(os.path.dirname(txt_save_fname), exist_ok=True)\n with open(txt_save_fname, \"wb\") as f:\n pickle.dump(selection_dataset, f)\n return selection_dataset\n\n def _make_selection_dataset(\n self, raw_dataset, num_candidate, corrupted_context_dataset\n ):\n \"\"\"\n Returns:\n datset: List of [context(str), positive_response(str), negative_response_1(str), (...) negative_response_(num_candidate-1)(str)]\n \"\"\"\n assert isinstance(raw_dataset, list) and all(\n [isinstance(el, list) for el in raw_dataset]\n )\n print(f\"Serialized selection not exist. Make new file...\")\n dataset = []\n all_responses = []\n for idx, conv in enumerate(tqdm(raw_dataset)):\n slided_conversation = self._slide_conversation(conv)\n # Check the max sequence length\n for single_conv in slided_conversation:\n assert len(single_conv) == 2 and all(\n [isinstance(el, str) for el in single_conv]\n )\n concat_single_conv = \" \".join(single_conv)\n if len(self.tokenizer.tokenize(concat_single_conv)) + 3 <= 300:\n dataset.append(single_conv)\n all_responses.extend([el[1] for el in slided_conversation])\n\n if corrupted_context_dataset is not None:\n print(\"Samples with corrupted context are also included in training\")\n print(\"Before: {}\".format(len(dataset)))\n half_sampled_corrupt_sample = random.sample(\n corrupted_context_dataset, int(len(dataset) / 2)\n )\n for corrupted_sample in tqdm(half_sampled_corrupt_sample):\n changed_context = self.tokenizer.decode(\n corrupted_sample[\"changed_context\"]\n ).strip()\n assert isinstance(changed_context, str)\n assert \"[CLS]\" == changed_context[:5]\n assert \"[SEP]\" == changed_context[-5:]\n tmp_text = changed_context[5:-5].strip()\n assert len(self.tokenizer.tokenize(tmp_text)) + 2 <= 300\n dataset.append([tmp_text, \"[NOTA]\"])\n print(\"After: {}\".format(len(dataset)))\n\n for idx, el in enumerate(dataset):\n sampled_random_negative = random.sample(all_responses, num_candidate)\n if el[1] in sampled_random_negative:\n sampled_random_negative.remove(el[1])\n sampled_random_negative = sampled_random_negative[: num_candidate - 1]\n dataset[idx].extend(sampled_random_negative)\n\n # if not self.add_nota:\n # sampled_random_negative = sampled_random_negative[: num_candidate - 1]\n # dataset[idx].extend(sampled_random_negative)\n # else:\n # sampled_random_negative = [\"[NOTA]\"] + sampled_random_negative[: num_candidate - 2]\n # dataset[idx].extend(sampled_random_negative)\n assert len(dataset[idx]) == 1 + num_candidate\n assert all([isinstance(txt, str) for txt in dataset[idx]])\n return dataset\n\n def _slide_conversation(self, conversation):\n \"\"\"\n multi-turn utterance로 이루어진 single conversation을 여러 개의 \"context-response\" pair로 만들어 반환\n \"\"\"\n assert isinstance(conversation, list) and all(\n [isinstance(el, str) for el in conversation]\n )\n pairs = []\n for idx in range(len(conversation) - 1):\n context, response = conversation[: idx + 1], conversation[idx + 1]\n pairs.append([self.uttr_token.join(context), response])\n return pairs\n\n\nclass RankerDataset(torch.utils.data.Dataset):\n def __init__(\n self,\n raw_dataset,\n tokenizer,\n setname: str,\n max_seq_len: int = 300,\n uttr_token: str = \"[UTTR]\",\n tensor_fname: str = None,\n corrupted_dataset=None,\n ):\n self.tokenizer = tokenizer\n self.max_seq_len = max_seq_len\n self.uttr_token = uttr_token\n self.corrupted_dataset = corrupted_dataset\n assert setname in [\"train\", \"dev\", \"test\"]\n self.triplet_fname = \"./data/triplet/triplet_{}.pck\".format(setname)\n self.triplet_dataset = self._get_triplet_dataset(raw_dataset)\n if tensor_fname is None:\n self.tensor_fname = \"./data/triplet/tensor_{}.pck\".format(setname)\n else:\n self.tensor_fname = tensor_fname.format(setname)\n self.feature = self._tensorize_triplet_dataset(corrupted_dataset)\n\n def __len__(self):\n return len(self.feature[0])\n\n def __getitem__(self, idx):\n return tuple([el[idx] for el in self.feature])\n\n def _tensorize_triplet_dataset(self, corrupted_dataset):\n if os.path.exists(self.tensor_fname):\n with open(self.tensor_fname, \"rb\") as f:\n return pickle.load(f)\n\n ids, masks, labels = [], [], []\n print(\"Tensorize...\")\n for idx, triple in enumerate(tqdm(self.triplet_dataset)):\n assert len(triple) == 3 and all([isinstance(el, str) for el in triple])\n context, pos_uttr, neg_uttr = triple\n\n positive_sample = self.tokenizer(\n context,\n text_pair=pos_uttr,\n max_length=self.max_seq_len,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n negative_sample = self.tokenizer(\n context,\n text_pair=neg_uttr,\n max_length=self.max_seq_len,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n ids.extend(positive_sample[\"input_ids\"])\n masks.extend(positive_sample[\"attention_mask\"])\n labels.append(1)\n ids.extend(negative_sample[\"input_ids\"])\n masks.extend(negative_sample[\"attention_mask\"])\n labels.append(0)\n assert len(ids) == len(masks) == len(labels)\n\n data = torch.stack(ids), torch.stack(masks), torch.tensor(labels)\n with open(self.tensor_fname, \"wb\") as f:\n pickle.dump(data, f)\n return data\n\n def _get_triplet_dataset(self, raw_dataset):\n \"\"\"\n Args:\n raw_dataset (List[List[str]]): List of conversation. Each conversation is list of utterance(str).\n \"\"\"\n print(\"Triplet filename: {}\".format(self.triplet_fname))\n if os.path.exists(self.triplet_fname):\n print(f\"{self.triplet_fname} exist!\")\n with open(self.triplet_fname, \"rb\") as f:\n return pickle.load(f)\n\n triplet_dataset = self._make_triplet_dataset(raw_dataset)\n os.makedirs(os.path.dirname(self.triplet_fname), exist_ok=True)\n with open(self.triplet_fname, \"wb\") as f:\n pickle.dump(triplet_dataset, f)\n return triplet_dataset\n\n def _make_triplet_dataset(self, raw_dataset):\n assert isinstance(raw_dataset, list) and all(\n [isinstance(el, list) for el in raw_dataset]\n )\n print(f\"{self.triplet_fname} not exist. Make new file...\")\n dataset = []\n all_responses = []\n for idx, conv in enumerate(tqdm(raw_dataset)):\n slided_conversation = self._slide_conversation(conv)\n # Check the max sequence length\n for single_conv in slided_conversation:\n assert len(single_conv) == 2 and all(\n [isinstance(el, str) for el in single_conv]\n )\n concat_single_conv = \" \".join(single_conv)\n if len(self.tokenizer.tokenize(concat_single_conv)) + 3 <= 300:\n dataset.append(single_conv)\n all_responses.extend([el[1] for el in slided_conversation])\n for idx, el in enumerate(dataset):\n while True:\n sampled_random_negative = random.sample(all_responses, 1)[0]\n if sampled_random_negative != el[1]:\n break\n dataset[idx].append(sampled_random_negative)\n return dataset\n\n def _slide_conversation(self, conversation):\n assert isinstance(conversation, list) and all(\n [isinstance(el, str) for el in conversation]\n )\n pairs = []\n for idx in range(len(conversation) - 1):\n context, response = conversation[: idx + 1], conversation[idx + 1]\n pairs.append([self.uttr_token.join(context), response])\n return pairs\n\n\ndef get_uttr_token():\n return \"[UTTR]\"\n\n\ndef get_nota_token():\n return \"[NOTA]\"\n\n\ndef dump_config(args):\n with open(os.path.join(args.exp_path, \"config.json\"), \"w\") as f:\n json.dump(vars(args), f)\n\n\ndef write2tensorboard(writer, value, setname, step):\n for k, v in value.items():\n writer.add_scalars(k, {setname: v}, step)\n writer.flush()\n\n\ndef save_model(model, epoch, model_path):\n try:\n torch.save(\n model.module.state_dict(),\n os.path.join(model_path, f\"epoch-{epoch}.pth\"),\n )\n except:\n torch.save(\n model.state_dict(),\n os.path.join(model_path, f\"epoch-{epoch}.pth\"),\n )\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\ndef load_model(model, model_path, epoch, len_tokenizer):\n model.bert.resize_token_embeddings(len_tokenizer)\n model.load_state_dict(torch.load(model_path + f\"/epoch-{epoch}.pth\"))\n return model\n\n\ndef make_random_negative_for_multi_ref(multiref_original, num_neg=30):\n for idx, item in enumerate(multiref_original):\n context, responses = item\n sample = random.sample(range(len(multiref_original)), num_neg + 1)\n if idx in sample:\n sample.remove(idx)\n else:\n sample = sample[:-1]\n responses = [multiref_original[sample_idx][1] for sample_idx in sample]\n responses = [el for el1 in responses for el in el1]\n assert all([isinstance(el, str) for el in responses])\n negative = random.sample(responses, num_neg)\n multiref_original[idx].append(negative)\n return multiref_original\n","repo_name":"leenw23/dialogueUncertainty","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":27329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"18586020182","text":"from torch import nn, flatten\n\nclass AlexNet(nn.Module):\n \"\"\"\n Similar to the Dataset class, a custom architecture is defined by\n subclassing the nn.Module class. In particular, we need to overwrite the\n definition for __init__()\n \"\"\"\n def __init__(self):\n super().__init__() #Inheriting the init from the superclass\n self.net = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.AdaptiveAvgPool2d((6,6)),\n nn.Flatten(),\n nn.Dropout(p=0.5),\n nn.Linear(256*6*6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 1000),\n )\n\n def forward(self, x):\n x = self.net(x)\n return x\n","repo_name":"hf-chow/paper2code","sub_path":"AlexNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"893416742","text":"\"\"\"scrapli_community.ruckus.fastiron.ruckus_fastiron\"\"\"\nfrom scrapli.driver.network.base_driver import PrivilegeLevel\nfrom scrapli_community.ruckus.fastiron.async_driver import (\n default_async_on_close,\n default_async_on_open,\n)\nfrom scrapli_community.ruckus.fastiron.sync_driver import (\n default_sync_on_close,\n default_sync_on_open,\n)\n\nDEFAULT_PRIVILEGE_LEVELS = {\n \"exec\": (\n PrivilegeLevel(\n pattern=r\"^[a-z0-9 .\\-_@()/:]{1,63}>$\",\n name=\"exec\",\n previous_priv=\"\",\n deescalate=\"\",\n escalate=\"\",\n escalate_auth=False,\n escalate_prompt=\"\",\n )\n ),\n \"privilege_exec\": (\n PrivilegeLevel(\n pattern=r\"^[a-z0-9 .\\-_@/:]{1,63}#$\",\n name=\"privilege_exec\",\n previous_priv=\"exec\",\n deescalate=\"quit\",\n escalate=\"enable\",\n escalate_auth=True,\n escalate_prompt=r\"^[pP]assword:\\s?$\",\n )\n ),\n \"configuration\": (\n PrivilegeLevel(\n pattern=r\"^[a-z0-9 .\\-_@/:]{1,63}\\(conf[a-z0-9.\\-@/:\\+]{0,32}\\)#$\",\n name=\"configuration\",\n previous_priv=\"privilege_exec\",\n deescalate=\"end\",\n escalate=\"configure terminal\",\n escalate_auth=False,\n escalate_prompt=\"\",\n )\n ),\n}\n\nSCRAPLI_PLATFORM = {\n \"driver_type\": \"network\",\n \"defaults\": {\n \"privilege_levels\": DEFAULT_PRIVILEGE_LEVELS,\n \"default_desired_privilege_level\": \"privilege_exec\",\n \"sync_on_open\": default_sync_on_open,\n \"async_on_open\": default_async_on_open,\n \"sync_on_close\": default_sync_on_close,\n \"async_on_close\": default_async_on_close,\n \"failed_when_contains\": [\"Error -\", \"Invalid input -\"],\n \"textfsm_platform\": \"ruckus_fastiron\",\n \"genie_platform\": \"\",\n },\n \"variants\": {},\n}\n","repo_name":"scrapli/scrapli_community","sub_path":"scrapli_community/ruckus/fastiron/ruckus_fastiron.py","file_name":"ruckus_fastiron.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"22"} +{"seq_id":"24415933271","text":"# 2.1 IMPORTING LIBRARIES\n\nimport sys\nIN_COLAB = \"google.colab\" in sys.modules\n\nimport random\nimport gym\nimport numpy as np\n\nfrom IPython.display import clear_output\n\nclass DQNAgent:\n def __init__(\n self, \n env: gym.Env,\n ):\n \"\"\"Initialization.\n \n Args:\n env (gym.Env): openAI Gym environment\n gamma (float): discount factor\n \"\"\"\n \n # 2.3 CREATING THE Q-TABLE\n self.env = env\n \n self.state_size = self.env.observation_space.n\n self.action_size = self.env.action_space.n\n \n self.gamma = 0.9 # discount rate\n \n def one_step_lookahead(self, env, state, V, discount_factor):\n action_values = np.zeros(self.action_size)\n for action in range(self.action_size):\n for probability, next_state, reward, done in self.env.P[state][action]:\n action_values[action] += probability * (reward + discount_factor * V[next_state])\n return action_values\n\n def value_iteration(self, env, discount_factor=1.0, theta=1e-9, max_iterations=1e9):\n # Number of evaluation iterations\n evaluation_iterations = 1\n # Initialize state-value function with zeros for each env state\n V = np.zeros(self.state_size)\n for i in range(int(max_iterations)):\n # Initialize a change of value function as zero\n delta = 0\n # Iterate though each state\n for state in range(self.state_size):\n \n # Do a one-step lookahead to calculate state-action values\n action_value = self.one_step_lookahead(self.env, state, V, discount_factor)\n \n # Select best action to perform based on the highest state-action value\n best_action_value = np.max(action_value)\n\n # Calculate the absolute change of value function\n delta = max(delta, np.abs(V[state] - best_action_value))\n \n # Update the value function for current state\n V[state] = best_action_value\n evaluation_iterations += 1\n\n # Terminate if value change is insignificant\n if delta < theta:\n print(f'Value-iteration converged at iteration#{i}.')\n break\n\n # Create a deterministic policy using the optimal value function\n policy = np.zeros([self.state_size, self.action_size])\n for state in range(self.state_size):\n \n # One step lookahead to find the best action for this state\n action_value = self.one_step_lookahead(self.env, state, V, discount_factor)\n # Select best action based on the highest state-action value\n best_action = np.argmax(action_value)\n \n # Update the policy to perform a better action at a current state\n policy[state, best_action] = 1.0\n \n return policy, V\n\n'''\nValue iteration\n'''\n\n# 2.2 CREATING THE ENVIRONMENT\nenv_name = \"FrozenLake-v1\"\nenv = gym.make(env_name)\nenv.seed(777) # reproducible, general Policy gradient has high variance\n\n# 2.4 INITIALIZING THE Q-PARAMETERS\nmax_episodes = 10000 # Set total number of episodes to train agent on.\n\nmax_iterations = 99 # Max steps per episode\ngamma = 0.95 # Discounting rate\nrender = False # display the game environment\n\n\n# train\nagent = DQNAgent(\n env, \n# memory_size, \n# batch_size, \n# epsilon_decay,\n)\n\nif __name__ == \"__main__\":\n # Search for an optimal policy using policy iteration\n policy, V = agent.value_iteration(env.env)\n # Apply best policy to the real env\n \n wins = 0\n episode_reward = 0\n \n for episode in range(max_episodes):\n state = agent.env.reset()\n done = False # has the enviroment finished?\n \n if render: env.render()\n \n # 2.7 EACH TIME STEP \n while not done:\n # Select best action to perform in a current state\n action = np.argmax(policy[state])\n # Perform an action an observe how env acted in response\n next_state, reward, done, _ = agent.env.step(action)\n\n if render: env.render()\n # Our new state is state\n state = next_state\n \n # Summarize total reward\n episode_reward += reward\n # Calculate number of wins over episodes\n if done and reward == 1.0:\n wins += 1\n average_reward = episode_reward / max_episodes\n \n print(f'Value Iteration : number of wins over {max_episodes} episodes = {wins}')\n print(f'Value Iteration : average reward over {max_episodes} episodes = {average_reward} \\n\\n')\n\n\n","repo_name":"RichardMinsooGo-RL-Gym/Bible_5_C_Dynamic-programming","sub_path":"DP_Frozen_Lake_Value_Iteration.py","file_name":"DP_Frozen_Lake_Value_Iteration.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19088076402","text":"class Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n i = 0\n while i < len(nums):\n if nums[i] < target:\n i = i+1\n else:\n return i\n break\n return i\n \n\n\n\ndef stringToIntegerList(input):\n return json.loads(input)\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n nums = stringToIntegerList(line);\n line = next(lines)\n target = int(line);\n \n ret = Solution().searchInsert(nums, target)\n\n out = str(ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()\n","repo_name":"cat-meowmeow/My_Leetcode_Backup","sub_path":"0035.py","file_name":"0035.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15666342604","text":"import argparse\n\ndef training_parser():\n parser = argparse.ArgumentParser(description='Training arguments.')\n parser.add_argument('-lr', '--learning_rate', action='store',\n default=10e-4, type=float, help=('Learning Rate. Default: 0.001'))\n parser.add_argument('-bs', '--batch_size', action='store', \n default=8, type=int, help='Batch Size. Default: \"8\"')\n parser.add_argument('-eps', '--epoch_start', action='store', default=0, \n type=int, help=('Starting Epoch. Default: 0'))\n parser.add_argument('-ep', '--epochs', action='store', default=1, \n type=int, help=('Epochs. Default: 1'))\n parser.add_argument('-db', '--dense_blocks', action='store', default=3, \n type=int, help=('Number of dense blocks. Default: 3'))\n parser.add_argument('-du', '--dense_units', action='store', default=4, \n type=int, help=('Number of dense units. Default: 4'))\n parser.add_argument('-ld1', '--lambda_adv', action='store', default=0.01, \n type=float, help=('Lambda hyperparameter for generator adversarial loss. Default: 0.01'))\n parser.add_argument('-ld2', '--lambda_grd_pen', action='store', default=10, \n type=int, help=('Lambda hyperparameter for discriminator gradient penalty. Default: 10'))\n parser.add_argument('-ld3', '--lambda_cyc', action='store', default=0.01, \n type=float, help=('Lambda hyperparameter for cycle consistency loss. Default: 0.01'))\n parser.add_argument('-ld4', '--lambda_idt', action='store', default=0.005, \n type=float, help=('Lambda hyperparameter for the identity loss. Default: 0.005'))\n parser.add_argument('-ci', '--crit_iter', action='store', default=3, \n type=int, help=('Iterations for training discriminator for each generator step. Default: 3'))\n parser.add_argument('-to', '--train_only', action='store', default='', \n type=str, choices=['', 'GENERATORS', 'DISCRIMINATORS'],\n help=('Select to only train either generators or discriminators.'))\n parser.add_argument('-mo', '--model', action='store', default='3DRDN', \n type=str, choices=['3DRDN', '3DRDN-WGAN', '3DRDN-CGAN', '3DRDN-UCGAN'],\n help=('Model used during training. Default: 3DRDN'))\n return parser\n","repo_name":"omagdy/3DRDN-CycleGAN","sub_path":"arg_parser.py","file_name":"arg_parser.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"} +{"seq_id":"42362997434","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom pymongo import MongoClient\nimport pymongo.errors\n\nclass JobparserPipeline:\n def __init__(self):\n host_name = '192.168.1.35'\n port_name = 61290\n db_name = 'jobs_scrapy'\n db_user = 'scrapy'\n db_pwd = 'scrapy!'\n try:\n client = MongoClient(host_name, port_name,\n username=db_user,\n password=db_pwd,\n authSource=db_name,\n authMechanism=\"SCRAM-SHA-1\",\n connect=True)\n\n self.data_base = client[db_name].command(\"ismaster\")\n self.data_base= client[db_name]\n\n except pymongo.errors.ConnectionFailure:\n print(u'Сервер MongoDB не доступен')\n\n except pymongo.errors.OperationFailure:\n print(u'некорректное имя пользователя или пароль')\n\n def process_item(self, item, spider):\n if spider.name == 'hhru':\n info_vacancy = {'name_vacancy': item['name'][0], 'link_': item['link_vacancy'], 'source': 'hhru',\n 'salary': self.parsing_salary_hh(item['salary'])['salary']}\n self.data_base[spider.name].update_one(info_vacancy, {'$set': info_vacancy}, upsert=True)\n elif spider.name == 'superjob':\n info_vacancy = {'name_vacancy': item['name'][0], 'link_': item['link_vacancy'], 'source': 'superjob',\n 'salary': self.parsing_salary_superjob(item['salary'])['salary']}\n self.data_base[spider.name].update_one(info_vacancy, {'$set': info_vacancy}, upsert=True)\n return item\n\n @staticmethod\n def parsing_salary_hh(salary_):\n dict_salary = {}\n final_dict = {}\n if len(salary_) == 5:\n if salary_[0] == u'от ':\n dict_salary['minimum'] = salary_[1].replace('\\xa0','')\n dict_salary['maximum'] = None\n dict_salary['currency'] = salary_[3]\n dict_salary['condition'] = salary_[4]\n elif salary_[0] == u'до ':\n dict_salary['minimum'] = None\n dict_salary['maximum'] = salary_[2].replace('\\xa0','')\n dict_salary['currency'] = salary_[3]\n dict_salary['condition'] = salary_[4]\n final_dict['salary'] = dict_salary\n elif len(salary_) == 7:\n dict_salary['minimum'] = salary_[1].replace('\\xa0','')\n dict_salary['maximum'] = salary_[3].replace('\\xa0','')\n dict_salary['currency'] = salary_[5]\n dict_salary['condition'] = salary_[6]\n final_dict['salary'] = dict_salary\n else:\n final_dict['salary'] = salary_[0]\n\n return final_dict\n\n @staticmethod\n def parsing_salary_superjob(salary_):\n dict_salary = {}\n final_dict = {}\n if len(salary_) == 3:\n s_pars = salary_[2].split('\\xa0')\n if salary_[0] == u'от':\n dict_salary['minimum'] = s_pars[0] + s_pars[1]\n dict_salary['maximum'] = None\n dict_salary['currency'] = s_pars[2]\n elif salary_[0] == u'до':\n dict_salary['minimum'] = None\n dict_salary['maximum'] = s_pars[0] + s_pars[1]\n dict_salary['currency'] = s_pars[2]\n else:\n dict_salary['minimum'] = salary_[0].replace('\\xa0', '')\n dict_salary['maximum'] = salary_[0].replace('\\xa0', '')\n dict_salary['currency'] = salary_[2]\n final_dict['salary'] = dict_salary\n elif len(salary_) == 7:\n dict_salary['minimum'] = salary_[0].replace('\\xa0', '')\n dict_salary['maximum'] = salary_[4].replace('\\xa0', '')\n dict_salary['currency'] = salary_[6]\n final_dict['salary'] = dict_salary\n else:\n final_dict['salary'] = salary_[0]\n\n return final_dict\n","repo_name":"oshkuk22/Methods_collecting_processing_data_Internet","sub_path":"Scrapy_1/jobparser/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23395753631","text":"from __future__ import annotations\n\nfrom typing import List\nfrom utils import read_file\n\nMINUS = \"-\"\nDOUBLE_MINUS = \"=\"\nZERO = '0'\nFIVE = 5\nMAX_DIGITS = 20\n\n\nclass FuelRequirements:\n def __init__(self, data: List[str]):\n self.place_values = {\n k: 5**k for k, v in enumerate(range(MAX_DIGITS))\n }\n self.lower_bounds = [1] + [FIVE ** (i+1) - sum([2 * FIVE**j for j in range(i+1)]) for i in range(MAX_DIGITS - 1)]\n self.upper_bounds = [sum([2 * FIVE**j for j in range(i)]) for i in range(1, MAX_DIGITS + 1)]\n self.decimals = [self.snafu_to_decimal(line) for line in data]\n print()\n\n @property\n def sum_requirements(self):\n return sum(self.decimals)\n\n @property\n def answer_pt1(self):\n return self.decimal_to_snafu(self.sum_requirements)\n\n def snafu_to_decimal(self, text: str):\n return sum([self.__convert_char(v[0], v[1]) for \\\n v in [(len(text)-i-1, text[i]) for i in range(len(text)-1, -1, -1)]])\n\n def __convert_char(self, place: int, char: str):\n return int(char) * self.place_values[place] if char.isdigit() else -self.place_values[place] if \\\n char == MINUS else -2 * self.place_values[place]\n\n def decimal_to_snafu(self, num: int):\n snafu = \"\"\n most_sig_dig = next(iter([i for i in range(MAX_DIGITS) if self.lower_bounds[i] <= num <= self.upper_bounds[i]]))\n for i in range(most_sig_dig, -1, -1):\n upper_bound, lower_bound = self.upper_bounds[i], self.lower_bounds[i]\n multiple = 2 if abs(num) > (upper_bound - self.place_values[i]) else \\\n 1 if abs(num) >= lower_bound else 0\n if num < 0:\n dig = DOUBLE_MINUS if multiple == 2 else MINUS if multiple == 1 else ZERO\n else:\n dig = str(multiple)\n snafu += dig\n num -= multiple * self.place_values[i] if num > 0 else -multiple * self.place_values[i]\n return snafu\n\n\nif __name__ == '__main__':\n filename = 'input/day25.txt'\n data = read_file(filename)\n\n fuel_requirements = FuelRequirements(data)\n print(f'The answer to Pt 1 is {fuel_requirements.answer_pt1}')\n","repo_name":"ruthcaswellsmith/AdventofCode2022","sub_path":"Day25 - Full of Hot Air.py","file_name":"Day25 - Full of Hot Air.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13631637362","text":"# 1、请将该程序放到app/src/main/res文件目录下运行\n# 2、该程序是将现有xxxhdpi dimens文件转换,自动生成xxhdpi和xhdpi的dimens文件\n\nimport re\nimport os\n\n# UI设计以iPhone plus的屏幕为标准设计,\n# 换算成Android屏幕,比较接近的为2K屏,xxxhdpi屏幕\nUI_SCREEN_SCALE = 3.5\nUI_SCREEN_WIDTH = 1440.0\n\n# 1K屏,市面上大部分的手机都采用这种屏幕\nXXHDPI_SCREEN_SCALE = 3.0\nXXHDPI_SCREEN_WIDTH = 1080.0\n\nXHDPI_SCREEN_SCALE = 2.0\nXHDPI_SCREEN_WIDTH = 720.0\n\ndimen_type_xxxhdpi = 'values-xxxhdpi'\ndimen_type_xxhdpi = 'values-xxhdpi'\ndimen_type_xhdpi = 'values-xhdpi'\n\nINPUT_FILE_NAME = 'values/dimens.xml'\nOUTPUT_FILE_NAME = 'dimens_transfer.xml'\n\n\ndef create_dimen_file_from(xxxhdpi_file, dimen_type):\n folder = r'{0}'.format(dimen_type_xxxhdpi)\n if dimen_type == dimen_type_xxxhdpi:\n folder = r'{0}'.format(dimen_type_xxxhdpi)\n elif dimen_type == dimen_type_xxhdpi:\n folder = r'{0}'.format(dimen_type_xxhdpi)\n elif dimen_type == dimen_type_xhdpi:\n folder = r'{0}'.format(dimen_type_xhdpi)\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n f = open('{0}/{1}'.format(folder, OUTPUT_FILE_NAME), 'w')\n\n f.write('\\n')\n\n f.write('\\t\\n')\n lines = [line for line in open(xxxhdpi_file)]\n for l in lines:\n sp_line = re.match(r'.*name=[\"](.*)[\"]>(.*)sp.*', l)\n if sp_line:\n dimen_name = sp_line.group(1)\n sp_value = round(float(sp_line.group(2)), 2)\n if float(sp_line.group(2)).is_integer() and sp_value > 1:\n dimen_value = get_dpi_size(sp_value, dimen_type)\n f.write('\\t{1:.2f}sp\\n'.format(dimen_name, dimen_value))\n\n f.write('\\n\\t\\n')\n for l in lines:\n sp_line = re.match(r'.*name=[\"](.*)[\"]>(.*)dp.*', l)\n if sp_line:\n dimen_name = sp_line.group(1)\n dp_value = round(float(sp_line.group(2)), 2)\n if float(sp_line.group(2)).is_integer() and dp_value > 1:\n dimen_value = get_dpi_size(dp_value, dimen_type)\n f.write('\\t{1:.2f}dp\\n'.format(dimen_name, dimen_value))\n\n f.write('\\n')\n f.close()\n\n\ndef get_dpi_size(size, dimen_type):\n if dimen_type == dimen_type_xxxhdpi:\n return size\n elif dimen_type == dimen_type_xxhdpi:\n return round(size * UI_SCREEN_SCALE / UI_SCREEN_WIDTH * XXHDPI_SCREEN_WIDTH / XXHDPI_SCREEN_SCALE, 2)\n elif dimen_type == dimen_type_xhdpi:\n return round(size * UI_SCREEN_SCALE / UI_SCREEN_WIDTH * XHDPI_SCREEN_WIDTH / XHDPI_SCREEN_SCALE, 2)\n else:\n return 0\n\n\ndef transfer_xxxhdpi_dimen(xxxhdpi_file):\n create_dimen_file_from(xxxhdpi_file, dimen_type_xxxhdpi)\n create_dimen_file_from(xxxhdpi_file, dimen_type_xxhdpi)\n create_dimen_file_from(xxxhdpi_file, dimen_type_xhdpi)\n\n\ntransfer_xxxhdpi_dimen(INPUT_FILE_NAME)\n\n","repo_name":"xionghaoo/Android-screen-adaptation","sub_path":"dimen_transfer.py","file_name":"dimen_transfer.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8145906822","text":"import sys\nimport os\nimport datetime\nimport numpy as np\nimport pandas as pd\nsys.path.append(os.path.join(os.path.dirname(__file__), 'src'))\nimport src.utils as utils\nimport src.config as config\nfrom pathlib import Path\n\n\n#read our dataset\nrfm_data = pd.read_csv(os.path.join(\"data\",\"potentials.csv\"))\npayment = pd.read_csv(os.path.join(\"data\",\"payment.csv\"))\nsatisfaction=pd.read_csv(os.path.join(\"data\",\"satisfaction.csv\"))\n\n# Boolean condition to filter rows\ncondition = (rfm_data['is_promotion'] != 1) & (rfm_data['current_products_price'] > 0) & (rfm_data['membership_length'] > 0)\n\n# Filter the DataFrame based on the condition\nrfm_data = rfm_data[condition].copy()\n\n#filling the NaN budget_value with 1\nrfm_data['budget_value'].fillna(1, inplace=True)\n\n# Sort the satisfaction dataframe by date in descending order\nsatisfaction_sorted = satisfaction.sort_values('satisfaction_date', ascending=False)\n\n# Drop duplicate rows based on customer_id and provider_id, keeping only the first occurrence (which will be the latest date)\nsatisfaction_latest = satisfaction_sorted.drop_duplicates(['customer_id', 'provider_id'])\n\n# Merge the dataframes on provider_id and customer_id using a left join\nmerged_df = rfm_data.merge(satisfaction[['provider_id', 'customer_id', 'value']], on=['provider_id', 'customer_id'], how='left')\n\n# Add the satisfaction_status column to rfm_data_subs\nrfm_data['satisfaction_status'] = merged_df['value']\n\n# Group the payment dataframe by customer_id and get the latest payment_status_id for each customer\nlatest_payment_status = payment.groupby('customer_id')['payment_status_id'].last()\n\n# Create a new column 'payment_status_id' in rfm_dataset and fill it with the latest payment_status_ids\nrfm_data['payment_status_id'] = rfm_data['customer_id'].map(latest_payment_status)\n\n#we will count information until today\nreference_date = datetime.datetime.today().date()\n\n# creating extra columns\nrfm_data['days_since_last_call'] = (pd.to_datetime(reference_date) - pd.to_datetime(rfm_data['last_call'])).astype('timedelta64[D]')\nrfm_data['days_since_last_touch'] = (pd.to_datetime(reference_date) - pd.to_datetime(rfm_data['last_touch'])).astype('timedelta64[D]')\nrfm_data['days_since_last_seen'] = (pd.to_datetime(reference_date) - pd.to_datetime(rfm_data['last_seen_at'])).astype('timedelta64[D]')\n\n# Fill NaN values in 'days_since_last_call' with the maximum value from the column\nmax_last_call = rfm_data['days_since_last_call'].max()\nrfm_data['days_since_last_call'].fillna(max_last_call, inplace=True)\n\n# Fill NaN values in 'days_since_last_touch' with the maximum value from the column\nmax_last_touch = rfm_data['days_since_last_touch'].max()\nrfm_data['days_since_last_touch'].fillna(max_last_touch, inplace=True)\n\ncolumns_to_normalize = ['budget_value', 'lead_read_gap_min', 'lead_count','view_count','image_count','video_count','discount_count','review_count','touch_count','call_count'] # List of columns to normalize\n\nfor column in columns_to_normalize:\n min_val = rfm_data[column].min()\n max_val = rfm_data[column].max()\n rfm_data[column] = (rfm_data[column] - min_val) / (max_val - min_val)\n\nrfm_data = rfm_data[['provider_id', 'customer_id','lead_count','view_count','image_count','video_count','discount_count','review_count','touch_count','call_count',\n 'membership_length','budget_value','current_products_price','lead_read_gap_min',\n 'days_since_last_call', 'days_since_last_touch','days_since_last_seen','satisfaction_status','payment_status_id']]\n\n# Function to calculate Monetary column\ndef calculate_monetary(df):\n \"\"\"\n this function will be used to create a monetary score and assign it to a seperate column created and named as Monetary\n \"\"\"\n df['Monetary'] = df['current_products_price'] * df['budget_value']\n\n# Function to calculate Frequency column\ndef calculate_frequency(df):\n \"\"\"\n this function will be used to create a frequency score and assign it to a seperate column created and named as Frequency\n \"\"\"\n df['Frequency'] = (df['image_count'] + df['video_count'] + df['discount_count'] + df['review_count'] + df['lead_count'] + df['view_count'] +\n (1.5 * (df['touch_count'] + df['call_count']))) / df['membership_length']\n\n# Function to calculate Recency column\ndef calculate_recency(df):\n \"\"\"\n this function will be used to create a recency score and assign it to a seperate column created and named as Recency\n \"\"\"\n min_last_touch = df['days_since_last_touch'].min()\n min_last_seen = df['days_since_last_seen'].min()\n min_last_call = df['days_since_last_call'].min()\n\n df['Recency'] = np.minimum.reduce([min_last_touch, min_last_seen, min_last_call]) * df['lead_read_gap_min']\n\n# Calculate the columns using the defined functions\ncalculate_monetary(rfm_data)\ncalculate_frequency(rfm_data)\ncalculate_recency(rfm_data)\n\n#now let's create our final RFM dataset to evaluate :\nrfm_providers=rfm_data[[\"provider_id\"]]\nrfm_customers= rfm_data[[\"customer_id\"]]\nrfm_satisfaction=rfm_data[[\"satisfaction_status\"]]\nrfm_payment=rfm_data[[\"payment_status_id\"]]\nrfm_data = rfm_data[[\"Recency\",\"Monetary\",\"Frequency\"]]\n\nquantiles = rfm_data.quantile(q=[0.25,0.5,0.75])\nquantiles.to_dict()\n\ndef RScore(x,p,d):\n if x <= d[p][0.25]:\n return 4\n elif x <= d[p][0.50]:\n return 3\n elif x <= d[p][0.75]: \n return 2\n else:\n return 1\ndef FMScore(x,p,d):\n if x <= d[p][0.25]:\n return 1\n elif x <= d[p][0.50]:\n return 2\n elif x <= d[p][0.75]: \n return 3\n else:\n return 4\n\nrfm_segmentation = rfm_data\nrfm_segmentation['R_Quartile'] = rfm_segmentation['Recency'].apply(RScore, args=('Recency',quantiles,))\nrfm_segmentation['F_Quartile'] = rfm_segmentation['Frequency'].apply(FMScore, args=('Frequency',quantiles,))\nrfm_segmentation['M_Quartile'] = rfm_segmentation['Monetary'].apply(FMScore, args=('Monetary',quantiles,))\n\nrfm_segmentation['RFMScore'] = rfm_segmentation.R_Quartile.map(str) \\\n + rfm_segmentation.F_Quartile.map(str) \\\n + rfm_segmentation.M_Quartile.map(str)\n\nrfm_segmentation['RFMScore_num'] = rfm_segmentation.R_Quartile \\\n + rfm_segmentation.F_Quartile \\\n + rfm_segmentation.M_Quartile\n\ndfs=[rfm_providers,rfm_customers,rfm_satisfaction,rfm_payment,rfm_segmentation]\nfor df in dfs:\n df.reset_index(drop=True, inplace=True)\n\nmerged_df = dfs[0]\n\n# Merge the remaining dataframes on index one by one\nfor df in dfs[1:]:\n merged_df = pd.merge(merged_df, df, left_index=True, right_index=True)\n\n#Saving the feature engineering results as CSV file\nmerged_df.to_csv(Path(os.getcwd(),\"data\",\"rfm_segmentation.csv\"),index=False)\n","repo_name":"duguncom/customer-segmentation","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21490835421","text":"#\n# Dynamic Programming\n#\ndef find_lis(arr):\n\n n = len( arr )\n\n best = [0] * (n)\n best[n-1] = 1\n\n # we will create the best array\n for i in range(n-2, -1, -1):\n aux = arr[i]\n max = 0\n for j in range(i+1, n):\n if arr[j] > aux and best[j] > max:\n max = best[j]\n best[i] = 1 + max\n maxBest = best[0]\n posMax = 0\n for i in range(1, n):\n if best[i]>maxBest:\n maxBest = best[i]\n posMax = i\n print(best)\n print(maxBest, posMax)\n print(arr[posMax], end = \" \")\n pos = maxBest\n pos-=1\n for i in range(posMax+1, n):\n if best[i] == pos and arr[i] > arr[posMax]:\n print(arr[i], end = \" \")\n pos-=1\n\ndef gokyo():\n\n arr = [24,12,15,15,19]\n print(arr)\n find_lis(arr)\n\ngokyo()\n","repo_name":"thinkphp/computer-science-in-python","sub_path":"foundations/dynamic-programming/lis_O(n^2).py","file_name":"lis_O(n^2).py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"72301023737","text":"import tkinter as tk\nfrom tkinter import ttk\n\nfrom events.eventaggregator import EventAggregator\nfrom factories.commandfactory import CommandFactory\nfrom models.enums import Event, Command\nfrom views.fonts import Fonts\nfrom views.styles import StyleDefinitions\nfrom views.themes import Themes\nfrom views.viewbase import ViewBase\n\n\nclass Menu(ViewBase):\n def __init__(\n self,\n field_frame: ttk.Frame,\n event_aggregator: EventAggregator,\n command_factory: CommandFactory,\n theme: str,\n ) -> None:\n super().__init__(field_frame, command_factory, style=StyleDefinitions.MENU_FRAME, padding=20)\n self.__event_aggregator = event_aggregator\n self.__theme = theme\n self.__theme_var = tk.StringVar()\n self.__theme_var.set(\"Theme: \" + self.__theme)\n\n # Setup controls:\n label = ttk.Label(\n self,\n text=\"Battle Ship\",\n style=StyleDefinitions.MENU_ITEM_HEADER_LABEL,\n font=(Fonts.MENU_TITLE, 20, \"bold\"),\n )\n\n button_singleplayer = ttk.Button(\n self,\n text=\"Singleplayer\",\n style=StyleDefinitions.MENU_ITEM_BUTTON,\n takefocus=False,\n command=lambda cmd=Command.START_SINGLE_PLAYER: self._handle_command(cmd),\n )\n\n # button_multiplayer = ttk.Button(\n # self,\n # text=\"Multiplayer\",\n # style=StyleDefinitions.MENU_ITEM_BUTTON,\n # takefocus=False,\n # command=lambda cmd=Command.START_MULTIPLAYER: self._handle_command(cmd)\n # )\n\n theme_menu = ttk.OptionMenu(\n self,\n self.__theme_var,\n None,\n *Themes.ALL_THEMES,\n direction=\"right\",\n style=StyleDefinitions.MENU_ITEM_THEME_BUTTON,\n command=self.__set_theme\n )\n\n button_quit = ttk.Button(\n self,\n text=\"Quit\",\n takefocus=False,\n style=StyleDefinitions.MENU_ITEM_QUIT_BUTTON,\n command=lambda cmd=Command.QUIT_GAME: self._handle_command(cmd),\n )\n\n theme_menu[\"menu\"].configure(font=(Fonts.NORMAL_TEXT, 11))\n\n label.grid(row=0, column=0, padx=5, pady=5)\n button_singleplayer.grid(row=1, column=0, padx=5, pady=5, sticky=\"ew\")\n # button_multiplayer.grid(row=2, column=0, padx=5, pady=5, sticky=\"ew\")\n theme_menu.grid(row=3, column=0, padx=5, pady=(20, 5), sticky=\"ew\")\n button_quit.grid(row=4, column=0, padx=5, pady=5, sticky=\"ew\")\n\n self.grid_columnconfigure(0, weight=1)\n\n def show(self) -> None:\n self.place(relx=0.5, rely=0.5, anchor=tk.CENTER, width=250)\n\n def close(self) -> None:\n self.__event_aggregator.publish(Event.MENU_CLOSED)\n self.destroy()\n\n def __set_theme(self, _) -> None:\n theme: str = self.__theme_var.get()\n self.__theme_var.set(\"Theme: {0}\".format(theme))\n self._handle_command(Command.CHANGE_THEME, theme)\n\n def __get_theme_from_var(self) -> str:\n t = self.__theme_var.get().replace(\"Theme: \", \"\")\n return t\n","repo_name":"q-g-j/TkBattleship","sub_path":"views/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"9181215282","text":"#!/usr/bin/env python3\n\nimport traceback\n\nclass Colors(object):\n class Format(object):\n RESET = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n DIM = \"\\033[2m\"\n UNDERLINED = \"\\033[4m\"\n BLINK = \"\\033[5m\"\n REVERSE = \"\\033[7m\"\n HIDDEN = \"\\033[8m\"\n\n class Foreground(object):\n DEFAULT = \"\\033[39m\"\n BLACK = \"\\033[30m\"\n RED = \"\\033[31m\"\n GREEN = \"\\033[32m\"\n YELLOW = \"\\033[33m\"\n BLUE = \"\\033[34m\"\n MAGENTA = \"\\033[35m\"\n CYAN = \"\\033[36m\"\n LIGHTGREY = \"\\033[37m\"\n DARKGREY = \"\\033[90m\"\n LIGHTRED = \"\\033[91m\"\n LIGHTGREEN = \"\\033[92m\"\n LIGHTYELLOW = \"\\033[93m\"\n LIGHTBLUE = \"\\033[94m\"\n LIGHTMAGENTA = \"\\033[95m\"\n LIGHTCYAN = \"\\033[96m\"\n WHITE = \"\\033[97m\"\n class Background(object):\n DEFAULT = \"\\033[49m\"\n BLACK = \"\\033[40m\"\n RED = \"\\033[41m\"\n GREEN = \"\\033[42m\"\n YELLOW = \"\\033[43m\"\n BLUE = \"\\033[44m\"\n MAGENTA = \"\\033[45m\"\n CYAN = \"\\033[46m\"\n LIGHTGREY = \"\\033[47m\"\n DARKGREY = \"\\033[100m\"\n LIGHTRED = \"\\033[101m\"\n LIGHTGREEN = \"\\033[102m\"\n LIGHTYELLOW = \"\\033[103m\"\n LIGHTBLUE = \"\\033[104m\"\n LIGHTMAGENTA = \"\\033[105m\"\n LIGHTCYAN = \"\\033[106m\"\n WHITE = \"\\033[107m\"\n\n def __init__(self):\n self.format = self.Format()\n self.fg = self.Foreground()\n self.bg = self.Background()\n\nclass Log(object):\n def __init__(self, debug, func=print):\n self.colors = Colors()\n self.debug = debug\n self.func = func\n\n def construct(self, *args):\n return \"\".join(args)\n\n def info(self, msg):\n if self.debug:\n self.func( self.construct( \"[\", self.colors.fg.LIGHTGREEN, \"*\", self.colors.fg.DEFAULT, \"] \", msg ) )\n\n def success(self, msg):\n if self.debug:\n self.func( self.construct( \"[\", self.colors.fg.CYAN, \"+\", self.colors.fg.DEFAULT, \"] \", msg ) )\n\n def warn(self, msg):\n if self.debug:\n self.func( self.construct( \"[\", self.colors.fg.LIGHTYELLOW, \"!\", self.colors.fg.DEFAULT, \"] \", msg ) )\n\n def error(self, msg, exception=None):\n self.func( self.construct( \"[\", self.colors.fg.LIGHTRED, \"x\", self.colors.fg.DEFAULT, \"] \", msg ) )\n if exception:\n self.func( self.construct( \"[\", self.colors.fg.LIGHTRED, \"x\", self.colors.fg.DEFAULT, \"] \", str(exception) ) )\n traceback.print_tb(exception.__traceback__)\n","repo_name":"wr34k/elf-backdoor","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"14361251355","text":"import torch\r\nimport torchvision\r\nimport os\r\nfrom PIL import Image\r\nimport pickle\r\n\r\n\"\"\"\r\nFile used to format images into datasets readable by the gan after they were formatted to a 400 x 400 \r\n\"\"\"\r\n\r\n\r\ndef formatData():\r\n rootPath = r'D:\\GAN_Gallery\\resize'\r\n dataset = list()\r\n trans = torchvision.transforms.ToTensor()\r\n counter = 0\r\n\r\n for img in os.listdir(rootPath):\r\n counter += 1\r\n if counter < 8600:\r\n continue\r\n\r\n try:\r\n image = Image.open(rootPath + '\\\\' + img)\r\n image = trans(image)\r\n dataset.append(image)\r\n except Exception as e:\r\n print(str(e))\r\n\r\n with open(r'D:\\GAN_Gallery\\src\\dataset3.db', 'wb') as file:\r\n pickle.dump(dataset, file)\r\n\r\n\r\nfor i in range(3):\r\n\r\n # load in data\r\n if i % 3 == 0:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset1a.db', 'rb') as file:\r\n dataset = pickle.load(file)\r\n elif i % 3 == 1:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset2a.db', 'rb') as file:\r\n dataset = pickle.load(file)\r\n else:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset3a.db', 'rb') as file:\r\n dataset = pickle.load(file)\r\n\r\n # # reduce size\r\n # index = 0\r\n # for datum in dataset:\r\n # if datum.size() != torch.Size([3, 400, 400]):\r\n # dataset.pop(index)\r\n # index += 1\r\n\r\n # save data\r\n if i % 3 == 0:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset1b.db', 'wb') as file:\r\n pickle.dump(dataset[0:2001], file)\r\n with open(r'D:\\GAN_Gallery\\src\\dataset2b.db', 'wb') as file:\r\n pickle.dump(dataset[2001:len(dataset)], file)\r\n elif i % 3 == 1:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset3b.db', 'wb') as file:\r\n pickle.dump(dataset[0:2001], file)\r\n with open(r'D:\\GAN_Gallery\\src\\dataset4b.db', 'wb') as file:\r\n pickle.dump(dataset[2001:len(dataset)], file)\r\n else:\r\n with open(r'D:\\GAN_Gallery\\src\\dataset5b.db', 'wb') as file:\r\n pickle.dump(dataset[0:2001], file)\r\n with open(r'D:\\GAN_Gallery\\src\\dataset6b.db', 'wb') as file:\r\n pickle.dump(dataset[2001:len(dataset)], file)\r\n","repo_name":"Troy-Potter/GAN-Gallery","sub_path":"InputFormatting.py","file_name":"InputFormatting.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5384385108","text":"__author__ = 'Clive'\nfrom django.core.urlresolvers import reverse\nfrom buyside.models import Vehicle, VehiclePart\nfrom django.template.defaultfilters import slugify\n\n\ndef BaseTree(vehicle_id):\n target_vehicle = Vehicle.objects.get(pk=vehicle_id)\n vehicle_parts = VehiclePart.objects.filter(vehicles=vehicle_id)\n parts = ListNode('', target_vehicle.long_name, vehicle_id, 0)\n for vehicle_part in vehicle_parts:\n # url, name, category list\n #if vehicle_part.tree_level_5 == '':\n # print 'I am here'\n parts.add_node(\n vehicle_part.gecko_part_number,\n vehicle_part.name,\n [\n vehicle_part.tree_level_1,\n vehicle_part.tree_level_2,\n vehicle_part.tree_level_3,\n vehicle_part.tree_level_4,\n vehicle_part.tree_level_5\n ])\n return parts\n\ndef PartTree(vehicle_id, type_id):\n parts = BaseTree(vehicle_id)\n all_vehicle_parts = parts.build_html_tree(vehicle_id, type_id)\n return all_vehicle_parts\n\ndef PartList(vehicle_id):\n parts = BaseTree(vehicle_id)\n parts_list = parts.build_node_list(vehicle_id, parts.name)\n return parts_list\n\nclass ListNode:\n def __init__(self, url_input, name_input, database_id, level):\n self.part_number = url_input # String to contains the URL identifier to create the link for the node\n self.name = name_input # name to be displayed on the webpage\n self.child_nodes = [] # list to contain all the child nodes\n self.level = level\n self.database_id = database_id\n\n def add_node(self, node_url, node_name, category):\n # if current level category doesn't exist, add new node here\n node_exists = False\n # find current category in child node list if it is there\n for node in self.child_nodes:\n if node.database_id == category[self.level]:\n node_exists = True\n #if this is not final level go to current child node and run again.\n if category[self.level + 1] != '':\n node.add_node(node_url, node_name, category)\n # if this is the final level check it is blank and add data\n else:\n self.part_number = node_url\n self.name = node_name\n break\n\n # if it does not exist, add node at this level\n if node_exists is False:\n # if this is not the final level, create blank node\n if category[self.level + 1] != '':\n new_node = ListNode('', category[self.level], category[self.level], self.level + 1)\n new_node.add_node(node_url, node_name, category)\n #if this is the final level, create node with data\n else:\n new_node = ListNode(node_url, node_name, category[self.level], self.level + 1)\n # add node to list\n self.child_nodes.append(new_node)\n\n\n def build_node_list(self, vehicle_id, parent_title):\n count = 0\n padding = \"\"\n while (count < self.level):\n padding = padding + \"--\"\n count += 1\n padding = padding + \">\"\n\n id_string = parent_title + \".\" + self.name\n id_string = id_string.replace(\" \", \"_\")\n form_data = (id_string, padding + self.name)\n id_list = []\n id_list.append(form_data)\n if self.child_nodes.__len__() != 0:\n for node in self.child_nodes:\n #child_list = node.build_node_list(vehicle_id, self.name)\n id_list = id_list + node.build_node_list(vehicle_id, self.name)\n return id_list\n\n\n def build_html_tree(self, vehicle_id, html_type):\n # returns prepared HTML for current node, and list of prepared HTML for child nodes\n #requires cleanup around url builder (DRY)\n if html_type == 'shop':\n if self.part_number != '':\n url = reverse('buyside:search2', kwargs={'search_id_1': vehicle_id, 'search_id_2': self.part_number})\n else:\n url = reverse('buyside:search', kwargs={'search_id_1': vehicle_id})\n\n list_string = r'%s +' % (url, self.name)\n\n elif html_type == 'name':\n if self.name != '':\n list_string = self.name\n else:\n list_string = 'Un-named part'\n\n elif html_type == 'upload':\n if self.part_number != '':\n list_string = r'' \\\n % (self.part_number, self.name, self.part_number)\n else:\n list_string = 'no part number'\n\n\n if self.child_nodes.__len__() != 0:\n children = []\n for node in self.child_nodes:\n child_title, grandchildren = node.build_html_tree(vehicle_id, html_type)\n children.append(child_title)\n if grandchildren is not None:\n children.append(grandchildren)\n else:\n children = None\n\n return list_string, children","repo_name":"CliveL/GPTesting","sub_path":"buyside/helpers/treebuilder.py","file_name":"treebuilder.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32203020593","text":"import threading\nimport pandas as pd\nimport os.path\nimport json\nfrom json import JSONDecoder\nfrom json import JSONEncoder\nfrom datetime import datetime\n\n\ncsvCharacteristics = \"../dataset/characteristics.csv\"\ncsvHolidays = \"../dataset/holidays.csv\"\ncsvPlaces = \"../dataset/places.csv\"\ncsvInsee = \"../dataset/code-postal-code-insee-2015.csv\"\n\njsonCharacteristics = \"json/characteristics.json\"\njsonHolidays = \"json/holidays.json\"\njsonPlaces = \"json/places.json\"\njsonInsee = \"json/insee.json\"\n\nclass DateTimeDecoder(JSONDecoder):\n\n def __init__(self, *args, **kargs):\n JSONDecoder.__init__(self, object_hook=self.dict_to_object,\n *args, **kargs)\n \n def dict_to_object(self, d): \n if '__type__' not in d:\n return d\n\n type = d.pop('__type__')\n try:\n dateobj = datetime(**d)\n return dateobj\n except:\n d['__type__'] = type\n return d\n\n\nclass DateTimeEncoder(JSONEncoder):\n \"\"\" Instead of letting the default encoder convert datetime to string,\n convert datetime objects into a dict, which can be decoded by the\n DateTimeDecoder\n \"\"\"\n \n def default(self, obj):\n if isinstance(obj, datetime):\n return {\n '__type__' : 'datetime',\n 'year' : obj.year,\n 'month' : obj.month,\n 'day' : obj.day,\n 'hour' : obj.hour,\n 'minute' : obj.minute,\n 'second' : obj.second,\n 'microsecond' : obj.microsecond,\n } \n else:\n return JSONEncoder.default(self, obj)\n\n\ndef getInsee(inseeDf):\n\tinsee = {}\n\tinsee[\"insee\"] = inseeDf[\"INSEE_COM\"]\n\tinsee[\"com\"] = inseeDf[\"NOM_COM\"]\n\tinsee[\"dep\"] = inseeDf[\"NOM_DEPT\"]\n\tinsee[\"population\"] = int(inseeDf[\"POPULATION\"])\n\tlatlong = inseeDf[\"Geo Point\"].split(',')\n\tinsee[\"lat\"] = float(latlong[0])\n\tinsee[\"long\"] = float(latlong[1])\n\t\t\n\treturn insee\n\n\ndef getInseeCode(dep, com):\n\tif com == 'nan' or dep == 'nan':\n\t\treturn None\n\t\n\tif dep == '201':\n\t\tinsee_dep = '2A'\n\telif dep == '202':\n\t\tinsee_dep = '2B'\n\telif dep in ['971', '972', '973', '974', '975', '976']:\n\t\tinsee_dep = '97'\n\telse:\n\t\tinsee_dep = dep[:-1].zfill(2)\n\t\n\tinsee_com = com.zfill(3)\n\treturn insee_dep + insee_com\n\n\ndef getPlace(placeDf):\n\tplace = {}\n\t# place[\"Num_Acc\"] = int(placeDf[\"Num_Acc\"])\n\tif not pd.isna(placeDf[\"catr\"]):\n\t\tplace[\"catr\"] = int(placeDf[\"catr\"])\n\tif not pd.isna(placeDf[\"voie\"]):\n\t\tplace[\"voie\"] = placeDf[\"voie\"]\n\t#if not pd.isna(placeDf[\"v1\"]):\n\t#\tplace[\"v1\"] = int(placeDf[\"v1\"])\n\tif str(placeDf[\"circ\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tplace[\"circ\"] = int(placeDf[\"circ\"])\n\tif not pd.isna(placeDf[\"nbv\"]):\n\t\tplace[\"nbv\"] = int(placeDf[\"nbv\"])\n\t#if not pd.isna(placeDf[\"pr\"]):\n\t#\tplace[\"pr\"] = float(placeDf[\"pr\"])\n\t#if not pd.isna(placeDf[\"pr1\"]):\n\t#\tplace[\"pr1\"] = int(placeDf[\"pr1\"])\n\t#if str(placeDf[\"vosp\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t#\tplace[\"vosp\"] = int(placeDf[\"vosp\"])\n\t#if not pd.isna(placeDf[\"lartpc\"]):\n\t#\tplace[\"lartpc\"] = int(placeDf[\"lartpc\"])\n\t#if not pd.isna(placeDf[\"larrout\"]):\n\t#\tplace[\"larrout\"] = int(placeDf[\"larrout\"])\n\tif str(placeDf[\"infra\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tplace[\"infra\"] = int(placeDf[\"infra\"])\n\tif str(placeDf[\"situ\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tplace[\"situ\"] = int(placeDf[\"situ\"])\n\t#if not pd.isna(placeDf[\"env1\"]):\n\t#\tplace[\"env1\"] = int(placeDf[\"env1\"])\n\t\n\tcondition = {}\n\tif str(placeDf[\"prof\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tcondition[\"prof\"] = int(placeDf[\"prof\"])\n\tif str(placeDf[\"plan\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tcondition[\"plan\"] = int(placeDf[\"plan\"])\n\tif str(placeDf[\"surf\"]) not in [\"0\", \"0.0\", \"nan\"]:\n\t\tcondition[\"surf\"] = int(placeDf[\"surf\"])\n\t\t\n\tif condition:\n\t\tplace[\"condition\"] = condition\n\t\n\treturn place\n\n\ndef getCharacteristic(dataFrame, holidaysMap, inseeMap, placesMap):\n\tc = {}\n\tc[\"Num_Acc\"] = int(dataFrame[\"Num_Acc\"])\n\thrmn = str(dataFrame[\"hrmn\"]).zfill(4)\n\tyears = \"20\" + str(dataFrame[\"an\"]).zfill(2)\n\thours = int(hrmn[0:-2])\n\tminutes = int(hrmn[-2:])\n\tdate = datetime.strptime(years + '-' + str(dataFrame['mois']) + '-' + str(dataFrame[\"jour\"]) + ' ' + str(hours) + \":\" + str(minutes), '%Y-%m-%d %H:%M')\n\tc[\"date\"] = date\n\t\n\tholiday = holidaysMap.get(date.strftime(\"%Y-%m-%d\"))\n\tif holiday is not None:\n\t\tc[\"holiday\"] = holiday\n\t\t\n\tif not pd.isna(dataFrame[\"col\"]):\n\t\tc[\"col\"] = int(dataFrame[\"col\"])\n\tif str(dataFrame[\"int\"]) not in ['0', '0.0']:\n\t\tc[\"int\"] = int(dataFrame[\"int\"])\n\t\t\n\tcondition = {}\n\tcondition[\"lum\"] = int(dataFrame[\"lum\"])\n\tif not pd.isna(dataFrame[\"atm\"]):\n\t\tcondition[\"atm\"] = int(dataFrame[\"atm\"])\n\tc[\"condition\"] = condition\n\t\n\t# c[\"agg\"] = int(dataFrame[\"agg\"])\n\t# c[\"adr\"] = str(dataFrame[\"adr\"])\n\t\n\tlocation = None\n\tinsee_code = getInseeCode(str(dataFrame[\"dep\"]), str(dataFrame[\"com\"]))\n\tif insee_code is not None:\n\t\tlocation = inseeMap.get(insee_code)\n\t#location = getLocation(str(int(dataFrame[\"dep\"])), str(int(dataFrame[\"com\"])))\n\tif location is None:\n\t\tlocation = {}\n\t\t\n\tif str(dataFrame[\"gps\"]) not in ['0', '0.0', '']:\n\t\tlocation[\"gps\"] = str(dataFrame[\"gps\"])\n\t#if str(dataFrame[\"lat\"]) not in ['0', '', '0.0', 'nan']:\n\t#\tlocation[\"lat\"] = float(dataFrame[\"lat\"] / 100000)\n\t#if str(dataFrame[\"long\"]) not in ['0', '0.0', '', 'nan']:\n\t#\tlocation[\"long\"] = float(dataFrame[\"long\"] / 100000)\n\n\tif location:\n\t\tc[\"location\"] = location\n\n\troad = placesMap.get(str(dataFrame[\"Num_Acc\"]))\n\tif road is not None:\n\t\tc[\"road\"] = road\n\t\n\treturn c\n\ndef loadHolidays():\n\tholidaysMap = {}\n\tprint(\"Started loading holidays\")\n\tif os.path.isfile(jsonHolidays):\n\t\twith open(jsonHolidays) as infile:\n\t\t\tholidaysMap = json.load(infile)\n\t\t\tprint(\"Holidays loaded from file\")\n\telse:\n\t\tholidaysData = pd.read_csv(csvHolidays)\n\t\tfor _, rowHoliday in holidaysData.iterrows():\n\t\t\tif holidaysMap.get(rowHoliday[\"ds\"]) is None:\n\t\t\t\tholidaysMap[rowHoliday[\"ds\"]] = rowHoliday[\"holiday\"]\n\t\t\t\t\n\t\twith open(jsonHolidays, 'w') as outfile:\n\t\t\tjson.dump(holidaysMap, outfile)\t\n\t\tprint(\"Holidays loaded in memory and saved to file\")\n\n\treturn holidaysMap\n\n\ndef loadPlaces():\n\tplacesMap = {}\n\tprint(\"Started loading places\")\n\tif os.path.isfile(jsonPlaces):\n\t\twith open(jsonPlaces) as infile:\n\t\t\tplacesMap = json.load(infile)\n\t\t\tprint(\"Places loaded from file\")\n\telse:\n\t\tplacesData = pd.read_csv(csvPlaces)\n\t\tfor _, rowPlace in placesData.iterrows():\n\t\t\tif placesMap.get(rowPlace[\"Num_Acc\"]) is None:\n\t\t\t\tplacesMap[rowPlace[\"Num_Acc\"]] = getPlace(rowPlace)\n\t\twith open(jsonPlaces, 'w') as outfile:\n\t\t\tjson.dump(placesMap, outfile)\t\n\t\tprint(\"Places loaded in memory and saved to file\")\n\n\treturn placesMap\n\n\ndef loadInsee():\n\tinseeMap = {}\n\tprint(\"Started loading insee\")\n\tif os.path.isfile(jsonInsee):\n\t\twith open(jsonInsee) as infile:\n\t\t\tinseeMap = json.load(infile)\n\t\t\tprint(\"Insee loaded from file\")\n\telse:\n\t\tinseePostCodeData = pd.read_csv(csvInsee, sep=\";\")\n\t\tfor _, rowInsee in inseePostCodeData.iterrows():\n\t\t\tif inseeMap.get(rowInsee[\"INSEE_COM\"]) is None:\n\t\t\t\tinseeMap[rowInsee[\"INSEE_COM\"]] = getInsee(rowInsee)\n\t\twith open(jsonInsee, 'w') as outfile:\n\t\t\tjson.dump(inseeMap, outfile)\n\t\tprint(\"Insee loaded in memory and saved to file\")\n\n\treturn inseeMap\n\n\ndef loadCharacteristics():\n\tcharacteristicsMap = {}\n\tprint(\"Started loading characteristics\")\n\tif os.path.isfile(jsonCharacteristics):\n\t\twith open(jsonCharacteristics) as infile:\n\t\t\tcharacteristicsMap = json.load(infile, cls=DateTimeDecoder)\n\t\t\tprint(\"Characteristics loaded from file\")\n\telse:\n\t\tcharacteristicsData = pd.read_csv(csvCharacteristics)\n\t\tholidaysMap = loadHolidays()\n\t\tinseeMap = loadInsee()\n\t\tplacesMap = loadPlaces()\n\n\t\tfor _, rowCharacteristic in characteristicsData.iterrows():\n\t\t\tif characteristicsMap.get(rowCharacteristic[\"Num_Acc\"]) is None:\n\t\t\t\tcharacteristicsMap[rowCharacteristic[\"Num_Acc\"]] = getCharacteristic(rowCharacteristic, holidaysMap, inseeMap, placesMap)\n\t\twith open(jsonCharacteristics, 'w') as outfile:\n\t\t\tjson.dump(characteristicsMap, outfile, cls=DateTimeEncoder)\n\t\tprint(\"Characteristics loaded in memory and saved to file\")\n\n\treturn characteristicsMap\n","repo_name":"gregVader/french-accidents-nosql","sub_path":"loaders/characteristicsLoader.py","file_name":"characteristicsLoader.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33614608848","text":"from .models import Tactic, Technique, APT, TacticTechniqueMap, TechniqueAPTMap\nfrom .setup import SetupAPTGroups, SetupTactic, SetupTechniques\nfrom attack_cli import enterprise_attack\n\n\nclass AttackNavigator(object):\n def __init__(self):\n self.apts = {}\n self.tactics = {}\n self.techniques = {}\n\n def initialize(self):\n self._fetch_data()\n\n def get_tactics(self, query=None):\n if query is None:\n return [tactic.get_details(relation=True) for tactic in self.tactics.values()]\n\n result = self._search(self.tactics, ['name'], query)\n return result\n\n def _get_details(self, param_dict, key, raise_exception=False):\n instance = param_dict.get(key)\n if not instance:\n if raise_exception:\n raise Exception(\"Instance not found\")\n else:\n return None\n\n return instance.get_details(relation=True)\n\n def _search(self, param_dict, search_keys, search_value):\n result = set()\n for id, value in param_dict.items():\n for search_key in search_keys:\n if search_value in getattr(value, search_key, None):\n result.add(value)\n\n return [value.get_details(relation=True) for value in result]\n\n def get_tactic(self, id_param, raise_exception=False):\n return self._get_details(self.tactics, id_param, raise_exception)\n\n def get_techniques(self, query=None):\n if query is None:\n return [technique.get_details(relation=True)\n for technique in self.techniques.values()]\n\n result = self._search(self.techniques, ['name'], query)\n return result\n\n def get_technique(self, id_param, raise_exception=False):\n return self._get_details(self.techniques, id_param, raise_exception)\n\n def get_apts(self, query=None):\n if query is None:\n return [apt.get_details(relation=True) for apt in self.apts.values()]\n\n result = self._search(self.apts, ['name'], query)\n return result\n\n def get_apt(self, id_param, raise_exception=False):\n return self._get_details(self.apts, id_param, raise_exception)\n\n def _fetch_data(self):\n # a1 = Tactic('Tactic 1')\n # self.tactics[a1.id] = a1\n #\n # a2 = Tactic('Tactic 2')\n # self.tactics[a2.id] = a2\n #\n # a3 = Tactic('Tactic 3')\n # self.tactics[a3.id] = a3\n #\n #\n # b1 = Technique('Technique 1')\n # self.techniques[b1.id] = b1\n #\n # b2 = Technique('Technique 2')\n # self.techniques[b2.id] = b2\n #\n # b3 = Technique('Technique 3')\n # self.techniques[b3.id] = b3\n #\n # e = TacticTechniqueMap\n # e.add_mapping(a1, b1)\n # e.add_mapping(a1, b2)\n # e.add_mapping(a2, b2)\n # e.add_mapping(a3, b3)\n #\n # c1 = APT('APT 1')\n # self.apts[c1.id] = c1\n #\n # c2 = APT('APT 2')\n # self.apts[c2.id] = c2\n #\n #\n # c3 = APT('APT 3')\n # self.apts[c3.id] = c3\n #\n # f = TechniqueAPTMap\n # f.add_mapping(c1, b1)\n # f.add_mapping(c1, b2)\n # f.add_mapping(c1, b3)\n # f.add_mapping(c3, b3)\n # f.add_mapping(c2, b2)\n tactic_technique_map_obj = TacticTechniqueMap\n tactics = SetupTactic().do_setup()\n for tactic in tactics:\n self.tactics[tactic.id] = tactic\n techniques = SetupTechniques().do_setup()\n for technique in techniques:\n self.techniques[technique.id] = technique\n\n technique_to_tactic_map = {}\n for technique in techniques:\n technique_tactics = []\n tactic_slugs = technique.tactic_slugs\n tactics = self.tactics.values()\n for tactic in tactics:\n if tactic.slug in tactic_slugs:\n tactic_technique_map_obj.add_mapping(tactic, technique)\n\n apt_groups = SetupAPTGroups().do_setup()\n for apt in apt_groups:\n self.apts[apt.id] = apt\n\n tech_apt_map_obj = TechniqueAPTMap\n enterprise_objects = enterprise_attack.enterprise['objects']\n for enterprise_object in enterprise_objects:\n if (enterprise_object['type'] == 'relationship'\n and enterprise_object['source_ref'].startswith('intrusion-set')\n and enterprise_object['target_ref'].startswith('attack-pattern')):\n for technique in self.techniques.values():\n if technique.mitre_technique_id == enterprise_object['target_ref']:\n for apt in self.apts.values():\n if apt.mitre_id == enterprise_object['source_ref']:\n tech_apt_map_obj.add_mapping(apt, technique)\n\n","repo_name":"cyware-labs/attack-cli","sub_path":"attack_cli/attack_navigation.py","file_name":"attack_navigation.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"38307421452","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 20 10:21:17 2017\n\n@author: IMITA-PC-13\n\"\"\"\n#帶入套件\nimport pymysql.cursors\nimport pandas as pd\nimport time\n#連線DB\ndb = pymysql.connect(\n host='localhost',\n port=3306,\n user='root',\n passwd='',\n db='open_data',\n charset='utf8'\n )\ncursor = db.cursor()\n\n\n#url = 'http://data.tycg.gov.tw/api/v1/rest/datastore/54f0362a-2fac-46ab-9fae-2d9b04958aaa?format=csv'\nurl = 'http://file.data.gov.tw/event/dataset.csv'\n#抓回資料\ndata = pd.read_csv(url)\n\n#取出今天日期、時間,並整成變數now\nnow_data = time.strftime(\"%Y/%m/%d\")\nnow_time = time.strftime(\"%H:%M:%S\")\nnow = now_data + ' ' + now_time\n\n#資料表名稱 tycg_A(桃園)\n\n\n#尋找所有的欄位名稱 取出了欄位名稱之後,然後呢?\nindexnum = len(data.T.index)\nfor title in range(0,indexnum):\n print(data.T.index[title])\n #print(data.T.index[title])\n\ndict0 = {}\nfor titlename in range(0,indexnum):\n print(data.T.index[title])\n\n\norgan = data.T.loc['資料集提供機關']\ndataname = data.T.loc['資料集名稱']\nbrowse = data.T.loc['瀏覽次數']\ndownload = data.T.loc['下載次數']\nscore = data.T.loc['資料集評分']\n\n\n\nfor i in range(len(data)):\n #print(i)\n #input_to_db(organ,dataname,browse,download,score,i)\n cursor.execute('insert into '+ ' data01 (organ, dataname, browse, download, score, data )' + \n ' values( %s, %s, %s, %s, %s, %s)', \n ( \n str( organ.iloc[i] ) ,\n str( dataname.iloc[i] ),\n str( browse.iloc[i]),\n str( download.iloc[i]) ,\n str( score.iloc[i]) , \n str( now) ) )\n \n print('insert into '+ ' data01 (organ, dataname, browse, download, score, data )' + \n ' values( %s, %s, %s, %s, %s, %s)', \n ( \n str( organ.iloc[i] ) ,\n str( dataname.iloc[i] ),\n str( browse.iloc[i]),\n str( download.iloc[i]) ,\n str( score.iloc[i]) , \n str( now) ) )\n\ntry:\n # 執行sql語法\n db.commit()\n # 提交到資料庫執行\n print(\"成功插入\")\n db.close()\nexcept:\n db.rollback()\n print (\"MySQL DB Error\")\n # 如果有錯誤則回滾\n db.close()\n # 關閉與資料庫的連接\n\n\n'''\ndef input_to_db(organ,dataname,browse,download,score,i):\n\n db = pymysql.connect(\n host='localhost',\n port=3306,\n user='testuser',\n passwd='test1234',\n db='testuser',\n charset='utf8'\n )\n cursor = db.cursor()\n \n cursor.execute('insert into '+ ' data01 (organ, dataname, browse, download, score, data )' + \n ' values( %s, %s, %s, %s, %s, %s)', \n ( \n str( organ.iloc[i] ) ,\n str( dataname.iloc[i] ),\n str( browse.iloc[i]),\n str( download.iloc[i]) ,\n str( score.iloc[i]) , \n str( now) ) )\n try:\n # 執行sql語法\n db.commit()\n # 提交到資料庫執行\n print(\"成功插入\")\n db.close()\n except:\n db.rollback()\n print (\"MySQL DB Error\")\n # 如果有錯誤則回滾\n db.close()\n # 關閉與資料庫的連接\nfor i in range(len(data)):\n #print(i)\n input_to_db(organ,dataname,browse,download,score,i)\n'''\n","repo_name":"ts00189145/python","sub_path":"抓取csv並寫入mysql新.py","file_name":"抓取csv並寫入mysql新.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5819584068","text":"import pandas as pd\nimport time\nfrom ms_func_lib import ms_utils\n\nfrom ms_func_lib import rawdata_cleaning_functions as rdcf\nfrom ms_func_lib import replicate_analysis_functions as raf\n\n\ninput_file = '02_fitted_Kd_and_model_selection_data.csv'\nfunctionality_evaluated_filename = '03a_fitted_Kd_and_model_selection_data_with_functionality_evaluated.csv'\noutput_filename = '03b_grouped_data_including_published.csv'\n\ntotal_start_time = time.time()\n\nms_utils.print_flush('\\nAnalyzing replicate groups and identifying non-functional protein.')\n\ndf = pd.read_csv(input_file, low_memory=False)\n# reformatting signal and concentration columns\ndf = rdcf.replace_signal_and_conc_cols(df)\n# evaluate protein functionality, using groups with one or more binders as a proxy for minimally functional protein\ndf = raf.identify_replicate_groups_with_one_or_more_binders(df)\ndf = raf.evaluate_protein_functionality(df)\ndf.to_csv(functionality_evaluated_filename,index=False)\nms_utils.print_flush('Replicate groups analyzed and non-functional protein identified. Total time elapsed: ',time.time()-total_start_time, 'seconds.')\nms_utils.print_flush('Data exported to : ',functionality_evaluated_filename)\n\nms_utils.print_flush('\\nBeginning replicate analysis')\n# implement replicate analysis process as described in the manuscript\n# also, simulate original publication fitting process\nreplicates_df = raf.analyze_replicates_v2(df)\nms_utils.print_flush('Replicates analyzed, replicate calls made. Total time elapsed: ',time.time()-total_start_time, 'seconds.')\n\n\n# load published results and add them to the replicates dataframe for comparisons\npublished_df = raf.load_published_fits()\n\n# fixing dtypes to avoid errors in merging\nreplicates_df.domain = replicates_df.domain.astype(str)\npublished_df.domain = published_df.domain.astype(str)\n\nreplicates_df.gene_name = replicates_df.gene_name.astype(str)\npublished_df.gene_name = published_df.gene_name.astype(str)\n\nreplicates_df.pY_pos = replicates_df.pY_pos.astype(int)\npublished_df.pY_pos = published_df.pY_pos.astype(int)\n\n# merged published results with our analysis\nmerge_df = pd.merge(replicates_df, published_df, how='left', left_on=['domain','gene_name','pY_pos'], right_on=['domain','gene_name','pY_pos'])\nmerge_df.to_csv(output_filename,index=False)\n\nms_utils.print_flush('Results merged with published data. Total time elapsed: ',time.time()-total_start_time, 'seconds.')\nms_utils.print_flush('Data exported to : ',output_filename)\n\n\n\n\n\n","repo_name":"knaegle/SH2fp","sub_path":"03_replicate_and_functionality_analysis.py","file_name":"03_replicate_and_functionality_analysis.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17710914670","text":"import click\nimport logging\nimport sys\n\nfrom azure_img_utils.cli.cli_utils import (\n add_options,\n get_config,\n process_shared_options,\n shared_options,\n echo_style\n)\nfrom azure_img_utils.azure_image import AzureImage\n\n\n# -----------------------------------------------------------------------------\n# Gallery commands function\n@click.group(name=\"gallery-image-version\")\ndef gallery_image_version():\n \"\"\"\n Commands for gallery image version management.\n \"\"\"\n\n\n# -----------------------------------------------------------------------------\n# exists command function\n@gallery_image_version.command()\n@click.option(\n '--gallery-image-name',\n type=click.STRING,\n required=True,\n help='Name of the gallery image to check.'\n)\n@click.option(\n '--gallery-name',\n type=click.STRING,\n required=True,\n help='Name of the gallery to check image existence.'\n)\n@click.option(\n '--gallery-image-version',\n type=click.STRING,\n required=True,\n help='Version of the gallery image to check.'\n)\n@add_options(shared_options)\n@click.pass_context\ndef exists(\n context,\n gallery_image_name,\n gallery_name,\n gallery_image_version,\n **kwargs\n):\n \"\"\"\n Checks if a gallery image version exists\n \"\"\"\n\n process_shared_options(context.obj, kwargs)\n config_data = get_config(context.obj)\n logger = logging.getLogger('azure_img_utils')\n logger.setLevel(config_data.log_level)\n\n try:\n az_img = AzureImage(\n container=config_data.container,\n storage_account=config_data.storage_account,\n credentials_file=config_data.credentials_file,\n resource_group=config_data.resource_group,\n log_level=config_data.log_level,\n log_callback=logger\n )\n exists = az_img.gallery_image_version_exists(\n gallery_name,\n gallery_image_name,\n gallery_image_version,\n config_data.resource_group\n )\n\n if exists:\n echo_style('true', config_data.no_color, fg='green')\n else:\n echo_style('false', config_data.no_color)\n\n except Exception as e:\n echo_style(\n 'Unable to check gallery image version existence',\n config_data.no_color,\n fg='red'\n )\n echo_style(str(e), config_data.no_color, fg='red')\n sys.exit(1)\n\n\n# -----------------------------------------------------------------------------\n# gallery image create command function\n@gallery_image_version.command()\n@click.option(\n '--blob-name',\n type=click.STRING,\n required=True,\n help='Name of the blob for the gallery image.'\n)\n@click.option(\n '--gallery-name',\n type=click.STRING,\n required=True,\n help='Name of the gallery where the image will be created.'\n)\n@click.option(\n '--gallery-image-name',\n type=click.STRING,\n required=True,\n help='Name of the gallery image to be created.'\n)\n@click.option(\n '--gallery-image-version',\n type=click.STRING,\n required=True,\n help='Version of the gallery image to create.'\n)\n@click.option(\n '--force-replace-image',\n is_flag=True,\n default=False,\n help='Delete the gallery image prior to create if it already exists.'\n)\n@add_options(shared_options)\n@click.pass_context\ndef create(\n context,\n blob_name,\n gallery_name,\n gallery_image_name,\n gallery_image_version,\n force_replace_image,\n **kwargs\n):\n \"\"\"\n Creates a gallery image based on the already uploaded blob.\n \"\"\"\n process_shared_options(context.obj, kwargs)\n config_data = get_config(context.obj)\n logger = logging.getLogger('azure_img_utils')\n logger.setLevel(config_data.log_level)\n\n try:\n az_img = AzureImage(\n container=config_data.container,\n storage_account=config_data.storage_account,\n credentials_file=config_data.credentials_file,\n resource_group=config_data.resource_group,\n log_level=config_data.log_level,\n log_callback=logger\n )\n img_name = az_img.create_gallery_image_version(\n blob_name,\n gallery_name,\n gallery_image_name,\n gallery_image_version,\n config_data.region,\n force_replace_image=force_replace_image,\n gallery_resource_group=config_data.resource_group\n )\n\n if img_name and config_data.log_level != logging.ERROR:\n echo_style(\n f'gallery image version {img_name} created',\n config_data.no_color,\n fg='green'\n )\n\n except Exception as e:\n echo_style(\n 'Unable to create gallery image',\n config_data.no_color,\n fg='red'\n )\n echo_style(str(e), config_data.no_color, fg='red')\n sys.exit(1)\n\n\n# -----------------------------------------------------------------------------\n# gallery image delete command function\n@gallery_image_version.command()\n@click.option(\n '--gallery-name',\n type=click.STRING,\n required=True,\n help='Name of the gallery where the image will be deleted.'\n)\n@click.option(\n '--gallery-image-name',\n type=click.STRING,\n required=True,\n help='Name of the image to delete.'\n)\n@click.option(\n '--gallery-image-version',\n type=click.STRING,\n required=True,\n help='Version of the gallery image to delete.'\n)\n@add_options(shared_options)\n@click.confirmation_option(\n help='This command will delete the specified gallery image. Are you sure?'\n)\n@click.pass_context\ndef delete(\n context,\n gallery_name,\n gallery_image_name,\n gallery_image_version,\n **kwargs\n):\n \"\"\"\n Deletes a gallery image if the image exists in the gallery\n \"\"\"\n\n process_shared_options(context.obj, kwargs)\n config_data = get_config(context.obj)\n logger = logging.getLogger('azure_img_utils')\n logger.setLevel(config_data.log_level)\n\n try:\n az_img = AzureImage(\n container=config_data.container,\n storage_account=config_data.storage_account,\n credentials_file=config_data.credentials_file,\n resource_group=config_data.resource_group,\n log_level=config_data.log_level,\n log_callback=logger\n )\n az_img.delete_gallery_image_version(\n gallery_name,\n gallery_image_name,\n gallery_image_version,\n gallery_resource_group=config_data.resource_group\n )\n\n except Exception as e:\n echo_style(\n 'Unable to delete gallery image version',\n config_data.no_color,\n fg='red'\n )\n echo_style(str(e), config_data.no_color, fg='red')\n sys.exit(1)\n","repo_name":"SUSE-Enceladus/azure-img-utils","sub_path":"azure_img_utils/cli/gallery_image_version.py","file_name":"gallery_image_version.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"45687014119","text":"import sys\n\ninput = sys.stdin.readline\n\n\ndx = [-1, 0, 1]\n\nr, c = map(int, input().split())\nboard = [input().rstrip() for _ in range(r)]\n\nvisited = [[0] * c for _ in range(r)]\ncnt = 0\n\n\ndef dfs(x, y):\n if y == c - 1:\n return 1\n\n for i in range(3):\n nx, ny = x + dx[i], y + 1\n\n if 0 <= nx < r and 0 <= ny < c and not visited[nx][ny] and board[nx][ny] == '.':\n visited[nx][ny] = 1\n if dfs(nx, ny) == 1:\n return 1\n return 0\n\n\nfor i in range(r):\n cnt += dfs(i, 0)\n\nprint(cnt)\n\n","repo_name":"ehdbs0903/algorithm-python","sub_path":"Depth-first Search/boj_3109.py","file_name":"boj_3109.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6726203162","text":"import os\nimport glob\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport shutil\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\ndata_root='/home/lfvargas10/ProyectoVision/feta_2.1'\nwith open('/home/lfvargas10/ProyectoVision/Train.dat', 'rb') as f:\n train=pickle.load(f)\nwith open('/home/lfvargas10/ProyectoVision/Val.dat', 'rb') as f:\n val=pickle.load(f)\nwith open('/home/lfvargas10/ProyectoVision/Test.dat', 'rb') as f:\n imagesTs=pickle.load(f)\n\nimagesTr= train + val\n\n#Create new dataset folders\n\n#si no existe el directorio lo crea\nnewTrain_root=os.path.join('/home/lfvargas10/ProyectoVision', 'Data','Task15_feta', 'imagesTr')\nnewTest_root=os.path.join('/home/lfvargas10/ProyectoVision','Data', 'Task15_feta', 'imagesTs')\nnewMask_root=os.path.join('/home/lfvargas10/ProyectoVision','Data', 'Task15_feta', 'labelsTr')\npredMask_root=os.path.join('/home/lfvargas10/ProyectoVision','Data', 'Task15_feta', 'labelsTs')\n\n\ndef create_folder(split,root):\n if not os.path.exists(root):\n os.mkdir(root)\n\n if split=='test':\n if not os.path.exists(predMask_root):\n os.mkdir(predMask_root)\n mask_root= predMask_root\n split_images=imagesTs\n else:\n if not os.path.exists(newMask_root):\n os.mkdir(newMask_root)\n mask_root= newMask_root\n split_images=imagesTr\n\n\n for i in split_images:\n if i < 10:\n imagen = glob.glob(os.path.join(data_root, f'sub-00{i}', 'anat', '*T2w.nii.gz'))[0]\n mask = glob.glob(os.path.join(data_root, f'sub-00{i}', 'anat', '*dseg.nii.gz'))[0]\n shutil.copyfile(imagen, os.path.join(root, f'sub-00{i}_rec-mial_T2w.nii.gz'))\n shutil.copyfile(mask, os.path.join(mask_root, f'sub-00{i}_rec-mial_T2w.nii.gz'))\n else:\n imagen = glob.glob(os.path.join(data_root, f'sub-0{i}', 'anat', '*T2w.nii.gz'))[0]\n mask = glob.glob(os.path.join(data_root, f'sub-0{i}', 'anat', '*dseg.nii.gz'))[0]\n shutil.copyfile(imagen, os.path.join(root, f'sub-0{i}_rec-mial_T2w.nii.gz'))\n shutil.copyfile(mask, os.path.join(mask_root, f'sub-0{i}_rec-mial_T2w.nii.gz'))\n\ndef get_identifiers_from_splitted_files(folder: str):\n uniques = np.unique([i[:-7] for i in subfiles(folder, suffix='.nii.gz', join=False)])\n return uniques\n\ndef help_datasetjson():\n\n train_identifiers = get_identifiers_from_splitted_files(newTrain_root)\n test_identifiers = get_identifiers_from_splitted_files(newTest_root)\n\n json_dict = dict()\n json_dict['training'] = [\n {\"image\": \"imagesTr/%s.nii.gz\" % i, \"label\": \"labelsTr/%s.nii.gz\" % i} for i\n in\n train_identifiers]\n\n #Create a dataframe of the json_dict and save it as a csv file\n \n df_train = pd.DataFrame(json_dict['training'])\n #df_train.to_csv('/home/lfvargas10/ProyectoVision/ROG/Tasks/Task15_feta/train_fold0.csv', index=False)\n\n json_dict['test'] = [ {\"image\": \"imagesTs/%s.nii.gz\" % i, \"label\": \"labelsTs/%s.nii.gz\" % i} for i in test_identifiers]\n print(json_dict['test'])\n df_test = pd.DataFrame(json_dict['test'])\n #df_test.to_csv('/home/lfvargas10/ProyectoVision/ROG/Tasks/Task15_feta/test_fold0.csv', index=False)\n\n return json_dict\n\n#create_folder('train', newTrain_root)\n\n#create_folder('test', newTest_root)\n\nhelp_datasetjson()\n\n","repo_name":"luvargas2/Final-Project-Vision-FeTa","sub_path":"ROG/libs/preprocessing/rearrange_dataset.py","file_name":"rearrange_dataset.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29228814347","text":"from http import HTTPStatus\n\nfrom maps.b2bgeo.ya_courier.backend.test_lib.conftest import skip_if_remote\nfrom maps.b2bgeo.ya_courier.backend.test_lib.util_offline import local_post, local_get\n\n\n@skip_if_remote\ndef test_create_and_get(env):\n data = {\n \"provider\": \"yandex_taxi_cargo\",\n \"order_id\": \"rented courier order\",\n }\n path = f\"/api/v1/companies/{env.default_company.id}/rented-couriers\"\n response = local_post(env.client, path, headers=env.user_auth_headers, data=data)\n courier_id = response['id']\n\n path = f\"/api/v1/companies/{env.default_company.id}/rented-couriers/{courier_id}\"\n response = local_get(env.client, path, headers=env.user_auth_headers)\n del response[\"created_at\"]\n\n assert response == {\n \"provider\": \"yandex_taxi_cargo\",\n \"order_id\": \"rented courier order\",\n \"company_id\": env.default_company.id,\n \"id\": courier_id,\n }\n\n\n@skip_if_remote\ndef test_unknown_provider(env):\n data = {\n \"provider\": \"not taxi\",\n \"order_id\": \"rented courier order\",\n }\n path = f\"/api/v1/companies/{env.default_company.id}/rented-couriers\"\n local_post(env.client, path, headers=env.user_auth_headers, data=data,\n expected_status=HTTPStatus.UNPROCESSABLE_ENTITY)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/courier/company_courier/test_rented_courier.py","file_name":"test_rented_courier.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22850243396","text":"import os\nimport time\nimport threading\nimport pygame\nfrom pygame.locals import *\nimport asyncio\nfrom bleak import BleakClient\nfrom bleak import discover\nfrom pynput.keyboard import Key, Controller, Listener\nfrom functools import partial\n\n\n\n\n\nkeyboard = Controller()\npygame.init()\n\ndelay = 0.05\ndelay2 = 0.15\nposImg = 700\nposText = 775\n\nflagConnectionDevice = 0\n\nMODEL_NBR_UUID = \"0000aadc-0000-1000-8000-00805f9b34fb\"\n\n\n# tableau des mouvements du cube, et des touches à simuler correspondantes\nmoves = [\"U\", \"U'\", \"D\", \"D'\", \"F\", \"F'\", \"B\", \"B'\", \"L\", \"L'\", \"R\", \"R'\"]\nkeys = [\"space\", \"left\", \"right\", \"space\", \"left\", \"right\", \"space\", \"left\", \"right\", \"space\", \"left\", \"right\"]\n\nsurfaceW = 1300\nsurfaceH= 700\ntoucheW = 50\ntoucheH = 50\n\n\n\npygame.display.set_caption(\"Driver Rubisen\")\nclickable_areas, rect_surf, firstClickAreas, firstRectSurf , resetImg, img, imgSelect= [],[],[],[],[] ,[] ,[] \n\nfond,cache,Bnon,Boui ,preset1 ,preset2 ,load1,load2,save1,save2,contactImg, fenetre, font,address = \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"\n\n\n\n\n\ndef imageDeclarationImport(): # load des images pour l'affichage de l'interface\n\tglobal img\n\tglobal imgSelect\n\tglobal resetImg\n\tglobal fond,cache,Bnon,Boui ,preset1 ,preset2 ,load1,load2,save1,save2,contactImg, fenetre, font\n\n\n\tfont = pygame.font.SysFont(\"comicsansms\", 24)\n\tfenetre = pygame.display.set_mode((surfaceW, surfaceW), RESIZABLE)\n\t\n\n\tpygame.display.set_caption(\"Driver Rubisen\")\n\n\tfond = pygame.image.load(\"img/Dapper.png\").convert()\n\tcache = pygame.image.load(\"img/Dapper1.png\").convert()\n\tBnon = pygame.image.load(\"img/Bnon.png\").convert_alpha()\n\tBoui = pygame.image.load(\"img/Boui.png\").convert_alpha()\n\tpreset1 = pygame.image.load(\"img/preset1.png\").convert()\n\tpreset2 = pygame.image.load(\"img/preset2.png\").convert()\n\tload1 = pygame.image.load(\"img/load1.png\").convert()\n\tload2 = pygame.image.load(\"img/load2.png\").convert()\n\tsave1 = pygame.image.load(\"img/save1.png\").convert()\n\tsave2 = pygame.image.load(\"img/save2.png\").convert()\n\tcontactImg = pygame.image.load(\"img/contactImg.png\").convert()\n\n\n\n\n\n\n\tfor i in range (0, 17):\n\t\tresetImg.append(pygame.image.load(\"img/res\" + str(i + 1) + \".png\").convert())\n\timgB = pygame.image.load(\"img/B.png\").convert_alpha()\n\timgBI = pygame.image.load(\"img/B'.png\").convert_alpha()\n\timgD = pygame.image.load(\"img/D.png\").convert_alpha()\n\timgDI = pygame.image.load(\"img/D'.png\").convert_alpha()\n\timgF = pygame.image.load(\"img/F.png\").convert_alpha()\n\timgFI = pygame.image.load(\"img/F'.png\").convert_alpha()\n\timgL = pygame.image.load(\"img/L.png\").convert_alpha()\n\timgLI = pygame.image.load(\"img/L'.png\").convert_alpha()\n\timgR = pygame.image.load(\"img/R.png\").convert_alpha()\n\timgRI = pygame.image.load(\"img/R'.png\").convert_alpha()\n\timgU = pygame.image.load(\"img/U.png\").convert_alpha()\n\timgUI = pygame.image.load(\"img/U'.png\").convert_alpha()\n\timgBSelect = pygame.image.load(\"img/B1.png\").convert_alpha()\n\timgBISelect = pygame.image.load(\"img/B'1.png\").convert_alpha()\n\timgDSelect = pygame.image.load(\"img/D1.png\").convert_alpha()\n\timgDISelect = pygame.image.load(\"img/D'1.png\").convert_alpha()\n\timgFSelect = pygame.image.load(\"img/F1.png\").convert_alpha()\n\timgFISelect = pygame.image.load(\"img/F'1.png\").convert_alpha()\n\timgLSelect = pygame.image.load(\"img/L1.png\").convert_alpha()\n\timgLISelect = pygame.image.load(\"img/L'1.png\").convert_alpha()\n\timgRSelect = pygame.image.load(\"img/R1.png\").convert_alpha()\n\timgRISelect = pygame.image.load(\"img/R'1.png\").convert_alpha()\n\timgUSelect = pygame.image.load(\"img/U1.png\").convert_alpha()\n\timgUISelect = pygame.image.load(\"img/U'1.png\").convert_alpha()\n\timg = [imgU, imgUI, imgD, imgDI, imgF, imgFI, imgB, imgBI, imgL, imgLI, imgR, imgRI]\n\timgSelect = [imgUSelect, imgUISelect, imgDSelect, imgDISelect, imgFSelect, imgFISelect, imgBSelect, imgBISelect, imgLSelect, imgLISelect, imgRSelect, imgRISelect]\n\n\n\ndef AffichageFenetreContactPygame(): #affiche une fenetre de contact\n\tfenetrea = pygame.display.set_mode((600, 300), RESIZABLE)\n\tcontact = pygame.image.load(\"img/contact.png\").convert()\n\tfenetre.blit(contact, (0,0))\n\tpygame.display.update()\n\ta = 0\n\twhile a == 0:\n\t\t#await asyncio.sleep(0)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == MOUSEBUTTONUP and event.button == 1:\n\t\t\t\t\n\t\t\t\tpygame.display.update()\n\t\t\t\ta = 1\n\t\t\t\tquit()\n\n\ndef lire(path): #fonction de lecture d'un fichier\n\ttry:\n\t\tfichier = open(path, \"r\")\n\t\ttext = fichier.readlines()\n\t\tfichier.close()\n\texcept IOError:\n\t\tprint(\"err ecrire\")\n\t\treturn 0\n\tfor t in range(len(text)):\n\t\ttext[t] = text[t][:-1]\n\n\treturn text\n\ndef ecrire(pathplusNomfichier, text, modeOuverture ): #fonction écriture fichier\n\ttry:\n\t\tfichier = open(pathplusNomfichier, modeOuverture)\n\t\tfichier.write(str(text) + '\\n' )\n\n\t\tfichier.close()\n\texcept IOError:\n\t\tprint(\"err ecrire\")\n\t\treturn 0\n\treturn 1\n\ndef enregistrer(filePath): #enregistre les combinaisons mouvements cube/ touches à simuler\n\tfirst = 1\n\tfor i in keys:\n\t\tif first == 1:\n\t\t\tfirst=0\n\t\t\tecrire(filePath,i,\"w\")\n\t\telse:\n\t\t\tecrire(filePath,i,\"a\")\n\t\t\n\ndef loadenregistrer(filePath):#load les combinaisons mouvements cube/ touches à simuler\n\n\tglobal keys\n\ttempo=lire(filePath)\n\tif tempo != 0:\n\t\treset()\n\t\tkeys = tempo\n\t\tfor i in range(0, 12):\n\t\t\tif i % 2 == 0:\n\t\t\t\tfenetre.blit(font.render(keys[i], 1, (0, 0, 0)), (posText, 50 * (i + 1)))\n\t\t\tif i % 2 == 1:\n\t\t\t\tfenetre.blit(font.render(keys[i], 1, (0, 0, 0)), (posText + 300, 50 * i))\n\telse:\n\t\tprint(\"echec lecture save\")\n\n\n\ndef reset():#reset les combinaisons mouvements cube/ touches à simuler\n\tglobal keys\n\tfor i in range(0, 12):\n\t\tkeys[i] = 0\n\t\tif i % 2 == 0:\n\t\t\tfenetre.blit(cache, (posText, 50 * (i + 1)))\n\t\tif i % 2 == 1:\n\t\t\tfenetre.blit(cache, (posText + 300, 50 * i))\n\ndef initFirstClickAreas(tabDevices): #déclare les zones cliquables pour PyGame pour la fenetre de connexion d'un device \n\tfor i in range(len(tabDevices)):\n\t\tfirstClickAreas.append(pygame.Rect((550, 25 * (i+1)), (200, 50)))\n\t\tfirstRectSurf.append(pygame.Surface(firstClickAreas[i].size))\n\n\ndef initclickable_areas(): #déclare les zones cliquables pour PyGame pour la fenetre des combinaisons mouvements cube/ touches à simuler\n\tfor i in range(0, 20):\n\t\tif i % 2 == 1 and i <= 13:\n\t\t\tclickable_areas.append(pygame.Rect((600, 50 * i - 5), (200, 55)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i % 2 == 0 and i <= 13:\n\t\t\tclickable_areas.append(pygame.Rect((600 + 300, 50 * (i - 1) - 5), (200, 55)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 14:\n\t\t\tclickable_areas.append(pygame.Rect((1200, 200), (50, 50)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 15:\n\t\t\tclickable_areas.append(pygame.Rect((1175, 400), (50, 18)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 16:\n\t\t\tclickable_areas.append(pygame.Rect((1225, 400), (50, 18)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 17:\n\t\t\tclickable_areas.append(pygame.Rect((1175, 525), (50, 18)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 18:\n\t\t\tclickable_areas.append(pygame.Rect((1225, 525), (50, 18)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\t\tif i == 19:\n\t\t\tclickable_areas.append(pygame.Rect((1200, 625), (50, 50)))\n\t\t\trect_surf.append(pygame.Surface(clickable_areas[i].size))\n\n\n\n\n\n\ndef press(indexKeyPressed): #simule la touche qui correspond au mouvement du cube\n\tif len(keys[indexKeyPressed]) == 1:\n\t\tkeyboard.press(keys[indexKeyPressed])\n\t\ttime.sleep(delay)\n\t\tkeyboard.release(keys[indexKeyPressed])\n\telse:\n\t\tif keys[indexKeyPressed] == \"space\":\n\n\t\t\tkeyboard.press(Key.space)\n\t\t\tkeyboard.release(Key.space)\n\t\tif keys[indexKeyPressed] == \"alt\":\n\t\t\tkeyboard.press(Key.alt)\n\t\t\tkeyboard.release(Key.alt)\n\t\tif keys[indexKeyPressed] == \"shift\":\n\t\t\tkeyboard.press(Key.shift)\n\t\t\tkeyboard.release(Key.shift)\n\t\tif keys[indexKeyPressed] == \"right\":\n\t\t\tkeyboard.press(Key.right)\n\t\t\tkeyboard.release(Key.right)\n\t\tif keys[indexKeyPressed] == \"pause\":\n\t\t\tkeyboard.press(Key.pause)\n\t\t\tkeyboard.release(Key.pause)\n\t\tif keys[indexKeyPressed] == \"left\":\n\t\t\tkeyboard.press(Key.left)\n\t\t\tkeyboard.release(Key.left)\n\t\tif keys[indexKeyPressed] == \"esc\":\n\t\t\tkeyboard.press(Key.esc)\n\t\t\tkeyboard.release(Key.esc)\n\t\tif keys[indexKeyPressed] == \"enter\":\n\t\t\tkeyboard.press(Key.enter)\n\t\t\tkeyboard.release(Key.enter)\n\t\tif keys[indexKeyPressed] == \"down\":\n\t\t\tkeyboard.press(Key.down)\n\t\t\tkeyboard.release(Key.down)\n\t\tif keys[indexKeyPressed] == \"delete\":\n\t\t\tkeyboard.press(Key.delete)\n\t\t\tkeyboard.release(Key.delete)\n\t\tif keys[indexKeyPressed] == \"ctrl\":\n\t\t\tkeyboard.press(Key.ctrl)\n\t\t\tkeyboard.release(Key.ctrl)\n\t\tif keys[indexKeyPressed] == \"ctrl_r\":\n\t\t\tkeyboard.press(Key.ctrl_r)\n\t\t\tkeyboard.release(Key.ctrl_r)\n\t\tif keys[indexKeyPressed] == \"ctrl_l\":\n\t\t\tkeyboard.press(Key.ctrl_l)\n\t\t\tkeyboard.release(Key.ctrl_l)\n\t\tif keys[indexKeyPressed] == \"up\":\n\t\t\tkeyboard.press(Key.up)\n\t\t\tkeyboard.release(Key.up)\n\t\tif keys[indexKeyPressed] == \"f1\":\n\t\t\tkeyboard.press(Key.f1)\n\t\t\tkeyboard.release(Key.f1)\n\t\tif keys[indexKeyPressed] == \"f2\":\n\t\t\tkeyboard.press(Key.f2)\n\t\t\tkeyboard.release(Key.f2)\n\t\tif keys[indexKeyPressed] == \"f3\":\n\t\t\tkeyboard.press(Key.f3)\n\t\t\tkeyboard.release(Key.f3)\n\t\tif keys[indexKeyPressed] == \"f4\":\n\t\t\tkeyboard.press(Key.f4)\n\t\t\tkeyboard.release(Key.f4)\n\t\tif keys[indexKeyPressed] == \"f5\":\n\t\t\tkeyboard.press(Key.f5)\n\t\t\tkeyboard.release(Key.f5)\n\t\tif keys[indexKeyPressed] == \"f6\":\n\t\t\tkeyboard.press(Key.f6)\n\t\t\tkeyboard.release(Key.f6)\n\t\tif keys[indexKeyPressed] == \"f7\":\n\t\t\tkeyboard.press(Key.f7)\n\t\t\tkeyboard.release(Key.f7)\n\t\tif keys[indexKeyPressed] == \"f8\":\n\t\t\tkeyboard.press(Key.f8)\n\t\t\tkeyboard.release(Key.f8)\n\t\tif keys[indexKeyPressed] == \"f9\":\n\t\t\tkeyboard.press(Key.f9)\n\t\t\tkeyboard.release(Key.f9)\n\t\tif keys[indexKeyPressed] == \"f10\":\n\t\t\tkeyboard.press(Key.f10)\n\t\t\tkeyboard.release(Key.f10)\n\t\tif keys[indexKeyPressed] == \"f11\":\n\t\t\tkeyboard.press(Key.f11)\n\t\t\tkeyboard.release(Key.f11)\n\t\tif keys[indexKeyPressed] == \"f12\":\n\t\t\tkeyboard.press(Key.f12)\n\t\t\tkeyboard.release(Key.f12)\n\n\n\n\nclass GiikerMove(): # class qui correspond à l'etat du cube\n\tdef __init__(self, value):\n\t\tface = value // 16\n\t\tamount = value % 16\n\n\t\tself.face = [\"?\", \"B\", \"D\", \"L\", \"U\", \"R\", \"F\"][face]\n\t\tself.amount = [0, 1, 2, -1][amount]\n\n\tdef __str__(self):\n\t\treturn self.face + { 0: \"0\", 1: \"\", 2: \"2\", -1: \"'\" }[self.amount]\n\n\n\ndef change_handle(sender, data): #lorsque le cube change d'état, cette fonction est appelée\n\tmovesC = list(map(GiikerMove, data[16:]))\n\tlast_move = movesC[0]\n\ttry:\n\n\t\tprint(\"index\", moves.index(last_move.__str__()))\n\t\tpress(moves.index(last_move.__str__()))\n\texcept:\n\t\tprint(\"failed find index\")\n\n\n\nasync def run(other, loop): #lance une connexion avec un device/cube\n\t\n\tglobal flagConnectionDevice\n\twhile flagConnectionDevice != 1: # attendre que certaines conditions soient remplies pour lancer la connexion\n\t\tawait asyncio.sleep(1)\n\t\n\t\n\ttry :\n\t\tasync with BleakClient(address, loop=loop) as client:\n\t\t\tvalue = await client.read_gatt_char(MODEL_NBR_UUID)\n\n\n\n\t\t\tprint(\"len initial value : \", len(value))\n\t\t\tprint(\"initial value : {0}\".format(\"\".join(map(chr, value))))\n\t\t\trecent_moves = list(map(GiikerMove, value[16:]))\n\t\t\tlast_move = recent_moves[0]\n\t\t\tprint(last_move)\n\n\n\t\t\tprint(\"listening cube : \")\n\t\t\tfenetre.blit(pygame.transform.scale(Boui, (60, 90)), (1185, 20))\n\t\t\tawait client.start_notify(MODEL_NBR_UUID, change_handle) # si la connexion fonctionne, la fonction change_handle sera lancée dès la réception de nouvelles informations venant du cube.\n\t\t\twhile True:\n\t\t\t\tawait asyncio.sleep(1)\n\t\t\t\t\n\texcept:\n\n\t\tprint(\"failed to connect\")\n\t\tfenetre.blit(pygame.transform.scale(Bnon, (60, 90)), (1185, 20))\n\n\n\n\n\ndef AffichageDevicesPyGame(tabDevices): # affiche les devices détectés sur la fenetre\n\tfenetre.blit(pygame.transform.scale(cache, (surfaceW, surfaceW)), (0,0))\n\tif len(tabDevices) == 0:\n\t\tfenetre.blit(font.render(\"Aucun cube trouvé,\", 1, (0, 0, 0)), (500, 300))\n\t\tfenetre.blit(font.render(\"vérifiez votre connexion Bluetooth.\", 1, (0, 0, 0)), (500, 350))\n\telse:\n\t\tfor i in range(len(tabDevices)):\n\t\t\tfenetre.blit(font.render(tabDevices[i][0] + \" : \" + tabDevices[i][1], 1, (0, 0, 0)), (525, 25 * (i+1)))\n\tpygame.display.update()\n\tinitFirstClickAreas(tabDevices)\n\n\ndef AffichageMainFenetrePygame(moves, keys): # affiche des elements de la fenêtre principale\n\tx = 650\n\ty = 50\n\n\n\tfenetre.blit(fond, (0,0))\n\tfenetre.blit(pygame.transform.scale(resetImg[0], (50, 50)), (1200, 200))\n\tfenetre.blit(preset1, (1175,350))\n\tfenetre.blit(preset2, (1175,475))\n\tfenetre.blit(contactImg, (1200,625))\n\n\tfor val in range (len(moves)):\n\t\tif val % 2 == 0:\n\t\t\tfenetre.blit(pygame.transform.scale(imgSelect[val], (50, 50)), (posImg, 50 * (val + 1) - 5))\n\t\t\tfenetre.blit(font.render(moves[val], 1, (0, 0, 0)),(x, y))\n\t\t\tfenetre.blit(font.render(keys[val], 1, (0, 0, 0)),(posText, y))\n\t\tif val % 2 == 1:\n\t\t\tfenetre.blit(pygame.transform.scale(imgSelect[val], (50, 50)), (posImg + 300, 50 * val - 5))\n\t\t\tfenetre.blit(font.render(moves[val], 1, (0, 0, 0)),(x + 300, y - 50))\n\t\t\tfenetre.blit(font.render(keys[val], 1, (0, 0, 0)),(posText + 300, y - 50))\n\t\ty = y + 50\n\t\n\n\n\n\ndef on_press(index,key): # fonction qui gère l'assignation d'une touche à un mouvement du cube.\n\tkeypressed = '{0}'.format(key)\n\t\n\tif len(keypressed) - 2 == 1:\n\t\tkeypressed = keypressed.replace(\"\\'\",'')\n\t\tkeys[index - 1] = keypressed\n\t\tif (index) % 2 == 1:\n\t\t\tfenetre.blit(font.render(keypressed, 1, (0, 0, 0)), (posText, 50 * (index)))\n\t\t\t\n\t\tif (index) % 2 == 0:\n\t\t\tfenetre.blit(font.render(keypressed, 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\n\n\telse:\n\t\tif \"Key.\" in keypressed:\n\t\t\tkeypressed = keypressed.replace(\"Key.\",'')\n\t\t\t#\n\t\t\t\n\n\t\t\tif \"delete\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"delete\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"delete\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"delete\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"delete\"\n\t\t\t\tquit = 0\n\t\t\tif \"tab\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"tab\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"tab\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"tab\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"tab\"\n\t\t\t\tquit = 0\n\t\t\tif \"enter\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"enter\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"enter\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"enter\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"enter\"\n\t\t\t\tquit = 0\n\t\t\tif \"esc\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"esc\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"esc\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"esc\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"esc\"\n\t\t\t\tquit = 0\n\t\t\tif \"space\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"space\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"space\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"space\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"space\"\n\t\t\t\tquit = 0\n\n\n\t\t\tif \"up\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"up\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"up\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"up\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"up\"\n\t\t\t\tquit = 0\n\t\t\tif \"down\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"down\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"down\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"down\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"down\"\n\t\t\t\tquit = 0\n\t\t\tif \"right\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"right\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"right\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"right\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"right\"\n\t\t\t\tquit = 0\n\t\t\tif \"left\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"left\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"left\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"left\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"left\"\n\t\t\t\tquit = 0\n\t\t\tif \"f1\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f1\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f1\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f1\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f1\"\n\t\t\t\tquit = 0\n\t\t\tif \"f2\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f2\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f2\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f2\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f2\"\n\t\t\t\tquit = 0\n\t\t\tif \"f3\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f3\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f3\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f3\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f3\"\n\t\t\t\tquit = 0\n\t\t\tif \"f4\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f4\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f4\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f4\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f4\"\n\t\t\t\tquit = 0\n\t\t\tif \"f5\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f5\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f5\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f5\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f5\"\n\t\t\t\tquit = 0\n\t\t\tif \"f6\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f6\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f6\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f6\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f6\"\n\t\t\t\tquit = 0\n\t\t\tif \"f7\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f7\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f7\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f7\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f7\"\n\t\t\t\tquit = 0\n\t\t\tif \"f8\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f8\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f8\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f8\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f8\"\n\t\t\t\tquit = 0\n\t\t\tif \"f9\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f9\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f9\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f9\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f9\"\n\t\t\t\tquit = 0\n\t\t\tif \"f10\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f10\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f10\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f10\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f10\"\n\t\t\t\tquit = 0\n\t\t\tif \"f11\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f11\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f11\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f11\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f11\"\n\t\t\t\tquit = 0\n\t\t\tif \"f12\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"f12\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"f12\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"f12\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"f12\"\n\t\t\t\tquit = 0\n\t\t\tif \"shift\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"shift\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"shift\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"shift\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"shift\"\n\t\t\t\tquit = 0\n\t\t\tif \"shift_l\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"shift_l\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"shift_l\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"shift_l\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"shift_l\"\n\t\t\t\tquit = 0\n\t\t\tif \"ctrl_r\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"ctrl_r\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"ctrl_r\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"ctrl_r\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"ctrl_r\"\n\t\t\t\tquit = 0\n\t\t\tif \"ctrl_l\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"ctrl_l\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"ctrl_l\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"ctrl\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"ctrl_l\"\n\t\t\t\tquit = 0\n\t\t\tif \"alt_gr\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(cache, (posImg, 50 * index - 5))\n\t\t\t\t\tfenetre.blit(font.render(\"alt_gr\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"alt_gr\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(cache, (posImg + 300, 50 * index - 5))\n\t\t\t\t\tfenetre.blit(font.render(\"alt_gr\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"alt_gr\"\n\t\t\t\tquit = 0\n\t\t\tif \"alt_l\" == keypressed:\n\t\t\t\tif (index - 1) % 2 == 0:\n\t\t\t\t\tfenetre.blit(font.render(\"alt_l\", 1, (0, 0, 0)), (posText, 50 * index))\n\t\t\t\t\tkeys[index - 1] = \"alt_l\"\n\t\t\t\tif (index - 1) % 2 == 1:\n\t\t\t\t\tfenetre.blit(font.render(\"alt_l\", 1, (0, 0, 0)), (posText + 300, 50 * (index - 1)))\n\t\t\t\t\tkeys[index - 1] = \"alt_l\"\n\t\t\t\tquit = 0\n\n\n\n\n\t\telse:\n\t\t\tprint(\"invalid key\")\n\tenregistrer(\"save/currentsave.txt\")\n\tprint(\"key added\")\n\tprint(keys)\n\treturn False\n\n\n\n\ndef on_release(key):\n\tpass\n\n\n\ndef HandleEventTouchePression(index, keys): # gère l'evenement de pression d'une touche clavier\n\n\te = partial(on_press,index)\n\twith Listener(on_press=e, on_release=on_release) as listener:\n\t\tlistener.join()\n\n\n# asyncr\n\nasync def EventDeviceDiscoverClickPyGame(tabDevices):# gère l'événement de clique de l'utilisateur pour sélectionner une device\n\tglobal address\n\ta = 0\n\twhile a == 0:\n\t\tlevent = pygame.event.get()\n\t\tif len(levent) == 0:\n\t\t\tawait asyncio.sleep(0.1)\n\t\telse:\n\t\t\tfor event in levent:\n\t\t\t\tif event.type == QUIT:\n\t\t\t\t\tasyncio.get_event_loop().stop()\n\t\t\t\t\treturn False\n\t\t\t\tif event.type == MOUSEBUTTONUP and event.button == 1: # 1= clique gauche\n\t\t\t\t\tfor element in range (len(firstClickAreas)):\n\t\t\t\t\t\tif firstClickAreas[element].collidepoint(event.pos):\n\t\t\t\t\t\t\tif element < (len(firstClickAreas)):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tprint(element)\n\t\t\t\t\t\t\t\taddress = tabDevices[element][1][0:-1]\n\t\t\t\t\t\t\t\ta = 1\n\nasync def HandleEventMainPyGame(): # gère la detection d'évenements de type clique sur le Fenêtre principale.\n\twhile 1:\n\t\tlevent = pygame.event.get()\n\t\tif len(levent) == 0:\n\t\t\tawait asyncio.sleep(0.1)\n\t\telse:\n\t\t\tfor event in levent:\n\t\t\t\tif event.type == QUIT:\n\t\t\t\t\tasyncio.get_event_loop().stop()\n\t\t\t\t\treturn False\n\t\t\t\tif event.type == MOUSEBUTTONUP and event.button == 1: # 1= clique gauche\n\t\t\t\t\tfor element in range (len(clickable_areas)):\n\t\t\t\t\t\tif clickable_areas[element].collidepoint(event.pos):\n\t\t\t\t\t\t\tif (element - 1) % 2 == 0 and element <= 13:\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posImg, 50 * element - 5))\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posText, 50 * element - 5))\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(img[element - 1], (50, 50)), (posImg, 50 * element - 5))\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\tprint(\"click : \" + str(element))\n\t\t\t\t\t\t\t\tHandleEventTouchePression(element, keys)\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posImg - 25, 50 * element - 5))\n\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(imgSelect[element - 1], (50, 50)), (posImg, 50 * element - 5))\n\t\t\t\t\t\t\tif (element - 1) % 2 == 1 and element <= 13:\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posImg + 300, 50 * (element - 1) - 5))\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posText + 300, 50 * (element - 1) - 5))\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(img[element - 1], (50, 50)), (posImg + 300, 50 * (element - 1) - 5))\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\tprint(\"click : \" + str(element))\n\t\t\t\t\t\t\t\tHandleEventTouchePression(element, keys)\n\t\t\t\t\t\t\t\tfenetre.blit(cache, (posImg + 275, 50 * (element - 1) - 5))\n\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(imgSelect[element - 1], (50, 50)), (posImg + 300, 50 * (element - 1) - 5))\n\t\t\t\t\t\t\tif element == 14:\n\t\t\t\t\t\t\t\treset()\n\t\t\t\t\t\t\t\tfor i in range(2 * len(resetImg)):\n\t\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(resetImg[i % len(resetImg)], (50, 50)), (1200, 200))\n\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\ttime.sleep(0.5/36)\n\t\t\t\t\t\t\t\tfenetre.blit(pygame.transform.scale(resetImg[0], (50, 50)), (1200, 200))\n\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\tif element >= 15 and element <= 18:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif (element - 14) % 2 == 0:\n\t\t\t\t\t\t\t\t\tif (element - 14) == 2:\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(load1, (1175,350))\n\t\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\t\ttime.sleep(delay2)\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(preset1, (1175,350))\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tloadenregistrer(\"save/save1.txt\")\n\t\t\t\t\t\t\t\t\tif (element - 14) == 4:\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(load2, (1175,475))\n\t\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\t\ttime.sleep(delay2)\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(preset2, (1175,475))\n\t\t\t\t\t\t\t\t\t\tloadenregistrer(\"save/save2.txt\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif (element - 14) == 1:\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(save1, (1175,350))\n\t\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\t\ttime.sleep(delay2)\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(preset1, (1175,350))\n\t\t\t\t\t\t\t\t\t\tenregistrer(\"save/save1.txt\")\n\t\t\t\t\t\t\t\t\tif (element - 14) == 3:\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(save2, (1175,475))\n\t\t\t\t\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\t\t\t\t\t\ttime.sleep(delay2)\n\t\t\t\t\t\t\t\t\t\tfenetre.blit(preset2, (1175,475))\n\t\t\t\t\t\t\t\t\t\tenregistrer(\"save/save2.txt\")\n\t\t\t\t\t\t\tif element == 19:\n\t\t\t\t\t\t\t\tAffichageFenetreContactPygame()\n\t\t\t\tpygame.display.update()\n\n\n\n\nasync def main():\n\tglobal flagConnectionDevice\n\t\n\t\n\timageDeclarationImport() # déclare des variables globales pour l'affichage\n\n\tloadenregistrer(\"save/currentsave.txt\") #load les mouvements récents\n\n\tdevices = await discover() # découvre les devices bluetooth\n\ttabDevices = []\n\tfor d in devices:\n\t\ttempo = (d.__str__().split(\" \"))\n\t\tif tempo[1][0:2] == \"Gi\":\n\t\t\ttabDevices.append((tempo[1],tempo[0]) )\n\t\n\tAffichageDevicesPyGame(tabDevices) # afficher les devices détectés pour que l'utilisateur en selectionne un\n\tawait EventDeviceDiscoverClickPyGame(tabDevices) # utilisaeur clique sur un device affiché\n\n\tflagConnectionDevice = 1 #pour libérer la connexion au cube\n\n\tAffichageMainFenetrePygame(moves, keys) #affiche les combinaisons du cube sur l'écran\n\tinitclickable_areas()\n\t#******************************************************\n\tawait HandleEventMainPyGame()\n\treturn True\n\n\n##############################################################\"\"\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(asyncio.gather(run(address, loop), main()))\n\t\t\t\t\t# lance la connexion et l'affichage en asynchrone\n\n\nquit()\n","repo_name":"JulesMicho/rubisenMappy","sub_path":"finalcube.py","file_name":"finalcube.py","file_ext":"py","file_size_in_byte":27792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21836661989","text":"def check(i):\n for j in range(m):\n if need[i][j] > available[j]:\n return False\n return True\n\nn = int(input(\"Enter the number of Processes: \"))\nm = int(input(\"Enter the number of Resources: \"))\n\nallocation = []\nfor i in range(n):\n allocation.append(list(map(int, input('\\nEnter the number of instances allocated for Process P'+str(i)+\" : \").strip().split())))\n \nmaX = []\nfor i in range(n):\n maX.append(list(map(int, input(\"\\nEnter Max matrix entry for Process P\"+str(i)+\" : \").strip().split())))\n\navailable = list(map(int, input(\"\\nEnter the number of instances available of Resources : \").strip().split())) \n\n# Compute the need matrix\nneed = [[0 for i in range(m)] for j in range(n)]\nfor i in range(n):\n for j in range(m):\n need[i][j] = maX[i][j] - allocation[i][j]\n\n# Implements Banker's Algorithm \nsequence = ['0']*n\nvisited = [0]*n\ncount = 0\nwhile countИмя Фамилия login@\n+79999999999\nТелеграм: @telegram\nВнутренний телефон: 1234\nОфис\nЭтаж\nСтол: 1111\nонлайн'''\n m2.return_value = 'онлайн'\n m3.return_value = []\n m4.return_value = []\n m5.return_value = 'login'\n handle_utterance(tg_app, uid, 'где login', rendered_text)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/test_find_table.py","file_name":"test_find_table.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29652561971","text":"'''\n给你一个整数数组 nums ,数组中的元素 互不相同 。返回该数组所有可能的子集(幂集)。\n解集 不能 包含重复的子集。你可以按 任意顺序 返回解集。\n\n思路:\n1.定义一个函数helper(i, tmp),i表示目前的nums索引号,tmp表示当前的子集\n2.首先将tmp加入res中,然后遍历寻找nums[i:]之后的索引\n helper(j+1, tmp+[nums[j]]) 将之前的tmp和nums[j]组成新的组合\n3. 为什么没有重复:\n 因为helper中的for循环是从i开始的\n'''\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n res = []\n n = len(nums)\n\n def helper(i, tmp):\n res.append(tmp)\n for j in range(i, n):\n helper(j+1, tmp+[nums[j]])\n helper(0, [])\n return res","repo_name":"miyagipipi/studying","sub_path":"BackTracking/78. 子集.py","file_name":"78. 子集.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20754372770","text":"# -*- coding: utf-8 -*-\nfrom mongoengine import *\nimport enum\nimport json\nimport random\nimport unittest\n\nimport queries\nimport json_utils\n\n\nclass ImagesEnum(enum.Enum):\n cover = 'cover'\n background = 'background'\n foreground = 'foreground'\n\n\nclass QualityEnum(enum.IntEnum):\n LD = 0\n SD = 1\n HD = 2\n FULL_HD = 3\n\n\nclass File(EmbeddedDocument):\n path = StringField()\n quality = IntField()\n\n\nclass Quote(EmbeddedDocument):\n source = StringField()\n text = StringField()\n\n\nclass Episode(EmbeddedDocument):\n num = IntField()\n alias = StringField()\n files = EmbeddedDocumentListField('File')\n\n\nclass Season(Document):\n num = IntField()\n alias = StringField()\n episodes = EmbeddedDocumentListField('Episode', db_field='items')\n meta = {\n 'collection': 'products',\n 'allow_inheritance': True\n }\n\n\nclass Series(Document):\n title = StringField()\n alias = StringField()\n description = StringField()\n seasons = ListField(ReferenceField('Season'), db_field='items')\n quote = EmbeddedDocumentField('Quote')\n images = MapField(URLField())\n meta = {\n 'collection': 'products',\n 'allow_inheritance': True\n }\n\n\nclass TestTask(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n connect('test', host=\"mongo\")\n\n def test_01_create_documents(self):\n def __quote(i):\n source = 'QuoteSource %i' % i\n return {'source': source, 'text': 'test quote'}\n\n def __images(i):\n return {img.value: 'image path %i' % i for img in ImagesEnum}\n\n def __files():\n files = list()\n for i in QualityEnum:\n f = File(quality=i, path='file path %i' % i)\n files.append(f)\n return files\n\n def __episodes():\n episodes = list()\n for i in range(0, random.randint(1, 30)):\n s = Episode(num=i, alias='episode%i' % i, files=__files())\n episodes.append(s)\n return episodes\n\n def __seasons():\n seasons = list()\n for i in range(0, random.randint(1, 10)):\n s = Season(num=i, alias='season%i' % i, episodes=__episodes())\n s.save()\n seasons.append(s)\n return seasons\n\n def __series():\n series = list()\n for i in range(0, random.randint(1, 10)):\n s = Series.objects(\n title='series %i' % i,\n alias='series%i' % i\n ).modify(\n upsert=True,\n new=True,\n set__quote=__quote(i),\n set__images=__images(i),\n set__description='description %i' % i,\n set__seasons=__seasons())\n series.append(s)\n return series\n self.assertTrue(__series())\n\n def test_02_get_series(self):\n \"\"\"Check structure of result of get_series method.\"\"\"\n\n expected_response = \"\"\"\n {\n \"path\": \"/series/series4\",\n \"slide\": {\n \"background\": \"image path 4\",\n \"foreground\": \"image path 4\"\n },\n \"title\": \"series 4\",\n \"description\": \"description 4\",\n \"cover\": \"image path 4\",\n \"quote\": \"test quote\",\n \"quote_source\": \"QuoteSource 4\",\n \"seasons\": [\n {\n \"path\": \"/series/series4/season0\",\n \"title\": \"0 сезон\",\n \"episodes\": [\n {\n \"path\": \"/series/series4/season0/episode0\",\n \"title\": \"Эпизод 0 сезона\",\n \"files\": [\n {\n \"path\": \"file path 0\",\n \"label\": \"LD\",\n \"quality\": 0\n }\n ]\n }\n ]\n }\n ]\n }\n \"\"\"\n target_json = queries.get_series()\n\n # Compare structure of two json objects\n self.assertTrue(json_utils.compare_json(json.loads(expected_response),\n target_json))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"artur-garifulov/mongo_test","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29182909497","text":"\"\"\"Tests audit log.\"\"\"\n\nimport time\nfrom unittest.mock import Mock\n\nimport pytest\n\nfrom infra.walle.server.tests.lib.util import monkeypatch_audit_log, AUDIT_LOG_ID\nfrom sepelib.mongo.mock import ObjectMocker\nfrom walle import audit_log\nfrom walle import authorization\nfrom walle.errors import ApiError\n\n\n@pytest.fixture\ndef log(database):\n return ObjectMocker(\n audit_log.LogEntry,\n {\n \"id\": \"uuid\",\n \"time\": 999.99,\n \"issuer\": authorization.ISSUER_WALLE,\n \"type\": audit_log.TYPES[0],\n \"status\": audit_log.STATUS_UNKNOWN,\n \"status_time\": 999.99,\n },\n )\n\n\n@pytest.fixture(autouse=True)\ndef patch_audit_log_time(monkeypatch):\n cur_time = time.time()\n monkeypatch_audit_log(monkeypatch, uuid=None, time=cur_time, patch_create=False)\n\n return cur_time\n\n\ndef test_create_entry(monkeypatch, log):\n cur_time = time.time()\n monkeypatch_audit_log(monkeypatch, uuid=AUDIT_LOG_ID, time=cur_time, patch_create=False)\n\n entry = log.mock(\n {\n \"id\": AUDIT_LOG_ID,\n \"time\": cur_time,\n \"issuer\": authorization.ISSUER_WALLE,\n \"type\": audit_log.TYPES[0],\n \"status\": audit_log.STATUS_UNKNOWN,\n \"status_time\": cur_time,\n },\n save=False,\n )\n\n created_entry = audit_log.create(issuer=authorization.ISSUER_WALLE, type=audit_log.TYPES[0])\n assert created_entry.to_mongo() == entry.to_mongo()\n\n log.assert_equal()\n\n\n@pytest.mark.parametrize(\"status\", audit_log.STATUSES)\ndef test_complete_task(patch_audit_log_time, log, status):\n for i in range(2):\n entry = log.mock({\"id\": audit_log._uuid(), \"status\": status})\n\n task = Mock(audit_log_id=entry.id)\n audit_log.complete_task(task)\n if status in (audit_log.STATUS_UNKNOWN, audit_log.STATUS_ACCEPTED):\n entry.status = audit_log.STATUS_COMPLETED\n entry.status_time = patch_audit_log_time\n\n log.assert_equal()\n\n\n@pytest.mark.parametrize(\"payload\", (None, {\"some-key\": \"some-value\"}))\ndef test_complete_with_payload(patch_audit_log_time, log, payload):\n entry = log.mock({\"id\": audit_log._uuid(), \"status\": audit_log.STATUS_UNKNOWN, \"payload\": payload})\n\n extra_payload = {\n \"some-extra-key\": \"some-extra-value\",\n \"some-extra-hash\": {\n \"a\": 1,\n \"b\": 2,\n },\n }\n\n audit_log.complete_request(entry.copy(), extra_payload=extra_payload)\n\n entry.status = audit_log.STATUS_COMPLETED\n entry.status_time = patch_audit_log_time\n entry.payload = dict(payload or {}, **extra_payload)\n\n log.assert_equal()\n\n\n@pytest.mark.parametrize(\"status\", audit_log.STATUSES)\ndef test_fail_task(patch_audit_log_time, log, status):\n for i in range(2):\n entry = log.mock({\"id\": audit_log._uuid(), \"status\": status})\n\n task = Mock(audit_log_id=entry.id)\n audit_log.fail_task(task, \"test error\")\n if status in (audit_log.STATUS_UNKNOWN, audit_log.STATUS_ACCEPTED):\n entry.status = audit_log.STATUS_FAILED\n entry.status_time = patch_audit_log_time\n entry.error = \"test error\"\n\n log.assert_equal()\n\n\n@pytest.mark.parametrize(\"type\", audit_log.TYPES)\ndef test_context_manager_accepted(patch_audit_log_time, log, type):\n with log.mock({\"type\": type}) as entry:\n log.assert_equal()\n\n entry.status = audit_log.STATUS_COMPLETED if type in audit_log.INSTANT_TYPES else audit_log.STATUS_ACCEPTED\n entry.status_time = patch_audit_log_time\n log.assert_equal()\n\n\ndef test_context_manager_rejected(patch_audit_log_time, log):\n with pytest.raises(ApiError):\n with log.mock() as entry:\n raise ApiError(0, \"test error\")\n\n entry.status = audit_log.STATUS_REJECTED\n entry.status_time = patch_audit_log_time\n entry.error = \"test error\"\n log.assert_equal()\n\n\ndef test_context_manager_failed(patch_audit_log_time, log):\n class SomeException(Exception):\n pass\n\n with pytest.raises(SomeException):\n with log.mock() as entry:\n raise SomeException(\"test error\")\n\n entry.status = audit_log.STATUS_FAILED\n entry.status_time = patch_audit_log_time\n entry.error = \"test error\"\n log.assert_equal()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/test_audit_log.py","file_name":"test_audit_log.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33274146560","text":"import pygame as pg\nimport random, time, sys\nfrom pygame.locals import *\n\nfps = 25\nwindow_w, window_h = 600, 500\nblock, cup_h, cup_w = 20, 20, 10\n\nside_freq, down_freq = 0.15, 0.1 # передвижение в сторону и вниз\n\nside_margin = int((window_w - cup_w * block) / 2)\ntop_margin = window_h - (cup_h * block) - 5\n\ncolors = ((0, 0, 225), (0, 225, 0), (225, 0, 0), (225, 225, 0)) # синий, зеленый, красный, желтый\nlightcolors = ((30, 30, 255), (50, 255, 50), (255, 30, 30), (255, 255, 30)) # светло-синий, светло-зеленый, светло-красный, светло-желтый\n\nwhite, gray, black = (255, 255, 255), (185, 185, 185), (0, 0, 0)\nbrd_color, bg_color, txt_color, title_color, info_color = white, black, white, colors[3], colors[0]\n\nfig_w, fig_h = 5, 5\nempty = 'o'\n\nfigures = {'S': [['ooooo',\n 'ooooo',\n 'ooxxo',\n 'oxxoo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'ooxxo',\n 'oooxo',\n 'ooooo']],\n 'Z': [['ooooo',\n 'ooooo',\n 'oxxoo',\n 'ooxxo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'oxxoo',\n 'oxooo',\n 'ooooo']],\n 'J': [['ooooo',\n 'oxooo',\n 'oxxxo',\n 'ooooo',\n 'ooooo'],\n ['ooooo',\n 'ooxxo',\n 'ooxoo',\n 'ooxoo',\n 'ooooo'],\n ['ooooo',\n 'ooooo',\n 'oxxxo',\n 'oooxo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'ooxoo',\n 'oxxoo',\n 'ooooo']],\n 'L': [['ooooo',\n 'oooxo',\n 'oxxxo',\n 'ooooo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'ooxoo',\n 'ooxxo',\n 'ooooo'],\n ['ooooo',\n 'ooooo',\n 'oxxxo',\n 'oxooo',\n 'ooooo'],\n ['ooooo',\n 'oxxoo',\n 'ooxoo',\n 'ooxoo',\n 'ooooo']],\n 'I': [['ooxoo',\n 'ooxoo',\n 'ooxoo',\n 'ooxoo',\n 'ooooo'],\n ['ooooo',\n 'ooooo',\n 'xxxxo',\n 'ooooo',\n 'ooooo']],\n 'O': [['ooooo',\n 'ooooo',\n 'oxxoo',\n 'oxxoo',\n 'ooooo']],\n 'T': [['ooooo',\n 'ooxoo',\n 'oxxxo',\n 'ooooo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'ooxxo',\n 'ooxoo',\n 'ooooo'],\n ['ooooo',\n 'ooooo',\n 'oxxxo',\n 'ooxoo',\n 'ooooo'],\n ['ooooo',\n 'ooxoo',\n 'oxxoo',\n 'ooxoo',\n 'ooooo']]}\n\ndef pauseScreen():\n pause = pg.Surface((600, 500), pg.SRCALPHA) \n pause.fill((0, 0, 255, 127)) \n display_surf.blit(pause, (0, 0))\n\ndef main():\n global fps_clock, display_surf, basic_font, big_font\n pg.init()\n fps_clock = pg.time.Clock()\n display_surf = pg.display.set_mode((window_w, window_h))\n basic_font = pg.font.SysFont('arial', 20)\n big_font = pg.font.SysFont('verdana', 45)\n pg.display.set_caption('Тетрис Lite')\n showText('Тетрис Lite')\n while True: # начинаем игру\n runTetris()\n pauseScreen()\n showText('Игра закончена')\n\n\ndef runTetris():\n cup = emptycup()\n last_move_down = time.time()\n last_side_move = time.time()\n last_fall = time.time()\n going_down = False \n going_left = False\n going_right = False\n points = 0\n level, fall_speed = calcSpeed(points)\n fallingFig = getNewFig()\n nextFig = getNewFig()\n\n while True: \n if fallingFig == None:\n # если нет падающих фигур, генерируем новую\n fallingFig = nextFig\n nextFig = getNewFig()\n last_fall = time.time()\n \n\n if not checkPos(cup, fallingFig):\n return # если на игровом поле нет свободного места - игра закончена\n quitGame()\n for event in pg.event.get(): \n if event.type == KEYUP:\n if event.key == K_SPACE:\n pauseScreen()\n showText('Пауза')\n last_fall = time.time()\n last_move_down = time.time()\n last_side_move = time.time()\n elif event.key == K_LEFT:\n going_left = False\n elif event.key == K_RIGHT:\n going_right = False\n elif event.key == K_DOWN:\n going_down = False\n\n elif event.type == KEYDOWN:\n # перемещение фигуры вправо и влево\n if event.key == K_LEFT and checkPos(cup, fallingFig, adjX=-1):\n fallingFig['x'] -= 1\n going_left = True\n going_right = False\n last_side_move = time.time()\n\n elif event.key == K_RIGHT and checkPos(cup, fallingFig, adjX=1):\n fallingFig['x'] += 1\n going_right = True\n going_left = False\n last_side_move = time.time()\n\n # поворачиваем фигуру, если есть место\n elif event.key == K_UP:\n fallingFig['rotation'] = (fallingFig['rotation'] + 1) % len(figures[fallingFig['shape']])\n if not checkPos(cup, fallingFig):\n fallingFig['rotation'] = (fallingFig['rotation'] - 1) % len(figures[fallingFig['shape']])\n\n # ускоряем падение фигуры\n elif event.key == K_DOWN:\n going_down = True\n if checkPos(cup, fallingFig, adjY=1):\n fallingFig['y'] += 1\n last_move_down = time.time()\n\n # мгновенный сброс вниз\n elif event.key == K_RETURN:\n going_down = False\n going_left = False\n going_right = False\n for i in range(1, cup_h):\n if not checkPos(cup, fallingFig, adjY=i):\n break\n fallingFig['y'] += i - 1\n\n # управление падением фигуры при удержании клавиш\n if (going_left or going_right) and time.time() - last_side_move > side_freq:\n if going_left and checkPos(cup, fallingFig, adjX=-1):\n fallingFig['x'] -= 1\n elif going_right and checkPos(cup, fallingFig, adjX=1):\n fallingFig['x'] += 1\n last_side_move = time.time()\n\n if going_down and time.time() - last_move_down > down_freq and checkPos(cup, fallingFig, adjY=1):\n fallingFig['y'] += 1\n last_move_down = time.time()\n\n\n if time.time() - last_fall > fall_speed: # свободное падение фигуры \n if not checkPos(cup, fallingFig, adjY=1): # проверка \"приземления\" фигуры\n addToCup(cup, fallingFig) # фигура приземлилась, добавляем ее в содержимое стакана\n points += clearCompleted(cup)\n level, fall_speed = calcSpeed(points)\n fallingFig = None\n else: # фигура пока не приземлилась, продолжаем движение вниз\n fallingFig['y'] += 1\n last_fall = time.time()\n\n # рисуем окно игры со всеми надписями\n display_surf.fill(bg_color)\n drawTitle()\n gamecup(cup)\n drawInfo(points, level)\n drawnextFig(nextFig)\n if fallingFig != None:\n drawFig(fallingFig)\n pg.display.update()\n fps_clock.tick(fps)\n\n\ndef txtObjects(text, font, color):\n surf = font.render(text, True, color)\n return surf, surf.get_rect()\n\n\ndef stopGame():\n pg.quit()\n sys.exit()\n\n\ndef checkKeys():\n quitGame()\n\n for event in pg.event.get([KEYDOWN, KEYUP]):\n if event.type == KEYDOWN:\n continue\n return event.key\n return None\n\n\ndef showText(text):\n titleSurf, titleRect = txtObjects(text, big_font, title_color)\n titleRect.center = (int(window_w / 2) - 3, int(window_h / 2) - 3)\n display_surf.blit(titleSurf, titleRect)\n \n pressKeySurf, pressKeyRect = txtObjects('Нажмите любую клавишу для продолжения', basic_font, title_color)\n pressKeyRect.center = (int(window_w / 2), int(window_h / 2) + 100)\n display_surf.blit(pressKeySurf, pressKeyRect)\n\n while checkKeys() == None:\n pg.display.update()\n fps_clock.tick()\n\n\ndef quitGame():\n for event in pg.event.get(QUIT): # проверка всех событий, приводящих к выходу из игры\n stopGame() \n for event in pg.event.get(KEYUP): \n if event.key == K_ESCAPE:\n stopGame() \n pg.event.post(event) \n\n\ndef calcSpeed(points):\n # вычисляет уровень\n level = int(points / 10) + 1\n fall_speed = 0.27 - (level * 0.02)\n return level, fall_speed\n\ndef getNewFig():\n # возвращает новую фигуру со случайным цветом и углом поворота\n shape = random.choice(list(figures.keys()))\n newFigure = {'shape': shape,\n 'rotation': random.randint(0, len(figures[shape]) - 1),\n 'x': int(cup_w / 2) - int(fig_w / 2),\n 'y': -2, \n 'color': random.randint(0, len(colors)-1)}\n return newFigure\n\n\ndef addToCup(cup, fig):\n for x in range(fig_w):\n for y in range(fig_h):\n if figures[fig['shape']][fig['rotation']][y][x] != empty:\n cup[x + fig['x']][y + fig['y']] = fig['color']\n\n\ndef emptycup():\n # создает пустой стакан\n cup = []\n for i in range(cup_w):\n cup.append([empty] * cup_h)\n return cup\n\n\ndef incup(x, y):\n return x >= 0 and x < cup_w and y < cup_h\n\n\ndef checkPos(cup, fig, adjX=0, adjY=0):\n # проверяет, находится ли фигура в границах стакана, не сталкиваясь с другими фигурами\n for x in range(fig_w):\n for y in range(fig_h):\n abovecup = y + fig['y'] + adjY < 0\n if abovecup or figures[fig['shape']][fig['rotation']][y][x] == empty:\n continue\n if not incup(x + fig['x'] + adjX, y + fig['y'] + adjY):\n return False\n if cup[x + fig['x'] + adjX][y + fig['y'] + adjY] != empty:\n return False\n return True\n\ndef isCompleted(cup, y):\n # проверяем наличие полностью заполненных рядов\n for x in range(cup_w):\n if cup[x][y] == empty:\n return False\n return True\n\n\ndef clearCompleted(cup):\n # Удаление заполенных рядов и сдвиг верхних рядов вниз\n removed_lines = 0\n y = cup_h - 1 \n while y >= 0:\n if isCompleted(cup, y):\n for pushDownY in range(y, 0, -1):\n for x in range(cup_w):\n cup[x][pushDownY] = cup[x][pushDownY-1]\n for x in range(cup_w):\n cup[x][0] = empty\n removed_lines += 1\n else:\n y -= 1 \n return removed_lines\n\n\ndef convertCoords(block_x, block_y):\n return (side_margin + (block_x * block)), (top_margin + (block_y * block))\n\n\ndef drawBlock(block_x, block_y, color, pixelx=None, pixely=None):\n #отрисовка квадратных блоков, из которых состоят фигуры\n if color == empty:\n return\n if pixelx == None and pixely == None:\n pixelx, pixely = convertCoords(block_x, block_y)\n pg.draw.rect(display_surf, colors[color], (pixelx + 1, pixely + 1, block - 1, block - 1), 0, 3)\n pg.draw.rect(display_surf, lightcolors[color], (pixelx + 1, pixely + 1, block - 4, block - 4), 0, 3)\n pg.draw.circle(display_surf, colors[color], (pixelx + block / 2, pixely + block / 2), 5)\n \ndef gamecup(cup):\n # граница игрового поля-стакана\n pg.draw.rect(display_surf, brd_color, (side_margin - 4, top_margin - 4, (cup_w * block) + 8, (cup_h * block) + 8), 5)\n\n # фон игрового поля\n pg.draw.rect(display_surf, bg_color, (side_margin, top_margin, block * cup_w, block * cup_h))\n for x in range(cup_w):\n for y in range(cup_h):\n drawBlock(x, y, cup[x][y])\n\ndef drawTitle():\n titleSurf = big_font.render('Тетрис Lite', True, title_color)\n titleRect = titleSurf.get_rect()\n titleRect.topleft = (window_w - 425, 30)\n display_surf.blit(titleSurf, titleRect)\n \n\ndef drawInfo(points, level):\n\n pointsSurf = basic_font.render(f'Баллы: {points}', True, txt_color)\n pointsRect = pointsSurf.get_rect()\n pointsRect.topleft = (window_w - 550, 180)\n display_surf.blit(pointsSurf, pointsRect)\n\n levelSurf = basic_font.render(f'Уровень: {level}', True, txt_color)\n levelRect = levelSurf.get_rect()\n levelRect.topleft = (window_w - 550, 250)\n display_surf.blit(levelSurf, levelRect)\n\n pausebSurf = basic_font.render('Пауза: пробел', True, info_color)\n pausebRect = pausebSurf.get_rect()\n pausebRect.topleft = (window_w - 550, 420)\n display_surf.blit(pausebSurf, pausebRect)\n \n escbSurf = basic_font.render('Выход: Esc', True, info_color)\n escbRect = escbSurf.get_rect()\n escbRect.topleft = (window_w - 550, 450)\n display_surf.blit(escbSurf, escbRect)\n\ndef drawFig(fig, pixelx=None, pixely=None):\n figToDraw = figures[fig['shape']][fig['rotation']]\n if pixelx == None and pixely == None: \n pixelx, pixely = convertCoords(fig['x'], fig['y'])\n\n #отрисовка элементов фигур\n for x in range(fig_w):\n for y in range(fig_h):\n if figToDraw[y][x] != empty:\n drawBlock(None, None, fig['color'], pixelx + (x * block), pixely + (y * block))\n\n\ndef drawnextFig(fig): # превью следующей фигуры\n nextSurf = basic_font.render('Следующая:', True, txt_color)\n nextRect = nextSurf.get_rect()\n nextRect.topleft = (window_w - 150, 180)\n display_surf.blit(nextSurf, nextRect)\n drawFig(fig, pixelx=window_w-150, pixely=230)\n\n\nif __name__ == '__main__':\n main()","repo_name":"natkaida/tetris","sub_path":"tetris_lite.py","file_name":"tetris_lite.py","file_ext":"py","file_size_in_byte":15506,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"37038543480","text":"# This Python file uses the following encoding: utf-8\r\nimport sys, datetime, random, os, pickle\r\n#from PySide6.QtWidgets import QApplication, QWidget, QMainWindow, QLabel, QPushButton, QLineEdit, QTextEdit, QComboBox, QGroupBox, QMenuBar, QMenu, QDialog, QListWidget, QMessageBox\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QOpenGLWidget, QLabel, QPushButton, QLineEdit, QTextEdit, QComboBox, QGroupBox, QMenuBar, QMenu, QDialog, QListWidget, QMessageBox\r\nfrom PyQt5.QtGui import QPalette, QColor, QBrush, QPen, QFont, QPainter\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtOpenGL import QGLWidget\r\n#from PySide6.QtGui import QPalette, QColor, QBrush, QPen, QFont, QPainter, QOpenGLFunctions\r\n#from PySide6.QtCore import Qt\r\n#from PySide6.QtOpenGL import QGLWidget\r\nfrom chess import Chess\r\n\r\nclass GLWidget(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWindowTitle(\"Moj Draw\")\r\n self.setGeometry(450, 200, 450, 450)\r\n\r\n def paintEvent(self, event):\r\n qp = QPainter(self)\r\n qp.setViewport(self.rect())\r\n qp.drawRect(0, 0, 200, 200)\r\n qp.setBrush(QBrush(Qt.blue, Qt.VerPattern))\r\n qp.setPen(QPen(Qt.yellow, 5, Qt.SolidLine))\r\n qp.drawEllipse(25, 25, 250, 250)\r\n qp.drawEllipse(25, 25, 250, 25)\r\n qp.drawEllipse(25, 25, 25, 250)\r\n print(self.rect())\r\n\r\n def initializeGL(self):\r\n pass\r\n\r\n def resizeGL(self, w, h):\r\n #viewport = self.rect()\r\n #w = viewport.width()\r\n #h = viewport.height()\r\n #QOpenGLFunctions.glViewport(0, 0, w, h)\r\n #QOpenGLFunctions.glMatrixMode(QOpenGLFunctions.GL_PROJECTION)\r\n #QOpenGLFunctions.glLoadIdentity()\r\n #QOpenGLFunctions.glMatrixMode(QOpenGLFunctions.GL_MODELVIEW)\r\n #QOpenGLFunctions.glLoadIdentity()\r\n pass\r\n\r\n def paintGL(self):\r\n #QOpenGLFunctions.glClear(QOpenGLFunctions.GL_COLOR_BUFFER_BIT|QOpenGLFunctions.GL_DEPTH_BUFFER_BIT)\r\n #QOpenGLFunctions.glColor3f(1.0, 0.0, 0.0)\r\n #QOpenGLFunctions.glBegin(QOpenGLFunctions.GL_QUADS)\r\n #QOpenGLFunctions.glVertex3f(-0.5, -0.5)\r\n #QOpenGLFunctions.glVertex3f(0.5, -0.5)\r\n #QOpenGLFunctions.glVertex3f(-0.5, 0.5)\r\n #QOpenGLFunctions.glVertex3f(0.5, 0.5)\r\n #QOpenGLFunctions.glEnd()\r\n #QOpenGLFunctions.glFlush()\r\n pass\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self, folder):\r\n super().__init__()\r\n self.package = list()\r\n files = self.parser(folder)\r\n keys = list(files.keys())\r\n self.menu(self, keys, files)\r\n\r\n QLabel(\"What is Your name?\", self).setGeometry(50, 50, 100, 25)\r\n self.name = QLineEdit(\"lorem ipsum\", self)\r\n self.name.setGeometry(150, 50, 100, 25)\r\n\r\n self.button = QPushButton(\"Let's welcome\", self)\r\n self.button.setGeometry(50, 100, 100, 25)\r\n self.button.clicked.connect(lambda: self.func(self.name.text()))\r\n\r\n self.output = QTextEdit(self)\r\n self.output.setGeometry(150, 100, 300, 25)\r\n\r\n R = random.Random()\r\n QLabel(f\"Кидок кубика - {random.Random.randint(R, 1, 20)}\", self).setGeometry(50, 150, 300, 25)\r\n print(f\"Кидок кубика - {random.Random.randint(R, 1, 20)}\")\r\n\r\n D = datetime.datetime.date(datetime.datetime.today()).weekday() + 1\r\n QLabel(f\"Поточний день тижня - {D}\", self).setGeometry(50, 200, 300, 25)\r\n print(\"Поточний день тижня - \", D)\r\n\r\n QGroupBox(f\"Parsing of the folder {folder}\", self).setGeometry(10, 220, 400, 650)\r\n for i in range(len(keys)):\r\n QLabel(keys[i], self).setGeometry(50, 250+20*i, 300, 25)\r\n type_files_1 = QComboBox(self)\r\n type_files_1.setGeometry(100, 250+20*i, 300, 25)\r\n type_files_1.addItems(files[keys[i]])\r\n\r\n def func(self, text):\r\n print(f\"Hello dear, {text} !!!\")\r\n self.output.setText(f\"Hello dear, {text} !!!\")\r\n\r\n def parser(self, folder):\r\n cortImage = (\".jpeg\", \".png\", \".jpg\", \".svg\")\r\n cortVideo = (\".avi\", \".mp4\", \".mov\", \".mpg\")\r\n cortText = (\".doc\", \".ini\", \".txt\", \".odt\")\r\n cortMusic = (\".mp3\", \".ogg\", \".wav\", \".amr\")\r\n cortVarious = (\".bak\", \".dmp\", \".log\", \".tga\")\r\n result = {\"folder\": [], \"other\": []}\r\n result.update({j: [] for j in cortImage})\r\n result.update({j: [] for j in cortVideo})\r\n result.update({j: [] for j in cortText})\r\n result.update({j: [] for j in cortMusic})\r\n result.update({j: [] for j in cortVarious})\r\n\r\n for path, folder, file in os.walk(folder):\r\n for i in range(len(folder)):\r\n result[\"folder\"] += (lambda temp: (temp.append(f\"{path}\\\\{folder[i]}\") == None and temp))([])\r\n for i in range(len(file)):\r\n for key in list(result.keys()):\r\n if key in file[i]:\r\n result[key] += (lambda temp: (temp.append(f\"{path}\\\\{file[i]}\") == None and temp))([])\r\n if not set(result.keys()).isdisjoint(set(file[i].split(\".\"))):\r\n result[\"other\"] += (lambda temp: (temp.append(f\"{path}\\\\{file[i]}\") == None and temp))([])\r\n return result\r\n\r\n def menu(self, oldwindow, keys, files):\r\n menubar = QMenuBar(oldwindow)\r\n menubar.setGeometry(0, 0, 500, 25)\r\n menu1 = QMenu(\"Window #1\", self)\r\n menu1.addAction(\"Action #1\")\r\n menu1.addAction(\"Action #2\")\r\n menu1.addSeparator()\r\n menu1.addAction(\"Save package\", lambda: self.save_package(self))\r\n menu1.addAction(\"Load package\", lambda: self.load_package(self))\r\n menubar.addMenu(menu1)\r\n menu2 = QMenu(\"Window #2\", self)\r\n menu2.addAction(\"Action #1\")\r\n menu2.addAction(\"Action #2\")\r\n menu2.addSeparator()\r\n menu2.addAction(\"Action #3\")\r\n menubar.addMenu(menu2)\r\n menu3 = QMenu(\"Painting\", self)\r\n menu3.addAction(\"QOpenGLWidget\", lambda: self.f_paint())\r\n menu3.addAction(\"QChessWidget\", lambda: self.f_chess())\r\n menubar.addMenu(menu3)\r\n menu4 = QMenu(\"Parsing folder\", self)\r\n menu4.addAction(keys[0], lambda: self.f1(oldwindow, keys, files))\r\n menu4.addAction(keys[1], lambda: self.f2(oldwindow, keys, files))\r\n menu4.addAction(keys[2], lambda: self.f3(oldwindow, keys, files))\r\n menu4.addAction(keys[3], lambda: self.f4(oldwindow, keys, files))\r\n menu4.addAction(keys[4], lambda: self.f5(oldwindow, keys, files))\r\n menu4.addAction(keys[5], lambda: self.f6(oldwindow, keys, files))\r\n menu4.addAction(keys[6], lambda: self.f7(oldwindow, keys, files))\r\n menu4.addAction(keys[7], lambda: self.f8(oldwindow, keys, files))\r\n menu4.addAction(keys[8], lambda: self.f9(oldwindow, keys, files))\r\n menubar.addMenu(menu4)\r\n menu5 = QMenu(\"Help\", self)\r\n menu5.addAction(\"About me\", lambda: self.f_about(oldwindow))\r\n menubar.addMenu(menu5)\r\n\r\n def save_package(self, oldwindow):\r\n with open(\"Woohoo_Moodlets.package\", \"wb\") as file1:\r\n pickle.dump(self.package, file1)\r\n dialog = QMessageBox(oldwindow)\r\n dialog.setWindowTitle(\"The inbox of the package been saved\")\r\n dialog.resize(250, 250)\r\n dialog.setModal(True)\r\n dialog.setText(\"The inbox of the package been saved\")\r\n dialog.exec_()\r\n\r\n def load_package(self, oldwindow):\r\n with open(\"Woohoo_Moodlets.package\", \"rb\") as file1:\r\n self.package = pickle.load(file1)\r\n print(\"The inbox of the package\")\r\n print(self.package)\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"The inbox of the package\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(\"The inbox of the package\", dialog)\r\n #label.setGeometry(200, 0, 100, 50)\r\n #label.setFont(QFont(\"Times\", 20))\r\n #listwidget = QListWidget(dialog)\r\n #listwidget.setGeometry(50, 50, 400, 400)\r\n #listwidget.addItems(self.package)\r\n dialog.exec_()\r\n\r\n def f_paint(self):\r\n global paint_window\r\n paint_window = GLWidget()\r\n paint_window.show()\r\n\r\n def f_chess(self):\r\n global chess_window\r\n chess_window = Chess()\r\n chess_window.show()\r\n\r\n def f1(self, oldwindow, keys, files):\r\n print(\"The list of the folders by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[0], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[0]])\r\n dialog.exec_()\r\n\r\n def f2(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[1]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[1], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[1]])\r\n dialog.exec_()\r\n\r\n def f3(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[2]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[2], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[2]])\r\n dialog.exec_()\r\n\r\n def f4(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[3]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[3], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[3]])\r\n dialog.exec_()\r\n\r\n def f5(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[4]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[4], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[4]])\r\n dialog.exec_()\r\n\r\n def f6(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[5]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[5], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[5]])\r\n dialog.exec_()\r\n\r\n def f7(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[6]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[6], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[6]])\r\n dialog.exec_()\r\n\r\n def f8(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[7]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[7], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[7]])\r\n dialog.exec_()\r\n\r\n def f9(self, oldwindow, keys, files):\r\n print(f\"The list of the files ({keys[8]}) by primery folder\")\r\n dialog = QDialog(oldwindow)\r\n dialog.setWindowTitle(\"Moj Dialog\")\r\n dialog.resize(500, 500)\r\n dialog.setModal(True)\r\n label = QLabel(keys[8], dialog)\r\n label.setGeometry(200, 0, 100, 50)\r\n label.setFont(QFont(\"Times\", 20))\r\n listwidget = QListWidget(dialog)\r\n listwidget.setGeometry(50, 50, 400, 400)\r\n listwidget.addItems(files[keys[8]])\r\n dialog.exec_()\r\n\r\n def f_about(self, oldwindow):\r\n dialog = QMessageBox(oldwindow)\r\n dialog.setWindowTitle(\"About me\")\r\n dialog.setModal(True)\r\n dialog.setText(\"The program by Teosoph Geliebter\")\r\n dialog.exec_()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n folder = \"C:\\\\Users\\\\Teosoph\\\\Documents\\\\My Games\"\r\n app = QApplication([])\r\n window = MainWindow(folder)\r\n window.setWindowTitle(\"Moja Applikacja\")\r\n window.setFixedSize(1000, 1000)\r\n window.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"sergiikyzyma/teo","sub_path":"widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":13861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42031073107","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef euler_int(conditions, dt, t_array, plot=False):\n # constants and initial conditions\n m, k, x, v = conditions\n\n # initialise empty lists to record trajectories\n x_list = []\n v_list = []\n\n # Euler integration\n for t in t_array:\n\n # append current state to trajectories\n x_list.append(x)\n v_list.append(v)\n\n # calculate new position and velocity\n a = -k * x / m\n x = x + dt * v\n v = v + dt * a\n\n # convert trajectory lists into arrays, so they can be sliced (useful for Assignment 2)\n x_array = np.array(x_list)\n v_array = np.array(v_list)\n\n if plot:\n # plot the position-time graph\n plt.figure(1)\n plt.clf()\n plt.xlabel('time (s)')\n plt.grid()\n plt.plot(t_array, x_array, label='x (m)')\n plt.plot(t_array, v_array, label='v (m/s)')\n plt.legend()\n plt.show()\n \n return x_array, v_array\n\ndef verlet_int(conditions, dt, t_array, plot=False):\n # constants and initial conditions\n m, k, x, v = conditions\n\n # initialise lists to record trajectories\n x_list = [x, x + dt*v]\n v_list = [v]\n\n # Verlet integration\n # will result in x_list that has an extra entry of t = t_max+1\n for t in range(len(t_array)-1):\n\n # note: x_list[-1] is current position\n # calculate new position and current velocity\n a = -k * x_list[-1] / m # current acceleration\n x = 2 * x_list[-1] - x_list[-2] + (dt**2) * a\n v = (x - x_list[-2]) / (2 * dt)\n\n # append new state to trajectories\n x_list.append(x)\n v_list.append(v)\n\n # convert trajectory lists into arrays, so they can be sliced (useful for Assignment 2)\n x_array = np.array(x_list[:-1])\n v_array = np.array(v_list)\n\n if plot:\n # plot the position-time graph\n plt.figure(1)\n plt.clf()\n plt.xlabel('time (s)')\n plt.grid()\n plt.plot(t_array, x_array, label='x (m)')\n plt.plot(t_array, v_array, label='v (m/s)')\n plt.legend()\n plt.show()\n\n return x_array, v_array\n\n# Testing\n\n# mass, spring constant, initial position and velocity\nm = 1\nk = 1\nx = 0\nv = 1\n\nconditions = (m, k, x, v)\n\n# simulation time, timestep and time\nt_max = 100\ndt = 0.1\nt_array = np.arange(0, t_max, dt)\n\nif __name__ == \"__main__\":\n euler_int(conditions, dt, t_array, plot=True)\n verlet_int(conditions, dt, t_array, plot=True)","repo_name":"Nicholas-Ho/Mars-Lander-Simulator","sub_path":"Assignment 1/spring.py","file_name":"spring.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"15374830071","text":"from libkludge.type_info import TypeInfo\nfrom libkludge.selector import Selector\nfrom libkludge.cpp_type_expr_parser import dir_qual\nfrom libkludge.dir_qual_type_info import DirQualTypeInfo\nfrom libkludge.cpp_type_expr_parser import *\nfrom libkludge.generate.builtin_decl import BuiltinDecl\n\nbuiltin_kl_type_names = [\n 'Boolean',\n 'SInt8',\n 'UInt8',\n 'SInt16',\n 'UInt16',\n 'SInt32',\n 'UInt32',\n 'SInt64',\n 'UInt64',\n 'Float32',\n 'Float64',\n ]\n\ndef build_edk_name(kl_type_name):\n if kl_type_name in builtin_kl_type_names:\n return \"Fabric::EDK::KL::\" + kl_type_name\n else:\n return \"Fabric_EDK_KL_\" + kl_type_name\n\nclass InPlaceTypeInfo(TypeInfo):\n\n def __init__(\n self,\n jinjenv,\n kl_type_name,\n kl_type_name_for_derivatives,\n cpp_type_expr,\n extends,\n record,\n is_simple,\n forbid_copy,\n ):\n TypeInfo.__init__(\n self,\n jinjenv,\n kl_name_base=kl_type_name,\n kl_name_base_for_derivatives=kl_type_name_for_derivatives,\n edk_name=build_edk_name(kl_type_name),\n lib_expr=cpp_type_expr,\n extends=extends,\n record=record,\n is_simple=is_simple,\n forbid_copy=forbid_copy,\n )\n\n def build_codec_lookup_rules(self):\n tds = TypeInfo.build_codec_lookup_rules(self)\n if self.is_simple:\n tds[\"conv\"][\"*\"] = \"types/builtin/in_place/simple/conv\"\n tds[\"result\"][\"*\"] = \"protocols/result/builtin/direct\"\n tds[\"repr\"][\"*\"] = \"protocols/repr/builtin/in_place\"\n tds[\"repr\"][\"new_begin\"] = \"types/builtin/in_place/simple/repr\"\n else:\n tds[\"conv\"][\"*\"] = \"protocols/conv/builtin/none\"\n tds[\"result\"][\"*\"] = \"protocols/result/builtin/indirect\"\n tds[\"result\"][\"decl_and_assign_lib_begin\"] = \"types/builtin/in_place/complex/result\"\n tds[\"result\"][\"decl_and_assign_lib_end\"] = \"types/builtin/in_place/complex/result\"\n tds[\"result\"][\"indirect_lib_to_edk\"] = \"types/builtin/in_place/complex/result\"\n tds[\"repr\"][\"*\"] = \"protocols/repr/builtin/in_place\"\n return tds\n\nclass InPlaceBuiltinDecl(BuiltinDecl):\n\n def __init__(self, ext, is_simple, type_info):\n BuiltinDecl.__init__(\n self,\n ext.root_namespace,\n desc=\"InPlace %s\" % (type_info),\n template_path=\"types/builtin/in_place/in_place\",\n test_name=\"InPlace_%s\" % (type_info.kl.name),\n )\n self.is_simple = is_simple\n self.type_info = type_info\n\n def render_method_impls(self, lang):\n result = ''\n if self.type_info.record:\n result += self.type_info.record.render('impls', lang, {\n 'type_info': self.type_info,\n 'is_direct': True,\n 'is_const_ptr': False,\n 'is_mutable_ptr': False,\n 'is_const_ref': False,\n 'is_mutable_ref': False,\n 'allow_static_methods': True,\n 'allow_mutable_methods': True,\n 'allow_const_methods': True,\n 'is_ptr': False,\n })\n return result\n\nclass InPlaceSpec(object):\n\n def __init__(\n self,\n kl_type_name,\n cpp_type_expr,\n extends,\n record,\n is_simple=False,\n kl_type_name_for_derivatives=None,\n forbid_copy=False,\n ):\n self.kl_type_name = kl_type_name\n if not kl_type_name_for_derivatives:\n kl_type_name_for_derivatives = kl_type_name\n self.kl_type_name_for_derivatives = kl_type_name_for_derivatives\n self.cpp_type_expr = cpp_type_expr\n self.is_simple = is_simple\n self.extends = extends\n self.record = record\n self.forbid_copy = forbid_copy\n\nclass InPlaceSelector(Selector):\n\n def __init__(self, ext):\n Selector.__init__(self, ext)\n\n boolean_spec = InPlaceSpec(\"Boolean\", Bool(), None, None, True)\n char_spec = InPlaceSpec(\"CxxChar\", Char(), None, None, True)\n sint8_spec = InPlaceSpec(\"SInt8\", SimpleNamed(\"int8_t\"), None, None, True)\n uint8_spec = InPlaceSpec(\"UInt8\", SimpleNamed(\"uint8_t\"), None, None, True)\n sint16_spec = InPlaceSpec(\"SInt16\", SimpleNamed(\"int16_t\"), None, None, True)\n uint16_spec = InPlaceSpec(\"UInt16\", SimpleNamed(\"uint16_t\"), None, None, True)\n sint32_spec = InPlaceSpec(\"SInt32\", SimpleNamed(\"int32_t\"), None, None, True)\n uint32_spec = InPlaceSpec(\"UInt32\", SimpleNamed(\"uint32_t\"), None, None, True)\n sint64_spec = InPlaceSpec(\"SInt64\", SimpleNamed(\"int64_t\"), None, None, True)\n uint64_spec = InPlaceSpec(\"UInt64\", SimpleNamed(\"uint64_t\"), None, None, True)\n float32_spec = InPlaceSpec(\"Float32\", Float(), None, None, True)\n float64_spec = InPlaceSpec(\"Float64\", Double(), None, None, True)\n long_spec = InPlaceSpec(\"SInt64\", Long(), None, None, True)\n ulong_spec = InPlaceSpec(\"UInt64\", Unsigned(Long()), None, None, True)\n\n self.cpp_type_expr_to_spec = {\n Bool(): boolean_spec,\n Char(): char_spec,\n SimpleNamed(\"int8_t\"): sint8_spec,\n Unsigned(Char()): uint8_spec,\n SimpleNamed(\"uint8_t\"): uint8_spec,\n Short(): sint16_spec,\n SimpleNamed(\"int16_t\"): sint16_spec,\n Unsigned(Short()): uint16_spec,\n SimpleNamed(\"uint16_t\"): uint16_spec,\n Int(): sint32_spec,\n SimpleNamed(\"int32_t\"): sint32_spec,\n Unsigned(Int()): uint32_spec,\n SimpleNamed(\"uint32_t\"): uint32_spec,\n LongLong(): sint64_spec,\n SimpleNamed(\"int64_t\"): sint64_spec,\n Unsigned(LongLong()): uint64_spec,\n SimpleNamed(\"uint64_t\"): uint64_spec,\n SimpleNamed(\"size_t\"): uint64_spec,\n SimpleNamed(\"ptrdiff_t\"): uint64_spec,\n SimpleNamed(\"intptr_t\"): uint64_spec,\n Float(): float32_spec,\n Double(): float64_spec,\n #######################################################################\n # Warning: Linux + OS X ONLY\n # On Windows, these are 64-bit. Not sure what to do about this.\n Long(): long_spec, \n Unsigned(Long()): ulong_spec,\n #######################################################################\n }\n\n self.type_info_cache = {}\n\n def get_desc(self):\n return \"InPlace\"\n\n def register(\n self,\n kl_type_name,\n kl_type_name_for_derivatives,\n cpp_type_expr,\n extends,\n record,\n forbid_copy=False,\n ):\n self.cpp_type_expr_to_spec[cpp_type_expr] = InPlaceSpec(\n kl_type_name,\n cpp_type_expr,\n extends,\n record,\n kl_type_name_for_derivatives=kl_type_name_for_derivatives,\n forbid_copy=forbid_copy,\n )\n \n def maybe_create_dqti(self, type_mgr, cpp_type_expr):\n undq_cpp_type_expr, dq = cpp_type_expr.get_undq()\n if dq.is_direct:\n spec = self.cpp_type_expr_to_spec.get(undq_cpp_type_expr)\n if spec:\n kl_type_name = spec.kl_type_name\n kl_type_name_for_derivatives = spec.kl_type_name_for_derivatives\n undq_cpp_type_expr = spec.cpp_type_expr\n is_simple = spec.is_simple\n extends = spec.extends\n record = spec.record\n forbid_copy = spec.forbid_copy\n\n type_info_cache_key = kl_type_name\n type_info = self.type_info_cache.get(type_info_cache_key)\n if not type_info:\n type_info = InPlaceTypeInfo(\n self.jinjenv,\n kl_type_name,\n kl_type_name_for_derivatives,\n undq_cpp_type_expr,\n extends=extends,\n record=record,\n is_simple=is_simple,\n forbid_copy=forbid_copy,\n )\n self.type_info_cache.setdefault(type_info_cache_key, type_info)\n self.ext.add_decl(InPlaceBuiltinDecl(self.ext, is_simple, type_info))\n\n return DirQualTypeInfo(dq, type_info)\n","repo_name":"zhangxiao6776/kludge","sub_path":"libkludge/types/in_place/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13130816006","text":"from decouple import config\n\nfrom .base import *\n\n\nDEBUG = config(\"DEBUG\", cast=bool, default=False)\n\nALLOWED_HOSTS = [\"127.0.0.1\"]\n\n# Database\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR / \"db.sqlite3\",\n }\n}\n\n# Installed apps definition for development environment\n\n# Adding and configuring Drf_spectacular\n\nINSTALLED_APPS.append(\"drf_spectacular\")\nREST_FRAMEWORK.update({\"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\"})\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"Project API\",\n \"DESCRIPTION\": \"API Schema for the project\",\n \"VERSION\": \"0.5.0\",\n \"SERVE_INCLUDE_SCHEMA\": False,\n \"COMPONENT_SPLIT_REQUEST\": True,\n}\n","repo_name":"atfhshm/django-auth","sub_path":"core/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72097339923","text":"import json\nimport os\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient, APITestCase\n\nfrom friends.models import Status, User\nfrom friends.serializers import CustomUserSerializer\nfrom test_task_VK.settings import BASE_DIR\n\n\nclass UsersTests(APITestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n with open(\n os.path.join(BASE_DIR, 'data/status.json'),\n encoding='utf-8'\n ) as data:\n statuses = json.loads(data.read())\n for status_item in statuses:\n Status.objects.get_or_create(**status_item)\n data = [{\n 'username': 'name1',\n 'email': 'name1@mail.ru',\n 'password': 'password12345678'\n }, {\n 'username': 'name2',\n 'email': 'name2@mail.ru',\n 'password': 'password12345678'\n }]\n for item in data:\n serializer = CustomUserSerializer(data=item)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n user_1 = User.objects.get(username='name1')\n user_2 = User.objects.get(username='name2')\n cls.auth_client_1 = APIClient()\n cls.auth_client_1.force_authenticate(user=user_1)\n cls.auth_client_2 = APIClient()\n cls.auth_client_2.force_authenticate(user=user_2)\n cls.client = APIClient()\n\n def test_create_user(self):\n url = '/api/users/'\n data = {\n 'username': 'name3',\n 'email': 'name3@mail.ru',\n 'password': 'password12345678'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 3)\n self.assertEqual(User.objects.get(id=2).username, 'name2')\n data = {\n 'username': 'name3',\n 'email': 'name3@mail.ru',\n 'password': 'password12345678'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_get_users(self):\n urls = ['/api/users/', '/api/users/2/', '/api/users/me/']\n for url in urls:\n response = self.auth_client_1.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n if url == '/api/users/':\n self.assertEqual(len(response.data), 2)\n elif url == '/api/users/2/':\n self.assertEqual(response.data.get('username'), 'name2')\n else:\n self.assertEqual(response.data.get('username'), 'name1')\n response = self.client.get(url, format='json')\n self.assertEqual(\n response.status_code,\n status.HTTP_401_UNAUTHORIZED\n )\n\n def test_friend_requests(self):\n urls = [\n '/api/friends/2/', '/api/users/i_follow/',\n '/api/users/my_followers/', '/api/users/my_friends/'\n ]\n for url in urls:\n if url == '/api/friends/2/':\n response = self.client.post(url, format='json')\n else:\n response = self.client.get(url, format='json')\n self.assertEqual(\n response.status_code,\n status.HTTP_401_UNAUTHORIZED\n )\n response = self.auth_client_1.post('/api/friends/2/', format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.auth_client_1.post('/api/friends/2/', format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n response = self.auth_client_1.post('/api/friends/1/', format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n response = self.auth_client_1.get(\n '/api/users/i_follow/',\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0].get('username'), 'name2')\n self.assertEqual(response.data[0].get('status'), 'I follow')\n response = self.auth_client_2.get(\n '/api/users/my_followers/',\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0].get('username'), 'name1')\n self.assertEqual(response.data[0].get('status'), 'My follower')\n self.auth_client_2.post('/api/friends/1/', format='json')\n response = self.auth_client_2.get(\n '/api/users/my_followers/',\n format='json'\n )\n self.assertEqual(len(response.data), 0)\n response = self.auth_client_2.get(\n '/api/users/my_friends/',\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0].get('username'), 'name1')\n self.assertEqual(response.data[0].get('status'), 'friend')\n response = self.auth_client_1.get(\n '/api/users/my_friends/',\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[0].get('username'), 'name2')\n self.assertEqual(response.data[0].get('status'), 'friend')\n self.auth_client_2.delete('/api/friends/1/', format='json')\n response = self.auth_client_2.get(\n '/api/users/my_followers/',\n format='json'\n )\n self.assertEqual(len(response.data), 1)\n","repo_name":"AnastasiaNB/test_task_VK","sub_path":"test_task_VK/friends/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17235578388","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Speciallan\n\nimport os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\n\ndef xml_to_csv(path):\n\n xml_list = []\n for xml_file in glob.glob(path + '/*.xml'):\n print(xml_file)\n tree = ET.parse(xml_file)\n root = tree.getroot()\n print(root.find('filename').text)\n\n for member in root.findall('object'):\n value = (root.find('filename').text, # filenamend('size').find('height').text)\n int(root.find('size').find('width').text), # width\n int(root.find('size').find('height').text), # height\n int(member.find('name').text), # class\n int(member.find('bndbox').find('xmin').text), # xmin\n int(member.find('bndbox').find('ymin').text), # ymin\n int(member.find('bndbox').find('xmax').text), # xmax\n int(member.find('bndbox').find('ymax').text), # ymax\n )\n # print(value)\n xml_list.append(value)\n\n column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n return xml_df\n\n\ndef main():\n\n data_dir = '../data/n17_cooling_bed_loading_ic/'\n for directory in ['train', 'test', 'validation']:\n xml_path = os.path.join(os.getcwd(), data_dir + 'annotations/{}'.format(directory))\n os.makedirs(xml_path) if not os.path.exists(xml_path) else None\n\n xml_df = xml_to_csv(xml_path)\n # xml_df.to_csv('whsyxt.csv', index=None) \n xml_df.to_csv(os.path.join(os.getcwd(), data_dir + 'data/{}_labels.csv'.format(directory)), index=None)\n print('Successfully converted xml to csv.')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jasper-cell/cisdi_proj_utils","sub_path":"xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31706129420","text":"def fac(n):\n if n == 0 or n == 1:\n return 1\n return n * fac(n-1)\n\ndef solve(A, N, P):\n res = A**(fac(N))\n return res%P\n\nT = int(input())\n\nfor t in range(T):\n A,N,P = [int(_) for _ in input().split()]\n print(\"Case #{}: {}\".format(t+1, solve(A,N,P)))\n","repo_name":"callistusystan/algorithmsPractice","sub_path":"Google/2017/Kickstart/Round G/A/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"42700108473","text":"class FTC():\r\n def __init__(self):\r\n \r\n import os\r\n\r\n try:\r\n from nltk.stem.lancaster import LancasterStemmer\r\n import nltk\r\n except:\r\n os.system(\"pip3 install nltk\")\r\n from nltk.stem.lancaster import LancasterStemmer\r\n import nltk\r\n\r\n try:\r\n from tensorflow.python.framework import ops\r\n import tensorflow\r\n except:\r\n os.system(\"pip3 install tensorflow\")\r\n from tensorflow.python.framework import ops\r\n import tensorflow\r\n\r\n try:\r\n import tflearn\r\n except:\r\n os.system(\"pip3 install tflearn\")\r\n import tflearn\r\n\r\n try:\r\n import pickle\r\n except:\r\n os.system(\"pip3 install pickle\")\r\n import pickle\r\n\r\n try:\r\n import numpy\r\n except:\r\n os.system(\"pip3 install numpy\")\r\n import numpy\r\n\r\n self.RunningPath = os.getcwd()\r\n\r\n self.stemmer = LancasterStemmer()\r\n\r\n self.data = {\"intents\":[]}\r\n\r\n def CreateGroup(self, Thetag, Thepatterns):\r\n\r\n self.data[\"intents\"].append({\r\n \"tag\": Thetag, \r\n \"patterns\": Thepatterns, \r\n })\r\n \r\n def ShowClassification(self):\r\n print(\"{\\\"intents\\\": [\")\r\n for i in range(len(self.data[\"intents\"])):\r\n print()\r\n print(\"\\t{\"+\"\\\"tag\\\": {The_tag},\\n\\t\\t\\\"patterns\\\": {The_pat}\".format(The_tag = self.data[\"intents\"][i][\"tag\"], The_pat = self.data[\"intents\"][i][\"patterns\"]))\r\n print(\"\\n\\t]\")\r\n print(\"\\n}\")\r\n\r\n def StartUp(self, TrainModel=False, epoch=1000, batch=8):\r\n\r\n self.ReFlashModel = False\r\n self.TrainModel = TrainModel\r\n \r\n try:\r\n if self.TrainModel == False:\r\n with open(self.RunningPath+r\"\\data.pickle\", \"rb\") as f:\r\n self.words, self.labels, self.training, self.output = pickle.load(f)\r\n self.checkList = []\r\n for self.checkingIntent in self.data[\"intents\"]:\r\n if self.checkingIntent[\"tag\"] not in self.checkList:\r\n self.checkList.append(self.checkingIntent[\"tag\"])\r\n if len(self.checkList) != len(self.labels):\r\n self.ReFlashModel = True\r\n raise BaseException()\r\n else:\r\n raise BaseException()\r\n except BaseException:\r\n\r\n self.words = []\r\n self.labels = []\r\n self.docs_x = []\r\n self.docs_y = []\r\n\r\n for self.intent in self.data[\"intents\"]:\r\n for self.pattern in self.intent[\"patterns\"]:\r\n self.wrds = nltk.word_tokenize(self.pattern)\r\n self.words.extend(self.wrds)\r\n self.docs_x.append(self.wrds)\r\n self.docs_y.append(self.intent[\"tag\"])\r\n\r\n if self.intent[\"tag\"] not in self.labels:\r\n self.labels.append(self.intent[\"tag\"])\r\n\r\n self.words = sorted(list(set([self.stemmer.stem(w.lower()) for w in self.words if w != \"?\"])))\r\n\r\n self.labels = sorted(self.labels)\r\n\r\n self.training = []\r\n self.output = []\r\n\r\n self.out_empty = [0 for _ in range(len(self.labels))]\r\n\r\n for self.x, self.doc in enumerate(self.docs_x):\r\n self.bag = []\r\n\r\n self.wrds = [self.stemmer.stem(w.lower()) for w in self.doc]\r\n\r\n for w in self.words:\r\n if w in self.wrds:\r\n self.bag.append(1)\r\n else:\r\n self.bag.append(0)\r\n\r\n self.output_row = self.out_empty[:]\r\n self.output_row[self.labels.index(self.docs_y[self.x])] = 1\r\n\r\n self.training.append(self.bag)\r\n self.output.append(self.output_row)\r\n\r\n\r\n self.training = numpy.array(self.training)\r\n self.output = numpy.array(self.output)\r\n\r\n with open(self.RunningPath+r\"\\data.pickle\", \"wb\") as f:\r\n pickle.dump((self.words, self.labels, self.training, self.output), f)\r\n\r\n if self.ReFlashModel == True:\r\n shutil.rmtree(os.getcwd()+r\"\\FTCmodels\", ignore_errors=True)\r\n\r\n ops.reset_default_graph()\r\n\r\n self.model = tflearn.DNN(\r\n tflearn.regression(\r\n tflearn.fully_connected(\r\n tflearn.fully_connected(\r\n tflearn.fully_connected(\r\n tflearn.input_data(shape=[None, len(self.training[0])]), \r\n 8), \r\n 8), \r\n len(self.output[0]), activation=\"softmax\"\r\n )\r\n )\r\n )\r\n\r\n self.epoch = epoch\r\n self.batch = batch\r\n \r\n if os.path.exists(self.RunningPath+r\"\\FTCmodels\") and self.TrainModel == False:\r\n self.model.load(self.RunningPath+r\"\\FTCmodels\\model.tflearn\")\r\n else:\r\n shutil.rmtree(os.getcwd()+r\"\\FTCmodels\", ignore_errors=True)\r\n os.mkdir(self.RunningPath+r\"\\FTCmodels\")\r\n self.model.fit(self.training, self.output, n_epoch=self.epoch, batch_size=self.batch, show_metric=True)\r\n self.model.save(self.RunningPath + r\"\\FTCmodels\\model.tflearn\")\r\n\r\n def bag_of_words(self, s, words):\r\n\r\n self.words = words\r\n self.s = s\r\n \r\n self.bag = [0 for _ in range(len(self.words))]\r\n self.s_words = [self.stemmer.stem(word.lower()) for word in nltk.word_tokenize(self.s)]\r\n\r\n for se in self.s_words:\r\n for t, w in enumerate(self.words):\r\n if w == se:\r\n self.bag[t] = 1\r\n \r\n return numpy.array(self.bag)\r\n\r\n\r\n def TestingAccuracy(self, AccuracyFilter=0):\r\n if type(AccuracyFilter) == float or type(AccuracyFilter) == int:\r\n if AccuracyFilter <= 1 and AccuracyFilter >=0:\r\n\r\n self.AccuracyFilter = AccuracyFilter\r\n \r\n print(\"Type QUIT to quit testing.\")\r\n \r\n while True:\r\n inp = input(\"Input: \")\r\n if inp.lower() == \"quit\":\r\n break\r\n\r\n self.results = self.model.predict([self.bag_of_words(inp, self.words)])\r\n self.results_index = numpy.argmax(self.results)\r\n self.tag = self.labels[self.results_index]\r\n\r\n if self.results[0][self.results_index] > self.AccuracyFilter:\r\n print(self.tag)\r\n else:\r\n print(\"--Prediction Accuracy lower then {}, (In GetResponse function will return \".format(self.AccuracyFilter))\r\n else:\r\n raise BaseException(\"Prediction-Accuracy-Filter needs to be in between 0~1\")\r\n else:\r\n raise BaseException(\"Prediction-Accuracy-Filter needs to be type \")\r\n\r\n def Predict(self, text, AccuracyFilter=0):\r\n if type(AccuracyFilter) == float or type(AccuracyFilter) == int:\r\n if AccuracyFilter <= 1 and AccuracyFilter >=0:\r\n\r\n self.AccuracyFilter = AccuracyFilter\r\n\r\n self.results = self.model.predict([self.bag_of_words(text, self.words)])\r\n self.results_index = numpy.argmax(self.results)\r\n self.tag = self.labels[self.results_index]\r\n\r\n if self.results[0][self.results_index] > self.AccuracyFilter:\r\n return self.tag\r\n else:\r\n return None\r\n else:\r\n raise BaseException(\"Prediction-Accuracy-Filter needs to be in between 0~1\")\r\n else:\r\n raise BaseException(\"Prediction-Accuracy-Filter needs to be type \")","repo_name":"LeeFuuChang/FastTextClassification","sub_path":"src/FastTextClassification/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20383943706","text":"from aiogram import types, Dispatcher\nfrom config import dp, bot\n\n\n@dp.message_handler()\nasync def other_commands(message: types.Message):\n await bot.send_message(message.from_user.id, \"Такой команды нет\")\n\n\ndef register_handlers_admin(dp: Dispatcher):\n dp.register_message_handler(other_commands)\n","repo_name":"SPAWN21043/Nicole","sub_path":"handlers/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6850338590","text":"from resnet import resnet18\r\nfrom simclr import Projection, simclr_loss, train_simclr\r\nfrom data import custom_data_loader\r\nimport torch\r\n\r\nbatch_size = 256\r\nin_channels = 3\r\nencoder = resnet18(in_channels = in_channels)\r\nin_dim = 512\r\nproj_dim = 128\r\ntemperature = 0.1\r\nlr = 0.001\r\nepochs = 100\r\n\r\n\r\nif torch.cuda.is_available():\r\n\tdevice = torch.device('cuda')\r\n\tprint(\"gpu detected for trainig\")\r\nelse :\r\n\tdevice = torch.device('cpu')\r\n\tprint(\"cpu used for training\")\r\n\r\nmodel = Projection(encoder, in_dim = in_dim, proj_dim = proj_dim)\r\nmodel = model.to(device)\r\noptimizer = torch.optim.SGD(model.parameters(), lr = lr)\r\ncriterion = simclr_loss(temperature = temperature, device = device)\r\n\r\ndata_loader = custom_data_loader(batch_size = batch_size)\r\ntrain_simclr(batch_size, data_loader, model, criterion, optimizer, device, epochs)\r\n\r\n\r\n\r\n\r\n","repo_name":"sarang7m/SimCLR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22806108395","text":"# -*- coding: utf-8 -*-\n\"\"\"\n[russian_roulette.py]\nRussian Roulette Plugin\n\n[Author]\nAngelo Giacco\n\n[About]\nIn the original russian roulette, you have a one sixth chance of committing\nsuicide. In this version, you have a one sixth chance of being kicked from\nthe channel. The bot will utilise the IRC command:\nKICK #channel nickname\nwhere #channel is the channel name and nicknames is the nickname of the user\n\nWARNING: BOT MUST BE CHANNEL OPERATOR!!!\n\n[Commands]\n>>> .russian_roulette\neither returns a string saying you survived or kicks you off the channel\n\"\"\"\nimport random\n\nclass Plugin:\n\n def __init__(self):\n pass\n\n def risk(self, incoming, methods, info):\n kill = True if random.random() < 0.2 else False #should the user be kicked\n if kill:\n name = info[\"prefix\"].split(\"!\")[0]\n channel = info[\"address\"]\n kill_command = \"KICK \"+ channel + \" \" + name + \" \\r\\n\"\n methods[\"send_raw\"](kill_command)\n #code to quit the channel\n return \"Suicide is always a risk when playing russian roulette... RIP...\"\n else:\n return \"You survived...\"\n\n def run(self, incoming, methods, info, bot_info):\n try:\n if info['command'] == 'PRIVMSG' and info['args'][1] == '.russian_roulette':\n methods['send'](info['address'], Plugin.risk(self, incoming, methods, info))\n except Exception as e:\n print('woops russian roulette plugin error ', e)\n","repo_name":"Abdur-rahmaanJ/honeybot","sub_path":"honeybot/plugins/russian_roulette.py","file_name":"russian_roulette.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"3"} +{"seq_id":"44840296518","text":"\r\n\r\n# if num % 7 == 0 print true \r\n# else Find the first 7-dimensional multiple of that number\r\n\r\nimport os \r\nos.system('cls')\r\n\r\n\r\nnum = int (input(\"Enter a number:\"))\r\ntemp = num \r\n\r\nif num % 7 == 0 :\r\n print (\"The number you entered is: \" + str(num) + \" and its multiple of 7 :D \")\r\n \r\n\r\nelse :\r\n num1 = num // 7\r\n num2 = (num1*7)+7\r\n \r\n print(\"The number you Enter is: \" + str(temp) +\"\\tIts not in multiples of 7 !! \" ,\"\\nI showed the nearest number that was a multiple of 7:\" , num2 )\r\n \r\n","repo_name":"faezeQafouri/python_assignments","sub_path":"assignment-2/multiples_of_7.py","file_name":"multiples_of_7.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21306040736","text":"from django.shortcuts import render\nfrom .models import QuestionsPython, Record, Solution, SolutionFace, SolutionBodyPosture, SolutionEyeContact\nimport random\nimport ast\n#FOR RECORDING\nimport speech_recognition as sr\nimport pyaudio\n\n# Specifically for FER\nfrom tensorflow.keras.models import load_model\nfrom time import sleep\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing import image\nimport cv2\nimport numpy as np\n# import base64\n\n#IMPORTS FOR BODY POSTURE\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\n\n#IMPORTS FOR eye_detection\nimport cv2 as cv\nimport mediapipe as mp\nimport time , math\nimport cipt_app.utils\nimport numpy as np\n\n# IMPORTS FOR EYE DETECTION********************************\nimport cv2 as cv\nimport mediapipe as mp\nimport time\nimport second_phase.utils\nimport math\nimport numpy as np\n\n#IMPORTS FOR BODY POSTURE DETECTION*************************\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\n\n#IMPORTS FOR FACE EMOTION DETECTION*************************\nfrom tensorflow.keras.models import load_model\nfrom time import sleep\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing import image\nimport cv2\nimport numpy as np\n\n#FOR RECORDING\nimport sounddevice\nfrom scipy.io.wavfile import write\nimport tkinter\nfrom tkinter import messagebox\n\n# imports for text processing\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport nltk\nimport pandas as pd\nimport string\n# Create your views here.\n\ndef tech_int_home(request):\n '''\n Here we inform user what is going to happen : Info page!!\n '''\n return render(request, 'phase2/tech_int_home.html')\n\ndef tech_int_select(request):\n '''\n Here candidate will select their topic of interest\n '''\n return render(request, 'phase2/tech_int_select.html')\n\ndef tech_int_python(request):\n '''\n Interview for Python\n '''\n ran_lst = [1,2,3,4,5,6]\n ran_int = random.choice(ran_lst)\n ques = QuestionsPython.objects.get(question_number=1)\n context = {\n 'question' : ques\n }\n return render(request, 'phase2/tech_int_python.html',context)\n\ndef tech_int_ds(request):\n '''\n Interview for Python\n '''\n ran_lst = [1,2,3,4,5,6]\n ran_int = random.choice(ran_lst)\n ques = QuestionsDS.objects.get(question_number=1)\n context = {\n 'question' : ques\n }\n return render(request, 'phase2/tech_int_python.html',context)\n\ndef tech_int_dbms(request):\n '''\n Interview for Python\n '''\n ran_lst = [1,2,3,4,5,6]\n ran_int = random.choice(ran_lst)\n ques = QuestionsDBMS.objects.get(question_number=1)\n context = {\n 'question' : ques\n }\n return render(request, 'phase2/tech_int_python.html',context)\n\ndef tech_int_os(request):\n '''\n Interview for Python\n '''\n ran_lst = [1,2,3,4,5,6]\n ran_int = random.choice(ran_lst)\n ques = QuestionsOS.objects.get(question_number=1)\n context = {\n 'question' : ques\n }\n return render(request, 'phase2/tech_int_python.html',context)\n\ndef tech_int_cn(request):\n '''\n Interview for Python\n '''\n ran_lst = [1,2,3,4,5,6]\n ran_int = random.choice(ran_lst)\n ques = QuestionsCN.objects.get(question_number=1)\n context = {\n 'question' : ques\n }\n return render(request, 'phase2/tech_int_python.html',context)\n\n# def ans_python(request):\n# return render(request, 'phase2/ans_python.html')\n\n\ndef record(request):\n #For recording the answers\n # VARIABLES FOR FER\n emotion_report = {'Angry':0, 'Disgust': 0, 'Fear':0, 'Happy':0, 'Neutral':0, 'Sad':0, 'Surprise':0}\n face_classifier = cv2.CascadeClassifier(r'second_phase/haarcascade_frontalface_default.xml')\n classifier =load_model(r'second_phase/model.h5')\n emotion_labels = ['Angry','Disgust','Fear','Happy','Neutral', 'Sad', 'Surprise']\n illegal_movements_count_face=0\n cap = cv2.VideoCapture(0)\n # VARIABLES FOR BODY POSTURE\n illegal_movements_count_body = 0\n movement = None\n # VARIABLES FOR EYE CONTACT\n frame_counter =0\n CEF_COUNTER =0\n TOTAL_BLINKS =0\n illegal_movements_count_eye = 0\n start_time = time.time()\n # constants\n CLOSED_EYES_FRAME =3\n FONTS =cv2.FONT_HERSHEY_COMPLEX\n # face bounder indices\n FACE_OVAL=[ 10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103,67, 109]\n # lips indices for Landmarks\n LIPS=[ 61, 146, 91, 181, 84, 17, 314, 405, 321, 375,291, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95,185, 40, 39, 37,0 ,267 ,269 ,270 ,409, 415, 310, 311, 312, 13, 82, 81, 42, 183, 78 ]\n LOWER_LIPS =[61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95]\n UPPER_LIPS=[ 185, 40, 39, 37,0 ,267 ,269 ,270 ,409, 415, 310, 311, 312, 13, 82, 81, 42, 183, 78]\n # Left eyes indices\n LEFT_EYE =[ 362, 382, 381, 380, 374, 373, 390, 249, 263, 466, 388, 387, 386, 385,384, 398 ]\n LEFT_EYEBROW =[ 336, 296, 334, 293, 300, 276, 283, 282, 295, 285 ]\n # right eyes indices\n RIGHT_EYE=[ 33, 7, 163, 144, 145, 153, 154, 155, 133, 173, 157, 158, 159, 160, 161 , 246 ]\n RIGHT_EYEBROW=[ 70, 63, 105, 66, 107, 55, 65, 52, 53, 46 ]\n map_face_mesh = mp.solutions.face_mesh\n #VARIAVLES FOR RECORDING\n count_text = ['x']\n\n\n # FUNCTIONS FOR BODY POSTURE\n def calculate_angle(a,b,c):\n a = np.array(a) # First\n b = np.array(b) # Mid\n c = np.array(c) # End\n\n radians = np.arctan2(c[1]-b[1], c[0]-b[0]) - np.arctan2(a[1]-b[1], a[0]-b[0])\n angle = np.abs(radians*180.0/np.pi)\n\n if angle >180.0:\n angle = 360-angle\n\n return angle\n # FUNCTIONS FOR EYE CONTACT\n def landmarksDetection(img, results, draw=False):\n img_height, img_width= img.shape[:2]\n # list[(x,y), (x,y)....]\n mesh_coord = [(int(point.x * img_width), int(point.y * img_height)) for point in results.multi_face_landmarks[0].landmark]\n if draw :\n [cv2.circle(img, p, 2, (0,255,0), -1) for p in mesh_coord]\n # returning the list of tuples for each landmarks\n return mesh_coord\n # Euclaidean distance\n def euclaideanDistance(point, point1):\n x, y = point\n x1, y1 = point1\n distance = math.sqrt((x1 - x)**2 + (y1 - y)**2)\n return distance\n # Blinking Ratio\n def blinkRatio(img, landmarks, right_indices, left_indices):\n # Right eyes\n # horizontal line\n rh_right = landmarks[right_indices[0]]\n rh_left = landmarks[right_indices[8]]\n # vertical line\n rv_top = landmarks[right_indices[12]]\n rv_bottom = landmarks[right_indices[4]]\n # draw lines on right eyes\n # cv.line(img, rh_right, rh_left, utils.GREEN, 2)\n # cv.line(img, rv_top, rv_bottom, utils.WHITE, 2)\n # LEFT_EYE\n # horizontal line\n lh_right = landmarks[left_indices[0]]\n lh_left = landmarks[left_indices[8]]\n # vertical line\n lv_top = landmarks[left_indices[12]]\n lv_bottom = landmarks[left_indices[4]]\n rhDistance = euclaideanDistance(rh_right, rh_left)\n rvDistance = euclaideanDistance(rv_top, rv_bottom)\n lvDistance = euclaideanDistance(lv_top, lv_bottom)\n lhDistance = euclaideanDistance(lh_right, lh_left)\n reRatio = rhDistance/rvDistance\n leRatio = lhDistance/lvDistance\n ratio = (reRatio+leRatio)/2\n return ratio\n # Eyes Extrctor function,\n def eyesExtractor(img, right_eye_coords, left_eye_coords):\n # converting color image to scale image\n gray = cv2.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n # getting the dimension of image\n dim = gray.shape\n # creating mask from gray scale dim\n mask = np.zeros(dim, dtype=np.uint8)\n # drawing Eyes Shape on mask with white color\n cv2.fillPoly(mask, [np.array(right_eye_coords, dtype=np.int32)], 255)\n cv2.fillPoly(mask, [np.array(left_eye_coords, dtype=np.int32)], 255)\n # showing the mask\n # cv.imshow('mask', mask)\n\n # draw eyes image on mask, where white shape is\n eyes = cv2.bitwise_and(gray, gray, mask=mask)\n # change black color to gray other than eys\n # cv.imshow('eyes draw', eyes)\n eyes[mask==0]=155\n\n # getting minium and maximum x and y for right and left eyes\n # For Right Eye\n r_max_x = (max(right_eye_coords, key=lambda item: item[0]))[0]\n r_min_x = (min(right_eye_coords, key=lambda item: item[0]))[0]\n r_max_y = (max(right_eye_coords, key=lambda item : item[1]))[1]\n r_min_y = (min(right_eye_coords, key=lambda item: item[1]))[1]\n # For LEFT Eye\n l_max_x = (max(left_eye_coords, key=lambda item: item[0]))[0]\n l_min_x = (min(left_eye_coords, key=lambda item: item[0]))[0]\n l_max_y = (max(left_eye_coords, key=lambda item : item[1]))[1]\n l_min_y = (min(left_eye_coords, key=lambda item: item[1]))[1]\n # croping the eyes from mask\n cropped_right = eyes[r_min_y: r_max_y, r_min_x: r_max_x]\n cropped_left = eyes[l_min_y: l_max_y, l_min_x: l_max_x]\n # returning the cropped eyes\n return cropped_right, cropped_left\n # Eyes Postion Estimator\n def positionEstimator(cropped_eye):\n # getting height and width of eye\n h, w =cropped_eye.shape\n\n # remove the noise from images\n gaussain_blur = cv2.GaussianBlur(cropped_eye, (9,9),0)\n\n # applying thresholding to convert binary_image\n ret, threshed_eye = cv2.threshold(gaussain_blur, 130, 255, cv.THRESH_BINARY)\n # create fixed part for eye with\n piece = int(w/3)\n # slicing the eyes into three parts\n right_piece = threshed_eye[0:h, 0:piece]\n center_piece = threshed_eye[0:h, piece: piece+piece]\n left_piece = threshed_eye[0:h, piece +piece:w]\n\n # calling pixel counter function\n eye_position, color = pixelCounter(right_piece, center_piece, left_piece)\n return eye_position, color\n\n # creating pixel counter function\n def pixelCounter(first_piece, second_piece, third_piece):\n # counting black pixel in each part\n right_part = np.sum(first_piece==0)\n center_part = np.sum(second_piece==0)\n left_part = np.sum(third_piece==0)\n # creating list of these values\n eye_parts = [right_part, center_part, left_part]\n # getting the index of max values in the list\n max_index = eye_parts.index(max(eye_parts))\n pos_eye =''\n if max_index==0:\n pos_eye=\"RIGHT\"\n color=[utils.BLACK, utils.WHITE]\n elif max_index==1:\n pos_eye = 'CENTER'\n color = [utils.BLACK, utils.WHITE]\n elif max_index ==2:\n pos_eye = 'LEFT'\n color = [utils.BLACK, utils.WHITE]\n else:\n pos_eye=\"Closed\"\n color = [utils.BLACK, utils.WHITE]\n return pos_eye, color\n #FUNCTION FOR RECORDING\n def record_audio():\n\n #It takes microphone input from the user and returns string output\n\n global count_text\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n r.energy_threshold = 100 # minimum audio energy to consider for recording\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\")\n text = r.recognize_google(audio, language='en-in')\n count_text.append(text)\n print(f\"Your Command: {text}\\n\")\n\n except Exception as e:\n print(\"Say that again please...\")\n return \"None\"\n return count_text\n\n #FUNCTION FOR TEXT PROCESSING\n def text_process(mess):\n ps = PorterStemmer()\n stemming = []\n \"\"\"\n Takes in a string of text, then performs the following:\n 1. Remove all punctuation\n 2. Remove all stopwords\n 3. Returns a list of the cleaned text\n 4. Returns stemming words from sentences\n \"\"\"\n # Check characters to see if they are in punctuation\n nopunc = [char for char in mess if char not in string.punctuation]\n # Join the characters again to form the string.\n nopunc = ''.join(nopunc)\n # Now just remove any stopwords\n # And stemming words from sentences\n nosw = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n for w in nosw:\n stemming.append(ps.stem(w))\n return stemming\n\n def comparison(lst1, lst2):\n lst3 = [word for word in lst1 if word in lst2]\n return len(lst3)\n\n #MAIN PROGRAM\n with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n with map_face_mesh.FaceMesh(min_detection_confidence =0.5, min_tracking_confidence=0.5) as face_mesh:\n while True:\n _, frame = cap.read()\n\n\n #FER\n labels = []\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray,1.2,4)\n # BODY POSTURE\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)# Recolor image to RGB\n image.flags.writeable = False\n results = pose.process(image) # Make detection\n image.flags.writeable = True # Recolor back to BGR\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n #EYE CONTACT\n frame_eye = frame\n frame_eye = cv.resize(frame_eye, None, fx=1.5, fy=1.5, interpolation=cv.INTER_CUBIC)\n frame_height, frame_width= frame_eye.shape[:2]\n rgb_frame = cv.cvtColor(frame_eye, cv.COLOR_RGB2BGR)\n results_eye = face_mesh.process(rgb_frame)\n\n\n #FER\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)\n roi_gray = gray[y:y+h,x:x+w]\n roi_gray = cv2.resize(roi_gray,(48,48),interpolation=cv2.INTER_AREA)\n\n\n\n if np.sum([roi_gray])!=0:\n roi = roi_gray.astype('float')/255.0\n roi = img_to_array(roi)\n roi = np.expand_dims(roi,axis=0)\n\n prediction = classifier.predict(roi)[0]\n label=emotion_labels[prediction.argmax()]\n emotion_report[label] += 1\n if label=='Angry' or label=='Disgust' or label == 'Fear' or label == 'Sad' or label == 'Surprise':\n illegal_movements_count_face+=1\n label_position = (x,y)\n else:\n pass\n\n # BODY POSTURE\n # Extract landmarks\n try:\n landmarks = results.pose_landmarks.landmark\n\n # Get coordinates\n elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]\n lt_shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]\n rt_shoulder = [landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x,landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y]\n\n # Calculate angle\n angle = calculate_angle(elbow, lt_shoulder, rt_shoulder)\n # illegal movement counter logic\n if angle > 96 and angle <130:\n movement = \"Straight :D\"\n if angle>110:\n illegal_movements_count_body +=1\n if angle < 96:\n illegal_movements_count_body +=1\n\n # EYE CONTACT\n if results_eye.multi_face_landmarks:\n mesh_coords = landmarksDetection(frame_eye, results_eye, False)\n ratio = blinkRatio(frame, mesh_coords, RIGHT_EYE, LEFT_EYE)\n if ratio >4.1:\n CEF_COUNTER +=1\n else:\n if CEF_COUNTER>CLOSED_EYES_FRAME:\n TOTAL_BLINKS +=1\n CEF_COUNTER =0\n # Blink Detector Counter Completed\n right_coords = [mesh_coords[p] for p in RIGHT_EYE]\n left_coords = [mesh_coords[p] for p in LEFT_EYE]\n crop_right, crop_left = eyesExtractor(frame_eye, right_coords, left_coords)\n eye_position, color = positionEstimator(crop_right)\n eye_position_left, color = positionEstimator(crop_left)\n #counting illegal eye movements\n if eye_position == \"RIGHT\" or eye_position == \"LEFT\" or eye_position_left == \"RIGHT\" or eye_position_left == \"LEFT\":\n illegal_movements_count_eye +=1\n # calculating frame per seconds FPS\n end_time = time.time()-start_time\n fps = frame_counter/end_time\n\n\n\n except:\n pass\n\n cv2.imshow(\"PROCTURING (Please press 'q' or say 'stop' to quit\",frame)\n # RECORD\n record_audio()\n if (cv2.waitKey(1) & 0xFF == ord('q')) or count_text[-1] == 'stop':\n break\n cap.release()\n cv2.destroyAllWindows()\n\n df = pd.read_csv(\"second_phase/Interview_questions.csv\", index_col='Index')\n df['Keywords'] = df['Answers'].apply(text_process)\n text = text_process(count_text)\n text_match_count = comparison(df['Keywords'][1], text)\n\n record_obj = Record(record_audio_text=count_text, illegal_face = illegal_movements_count_face,\n illegal_body = illegal_movements_count_body,\n illegal_eye = illegal_movements_count_eye,\n eye_blink_count = TOTAL_BLINKS,\n text_match = text_match_count,\n emotion_report = emotion_report)\n record_obj.save()\n cv.destroyAllWindows()\n cap.release()\n\n context = {\n 'recording' : record_obj\n }\n return render(request, 'phase2/ans_python.html',context)\n\ndef ans_python(request):\n return render(request,'phase2/ans_python.html' )\n\ndef report(request):\n\n #report = Record.objects.latest('pk')\n report = Record.objects.get(id=10)\n #Variables\n ief = 0\n ieb = 0\n iee = 0\n tmp = 0\n tm = 0\n # for face\n if report.illegal_face <= 10:\n ief =100\n elif report.illegal_face > 10 and report.illegal_face <= 30:\n ief = 80\n elif report.illegal_face > 30 and report.illegal_face <= 50:\n ief = 60\n elif report.illegal_face > 50 and report.illegal_face <= 80:\n ief = 40\n else:\n ief = 20\n\n # for body\n if report.illegal_body <= 10:\n ieb = 100\n elif report.illegal_body > 10 and report.illegal_body <= 30:\n ieb = 80\n elif report.illegal_body > 30 and report.illegal_body <= 50:\n ieb = 60\n elif report.illegal_body > 50 and report.illegal_body <= 80:\n ieb = 40\n else:\n ieb = 20\n\n # for body\n if report.illegal_eye <= 10:\n iee = 100\n elif report.illegal_eye > 10 and report.illegal_eye <= 30:\n iee = 80\n elif report.illegal_eye > 30 and report.illegal_eye <= 50:\n iee = 60\n elif report.illegal_eye > 50 and report.illegal_eye <= 80:\n iee = 40\n else:\n iee = 20\n\n #FOR TECHNICAL INTERVIEW TEXT MATCH\n text_match = report.text_match\n\n if text_match < 2 :\n tmp = 80\n elif text_match >= 10:\n tm = 10\n else:\n tm = text_match\n tmp = 20\n\n tmu = tm * 10\n context = {\n 'recording' : report,\n 'ief' : ief,\n 'ieb' : ieb,\n 'iee' : iee,\n 'tmu' : tmu,\n 'tmp' : tmp\n }\n\n\n return render(request, 'phase2/report.html',context)\n\n\ndef illegal_face(request):\n videos = SolutionFace.objects.all()\n #report = Record.objects.latest('pk')\n report = Record.objects.get(id=10)\n #Variables\n ief = 0\n Angry = 0\n Disgust = 0\n Fear = 0\n Happy = 0\n Neutral = 0\n Sad = 0\n Surprise =0\n\n # for face\n if report.illegal_face <= 10:\n ief =100\n elif report.illegal_face > 10 and report.illegal_face <= 30:\n ief = 80\n elif report.illegal_face > 30 and report.illegal_face <= 50:\n ief = 60\n elif report.illegal_face > 50 and report.illegal_face <= 80:\n ief = 40\n else:\n ief = 20\n\n # d = dict(str(report.emotion_report))\n # Angry = d['Angry']\n # Disgust = d['Disgust ']\n # Fear = d['Fear']\n # Happy = d['Happy']\n # Neutral = d['Neutral']\n # Sad = d['Sad']\n # Surprise = d['Surprise']\n\n context = {\n 'recording' : report,\n 'ief' : ief,\n 'videos' : videos,\n # 'd' : d,\n # 'Angry' : Angry,\n # 'Disgust' : Disgust,\n # 'Fear' : Fear,\n # 'Happy' : Happy,\n # 'Neutral' : Neutral,\n # 'Sad ':Sad ,\n # 'Surprise' : Surprise\n }\n\n return render(request, 'phase2/illegal_face.html', context)\n\ndef illegal_body(request):\n videos = SolutionBodyPosture.objects.all()\n report = Record.objects.latest('pk')\n #Variables\n ieb = 0\n\n # for body\n if report.illegal_body <= 10:\n ieb = 100\n elif report.illegal_body > 10 and report.illegal_body <= 30:\n ieb = 80\n elif report.illegal_body > 30 and report.illegal_body <= 50:\n ieb = 60\n elif report.illegal_body > 50 and report.illegal_body <= 80:\n ieb = 40\n else:\n ieb = 20\n\n context = {\n 'recording' : report,\n 'ieb' : ieb,\n 'videos' : videos\n }\n return render(request, 'phase2/illegal_body.html', context)\n\ndef illegal_eye(request):\n videos = SolutionEyeContact.objects.all()\n report = Record.objects.latest('pk')\n #Variables\n iee = 0\n\n # for eye\n if report.illegal_eye <= 10:\n iee = 100\n elif report.illegal_eye > 10 and report.illegal_eye <= 30:\n iee = 80\n elif report.illegal_eye > 30 and report.illegal_eye <= 50:\n iee = 60\n elif report.illegal_eye > 50 and report.illegal_eye <= 80:\n iee = 40\n else:\n iee = 20\n\n context = {\n 'recording' : report,\n 'iee' : iee,\n 'videos' : videos\n }\n return render(request, 'phase2/illegal_eye.html', context)\n\ndef tech_int_issue(request):\n videos = Solution.objects.all()\n report = Record.objects.latest('pk')\n #Variables\n tmp = 0\n tm = 0\n tech_int_score = 0\n\n #FOR TECHNICAL INTERVIEW TEXT MATCH\n text_match = report.text_match\n\n if text_match < 2 :\n tmp = 80\n elif text_match >= 10:\n tm = 10\n else:\n tm = text_match\n tmp = 20\n\n tmu = tm * 10\n\n if tmu <= 20:\n tech_int_score = 20\n elif tmu > 20 and tmu <= 40:\n tech_int_score = 40\n elif tmu > 40 and tmu <= 60:\n tech_int_score = 60\n elif tmu > 60 and tmu <= 80:\n tech_int_score = 80\n else:\n tech_int_score = 100\n\n context = {\n 'recording' : report,\n 'tmu' : tmu,\n 'tmp' : tmp,\n 'tech_int_score' : tech_int_score,\n 'videos' : videos\n }\n return render(request, 'phase2/tech_int_issue.html', context)\n","repo_name":"trajendra0242/Campus-Interview-Preparation-Toolkit","sub_path":"second_phase/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22479733464","text":"import neo4j\nimport json\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport argparse\n\ndelete_all_query = \"\"\"\nMATCH (n)-[r]-()\n\nCALL { WITH r\nDELETE r\n} IN TRANSACTIONS OF 10000 ROWS\n\nWITH distinct n\nCALL { WITH n\nDELETE n\n} IN TRANSACTIONS OF 10000 ROWS;\n\"\"\"\n\nobject_cypher_query = \"\"\"\n UNWIND $objects AS object\n WITH object,\n object.type AS type,\n coalesce(object.properties, {}) AS properties\n WITH object,\n type,\n properties,\n CASE WHEN type IS NULL THEN 'Missing type property'\n WHEN properties IS NULL THEN 'Missing properties'\n ELSE NULL\n END AS error\n CALL apoc.create.node(type, properties) YIELD node\n RETURN node, error\n \"\"\"\n \nrel_cypher_query = \"\"\"\nUNWIND $objects AS object\nMATCH (source:stixnode WHERE source.id = object.source_ref )\nUSING INDEX source:stixnode(id)\nMATCH (target:stixnode where target.id = object.target_ref)\nUSING INDEX target:stixnode(id)\n\nCALL apoc.create.relationship(source, object.rel_type, object.rel_properties, target) YIELD rel\nRETURN rel\n\"\"\"\ndedup = set()\n# Define function to process STIX 2 objects\ndef process_stix2_objects(tx, objects):\n new_obj_list = []\n new_rel_list = []\n\n for obj in objects:\n if obj[\"id\"] in dedup:\n continue\n else:\n dedup.add(obj[\"id\"])\n # Check if object is a relationship\n if obj[\"type\"] == \"relationship\":\n # Extract relationship properties\n rel_type = obj.pop(\"relationship_type\", \"RELATED_TO\")\n source_ref = obj.pop(\"source_ref\")\n target_ref = obj.pop(\"target_ref\")\n rel_properties = obj.copy()\n rel_properties.pop(\"type\", None)\n for k, v in rel_properties.items():\n if isinstance(v, (list, dict)):\n rel_properties[k] = json.dumps(v)\n new_rel_list.append(\n {\n \"source_ref\": source_ref,\n \"target_ref\": target_ref,\n \"rel_type\": rel_type,\n \"rel_properties\": rel_properties,\n }\n )\n\n else:\n # Extract STIX 2 object type and properties\n obj_properties = obj.copy()\n obj_properties.pop(\"type\")\n for k, v in obj_properties.items():\n if isinstance(v, (list, dict)):\n obj_properties[k] = json.dumps(v)\n\n # Create Neo4j node for STIX 2 object\n obj = {\"type\": ['stixnode', obj[\"type\"]], \"properties\": obj_properties}\n new_obj_list.append(obj)\n\n # Create Neo4j nodes for non-relationship objects\n for obj_batch in batch(new_obj_list):\n session.run(\n object_cypher_query,\n objects=obj_batch\n )\n # Create Neo4j relationship for relationship object\n for obj_batch in batch(new_rel_list):\n session.run(\n rel_cypher_query,\n objects=obj_batch,\n )\n\ndef batch(iterable, n=1000):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]\n \ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_dir\")\n parser.add_argument(\"user\")\n parser.add_argument(\"password\")\n parser.add_argument(\"--delete\", action=\"store_true\", help=\"delete everything first then load\")\n\n return parser.parse_args()\n \nif __name__ == \"__main__\":\n args = parse_args()\n input_dir = Path(args.input_dir)\n\n driver = neo4j.GraphDatabase.driver(\"bolt://localhost:7687\", auth=(args.user, args.password))\n \n if args.delete:\n with driver.session() as session:\n session.run(delete_all_query)\n\n # Create or delete database as needed\n # with driver.session() as session:\n # results1 = session.run(\"SHOW DATABASES\")\n # print(\"after running show db\")\n \n\n # Load STIX 2 bundle from JSON file\n with driver.session() as session:\n session.run('CREATE INDEX stixnode_id IF NOT EXISTS FOR (n:stixnode) ON n.id')\n for file_path in tqdm(input_dir.iterdir(), total=len(list(input_dir.iterdir()))):\n file_path = input_dir.joinpath(file_path)\n with open(file_path, \"r\") as f:\n try:\n bundle = json.load(f)\n except Exception as e:\n print(file_path, 'failed to load as json')\n \n # Start Neo4j session and load STIX 2 objects\n process_stix2_objects(session, bundle[\"objects\"])\n","repo_name":"idaholab/cape2stix","sub_path":"cape2stix/todb/neo4j_bulk.py","file_name":"neo4j_bulk.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"11278102636","text":"import sys\n\nimport psutil\nimport numpy\nfrom matplotlib import pyplot\n\n\nclass CpuSnapshot:\n def __init__(self, label):\n self.label = label\n self.samples = []\n\n def Capture(self, sample_count):\n print(('Capturing %d CPU samples for %s...' %\n ((sample_count - len(self.samples)), self.label)))\n while len(self.samples) < sample_count:\n self.samples.append(psutil.cpu_percent(1.0, False))\n\n def Text(self):\n return (\n '%s: avg=%s, median=%s, min=%s, max=%s' %\n (self.label, numpy.average(self.samples), numpy.median(\n self.samples), numpy.min(self.samples), numpy.max(self.samples)))\n\n def Max(self):\n return numpy.max(self.samples)\n\n\ndef GrabCpuSamples(sample_count):\n print('Label for snapshot (enter to quit): ')\n label = eval(input().strip())\n if len(label) == 0:\n return None\n\n snapshot = CpuSnapshot(label)\n snapshot.Capture(sample_count)\n\n return snapshot\n\n\ndef main():\n print('How many seconds to capture per snapshot (enter for 60)?')\n sample_count = eval(input().strip())\n if len(sample_count) > 0 and int(sample_count) > 0:\n sample_count = int(sample_count)\n else:\n print('Defaulting to 60 samples.')\n sample_count = 60\n\n snapshots = []\n while True:\n snapshot = GrabCpuSamples(sample_count)\n if snapshot is None:\n break\n snapshots.append(snapshot)\n\n if len(snapshots) == 0:\n print('no samples captured')\n return -1\n\n pyplot.title('CPU usage')\n\n for s in snapshots:\n pyplot.plot(s.samples, label=s.Text(), linewidth=2)\n\n pyplot.legend()\n\n pyplot.show()\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"WebKit/WebKit","sub_path":"Source/ThirdParty/libwebrtc/Source/webrtc/tools_webrtc/cpu/cpu_mon.py","file_name":"cpu_mon.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"18103305213","text":"import numpy as np\nimport pylab as pl\nfrom VBLinRegARD import bayes_linear_fit_ard as VBF\nfrom stats import cdpp, medransig\nimport astropy.io.fits as pyfits\n\ndef fit_basis(flux, basis, scl = None):\n '''\n weights = fit_basis(flux, basis, scl = None)\n fit VB linear basis model to one or more light curves\n\n Inputs:\n flux: (nobj x nobs) light curve(s) \n basis: (nobs x nb) basis trends\n scl: (nb) prior scaling factors for the basis trends\n Outputs:\n weights: (nobj x nb) weights\n '''\n # pre-process basis\n nb,nobs = basis.shape\n B = np.matrix(basis.T)\n if scl == None: scl = np.ones(nb)\n Bnorm = np.multiply(B, scl)\n Bs = Bnorm.std()\n Bnorm /= Bs\n Bnorm = np.concatenate((Bnorm, np.ones((nobs,1))), axis=1)\n # array to store weights\n nobj = flux.shape[0]\n weights = np.zeros((nobj,nb))\n for iobj in np.arange(nobj): \n # pre-process flux\n F = np.matrix(flux[iobj,:]).T\n l = np.isfinite(F)\n Fm = F.mean()\n Fs = F.std()\n Fnorm = (F - Fm) / Fs\n res = VBF(Bnorm, Fnorm)\n w, V, invV, logdetV, an, bn, E_a, L = res\n weights[iobj,:] = np.array(res[0][:-1]).flatten() * scl * Fs / Bs\n return weights\n\ndef apply_basis(weights, basis):\n '''\n model = apply_basis(weights, basis) \n Compute linear basis model given weights and basis matrix\n\n Inputs:\n weights: (nobj x nb) weights\n basis: (nobs x nb) basis trends\n Outputs:\n corr: (nobj x nobs) correction to apply to light curves\n '''\n return np.dot(weights, basis)\n\ndef fixed_nb(flux, cbv, nB = 4, use = None, doPlot = True):\n '''\n corrected_flux = fixed_nb(flux, basis, nB = 4, use = None, \\\n doPlot = True)\n Correct light curve for systematics using first nB CBVs.\n\n Inputs:\n flux: (1-D array) light curves \n cbv: (2-D array) co-trending basis vectors trends\n Optional inputs:\n nB: number of CBVs to use (the first nB are used)\n use: boolean array, True for data points to use in evaluating correction, \n False for data points to ignore (NaNs are also ignored)\n doPlot: set to False to suppress plot\n Outputs:\n corrected_flux: (same shape as flux) corrected light curves\n weights: (nB array) basis vector coefficients\n '''\n nobs = len(flux) \n if cbv.shape[1] == nobs: cbv_ = cbv[:nB,:]\n else: cbv_ = cbv[:,:nB].T\n corrected_flux = np.copy(flux)\n l = np.isfinite(flux)\n if not use is None: l *= use\n weights = fit_basis(flux[l].reshape((1,l.sum())), cbv_[:,l])\n corr = apply_basis(weights, cbv_).reshape(flux.shape)\n corrected_flux = flux - corr\n if doPlot == True:\n pl.clf()\n x = np.arange(nobs)\n pl.plot(x, flux, '-', c = 'grey')\n pl.plot(x[l], flux[l], 'k-')\n pl.plot(x, corr, 'c-')\n pl.plot(x, corrected_flux, 'm-')\n pl.xlabel('Observation number')\n pl.xlabel('Flux')\n return corrected_flux, weights\n\ndef sel_nb(flux, cbv, nBmax = None, use = None):\n '''\n (nb_opt, flux_opt, weights_opt), (corr_flux_multi, weights_multi)\n = sel_nb(flux, basis, nBmax = 8, use = None)\n Correct light curve for systematics using upt to nB CBVs \n (automatically select best number).\n\n Inputs:\n flux: (1-D array) light curves \n cbv: (2-D array) co-trending basis vectors trends\n Optional inputs:\n nBmax: maximum number of CBVs to use (starting with the first)\n use: boolean array, True for data points to use in evaluating \n correction, False for data points to ignore (NaNs are also ignored)\n Outputs:\n nBopt: automatically selected number of CBVs used (<= nBmax)\n corr_flux: (same shape as flux) corrected light curves\n weights: (nBopt array) basis vector coefficients\n '''\n nobs = len(flux)\n if cbv.shape[1] == nobs: cbv_ = np.copy(cbv)\n else: cbv_ = cbv.T\n if nBmax is None: nBmax = cbv.shape[0]\n else: cbv_ = cbv_[:nBmax,:]\n \n corr_flux = np.zeros(nobs)\n corr_flux_multi = np.zeros((nBmax,nobs))\n weights_multi = np.zeros((nBmax,nBmax))\n ran_multi = np.zeros(nBmax)\n sig_multi = np.zeros(nBmax)\n\n l = np.isfinite(flux)\n if not use is None: l *= use\n\n med_raw, ran_raw, sig_raw = medransig(flux[l])\n\n for i in range(nBmax):\n cbv_c = cbv_[:i+1,:]\n w_c = fit_basis(flux[l].reshape((1,l.sum())), cbv_c[:,l])\n w_ext = np.zeros(nBmax)\n w_ext[:i+1] = w_c\n weights_multi[i,:] = w_ext\n corr = apply_basis(w_c, cbv_c).reshape(flux.shape)\n c = flux - corr\n med, ran, sig = medransig(c[l])\n corr_flux_multi[i,:] = c - med + med_raw\n ran_multi[i] = ran\n sig_multi[i] = sig\n\n # Select the best number of basis functions\n # (smallest number that significantly reduces range)\n med_ran = np.median(ran_multi)\n sig_ran = 1.48 * np.median(abs(ran_multi - med_ran))\n jj = np.where(ran_multi < med_ran + 3 * sig_ran)[0][0]\n # Does that introduce noise? If so try to reduce nB till it doesn't\n while (sig_multi[jj] > 1.1 * sig_raw) and (jj > 0): jj -= 1\n\n nb_opt = jj + 1\n flux_opt = corr_flux_multi[jj,:].flatten()\n weights_opt = weights_multi[jj,:][:jj+1].flatten()\n ran_opt = ran_multi[jj]\n sig_opt = sig_multi[jj]\n return (nb_opt, flux_opt, weights_opt), \\\n (corr_flux_multi, weights_multi)\n\n# def correct_file(infile, cbvfile, outfile, input_type = 'SAP', \\\n# verbose = False, doplot = False, #\n# exclude_func = None, exclude_func_par = None):\n# '''\n# time, cadence, corrected_flux = correct_file_nB(infile, cbvfile, outfile, \\\n# input_type = 'SAP', exclude_func = None, \\\n# exclude_func_par = None)\n\n# Correct light curve containined in infile using CBVs contained in\n# cbvfile, using up to nBmax CBVs\n\n# Inputs:\n# infile: input (FITS) light curve file\n# cbvfile: input (FITS) CBV file\n# outfile: output (FITS) file to save results in. This is a copy of the input\n# file with extra column 'CBV_FLUX', containing the systematics-corrected fluxes.\n# The weights associated with each CBV are saved in the header (CBVW_X, where\n# X is the index of the CBV to which the weight is applied). Also stored are\n# the range, point-to-point scatter and 6.5-hour CDPPs after correction\n# (RAN_CBV, SIG_CBV, and CDPP_CBV).\n# Optional inputs:\n# input_type: type of data to use as input. Options are:\n# SAP: \"raw\" (simple aperture photometry) data\n# JCR: \"jump-corrected\" data \n# verbose: toggle to produce more / less text output\n# doplot: toggle to produce plots on screen showing the evolution of the correction\n# exclude_func: function f(t,par), which returns list of indices to ignore\n# exclude_func_par: parameters of exclude function\n# as more CBVs are added\n# '''\n# nBmax = 8\n# # Read in light curve data\n# h1 = pyfits.open(infile, mode = 'readonly')\n# kic = h1[0].header['KEPLERID']\n# quarter = h1[0].header['QUARTER']\n# module = h1[0].header['MODULE']\n# output = h1[0].header['OUTPUT']\n# if verbose:\n# print 'Reading in quarter %d light curve data for KIC %d.' % \\\n# (quarter, kic)\n# print 'Object is located on module %d, output channel %d.' \\\n# % (module, output)\n# if input_type == 'SAP':\n# if verbose: print 'Reading SAP data'\n# flux = h1[1].data.field('SAP_FLUX').astype('float64')\n# elif input_type == 'JCR':\n# if verbose: print 'Reading JCR data'\n# flux = h1[1].data.field('JCR_FLUX').astype('float64')\n# else:\n# print 'Error: input type %s not supported'\n# return\n# time = h1[1].data.field('TIME').astype('float64')\n# pdc = h1[1].data.field('PDCSAP_FLUX').astype('float64')\n# if doplot == True:\n# pl.clf()\n# l = np.isfinite(time)\n# tmin = time[l].min()\n# tmax = time[l].max()\n# nobs = len(flux)\n# l = np.isfinite(flux)\n# nval = l.sum()\n# print 'Read in %d observations of which %d valid.' % (nobs, nval)\n# # Read in CBV data\n# cbv = np.zeros((nobs, 16))\n# h2 = pyfits.open(cbvfile)\n# if h2[0].header['QUARTER'] != quarter:\n# print 'Error: CBV file is for quarter %d.' % h2[0].header['QUARTER']\n# return\n# n_ext = len(h2) - 1\n# for i in np.arange(n_ext)+1:\n# if h2[i].header['MODULE'] != module: continue\n# if h2[i].header['OUTPUT'] != output: continue\n# for j in np.arange(16):\n# cbv[:,j] = h2[i].data.field('VECTOR_%d' % (j+1)).astype('float64')\n# break\n# h2.close()\n# # Identify any observations to ignore\n# if exclude_func != None:\n# if exclude_func_par == None:\n# exclude_indices = exclude_func(time)\n# else:\n# exclude_indices = exclude_func(time, exclude_func_par)\n# use = np.ones(nobs, 'bool')\n# use[exclude_indices] = False\n# else:\n# use = None\n# # Stats before correction - store in header keywords\n# mms, sap_ran, sap_sig = medransig(flux[np.isfinite(flux)])\n# if verbose: print 'Median flux: %f' % mms\n# h1[1].header['MED_FLUX'] = repr(mms)\n# if verbose: print 'Input range: %f' % sap_ran\n# h1[1].header['SAP_RAN'] = repr(sap_ran)\n# if verbose: print 'Input p2p scatter: %f' % sap_sig \n# h1[1].header['SAP_SIG'] = repr(sap_sig)\n# sap_cdpp = cdpp(time, flux)\n# if verbose: print 'Input CDPP: %f' % sap_cdpp\n# h1[1].header['SAP_CDPP'] = repr(sap_cdpp)\n# mmp = np.median(pdc[np.isfinite(pdc)])\n# pdc = pdc - mmp + mms\n# _, pdc_ran, pdc_sig = medransig(pdc[np.isfinite(pdc)])\n# if verbose: print 'PDC range: %f' % pdc_ran\n# h1[1].header['PDC_RAN'] = repr(pdc_ran)\n# if verbose: print 'PDC p2p scatter: %f' % pdc_sig \n# h1[1].header['PDC_SIG'] = repr(pdc_sig)\n# pdc_cdpp = cdpp(time, pdc)\n# if verbose: print 'PDC CDPP: %f' % pdc_cdpp\n# h1[1].header['PDC_CDPP'] = repr(pdc_cdpp)\n# # Preliminary plotting commands\n# if doplot == True:\n# ax1 = pl.subplot(211)\n# diff1 = flux[1:] - flux[:-1]\n# ll1 = np.isfinite(diff1)\n# mm1 = np.median(diff1[ll1])\n# offset1 = 5 * 1.48 * np.median(abs(diff1[ll1] - mm1))\n# pl.plot(time, flux, 'k-')\n# pl.plot(time, flux - pdc + mms - offset1, 'g-')\n# pl.ylabel('raw flux')\n# pl.title('KID%d Q%d (module %d output %d)' % \\\n# (kic, quarter, module, output))\n# ax2 = pl.subplot(212, sharex = ax1)\n# pl.plot(time, pdc, 'g-') \n# diff2 = pdc[1:] - pdc[:-1]\n# ll2 = np.isfinite(diff2)\n# mm2 = np.median(diff2[ll2])\n# offset2 = 5 * 1.48 * np.median(abs(diff2[ll2] - mm2))\n# pl.ylabel('corr. flux')\n# pl.xlabel('time')\n# # Perform correction\n# (nb, flux_cbv, weights, cbv_ran, cbv_sig), \\\n# (flux_multi, _, _, _) = \\\n# sel_nb(flux, cbv, nBmax = nBmax, use = use)\n# # Plot results for individual nB values, if requested\n# if doplot == True:\n# for i in np.arange(nBmax):\n# flux_cbv = flux_multi[i,:].flatten()\n# mmc = np.median(flux_cbv[np.isfinite(flux_cbv)])\n# flux_cbv = flux_cbv - mmc + mms\n# corr = flux - flux_cbv + mms\n# dr = i/float(nBmax-1)\n# rgb = (1-dr,0,dr)\n# pl.sca(ax1)\n# pl.plot(time, corr - offset1 * (i+2), c = rgb)\n# pl.sca(ax2)\n# pl.plot(time, flux_cbv - offset2 * (i+1), c = rgb)\n# pl.xlim(tmin, tmax)\n# # Store results in relevant FITS column and header keywords\n# if verbose: print 'Optimal no. CBVs: %d' % nb\n# h1[1].header['CBV_NSEL'] = repr(nb)\n# if verbose: print 'Weights:', weights\n# for i in range(nb):\n# h1[1].header['CBVW_%02d' % i] = repr(weights[i])\n# if verbose: print 'CBV range: %f' % cbv_ran\n# h1[1].header['CBV_RAN'] = repr(cbv_ran)\n# if verbose: print 'CBV p2p scatter: %f' % cbv_sig \n# h1[1].header['CBV_SIG'] = repr(cbv_sig)\n# mmc = np.median(flux_cbv[np.isfinite(flux_cbv)])\n# flux_cbv = flux_cbv - mmc + mms\n# cbv_cdpp = cdpp(time, flux_cbv)\n# if verbose: print 'CBV CDPP: %f' % cbv_cdpp\n# h1[1].header['CBV_CDPP'] = repr(cbv_cdpp)\n# unit = h1[1].header['TUNIT4']\n# cols = h1[1].columns\n# col = pyfits.Column(name = 'CBV_FLUX', format = 'E', disp = 'E14.7', \\\n# unit = unit, array = flux_cbv)\n# cols += col\n# # Save\n# hdr_save = h1\n# h1[1] = pyfits.BinTableHDU.from_columns(cols, header=h1[1].header)\n# if verbose: print 'Saving to file %s' % outfile\n# h1.writeto(outfile, clobber = True)\n# h1.close()\n# return \n","repo_name":"saigrain/CBVshrink","sub_path":"src/cbv.py","file_name":"cbv.py","file_ext":"py","file_size_in_byte":12999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"63834436","text":"#!/usr/bin/python3\n\"\"\"This module defines the text_indentation function\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n Function that prints a text\n Args:\n text (str): Text given by the user.\n Raises:\n TypeError: \"text must be a string\"\n \"\"\"\n temp_text = ''\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n\n for character in text:\n temp_text += character\n if character in ['?', '.', ':']:\n print(temp_text.strip() + \"\\n\")\n temp_text = ''\n print(temp_text.strip(), end=\"\")\n","repo_name":"Callistus25/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39951079027","text":"from setuptools import setup, find_packages\n\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\n\nsetup(\n name='adventofcode2019',\n version='1.0',\n install_requires=requirements,\n author='marlew',\n packages=find_packages(),\n include_package_data=True,\n url='https://github.com/lev7/adventofcode2019',\n description='Advent of Code 2019'\n)\n","repo_name":"leonobilis/adventofcode2019","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11812659515","text":"# -*- coding: utf-8 -*-\n# @Time : 18-10-29 上午11:51\n# @Author : Redtree\n# @File : zf_sys_role.py\n# @Desc : 角色表\n\n\nimport json\nfrom __init__ import Base_xxcxb\nfrom sqlalchemy import (Column, String, Integer, Text)\n\n\nclass Zf_sys_role(Base_xxcxb):\n __tablename__ = 'zf_sys_role'\n\n zfid = Column(Integer, primary_key=True)\n role_name = Column(String(20)) # 角色名\n created_user_id = Column(String(20)) # 创建用户id\n created_time = Column(Integer)\n updated_time = Column(Integer)\n status = Column(Integer) # 状态名 0为可用 1为禁止\n role_code = Column(String(50)) # 角色代码\n\n def __repr__(self):\n get_data = {\n \"zfid\": self.zfid,\n \"role_name\": self.role_name,\n \"created_user_id\": self.created_user_id,\n \"created_time\": self.created_time,\n \"updated_time\": self.updated_time,\n \"status\": self.status,\n \"role_code\": self.role_code\n }\n get_data = json.dumps(get_data)\n return get_data\n","repo_name":"redtreeai/irony-man-server","sub_path":"database/sqlalchemy/orm_models/zf_sys_role.py","file_name":"zf_sys_role.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19403072952","text":"# -*- encoding: utf-8 -*-\n'''\n@File : s4_save_question.py\n@Time : 2020/04/19 20:25:34\n@Author : lryself \n@Version : 1.0\n@Contact : lnolvwe@163.com\n题目:爬取这个网址上http://www.python3.vip/doc/prac/python/0001/,所有的Python练习题题目和答案;保存到txt文件中(只保留文字);\n 文本文件类似(注意是类似的效果,不是说一定要做的一模一样)的效果如下:\n\n 参考文档:https://blog.csdn.net/weixin_43687366/article/details/88877996\n 大家看完这篇文档后,再开始动手做这道题;\n\n'''\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport tools\nimport re\n# here put the import lib\nurl = 'http://www.python3.vip/doc/prac/python/0001/'\n \n#伪装���浏览器\nheaders = {\n\t'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'\n}\n \n#发送请求\nr = requests.get(url,headers=headers).content.decode('utf-8')\n# print(r)\n \n#解析html文档\nsoup = BeautifulSoup(r,'html.parser')\t#这里用lxml会出错\n# print(type(soup))\n \n#查找每个练习的a链接href属性获取对应的链接地址\nre_a = soup.find( class_ ='nav__items').find_all('a')\t#返回的是100个a标签的列表\n \n#创建一个列表保存url\nlist = []\nfor i in re_a:\n\tlist.append(i.attrs['href'])\n# print(list)\n \n \n\"\"\"\n\t2、根据获取的每个练习的链接地址来请求每个练习获得页面内容\n\"\"\"\nclass dataclass:\n def __init__(self,x):\n self.title=x\n self.question=\"\"\n self.answer=\"\"\n \n def addquestion(self,x):\n self.question+=x\n self.question+=\"\\n\"\n\n def addanswer(self,x):\n self.answer+=x\n self.answer+=\"\\n\"\n \ndef finddata(list1,data1):\n for i in range(len(list1)):\n if list1[i].title==data1:\n return i\n return None\n\ndef writedata(f,list1):\n for i in list1:\n f.write(i.title+\"\\n\")\n f.write(i.question+\"\\n\")\n f.write(\"答案与解析:\\n\")\n f.write(i.answer+\"\\n\")\n f.write('-'*50+\"\\n\")\nwith open(\"s4_questions.txt\", \"w\"):\n pass\nfor x in list:\n data=[]\n # 请求详细页面\n test = requests.get(x, headers=headers).content.decode('utf-8')\n # print(test)\n\n # 解析html文档\n soup_test = BeautifulSoup(test, 'html.parser')\n if soup_test.find('head').text=='404 Not Found':\n print(x+\"打开失败\")\n continue\n # print(type(soup_test))\n\n # 查找练习内容\n # 查找标题\n title_text = soup_test.find(class_='page__title').text\n\n list1=soup_test.find(class_='content').contents\n p=-1\n isquestion=True\n for i in list1:\n if i=='\\n':\n continue\n if i.text=='':\n continue\n elif re.match(r\"^题目[0-9]$\",i.text) or i.text==\"编程题\" or i.text==\"判断题\":\n isquestion=True\n data.append(dataclass(i.text))\n p+=1\n elif re.match(r\"^题目[0-9]-答案$\",i.text):\n isquestion=False\n p=finddata(data,i.text[:3])\n elif i.name=='p':\n if i.text=='请大家点击此处链接,观看讲解视频':\n data[p].addanswer(i.text)\n data[p].addanswer(i.contents[0].attrs['href'])\n elif i.text==\"扫码分享给朋友,一起学更有动力哦\":\n continue\n elif i.text==\"答案与解析\":\n continue\n elif i.text=='点击这里 下载一个zip包,解压后,得到一个目录source。':\n title_text+=\"\\n{}{}\".format(i.contents[0].attrs['href'],i.text)\n else:\n if isquestion:\n data[p].addquestion(i.text)\n else:\n data[p].addanswer(i.text)\n elif i.name=='div':\n if \"class\" in i.attrs:\n if i.attrs['class'][0]=='highlighter-rouge':\n data[p].addquestion(i.text)\n elif i.attrs['class'][0]=='language-py' and i.attrs['class'][1]=='highlighter-rouge':\n data[p].addanswer(i.text)\n elif i.name=='ul':\n list2=i.find_all('p')\n for w in list2:\n data[p].addquestion(w.text)\n with open(\"s4_questions.txt\", \"a\", encoding=\"utf-8\") as f:\n f.write(\"章节:\"+title_text+\"\\n\")\n writedata(f,data)\n f.write(\"*\"*50+\"\\n\")\n\nprint(\"全部保存完成\")\n # 查找题目\n # questions = soup_test.find(class_='content').find_all('h2')\n # for w in questions:\n # if re.match(r\"^题目[0-9]$\",w.text) or w.text==\"编程题\" or w.text==\"判断题\":\n # data.append(dataclass(w.text))\n # question = soup_test.find(class_='content').find_all('p')\n # p=0\n # flag=True\n # for i in question:\n # if i.text == '请大家点击此处链接,观看讲解视频':\n # flag=True\n # p-=1\n # data[p].addanswer(i.text)\n # data[p].addanswer(i.contents[0].attrs['href'])\n # elif i.text == '扫码分享给朋友,一起学更有动力哦':\n # break\n # elif i.text == '答案与解析':\n # continue\n # elif i.text !='':\n # flag=True\n # data[p].addquestion(i.text)\n # elif flag==True:\n # p+=1\n # flag=False\n # if soup_test.find(class_='language-py highlighter-rouge'):\n # answers = soup_test.findall(class_='language-py highlighter-rouge')\n # for i in answers:\n # answer=''\n # answer1 = i.findall('span')\n # for w in answer1:\n # answer+=w.text\n\n\n # # 程序源代码\n # try:\n # dict['code'] = soup_test.find(class_=\"hl-main\").text\n # except Exception as e:\n # dict['code'] = soup_test.find('pre').text\n # # print(code)\n # # print(dict)\n\n # with open('s4_question.txt','w',encoding='utf-8') as file:\n # file.write(dict['title']+'\\n')\n # file.write(dict['tm']+'\\n')\n # file.write(dict['cxfx']+'\\n')\n # file.write(dict['code']+'\\n')\n # file.write('*'*50+'\\n')\n # file.write('\\n')","repo_name":"lryself/python_learning","sub_path":"study_class/homeworks/homework7/s4_save/s4_save_question.py","file_name":"s4_save_question.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37480529220","text":"from Bankaccount import *\n\nclass User:\n\n def __init__(self,name,int_rate,balance=0):\n self.accountslist = []\n self.name=name\n self.accountslist.append(BankAcccount(int_rate,balance))\n\n def add_account(self,int_rate,balance):\n self.accountslist.append(BankAcccount(int_rate,balance))\n\n def make_withdrawal(self, amount):\n\n account = int(input(\"User \"+self.name+\" Please Enter the ID of The Account you want to withdraw money from: \"))\n if account not in range(0,len(self.accountslist)):\n print(\"Account of similar ID doesnt exist for this user, Withdrawal Failed!\")\n else:\n self.accountslist[account].withdraw(amount)\n return self\n\n\n def display_user_balance(self):\n account = int(input(\"User \"+self.name+\" Please Enter the ID of The Account You Want to Display Balance for: \"))\n if account not in range(0, len(self.accountslist)):\n print(\"Account of similar ID doesnt exist for this user, Display Balance Failed!\")\n else:\n print(self.accountslist[account].display_account_balance())\n\n def deposite(self,amount):\n\n account = int(input(\"User \"+self.name+\" Please Enter the ID of The Account You want to Deposit to: \"))\n if account not in range(0, len(self.accountslist)):\n print(\"Account of similar ID doesnt exist for this user, Deposit Failed!\")\n else:\n self.accountslist[account].deposite(amount)\n return self\n\n def transfer_money(self, other_user, amount):\n\n account = int(input(\"User \"+self.name+\" Please Enter the ID of The Account You want to send money (FROM): \"))\n if account in range(0, len(self.accountslist)):\n account2 = int(input(\"Please Enter the ID of \" + other_user.name+ \" Account You want to send money (TO): \"))\n if account2 in range(0, len(other_user.accountslist)):\n\n if (amount < self.accountslist[account].balance):\n other_user.accountslist[account2].deposite(amount)\n self.accountslist[account].withdraw(amount)\n return self\n else:\n print(\"Account of similar ID doesnt exist for this user, Transfer Failed!\")\n else:\n print(\"Account of similar ID doesnt exist for this user, Transfer Failed!\")\n\n def print_user_info(self):\n account = int(input(\"User \"+self.name+\" Please Enter the ID of The Account You want to show info for: \"))\n if account not in range(0, len(self.accountslist)):\n print(\"Account of similar ID doesnt exist for this user, Display info Failed!\")\n else:\n print(\"User Name is: \" , self.name)\n self.accountslist[account].display_account_info()\n\n\n\n\n\n\n\n\n","repo_name":"MohammedBayatena/CodingDojoAxsos","sub_path":"PythonStack/_python/OOP/Users_with_BankAccount/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7208426014","text":"#\n#\tData tools for multi-modal trajectory prediction. This module contains functions to:\n#\t\t- load data\n#\t\t- prepare data for processing\n#\t\t- visualise data\n#\n#\tChange Log:\n#\t\t27/01/2018:\t\t\tInitial Version (SD), based on code from Tharindu\n#\t\t28/01/2018 (SD):\tWorks for a single mode now\n#\t\t30/01/2018 (SD):\tAdd command line options, add hooks for multi-modal\n#\t\t\t\t\t\t\tversion\n#\t\t03/02/2018 (SD):\tMulit-model extraction added. Removed \"threshold\"\n#\t\t\t\t\t\t\ttime-shift like part for now\n#\n#\n\nimport loader\nimport numpy as np\nimport scipy.io as sio\nimport math\nimport argparse\nimport pickle\n#from plot_trajectories import plot_trajectory_with_neighbours\n\n#\n# determine if traj_1 is in front of traj_2, if so, return True, else return False\n#\ndef in_front_of(traj_1_x, traj_1_y, traj_2_x, traj_2_y):\n\t# if traj_2 is in front, then AB < AM, and BM < AM\n\t#\tA is the first point of traj_1\n\t#\tB is the last point of traj_1\n\t#\tM is the last point of traj_2\n\t# will be positive for points on one side and negative for points on the other\n\t\n\t# pull out the points we need, do this to make it clearer\n\tAx = traj_1_x[0]\n\tAy = traj_1_y[0]\n\tBx = traj_1_x[-1]\n\tBy = traj_1_y[-1]\n\tX = traj_2_x[-1]\n\tY = traj_2_y[-1]\n\n\tAB = pow(Ax - Bx, 2.0) + pow(Ay - By, 2.0)\n\tAM = pow(Ax - X, 2.0) + pow(Ay - Y, 2.0)\n\tBM = pow(Bx - X, 2.0) + pow(By - Y, 2.0)\n\n\tif ((AB < AM) & (BM < AM)):\n\t\treturn True\n\telse:\n\t\treturn False\n#\n# determine if one trajectory is to the left of another. When determining this we\n#\t- consider only the main direction of motion of traj_1, i.e. just the first and last point\n#\t- consider only the last position of traj_2, i.e. is it's last point to the left\n#\ndef to_left_of(traj_1_x, traj_1_y, traj_2_x, traj_2_y):\n\t# sign of the determinant of the vectors AB and AM.\n\t#\tA is the first point of traj_1\n\t#\tB is the last point of traj_1\n\t#\tM is the last point of traj_2\n\t# will be positive for points on one side and negative for points on the other\n\t\n\t# pull out the points we need, do this to make it clearer\n\tAx = traj_1_x[0]\n\tAy = traj_1_y[0]\n\tBx = traj_1_x[-1]\n\tBy = traj_1_y[-1]\n\tX = traj_2_x[-1]\n\tY = traj_2_y[-1]\n\n\tposition = np.sign((Bx - Ax) * (Y - Ay) - (By - Ay) * (X - Ax))\n\tif (position > 1):\n\t\treturn True;\n\n#\n# determine if a trajectory is to the right of another. \n# Function just calls to_left_of and inverts the result. NOTE: This means that trajectories\n# that lie exactly on the path of traj_1 will be classed as being to the right of. We're going\n# to assume that cases of this happening will be very rare at most.\n#\ndef to_right_of(traj_1_x, traj_1_y, traj_2_x, traj_2_y):\n\tif (is_left_of(traj_1_x, traj_1_y, traj_2_x, traj_2_y) == True):\n\t\treturn False;\n\telse:\n\t\treturn True;\n\t\n#\n# split up a set of neighbouring trajectories according to whether they are to the left, right, or in front of a taget\n#\ndef split_neighbours(traj_of_interest_x, traj_of_interest_y, neighbours_x, neighbours_y):\n\tfront_x = np.zeros(neighbours_x.shape)\n\tfront_y = np.zeros(neighbours_x.shape)\n\tleft_x = np.zeros(neighbours_x.shape)\n\tleft_y = np.zeros(neighbours_x.shape)\n\tright_x = np.zeros(neighbours_x.shape)\n\tright_y = np.zeros(neighbours_x.shape)\n\tfront_idx = 0\n\tleft_idx = 0\n\tright_idx = 0\n\t# iterate through neighbours\n\tfor i in range(neighbours_x.shape[0]):\n\t\t# check front. Need to check front first as all traj will be either left\n\t\t# or right\n\t\tif in_front_of(traj_of_interest_x, traj_of_interest_y, neighbours_x[i, :], neighbours_y[i, :]):\n\t\t\tfront_x[front_idx,:] = neighbours_x[i, :]\n\t\t\tfront_y[front_idx,:] = neighbours_y[i, :]\n\t\t\tfront_idx += 1\n\t\t# check left\n\t\telif to_left_of(traj_of_interest_x, traj_of_interest_y, neighbours_x[i, :], neighbours_y[i, :]):\n\t\t\tleft_x[left_idx,:] = neighbours_x[i, :]\n\t\t\tleft_y[left_idx,:] = neighbours_y[i, :]\n\t\t\tleft_idx += 1\n\t\t# if not front and left, must be right\n\t\telse:\n\t\t\tright_x[right_idx,:] = neighbours_x[i, :]\n\t\t\tright_y[right_idx,:] = neighbours_y[i, :]\n\t\t\tright_idx += 1\n\t\n\treturn front_x, front_y, front_idx, left_x, left_y, left_idx, right_x, right_y, right_idx\n\n#\n# load data\n# This loads the file using the c++/python loader, and then creates trajectories of the target length from the data\n# Will extract sequences of a target length, and down-sample by a given factor as well. The downsample is used to \n# allow the network (defined elsewhere) to predict/model longer trajectories without needing to increase the network\n# size\n#\n# Limitions:\n#\tAt the moment this does not consider a sliding window when breaking up trajectories, this could be used to get more data\n#\t\ndef load_data(file_path, seq_length=50, downsmaple_factor=5, source = 0, offset = 0):\n\t# call the c++/python loader to load the file, this loads the data outputted\n\t# by c++ and puts into python structures. \n\t# Note for that different source data, this would need to change.\n if (source == 0):\n traj_list = loader.load_cplusplus_trajectories(file_path) \n else:\n traj_list = loader.load_python_trajectories(file_path)\n\n data_all=[]\n first_done=False\n\n\t# downsample trajectories\n traj_list_new=[]\n for i in range(len(traj_list)):\n traj=traj_list[i]\n traj_new=[]\n for j in range(len(traj)):\n \tif j % downsmaple_factor == 0:\n \t\ttraj_new.append(traj[j])\n traj_list_new.append(traj_new) \n \n traj_list=traj_list_new\n \n\t# loop through the downsampled trajectory list\n\t# put things into a giant numpy array, and break trajectories down into segments of \n\t# length seq_length (default 50)\n print(traj_list[1])\n print(len(traj_list[1]))\n for i in range(len(traj_list)):\n traj=traj_list[i]\n traj_x=[]\n traj_y=[]\n traj_t=[]\n track_length=len(traj)\n #no_sub_seq=int(math.floor(track_length/seq_length))\n for j in range(len(traj)):\n \n obs=traj[j]\n \n time=obs[0]\n x=obs[1]\n y=obs[2]\n traj_x.append(x)\n traj_y.append(y)\n traj_t.append(time)\n \n \n traj_x=np.asarray(traj_x)\n traj_y=np.asarray(traj_y)\n traj_t=np.asarray(traj_t)\n \n start_idx=offset\n #print((start_idx + seq_length),'...',len(traj),'...',traj)\n while ((start_idx + seq_length) < len(traj)):\n# for j in range(no_sub_seq):\n end_idx = start_idx + seq_length \n \n sub_track_x=traj_x[start_idx:end_idx]\n sub_track_y=traj_y[start_idx:end_idx]\n sub_track_t=traj_t[start_idx:end_idx]\n \n \n data_out=np.stack((sub_track_x, sub_track_y, sub_track_t),axis=1)\n data_out=np.expand_dims(data_out, axis=0) \n\n if first_done== False:\n first_done=True\n data_all=data_out\n else:\n #print('data_all:'+str(data_all.shape))\n data_all=np.concatenate((data_all, data_out),axis=0)\n start_idx = start_idx + seq_length\n \n return data_all\n\n\n#\n# deal with extra neighbours, or pad out the neigbour arrays if their missing a few values\n# Two modes are currently defined for this:\n#\textra_neighbours == 0:\taverage the extra ones\n#\textra_neighbours == 1:\ttake the closest of the rest, and ignore the others\n#\t\ndef merge_extra_neighbours(neighbour_x, neighbour_y, neighbour_w, num_neighbours = 10, extra_neighbours = 0):\n\tupdated_x = np.zeros([num_neighbours, neighbour_x.shape[0]])\n\tupdated_y = np.zeros([num_neighbours, neighbour_x.shape[0]])\n\tupdated_w = np.full([num_neighbours, neighbour_x.shape[0]], 0.00000000000000000000000000000000000000000000001)\n\tfor i in range(min(neighbour_x.shape[1], num_neighbours - 1)):\n\t\tupdated_x[i, :] = neighbour_x[:, i]\n\t\tupdated_y[i, :] = neighbour_y[:, i]\n\t\tupdated_w[i, :] = neighbour_w[:, i]\n\t\n\t# handle extra neighbours\n\t# option 0: average all the remaining neighbours\n\tif (neighbour_x.shape[1] >= num_neighbours):\n\t\tif (extra_neighbours == 0):\n\t\t\tcount = 0\n\t\t\tfor i in range(num_neighbours - 1, neighbour_x.shape[1]):\n\t\t\t\tupdated_x[num_neighbours - 1, :] += neighbour_x[:, i]\n\t\t\t\tupdated_y[num_neighbours - 1, :] += neighbour_y[:, i]\n\t\t\t\tupdated_w[num_neighbours - 1, :] += neighbour_w[:, i]\n\t\t\t\tcount += 1\n\t\t\tupdated_x[num_neighbours - 1, :] /= count\n\t\t\tupdated_y[num_neighbours - 1, :] /= count\n\t\t\tupdated_w[num_neighbours - 1, :] /= count\n\t\t# option 1 (or at the moment not 0): just take the 10th and ignore the rest\n\t\telse:\n\t\t\tupdated_x[num_neighbours - 1, :] = neighbour_x[:, num_neighbours - 1]\n\t\t\tupdated_y[num_neighbours - 1, :] = neighbour_y[:, num_neighbours - 1]\n\t\t\tupdated_w[num_neighbours - 1, :] = neighbour_w[:, num_neighbours - 1]\n\t\n\treturn updated_x, updated_y, updated_w\n\n#\n# calculates distance between the main and all neighbour trajectories\n#\ndef calculate_distance_to_adjecent_trajectories(selected_x, selected_y, adjecent_x, adjecent_y, dummy_value=-50):\n dist=np.zeros(adjecent_x.shape)\n \n #for each trajectory\n for i in range(adjecent_x.shape[1]):\n # for lenght of trajectory\n for j in range(adjecent_x.shape[0]):\n dist[j,i]=np.sqrt((adjecent_x[j,i]-selected_x[j])**2 + (adjecent_y[j,i]-selected_y[j])**2)\n \n dist=np.divide(1.0, dist, out=np.zeros_like(dist), where=dist!= 0)#1/dist\n \n rows,cols=np.where(adjecent_x == dummy_value)\n dist[rows,cols]=0.00000000000000000000000000000000000000000000001\n \n #print('dist:' + str(dist.shape))\n return dist\n \n#\n# find all trajectories that are temporally adjacent to a trajecroty of interest\n# retuns the list of adjacent trajectories as arrays of x and y points\n# \ndef find_all_adjecent_trajectories(x, y, time, time_selected, selected_idx, dummy_value=-50):\n \n # Create a matrix of size (x,y) and fill it with dummy point(-50,-50) values\n adjecent_x=np.full((time.shape[0],time_selected.shape[0]),dummy_value)\n adjecent_y=np.full((time.shape[0],time_selected.shape[0]),dummy_value)\n \n for i in range(time_selected.shape[0]):\n \n # Find row and column idxs where time is equal to time of the selected trajectory\n rows, cols = np.where(time == time_selected[i])\n #print('rows: '+str(rows.shape))\n \n # Replace the dummy points with the values of those rows and cols\n adjecent_x[rows,cols]=x[rows,cols];\n adjecent_y[rows,cols]=y[rows,cols];\n #print('x: '+str(adjecent_x[rows,cols]))\n \n # The above process also accounts for the selected trajectory\n # Replace the Row of the selected trajectory again with dummy values\n adjecent_x[selected_idx,:]=dummy_value\n adjecent_y[selected_idx,:]=dummy_value\n \n # Find unique rows that have values other than dummy points\n rows,cols=np.where(adjecent_x > dummy_value)\n temp=np.unique(rows)\n# print('No of rows with data: '+str(temp.shape))\n# print(str(temp.shape[0]))\n d=np.zeros(adjecent_x.shape[0])\n \n # Find the rows that have most of the values (i.e max col size) other than dummy points\n for i in range(temp.shape[0]):\n idx=temp[i]\n a=np.where(rows == idx)\n c=cols[a]\n\n d[idx]=c.shape[0]\n \n ids= np.argsort(d)\n #print(d[ids[(ids.shape[0]-10):]])\n \n if (temp.shape[0] > 0):\n \tadjecent_x=adjecent_x[ids[(ids.shape[0]-temp.shape[0]):],:]\n \tadjecent_y=adjecent_y[ids[(ids.shape[0]-temp.shape[0]):],:]\n \n\t\t#convert shape (10,#time-steps) to (#time-steps,10)\n \tadjecent_x=np.transpose( adjecent_x, (1, 0) )\n \tadjecent_y=np.transpose( adjecent_y, (1, 0) )\n \n \treturn adjecent_x,adjecent_y\n else:\n \treturn None, None\t\n\t\n#\t\n# Create the dataset. This will:\n#\t- loop through all trajectories. For each trajectory:\n#\t\t- find all neighbours\n#\t\t- setup neighbour weights\n#\t\t- split into left, right, front\n#\t\t- ensure that we ahve the correct number of neighbours in each direction\n#\t\t- store the results as a dictionary in a list\n#\ndef create_dataset_with_all_neighbours_and_t(main_mode, num_neighbours = 10, extra_neighbours = 0):\n\tdata=[];\n\t\n\tx_all = main_mode[:,:,0]\n\ty_all = main_mode[:,:,1]\n\tt_all = main_mode[:,:,2]\n\t\t\t\t\n\tfor i in range(x_all.shape[0]):\n\t\tselected_x = x_all[i,:]\n\t\tselected_y = y_all[i,:]\n\t\tselected_t = t_all[i,:]\n\n\t\t# get adjacent trajectories for the main main\n\t\t[adjecent_x, adjecent_y] = find_all_adjecent_trajectories(x_all, y_all, t_all, selected_t, i)\n\t\t\n\t\t# did we find any? If so, process them\n\t\tif (adjecent_x is not None):\n\t\t\t\t\t\t\n\t\t\t# need to split adjacent trajectories into front, left and right\n\t\t\tfront_x, front_y, n_f, left_x, left_y, n_l, right_x, right_y, n_r = split_neighbours(selected_x, selected_y, adjecent_x, adjecent_y)\n\t\t\t\n\t\t\t# get distances to trajectories in each direction\n\t\t\tweights_front = calculate_distance_to_adjecent_trajectories(selected_x, selected_y, front_x, front_y)\n\t\t\tweights_left = calculate_distance_to_adjecent_trajectories(selected_x, selected_y, left_x, left_y)\n\t\t\tweights_right = calculate_distance_to_adjecent_trajectories(selected_x, selected_y, right_x, right_y)\n\t\t\t#print('inter1')\n\t\t\t#print(np.shape(left_x))\n\n\t\t\t# if we have more than max_traj, deal with this. Can either:\n\t\t\t#\t- merge/average remaining trajectories, taking average traj and average weights\n\t\t\t#\t- take the 'best of the rest' and just discard others\n\t\t\tfront_x, front_y, weights_front = merge_extra_neighbours(front_x, front_y, weights_front, num_neighbours, extra_neighbours)\n\t\t\tleft_x, left_y, weights_left = merge_extra_neighbours(left_x, left_y, weights_left, num_neighbours, extra_neighbours)\n\t\t\tright_x, right_y, weights_right = merge_extra_neighbours(right_x, right_y, weights_right, num_neighbours, extra_neighbours)\n\t\t\t#print('inter2')\n\t\t\t#print(np.shape(left_x))\n\t\t\t\n\t\t\t# convert 1D to 2D\n\t\t\tselected_x=np.expand_dims(selected_x, axis=1)\n\t\t\tselected_y=np.expand_dims(selected_y, axis=1)\n\t\t\tselected_t=np.expand_dims(selected_t, axis=1)\n\t\t\n\t\telse:\n\t\t\n\t\t\t# no adjacent trajectories, need to create dummy variables and store them\n#\t\t\tprint(selected_y.shape)\n#\t\t\tprint(selected_y.shape[0])\n\t\t\tfront_x = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tfront_y = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tweights_front = np.full([num_neighbours, selected_y.shape[0]], 0.00000000000000000000000000000000000000000000001)\n\t\t\tleft_x = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tleft_y = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tweights_left = np.full([num_neighbours, selected_y.shape[0]], 0.00000000000000000000000000000000000000000000001)\n\t\t\tright_x = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tright_y = np.zeros([num_neighbours, selected_y.shape[0]])\n\t\t\tweights_right = np.full([num_neighbours, selected_y.shape[0]], 0.00000000000000000000000000000000000000000000001)\t\t\t\n\t\t\t\n\t\tsample = {'selected_x' : selected_x, 'selected_y' : selected_y, \\\n\t\t\t\t 'front_x' : front_x, 'front_y' : front_y, 'front_w' : weights_front, \\\n\t\t\t\t 'left_x' : left_x, 'left_y' : left_y, 'left_w' : weights_left, \\\n\t\t\t\t 'right_x' : right_x, 'right_y' : right_y, 'right_w' : weights_right, \\\n\t\t\t\t 'time' : selected_t }\n\n\t\tdata.append(sample)\n\n\treturn data\n\n#\n# Main function, use to extract data for later processing by the network\n#\ndef main():\t\n\n\t# setup command line parser\n\tparser = argparse.ArgumentParser(description='Create datasets for trajectory prediction')\n\n\t#\n\t# command line parser takes:\n\t# \tmode: defines whether we are processing a single file (mode == 0) or a list (mode == 1)\n\t#\tprimary and secondary data: can be a file or a list\n\t#\toutput file: where to save the data that's extracted\n\t#\ttrajectory parameters: length, decimate rate, and the number of neighbours to pull out from each mode\n\t#\n\tparser.add_argument('--mode', type=int, dest='mode', default=0, help='operating mode, 0 for process a single file (or pair), 1 for a list')\n\tparser.add_argument('--primary_mode', action='store', dest='primary_mode', help='location of primary mode data. May be either a data file, or a text file with a list of datafiles in it (depending on mode argument)')\n\tparser.add_argument('--output', action='store', dest='output', help='Where to save stuff')\n\tparser.add_argument('--length', type=int, dest='traj_length', default=50, help='length of trajectories to extract')\n\tparser.add_argument('--decimate', type=int, dest='decimate', default=5, help='rate to decimate input data by')\n\tparser.add_argument('--neighbours', type=int, dest='neighbours', default=10, help='maximum number of neighbours to extract per direction (left, right, front)')\n\tparser.add_argument('--datasource', type=int, dest='data_source', default=0, help='source of the data, 0=c++, 1=python')\n\tparser.add_argument('--windowstep', type=int, dest='window_step', default=1, help='sliding window step to use to create more samples')\n\tparser.add_argument('--slidinglimit', type=int, dest='sliding_limit', default=1, help='where to stop the sliding window')\n\n\tresults = parser.parse_args()\n\n\t# storage for data\n\tdata = []\n\n\t# are we processing a list or a single file\n\tif (results.mode == 0):\n\t\t# if it's just a single file, put it in a list anyway, this means that the next\n\t\t# bit where we load all the files is the same for each mode\n\t\tprimary_data = [results.primary_mode]\n\telse:\n\t\twith open(results.primary_mode) as f:\n\t\t\tprimary_data = f.readlines()\n\t\tprimary_data = [x.strip() for x in primary_data]\n\t# print(primary_data)\n\t# loop through all the files, load each, extract trajectories, and append to the\n\t# list of data that we are building\n\tfor i in range(len(primary_data)):\n\t\t\n\t\t# load primary data\n\t\tfor j in range(0, results.sliding_limit, results.window_step):\n\t\t\tp = load_data(primary_data[i], results.traj_length, results.decimate, results.data_source, j)\n\t\t\t# load secondary if we have it, otherwise just set it to None\n\t\t\t\n\t\t\t# get data\n\t\t\td = create_dataset_with_all_neighbours_and_t(p, results.neighbours)\n\t\t\tdata = data + d\n\n\t# save data\n\tprint(np.shape(data))\n\toutput_file = open(results.output, 'wb')\n\tfor d in data:\n\t\tpickle.dump(d, output_file)\n\toutput_file.close()\n\t\nif __name__ == '__main__':\n\tmain()\t\n \n ","repo_name":"ChongbinYe/EGH400","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":18103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28905027044","text":"from .config import config\nfrom .filedb import file_db\n\nfrom itertools import chain\nfrom pathlib import Path\n\n\ndef find_watch_dirs():\n \"\"\"List all directories that contain files that need watching.\"\"\"\n input_file_list = list_input_files()\n markdown_dirs = set(p.parent for p in input_file_list)\n with file_db(readonly=True) as db:\n code_dirs = set(p.parent for p in db.managed)\n return code_dirs.union(markdown_dirs)\n\n\ndef list_input_files():\n \"\"\"List all input files.\"\"\"\n include_file_list = chain.from_iterable(map(Path(\".\").glob, config.watch_list))\n exclude_file_list = list(\n chain.from_iterable(map(Path(\".\").glob, config.ignore_list))\n )\n return [path for path in include_file_list if not path in exclude_file_list]\n\n\ndef list_dependent_files():\n with file_db(readonly=True) as db:\n result = list(db.managed)\n return result\n","repo_name":"entangled/entangled.py","sub_path":"entangled/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"14189852863","text":"import pandas as pd\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\n\nmsize=25\nrrange=int(msize**0.5)\njump=3\nstart=int(jump/2)\n\nX,Y=np.meshgrid(range(0,msize),range(0,msize))\ndat=np.random.rand(msize,msize)*rrange\n\nmsk=np.zeros_like(dat)\nmsk[start::jump,start::jump].fill(1)\nmdat=msk*dat\nmdat[mdat==0]=np.nan\nmmdat = ma.masked_where(np.isnan(mdat),mdat)\n\nfargs={ 'edgecolor': 'w',\n 'facecolor': 'w',\n 'frameon': True,\n }\n\nfig = plt.figure(**fargs)\n\ncmap = plt.get_cmap('RdYlBu')\ncmap.set_bad(color='#cccccc', alpha=1.)\n\nplot = plt.pcolormesh(X,Y,mmdat,cmap=cmap)\n\nplot.axes.set_ylim(0,msize-1)\nplot.axes.set_xlim(0,msize-1)\nplot.axes.set_aspect('equal')\n\nfargs['bbox_inches']='tight'\n\n# Save\nfig.savefig(\"masked100.png\",dpi=100,**fargs)\n\nplt.colorbar()\nfig.savefig(\"masked101.png\",dpi=100,**fargs)\n\n\n","repo_name":"igormorgado/seismic","sub_path":"oldsrc/veryold/teste2.py","file_name":"teste2.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"32348003769","text":"#!/usr/bin/env python3\n\n# -----------------------------\n# convolution to compare images\n# -----------------------------\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport tensorflow as tf\nimport numpy as np\nnp.set_printoptions(threshold=np.nan)\nfrom scipy import signal as sig\nfrom PIL import Image as im\n\ndef main():\n\n print(\"\\nconvolution --- image evaluation\\n\")\n\n # ----------------------- data ------------------------ #\n # ----------------------------------------------------- #\n # original_images: 96x96 image w/ int in [0,255] #\n # reconstructed_images: 96x96 image w/ float in [0,255] #\n # comparison_images: 96x96 image w/ float in [0,1) #\n # ----------------------------------------------------- #\n\n original_images = np.loadtxt(\"data/orig_3pics.txt\")\n reconstructed_images = np.loadtxt(\"data/recon_3pics.txt\")\n comparison_images = np.loadtxt(\"data/ssim_3pics.txt\")\n\n # data is now a 3 X 96 X 96 array (3 square 96px images)\n original_images = original_images.reshape(3,96,96)\n reconstructed_images = reconstructed_images.reshape(3,96,96)\n comparison_images = comparison_images.reshape(3,96,96)\n\n # these are copys of the data but with each entry being its own list\n # i made two copy because i have been doing stuff with the non-dimension version separately\n original_images_dim1 = original_images.reshape(3,96,96,1)\n reconstructed_images_dim1 = reconstructed_images.reshape(3,96,96,1)\n comparison_images_dim1 = comparison_images.reshape(3,96,96,1)\n\n # start of the tf stuff\n sess = tf.Session()\n width = 96\n height = 96\n\n # this placeholder will recieve the image data from outside tf and turn it into a tensor\n x_image = tf.placeholder(tf.float32, shape = [None, width, height, 1])\n\n # these are the variables that will be learned, initial values not too important\n filter_conv = tf.Variable(tf.truncated_normal([5,5,1,1]))\n bias_conv = tf.Variable(tf.constant(0.1))\n\n # the convolution operation, strides is how much it travels between each dot product.\n # ----------------------------------------------------------------------------------------#\n ## NOTE: this is actually dope of tensor flow. when we specify the padding as same, then #\n ## it automagically chooses the right number of zeros to pad in order to give the output #\n ## the same size as the input. so that is take care of for us. you can check this by #\n ## changing the size of the filter. the output of the results.shape function will always #\n ## be 96,96,3,1. #\n # ----------------------------------------------------------------------------------------#\n convolution = tf.nn.conv2d(x_image, filter_conv, strides=[1,1,1,1], padding='SAME') + bias_conv\n\n # running the operation --- we run it on the original and the reconstructed\n init = tf.global_variables_initializer()\n sess.run(init)\n result_original = sess.run(convolution, feed_dict = {x_image: original_images_dim1})\n result_recon = sess.run(convolution, feed_dict = {x_image: reconstructed_images_dim1})\n\n # flattening out the images, because we arent using the square structure anymore\n ## this process is combining the original and reconstructed convolution into one array\n ## of length 18432 (96*96*2). this is to use the two images combined for our mlp training\n ## NOTE: i am sure there is a more efficient way to do this\n result_original = tf.reshape(result_original, [3, 9216])\n result_recon = tf.reshape(result_recon, [3, 9216])\n result_combined1 = tf.concat([result_original[0], result_recon[0]], 0)\n result_combined2 = tf.concat([result_original[1], result_recon[1]], 0)\n result_combined3 = tf.concat([result_original[2], result_recon[2]], 0)\n result_combined1 = tf.reshape(result_combined1, [1, 18432])\n result_combined2 = tf.reshape(result_combined2, [1, 18432])\n result_combined3 = tf.reshape(result_combined3, [1, 18432])\n result_total = tf.concat([result_combined1, result_combined2, result_combined3], 0)\n # print(result_total.shape)\n\n # this is the start of the MLP aspect of the network.\n ## x is the input from our combined result of the convolution\n ## y_ is the output, which is an array holding the resulting values\n x = tf.placeholder(tf.float32, shape=[None, 18432])\n y_ = tf.placeholder(tf.float32, shape=[None, 9612])\n\n # variables to be learned\n weights = tf.Variable(tf.zeros([18432, 9612], tf.float32))\n bias = tf.Variable(tf.zeros([9612], tf.float32))\n sess.run(tf.global_variables_initializer())\n\n # operations --- sigmoid normalizes the result\n # apply_weights_op = tf.matmul(x, weight)\n # add_bias_op = tf.add(apply_weights_op, bias)\n # activation_op = tf.nn.sigmoid(add_bias_op)\n\n y = tf.nn.sigmoid(tf.matmul(x, weights) + bias)\n number_epochs = 1000\n learning_rate = .0001\n\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n \n y_1 = comparison_images\n y_1 = y_1.reshape(3,1,9216)\n\n\n # looking at images --- i just did this because i was curious was the images were.\n # if you want to see just uncomment the image_view.show() line\n # you can see the reconstruction by switching which one is commented out. pretty cool stuff\n image = np.asarray(original_images[1], dtype='uint8')\n # image = np.asarray(reconstructed_images[1], dtype='uint8')\n image_view = im.fromarray(image, 'L')\n # image_view.save(\"images/test.png\")\n # image_view.show()\n\nif __name__ == '__main__':\n main()\n","repo_name":"michaelneuder/image_quality_analysis","sub_path":"bin/nets/old/convolutional_nn.py","file_name":"convolutional_nn.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"38042612883","text":"import pytest\n\nfrom rbac.common import addresser\nfrom rbac.common.logs import get_default_logger\nfrom tests.rbac.common.assertions import TestAssertions\n\nLOGGER = get_default_logger(__name__)\n\n\n@pytest.mark.addressing\n@pytest.mark.library\nclass TestRoleAddresser(TestAssertions):\n \"\"\"Test Role Addresser\"\"\"\n\n def test_address(self):\n \"\"\"Tests address makes an address that identifies as the correct AddressSpace\"\"\"\n role_id = addresser.role.unique_id()\n role_address = addresser.role.address(object_id=role_id)\n self.assertIsAddress(role_address)\n self.assertEqual(\n addresser.get_address_type(role_address),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n\n def test_get_address_type(self):\n \"\"\"Tests that get_address_type returns AddressSpace.USER if it is a role\n address, and None if it is of another address type\"\"\"\n role_address = addresser.role.address(addresser.role.unique_id())\n other_address = addresser.user.address(addresser.user.unique_id())\n self.assertEqual(\n addresser.get_address_type(role_address),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n self.assertEqual(\n addresser.role.get_address_type(role_address),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n self.assertIsNone(addresser.role.get_address_type(other_address))\n\n def test_addresses_are(self):\n \"\"\"Test that addresses_are returns True if all addresses are a role\n addresses, and False if any addresses are if a different address type\"\"\"\n role_address1 = addresser.role.address(addresser.role.unique_id())\n role_address2 = addresser.role.address(addresser.role.unique_id())\n other_address = addresser.user.address(addresser.user.unique_id())\n self.assertTrue(addresser.role.addresses_are([role_address1]))\n self.assertTrue(addresser.role.addresses_are([role_address1, role_address2]))\n self.assertFalse(addresser.role.addresses_are([other_address]))\n self.assertFalse(addresser.role.addresses_are([role_address1, other_address]))\n self.assertFalse(addresser.role.addresses_are([other_address, role_address1]))\n self.assertTrue(addresser.role.addresses_are([]))\n\n def test_address_deterministic(self):\n \"\"\"Tests address makes an address that identifies as the correct AddressSpace\"\"\"\n role_id1 = addresser.role.unique_id()\n role_address1 = addresser.role.address(object_id=role_id1)\n role_address2 = addresser.role.address(object_id=role_id1)\n self.assertIsAddress(role_address1)\n self.assertIsAddress(role_address2)\n self.assertEqual(role_address1, role_address2)\n self.assertEqual(\n addresser.get_address_type(role_address1),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n\n def test_address_random(self):\n \"\"\"Tests address makes a unique address given different inputs\"\"\"\n role_id1 = addresser.role.unique_id()\n role_id2 = addresser.role.unique_id()\n role_address1 = addresser.role.address(object_id=role_id1)\n role_address2 = addresser.role.address(object_id=role_id2)\n self.assertIsAddress(role_address1)\n self.assertIsAddress(role_address2)\n self.assertNotEqual(role_address1, role_address2)\n self.assertEqual(\n addresser.get_address_type(role_address1),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n self.assertEqual(\n addresser.get_address_type(role_address2),\n addresser.AddressSpace.ROLES_ATTRIBUTES,\n )\n\n def test_addresser_parse(self):\n \"\"\"Test addresser.parse returns a parsed address\"\"\"\n role_id = addresser.role.unique_id()\n role_address = addresser.role.address(role_id)\n\n parsed = addresser.parse(role_address)\n\n self.assertEqual(parsed.object_type, addresser.ObjectType.ROLE)\n self.assertEqual(parsed.related_type, addresser.ObjectType.NONE)\n self.assertEqual(\n parsed.relationship_type, addresser.RelationshipType.ATTRIBUTES\n )\n self.assertEqual(parsed.address_type, addresser.AddressSpace.ROLES_ATTRIBUTES)\n self.assertEqual(parsed.object_id, role_id)\n self.assertEqual(parsed.related_id, None)\n","repo_name":"hyperledger-archives/sawtooth-next-directory","sub_path":"tests/rbac/common/addresser/role_tests.py","file_name":"role_tests.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"3"} +{"seq_id":"27357397414","text":"import pygame\n\n\nclass four_stone_info():\n\n def __init__(self):\n # name of game\n self.NAME = \"four footman\"\n # backgrand size\n self.X = 400\n self.Y = 400\n # backgrand large\n self.SIZE = 4\n # block size\n self.X1 = int(self.X / self.SIZE)\n self.Y1 = int(self.Y / self.SIZE)\n # turn for now\n self.TURN_NOW = 0\n # turn size\n self.TURN_SIZE = 2\n\n # picture constants\n self.water_footman_list = [[0, 0], [1, 0], [2, 0], [3, 0]]\n self.fire_footman_list = [[0, 3], [1, 3], [2, 3], [3, 3]]\n\n # army_list\n self.army_list = [self.fire_footman_list, self.water_footman_list]\n\n # attack_list\n self.attack_water_list = [[1, 0, 0, -1], [-1, 1, 0, 0], [0, 0, 1, -1], [-1, 0, 0, 1]]\n self.attack_fire_list = [[0, 1, 1, -1], [-1, 0, 1, 1], [1, 1, 0, -1], [-1, 1, 1, 0]]\n self.attack_list = [self.attack_water_list, self.attack_fire_list]\n\n def attack_charge(self, array):\n \"\"\"\n charge if there has a attack\n :param array: the info of one line or one row\n :return: -1 or 2 or 3\n \"\"\"\n if array[0] == self.TURN_NOW \\\n and array[1] == self.TURN_NOW \\\n and array[2] == (self.TURN_NOW + 1) % self.TURN_SIZE \\\n and array[3] != (self.TURN_NOW + 1) % self.TURN_SIZE:\n return 2\n if array[0] != (self.TURN_NOW + 1) % self.TURN_SIZE \\\n and array[1] == self.TURN_NOW \\\n and array[2] == self.TURN_NOW \\\n and array[3] == (self.TURN_NOW + 1) % self.TURN_SIZE:\n return 3\n return -1\n\n def attack(self, position_move):\n \"\"\"\n\n :param position_move:\n :return:\n \"\"\"\n array = []\n for i in range(self.SIZE):\n array.append(self.charge_which_army([position_move[0], i]))\n number = self.attack_charge(array)\n if number > 0:\n self.army_list[(self.TURN_NOW + 1) % self.TURN_SIZE].remove(\n [position_move[0], number])\n\n array = []\n for i in range(self.SIZE - 1, -1, -1):\n array.append(self.charge_which_army([position_move[0], i]))\n number = self.attack_charge(array)\n if number > 0:\n self.army_list[(self.TURN_NOW + 1) % self.TURN_SIZE].remove(\n [position_move[0], (number + 1) * -1 + self.SIZE])\n\n array = []\n for i in range(self.SIZE):\n array.append(self.charge_which_army([i, position_move[1]]))\n number = self.attack_charge(array)\n if number > 0:\n self.army_list[(self.TURN_NOW + 1) % self.TURN_SIZE].remove(\n [number, position_move[1]])\n\n array = []\n for i in range(self.SIZE - 1, -1, -1):\n array.append(self.charge_which_army([i, position_move[1]]))\n number = self.attack_charge(array)\n if number > 0:\n self.army_list[(self.TURN_NOW + 1) % self.TURN_SIZE].remove(\n [(number + 1) * -1 + self.SIZE, position_move[1]])\n\n def turn_next(self):\n self.TURN_NOW = (self.TURN_NOW + 1) % self.TURN_SIZE\n\n def charge_which_army(self, position):\n \"\"\"\n charge which army's footman in this position\n :param position: (x,y)\n :return: 0 or 1 which is turn\n \"\"\"\n for i in range(self.TURN_SIZE):\n if position in self.army_list[i]:\n return i\n return -1\n\n def check(self, turn, position_init, position_move):\n # charge if the right turn\n if turn != self.charge_which_army(position_init):\n return False\n # charge if the position move is empty\n if -1 != self.charge_which_army(position_move):\n return False\n # charge if the position is right\n if 1 != abs((position_init[0] + position_init[1]) - (position_move[0] + position_move[1])):\n return False\n return True\n\n def move_footman(self, turn, position_init, position_move):\n \"\"\"\n check and change position_init to position_move\n :param position_init: the footman's position\n :param position_move: the footman want to go to position\n :return: True or False\n \"\"\"\n # check if it is right to move\n if not self.check(turn, position_init, position_move):\n return False\n\n # move footman\n self.army_list[self.TURN_NOW].remove(position_init)\n self.army_list[self.TURN_NOW].append(position_move)\n\n # attack\n self.attack(position_move)\n\n self.turn_next()\n\n return True\n\n def is_win(self):\n \"\"\"\n charg if this turn is win\n :return:\n \"\"\"\n if len(self.army_list[(self.TURN_NOW + 1) % self.TURN_SIZE]) == 1:\n return True\n return False\n\n def reset(self):\n \"\"\"\n reset the footman's position\n \"\"\"\n self.water_footman_list = [(0, 0), (1, 0), (2, 0), (3, 0)]\n self.fire_footman_list = [(0, 3), (1, 3), (2, 3), (3, 3)]\n\n # util to find which block mouse click\n def get_which_block(self, postion):\n return int(postion[0] * 4 / self.X), int(postion[1] * 4 / self.Y)\n\n \"\"\"\n ***************************************************************************************************************\n * map split *\n ***************************************************************************************************************\n \"\"\"\n\n def draw_block(self, screen, images_real, x, y):\n \"\"\"\n draw the block by x and y\n :param screen: screen\n :param images_real: black_broad or white_broad\n :param x: 0~3\n :param y: 0~3\n :return:\n \"\"\"\n screen.blit(images_real, (self.X1 * x, self.Y1 * y))\n\n def draw_board(self, screen, images_real, is_black):\n \"\"\"\n :param screen: screen\n :param images_real: black_broad or white_broad\n :param num: 0~3\n :param is_black: the number of which is white or black\n \"\"\"\n for i in range(self.SIZE):\n for j in range(self.SIZE):\n if (j + i) % 2 == is_black:\n self.draw_block(screen, images_real, i, j)\n\n def draw_footman(self, screen, images_real, x, y):\n \"\"\"\n draw the little footman which is fire or water\n :param screen: screen\n :param images_real: fire or water\n :param x: 0~3\n :param y: 0~3\n \"\"\"\n screen.blit(images_real, (self.X1 * x + int(self.X1 / self.SIZE / 2),\n self.Y1 * y + int(self.Y1 / self.SIZE / 2)))\n\n def draw_footman_list(self, screen, images_real, footman_list):\n \"\"\"\n draw footman by list which is fire_list or water_list\n :param screen: screen\n :param images_real: fire or water\n :param footman_list: fire_list or water_list\n \"\"\"\n for footman in footman_list:\n self.draw_footman(screen, images_real, footman[0], footman[1])\n\n def draw_picture(self, fire_list, water_list):\n # create a windows\n screen = pygame.display.set_mode((self.X, self.Y))\n\n # init the images\n black_images_real = self.generate_black_images()\n white_images_real = self.generate_white_images()\n fire_images_real = self.generate_fire_images()\n water_images_real = self.generate_water_images()\n\n # draw backgrand\n self.draw_board(screen, black_images_real, 1)\n self.draw_board(screen, white_images_real, 0)\n\n # draw footman\n self.draw_footman_list(screen, fire_images_real, fire_list)\n self.draw_footman_list(screen, water_images_real, water_list)\n\n pygame.display.update()\n return screen\n\n def generate_black_images(self):\n white_image_filename = 'game/four_stone/picture/black_block.bmp'\n # 加载图片并转换\n white_images = pygame.image.load(white_image_filename)\n return pygame.transform.scale(white_images, (self.X1, self.Y1))\n\n def generate_white_images(self):\n black_image_filename = 'game/four_stone/picture/white_block.bmp'\n # 加载图片并转换\n black_images = pygame.image.load(black_image_filename)\n return pygame.transform.scale(black_images, (self.X1, self.Y1))\n\n def generate_fire_images(self):\n fire_image_filename = 'game/four_stone/picture/fire.png'\n # 加载图片并转换\n fire_images = pygame.image.load(fire_image_filename)\n return pygame.transform.scale(fire_images, (self.X1 - int(self.X1 / self.SIZE),\n self.Y1 - int(\n self.Y1 / self.SIZE)))\n\n def generate_water_images(self):\n water_image_filename = 'game/four_stone/picture/water.png'\n # 加载图片并转换\n water_images = pygame.image.load(water_image_filename)\n return pygame.transform.scale(water_images,\n (self.X1 - int(self.X1 / self.SIZE),\n self.Y1 - int(self.Y1 / self.SIZE)))\n","repo_name":"GOODDAYDAY/practice1","sub_path":"game/four_stone/game_info.py","file_name":"game_info.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6512108509","text":"import tensorflow as tf\nimport math\n\nclass Cell(object):\n\n def __init__(self, descriptor):\n self._descriptor = descriptor\n self._built = False\n\n @property\n def descriptor(self):\n return self._descriptor\n\n @property\n def built(self):\n return self._built\n\nclass SensorCell(Cell):\n\n def __init__(self, descriptor):\n super(SensorCell, self).__init__(descriptor)\n\nclass NetworkCell(Cell):\n\n def __init__(self, descriptor):\n super(NetworkCell, self).__init__(descriptor)\n\n @property\n def variable_collection(self):\n return tf.get_collection(self._descriptor.variable_collections[0])\n\n def _add_variables_to_collections(self):\n raise NotImplementedError\n \n\nclass GlimpseSensorCell(SensorCell):\n \"\"\"Sensor mimicing retina-like structure to capture\n glimpse of an image\"\"\"\n \n def __init__(self, descriptor):\n \"\"\"Initializes the sensor\n \n Args:\n glimpse_descriptor: RetinaGlimpseDescriptor object\n image_descriptor: ImageDescriptor object\n \"\"\"\n super(GlimpseSensorCell, self).__init__(descriptor)\n \n @property\n def shapes(self):\n return self._glimpse_shapes_list\n \n \n def __call__(self, image, location):\n \"\"\"Glimpse sensor\n \n Args:\n images: [batch_size x height x width x channels] tensor\n of input images\n locations: [batch_size x 2] tensor representing location\n of sensor scaled to ([-1, 1], [-1, 1])\n \n Returns:\n Encoded glimpse\n \"\"\"\n if not self._built:\n self._build(image, location)\n self._built = True\n \n return tf.concat([self._create_glimpse(image, size, location) for size in self._glimpse_shapes_list], \n axis=1)\n \n def _build(self, image, location):\n self._glimpse_shapes_list = list()\n \n for i in range(self._descriptor.number_of_scales):\n self._glimpse_shapes_list.append(tf.constant([int(math.pow(2, i) * self._descriptor.scan_height), \n int(math.pow(2, i) * self._descriptor.scan_width)]))\n \n def _create_glimpse(self, image, size, location):\n return tf.contrib.layers.flatten(\n tf.image.resize_images(tf.image.extract_glimpse(image, size, location), \n tf.constant([int(self._descriptor.scan_height), \n int(self._descriptor.scan_width)])))\n\n\nclass GlimpseNetworkCell(NetworkCell):\n \n def __init__(self, descriptor):\n super(GlimpseNetworkCell, self).__init__(descriptor)\n \n \n @property\n def kernel_in_hg(self):\n return self._kernel_in_hg\n \n @property\n def bias_hg(self):\n return self._bias_hg\n \n @property\n def kernel_loc_hl(self):\n return self._kernel_loc_hl\n \n @property\n def bias_hl(self):\n return self._bias_hl\n \n @property\n def kernel_hg_out(self):\n return self._kernel_hg_out\n \n @property\n def kernel_hl_out(self):\n return self._kernel_hl_out\n \n @property\n def bias_out(self):\n return self._bias_out\n\n \n def __call__(self, glimpse, location):\n\n if not self._built:\n self._build(glimpse, location)\n self._built = True\n \n h_g = self._descriptor.activation_hg(\n tf.add(tf.matmul(glimpse, self._kernel_in_hg), \n self._bias_hg))\n \n h_l = self._descriptor.activation_hl(\n tf.add(tf.matmul(location, self._kernel_loc_hl), \n self._bias_hl))\n \n z_g = tf.add(tf.add(tf.matmul(h_g, self._kernel_hg_out),\n tf.matmul(h_l, self._kernel_hl_out)),\n self._bias_out)\n \n g = self._descriptor.output_activation(z_g)\n return g\n \n def _build(self, glimpse, location):\n self._kernel_in_hg = tf.get_variable(\n \"kernel_in_hg\",\n shape=[glimpse.shape[1],\n self._descriptor.hg_vector_length],\n initializer=self._descriptor.kernel_in_hg_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._bias_hg = tf.get_variable(\n \"bias_hg\",\n shape=[1,\n self._descriptor.hg_vector_length],\n initializer=self._descriptor.bias_hg_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._kernel_loc_hl = tf.get_variable(\n \"kernel_loc_hl\",\n shape=[location.shape[1],\n self._descriptor.hl_vector_length],\n initializer=self._descriptor.kernel_loc_hl_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._bias_hl = tf.get_variable(\n \"bias_hl\",\n shape=[1,\n self._descriptor.hl_vector_length],\n initializer=self._descriptor.bias_hl_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._kernel_hg_out = tf.get_variable(\n \"kernel_hg_out\",\n shape=[self._descriptor.hg_vector_length,\n self._descriptor.output_dimensions],\n initializer=self._descriptor.kernel_hg_out_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._kernel_hl_out = tf.get_variable(\n \"kernel_hl_out\",\n shape=[self._descriptor.hl_vector_length,\n self._descriptor.output_dimensions],\n initializer=self._descriptor.kernel_hl_out_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._bias_out = tf.get_variable(\n \"bias_out\",\n shape=[1,\n self._descriptor.output_dimensions],\n initializer=self._descriptor.bias_out_initializer,\n trainable=self._descriptor.backprop_trainable)\n\n self._add_variables_to_collections()\n\n def _add_variables_to_collections(self):\n for collection in self._descriptor.variable_collections:\n tf.add_to_collection(collection, self._kernel_in_hg)\n tf.add_to_collection(collection, self._bias_hg)\n tf.add_to_collection(collection, self._kernel_loc_hl)\n tf.add_to_collection(collection, self._bias_hl)\n tf.add_to_collection(collection, self._kernel_hg_out)\n tf.add_to_collection(collection, self._kernel_hl_out)\n tf.add_to_collection(collection, self._self._bias_out)\n \n\nclass CoreNetworkCell(NetworkCell):\n\n def __init__(self,\n descriptor):\n super(CoreNetworkCell, self).__init__(descriptor)\n\n self._initial_state = \\\n tf.nn.rnn_cell.LSTMStateTuple(\\\n tf.Variable(\\\n tf.zeros(\\\n [self._descriptor.batch_size,\n self._descriptor.output_dimensions])),\n tf.Variable(\\\n tf.zeros(\\\n [self._descriptor.batch_size,\n self._descriptor.output_dimensions])))\n \n\n @property\n def lstm_cell(self):\n return self._lstm_cell\n\n @property\n def initial_state(self):\n return self._initial_state\n\n def __call__(self, inputs, state):\n if not self._built:\n self._build(inputs, state)\n self._built = True\n\n h, state = self._lstm_cell(inputs, state)\n \n return self._descriptor.output_activation(h), state\n\n def _build(self, inputs, state):\n self._lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self._descriptor.output_dimensions)\n self._add_variables_to_collections()\n\n def _add_variables_to_collections(self):\n for collection in self._descriptor.variable_collections:\n tf.add_to_collection(collection, self._initial_state)\n\nclass ActionNetworkCell(NetworkCell):\n\n def __init__(self,\n descriptor):\n super(ActionNetworkCell, self).__init__(descriptor)\n \n\n @property\n def kernel_in_fa(self):\n return self._kernel_in_fa\n\n @property\n def bias_fa(self):\n return self._bias_fa\n \n\n def __call__(self, inputs):\n if not self._built:\n self._build(inputs)\n self._built = True\n\n return tf.nn.softmax(self._descriptor.output_activation(\\\n tf.add(tf.matmul(inputs, self._kernel_in_fa), self._bias_fa)))\n\n def _build(self, inputs):\n self._kernel_in_fa = tf.get_variable(\"kernel_in_fa\",\n shape=[inputs.shape[1],\n self._descriptor.output_dimensions],\n initializer=self._descriptor.kernel_in_fa_initializer,\n trainable=self._descriptor.backprop_trainable)\n self._bias_fa = tf.get_variable(\"bias_fa\",\n shape=[1,\n self._descriptor.output_dimensions],\n initializer=self._descriptor.bias_fa_initializer,\n trainable=self._descriptor.backprop_trainable)\n\n self._add_variables_to_collections()\n\n def _add_variables_to_collections(self):\n for collection in self._descriptor.variable_collections:\n tf.add_to_collection(collection, self._kernel_in_fa)\n tf.add_to_collection(collection, self._bias_fa)\n\n\nclass LocationNetworkCell(NetworkCell):\n\n def __init__(self,\n descriptor):\n super(LocationNetworkCell, self).__init__(descriptor)\n\n \n @property\n def kernel_in_fl(self):\n return self._kernel_in_fl\n\n @property\n def bias_fl(self):\n return self._bias_fl\n\n def __call__(self, inputs):\n if not self._built:\n self._build(inputs)\n self._built = True\n \n return self._descriptor.output_activation(\\\n tf.matmul(tf.concat([tf.ones([inputs.shape[0], 1]), inputs], 1), self._kernel_in_fl))\n\n def _build(self, inputs):\n \n self._kernel_in_fl = tf.get_variable(\"kernel_in_fl\",\n shape=[inputs.shape[1] + tf.Dimension(1),\n self._descriptor.output_dimensions],\n initializer=self._descriptor.kernel_in_fl_initializer,\n trainable=self._descriptor.backprop_trainable)\n\n self._add_variables_to_collections()\n\n def _add_variables_to_collections(self):\n for collection in self._descriptor.variable_collections:\n tf.add_to_collection(collection, self._kernel_in_fl)\n\n\nclass BaselineNetworkCell(NetworkCell):\n def __init__(self,\n descriptor):\n super(BaselineNetworkCell, self).__init__(descriptor)\n\n @property\n def kernel_in_fb(self):\n return self._kernel_in_fb\n\n @property\n def bias_fb(self):\n return self._bias_fb\n\n def __call__(self, inputs):\n \n if not self._built:\n self._build(inputs)\n self._built = True\n\n return self._descriptor.output_activation(\\\n tf.add(tf.matmul(inputs, self._kernel_in_fb), self._bias_fb))\n\n def _build(self, inputs):\n \n self._kernel_in_fb = tf.get_variable(\"kernel_in_fb\",\n shape=[inputs.shape[1],\n self._descriptor.output_dimensions],\n initializer=self._descriptor.kernel_in_fb_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._bias_fb = tf.get_variable(\"bias_fb\",\n shape=[1,\n self._descriptor.output_dimensions],\n initializer=self._descriptor.bias_fb_initializer,\n trainable=self._descriptor.backprop_trainable)\n \n self._add_variables_to_collections()\n\n def _add_variables_to_collections(self):\n for collection in self._descriptor.variable_collections:\n tf.add_to_collection(collection, self._kernel_in_fb)\n tf.add_to_collection(collection, self._bias_fb)\n\nclass ClippedRandomNormalSamplerCell:\n\n def __init__(self,\n descriptor):\n self._descriptor = descriptor\n self._built = False\n\n @property\n def descriptor(self):\n return self._descriptor\n\n @property\n def built(self):\n return self._built\n\n def __call__(self, inputs):\n if not self._built:\n self._build(inputs)\n self._built = True\n\n return tf.clip_by_value(\\\n tf.contrib.distributions.MultivariateNormalDiag(\\\n inputs, self._scales).sample(),\n self._descriptor.min_val,\n self._descriptor.max_val)\n\n def _build(self, inputs):\n self._scales = tf.ones([self._descriptor.batch_size, inputs.shape[1]])\n","repo_name":"rrrane/mnist-classification","sub_path":"networkcells.py","file_name":"networkcells.py","file_ext":"py","file_size_in_byte":13437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32914598217","text":"import argparse\nimport numpy as np\nimport logging\nimport json\nimport os\n# os.environ['OMP_NUM_THREADS'] = str(32)\n\nfrom tqdm import tqdm\n\nimport src.beireval.slurm as slurm\nimport src.beireval.beir_utils as beir_utils\nimport src.utils.training_utils as training_utils\nimport src.beireval.dist_utils as dist_utils\nfrom src.beir.datasets.data_loader import GenericDataLoader\nfrom src.beir.retrieval.evaluation import EvaluateRetrieval\nfrom src.beir.util import download_and_unzip\nfrom beir.retrieval.search.lexical import BM25Search as BM25\n\nfrom src.beir.retrieval.search.dense import DenseRetrievalExactSearch, FlatIPFaissSearch\n\nimport torch.distributed as dist\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger(__name__)\n\n\ndef setup(args):\n slurm.init_distributed_mode(args)\n slurm.init_signal_handler()\n os.makedirs(args.output_dir, exist_ok=True)\n\n logger = training_utils.setup_logger()\n logger.info(f\"Loading model from {args.model_name_or_path}\")\n model, tokenizer = training_utils.load_model(args.model_name_or_path)\n\n if args.use_gpu:\n model = model.cuda()\n model = model.half()\n return model, tokenizer\n\n\ndef mine_msmarco_dense_model(args, tokenizer, model):\n '''\n # os.environ['OMP_NUM_THREADS'] = 1\n https://github.com/facebookresearch/faiss/issues/2502\n no, it doesn't matter...\n '''\n args.dataset = 'msmarco'\n # args.dataset = 'nq'\n # args.dataset = 'trec-covid'\n # args.dataset = 'nfcorpus'\n # args.dataset = 'scifact'\n logger.info(f\"Start indexing with dataset={args.dataset}\")\n split = 'train' if args.dataset == 'msmarco' else 'test'\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(args.dataset)\n data_path = download_and_unzip(url, args.beir_data_path)\n corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)\n if dist.is_initialized():\n logger.info(f'device={dist.get_rank()}, #(corpus)={len(corpus)}, #(queries)={len(queries)}, #(qrels)={len(qrels)}')\n else:\n logger.info(f'#(corpus)={len(corpus)}, #(queries)={len(queries)}')\n metric = 'cos_sim' if model.sim_metric == 'cosine' else 'dot'\n if args.use_faiss:\n dmodel = FlatIPFaissSearch(\n beir_utils.DenseEncoderModel(\n query_encoder=model,\n doc_encoder=model,\n tokenizer=tokenizer,\n maxlength=512,\n add_special_tokens=True,\n norm_query=model.norm_query,\n norm_doc=model.norm_doc,\n ),\n batch_size=args.per_gpu_batch_size,\n query_batch_size=8, # faiss bug? batch size must be small if use_gpu=True. large batch size leads to zero scores\n use_gpu=True, # can speed up 1000x than on cpu\n add_qd_prompt=args.add_qd_prompt,\n corpus_chunk_size=8192\n )\n if (dist.is_initialized() and dist.get_rank() == 0) or not dist.is_initialized():\n dmodel.index(corpus, metric)\n dmodel.save(args.output_dir, args.dataset, split)\n dmodel.load(args.output_dir, args.dataset, split)\n if dist.is_initialized():\n dist.barrier()\n else:\n dmodel = DenseRetrievalExactSearch(\n beir_utils.DenseEncoderModel(\n query_encoder=model,\n doc_encoder=model,\n tokenizer=tokenizer,\n maxlength=512,\n add_special_tokens=True,\n norm_query=model.norm_query,\n norm_doc=model.norm_doc,\n ),\n return_cpu=True,\n batch_size=args.per_gpu_batch_size,\n query_batch_size=4096,\n add_qd_prompt=args.add_qd_prompt,\n corpus_chunk_size=8192\n )\n retriever = EvaluateRetrieval(dmodel, score_function=metric, k_values=[100])\n predicts = retriever.retrieve(corpus, queries)\n # load again to remove prompts\n # corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)\n for docid, ctx in corpus.items():\n ctx['passage_id'] = docid\n if dist_utils.is_main():\n ndcg, _map, recall, precision = retriever.evaluate(qrels, predicts, k_values=[5, 10, 100])\n output_file = f'{args.output_dir}/{args.dataset}.jsonl'\n logger.info(f'Dumping negatives to {output_file}')\n export_beir_to_dpr_format(output_file, args.num_negatives, corpus, queries, qrels, predicts,\n dataset_name=f'{args.dataset}-{split}')\n\n\ndef export_msmarco_no_negative(args):\n args.dataset = 'msmarco'\n split = 'train'\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(args.dataset)\n data_path = download_and_unzip(url, args.beir_data_path)\n corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)\n logger.info(f'Dumping negatives to {args.output_dir}/{args.dataset}.jsonl')\n progress_bar = tqdm(range(len(qrels)), desc=f\"Creating DPR formatted {args.dataset} file\")\n with open(f'{args.output_dir}/{args.dataset}.jsonl', 'w') as fp:\n for cnt, (query_id, pos_doc2score) in enumerate(qrels.items()):\n # query\n query = queries[query_id]\n # positive doc\n pos_doc_id, pos_score = list(pos_doc2score.items())[0]\n pos_ctx = corpus[pos_doc_id]\n pos_ctx['passage_id'] = pos_doc_id\n pos_ctx['score'] = pos_score\n json.dump({\"id\": query_id,\n \"question\": query,\n \"answers\": [],\n \"positive_ctxs\": [pos_ctx],\n \"hard_negative_ctxs\": []}, # empty negatives\n fp)\n fp.write(\"\\n\")\n progress_bar.update(1)\n\n\ndef export_msmarco_random_negatives(args):\n args.dataset = 'msmarco'\n split = 'train'\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(args.dataset)\n data_path = download_and_unzip(url, args.beir_data_path)\n corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)\n for docid, ctx in corpus.items():\n ctx['passage_id'] = docid\n all_docs = list(corpus.values())\n\n logger.info(f'Dumping data to {args.output_dir}/{args.dataset}-random{args.num_negatives}.jsonl')\n progress_bar = tqdm(range(len(qrels)), desc=f\"Creating DPR formatted {args.dataset} file\")\n with open(f'{args.output_dir}/{args.dataset}-random{args.num_negatives}.jsonl', 'w') as fp:\n for cnt, (query_id, pos_doc2score) in enumerate(qrels.items()):\n # query\n query = queries[query_id]\n # positive doc\n pos_docid, pos_score = list(pos_doc2score.items())[0]\n pos_ctx = corpus[pos_docid]\n pos_ctx['passage_id'] = pos_docid\n pos_ctx['score'] = pos_score\n # random negative docs\n neg_idxs = np.random.randint(0, len(all_docs), size=args.num_negatives)\n neg_ctxs = [all_docs[i] for i in neg_idxs if all_docs[i]['passage_id'] != pos_docid]\n json.dump({\n \"dataset\": f'{args.dataset}-{split}',\n \"question_id\": query_id,\n \"question\": query,\n \"answers\": [],\n \"positive_ctxs\": [pos_ctx],\n \"negative_ctxs\": neg_ctxs,\n \"hard_negative_ctxs\": []\n }, fp)\n fp.write(\"\\n\")\n progress_bar.update(1)\n\n\ndef mine_msmarco_bm25(args):\n args.dataset = 'msmarco'\n # args.dataset = 'scifact'\n split = 'train'\n hostname = \"http://localhost:9200\" # localhost\n index_name = f\"bm25-{args.dataset}-train\"\n initialize = False # False if load and use existing index\n\n logger.info(f'Loading data')\n url = \"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip\".format(args.dataset)\n data_path = download_and_unzip(url, args.beir_data_path)\n corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)\n for docid, ctx in corpus.items():\n ctx['passage_id'] = docid\n\n logger.info(f'#doc={len(corpus)}, #query={len(queries)}, #qrels={len(qrels)}')\n logger.info(f'Start retrieving w/ BM25')\n model = BM25(index_name=index_name, hostname=hostname, initialize=initialize)\n retriever = EvaluateRetrieval(model, k_values=[args.num_negatives])\n predicts = retriever.retrieve(corpus, queries)\n ndcg, _map, recall, precision = retriever.evaluate(qrels, predicts, [10, 100])\n output_file = f'{args.output_dir}/{args.dataset}-bm25.jsonl'\n export_beir_to_dpr_format(output_file, args.num_negatives, corpus, queries, qrels, predicts, dataset_name=f'{args.dataset}-{split}')\n\n\ndef mine_msmarco_exact(args, tokenizer, model):\n '''\n very slow, MS-MARCO can take ~24h\n '''\n args.dataset = 'msmarco'\n # args.dataset = 'scifact'\n logger.info(f\"Start indexing with dataset={args.dataset}\")\n split = 'train'\n\n output_dict = beir_utils.evaluate_model(\n query_encoder=model,\n doc_encoder=model,\n tokenizer=tokenizer,\n dataset=args.dataset,\n batch_size=args.per_gpu_batch_size,\n query_batch_size=args.per_gpu_batch_size,\n norm_query=model.norm_query,\n norm_doc=model.norm_doc,\n is_main=dist_utils.is_main(),\n split=split,\n metric=model.sim_metric,\n beir_data_path=args.beir_data_path,\n add_qd_prompt=args.add_qd_prompt,\n corpus_chunk_size=8192,\n return_all=True,\n k_values=[100]\n )\n ndcg, _map, recall, precision, mrr, recall_cap, hole = output_dict['scores']\n corpus = output_dict['corpus']\n queries = output_dict['queries']\n qrels = output_dict['qrels']\n predicts = output_dict['predicts']\n for docid, ctx in corpus.items():\n ctx['passage_id'] = docid\n\n if dist_utils.is_main():\n output_file = f'{args.output_dir}/{args.dataset}.jsonl'\n export_beir_to_dpr_format(output_file, args.num_negatives, corpus, queries, qrels, predicts, dataset_name=f'{args.dataset}-{split}')\n\n\ndef export_beir_to_dpr_format(output_path, num_negatives, corpus, queries, qrels, predicts, dataset_name):\n logger.info(f'Dumping negatives to {output_path}')\n progress_bar = tqdm(range(len(qrels)), desc=f\"Exporting...\")\n all_docs = list(corpus.values())\n with open(output_path, 'w') as fp:\n for cnt, (query_id, pos_doc2score) in enumerate(qrels.items()):\n if query_id not in predicts: continue # skip erroneous cases\n # query\n query = queries[query_id]\n # positive doc\n pos_docid, pos_score = list(pos_doc2score.items())[0]\n pos_ctx = corpus[pos_docid]\n pos_ctx['passage_id'] = pos_docid\n pos_ctx['score'] = pos_score\n # random negative docs\n neg_idxs = np.random.randint(0, len(all_docs), size=args.num_negatives)\n neg_ctxs = [all_docs[i] for i in neg_idxs if all_docs[i]['passage_id'] != pos_docid]\n # hard negative docs\n hard_neg_ctxs = []\n pred_d2scores = sorted(predicts[query_id].items(), key=lambda k: k[1], reverse=True)\n for neg_docid, score in pred_d2scores[:num_negatives]:\n if neg_docid == pos_docid: continue\n neg_ctx = corpus[neg_docid]\n neg_ctx['passage_id'] = neg_docid\n neg_ctx['score'] = score\n hard_neg_ctxs.append(neg_ctx)\n json.dump({\n \"dataset\": dataset_name,\n \"question_id\": query_id,\n \"question\": query,\n \"answers\": [],\n \"positive_ctxs\": [pos_ctx],\n \"negative_ctxs\": neg_ctxs,\n \"hard_negative_ctxs\": hard_neg_ctxs,\n }, fp)\n fp.write(\"\\n\")\n progress_bar.update(1)\n logger.info(f'Done')\n\n\ndef mine_nq(args, tokenizer, model):\n pass\n\n\ndef main(args):\n # model, tokenizer = setup(args)\n # mine_msmarco_dense_model(args, tokenizer, model)\n\n export_msmarco_random_negatives(args)\n # mine_msmarco_bm25(args)\n # mine_nq(args, tokenizer, model)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"--dataset\", type=str, help=\"Evaluation dataset from the BEIR benchmark\")\n parser.add_argument(\"--beir_data_path\", type=str, default=\"BEIR/datasets\", help=\"Directory to save and load beir datasets\")\n\n parser.add_argument(\"--per_gpu_batch_size\", default=128, type=int, help=\"Batch size per GPU/CPU for indexing.\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./my_experiment\", help=\"Output directory\")\n parser.add_argument(\"--model_name_or_path\", type=str, help=\"Model name or path\")\n parser.add_argument(\"--add_qd_prompt\", type=bool, default=False, help=\"Add a prompt prefix to Q/D\")\n parser.add_argument(\"--num_negatives\", type=int, default=100, help=\"how many negative examples to return\")\n # parser.add_argument(\"--text_maxlength\", type=int, default=512, help=\"Maximum text length\")\n # parser.add_argument(\"--metric\", type=str, default=\"dot\", help=\"Metric used to compute similarity between two embeddings\")\n # parser.add_argument(\"--norm_query\", action=\"store_true\", help=\"Normalize query representation\")\n # parser.add_argument(\"--norm_doc\", action=\"store_true\", help=\"Normalize document representation\")\n\n parser.add_argument(\"--use_bf16\", type=bool, default=False, help=\"\")\n parser.add_argument(\"--use_gpu\", type=bool, default=True, help=\"\")\n parser.add_argument(\"--use_faiss\", type=bool, default=False, help=\"\")\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n # parser.add_argument(\"--main_addr\", type=str, default='localhost', help=\"Main IP address.\")\n # parser.add_argument(\"--main_port\", type=str, default=6666, help=\"Main port (for multi-node SLURM jobs)\")\n\n args, _ = parser.parse_known_args()\n main(args)\n\n","repo_name":"salesforce/AugTriever","sub_path":"eval/mine_negatives.py","file_name":"mine_negatives.py","file_ext":"py","file_size_in_byte":14280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"29138922460","text":"import os\nimport glob\nimport argparse\nfrom os.path import join as osp\nimport cv2\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"root\")\nargs = parser.parse_args()\nroot = args.root\n\nif not os.path.exists(root):\n os.system(\"mkdir -p \"+root)\n\nwith open(\"video_list.txt\") as f:\n lines = f.readlines()\n\nfor line in lines:\n items = line.strip().split(\" \")\n video_id = items[0]\n url = \"https://www.youtube.com/watch?v=\"+video_id\n os.system(\"youtube-dl --id \"+url)\n filename = glob.glob(video_id+\"*\")[0]\n os.system(\"mv {} {}\".format(filename, osp(root, filename)))\n if not os.path.exists(osp(root, video_id, \"origin_image\")):\n os.system(\"mkdir -p \" + osp(root, video_id, \"origin_image\"))\n os.system(\"ffmpeg -i {} {} {}/%06d.jpg\".format(osp(root, filename), \" \".join(items[1:]), osp(root, video_id, \"origin_image\")))\n os.system(\"mv {} {}\".format(osp(root, filename), osp(root, video_id)))\n\nfolders = glob.glob(osp(root, \"*\"))\nfor folder in folders:\n images = glob.glob(osp(folder, \"origin_image\", \"*.jpg\"))\n if not os.path.exists(osp(folder, \"image\")):\n os.system(\"mkdir -p \"+osp(folder, \"image\"))\n print(folder)\n for img_path in tqdm(images, total=len(images)):\n in_path = img_path\n out_path = img_path.replace(\"origin_image\", \"image\")\n image = cv2.imread(in_path)\n h, w, _ = image.shape\n image = image[:, (w-h)//2:(w+h)//2]\n image = cv2.resize(image, (512, 512))\n cv2.imwrite(out_path, image)","repo_name":"HuangZhiChao95/FewShotMotionTransfer","sub_path":"data_preprocess/download_video.py","file_name":"download_video.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"22"} +{"seq_id":"3832752752","text":"class Node(object):\n def __init__(self, data=None):\n self.data = data\n self.next = None\n\n\nclass SLL(object):\n \"\"\"Singly linked list (SLL) data structure with insert, delete, search and show functions.\"\"\"\n def __init__(self):\n self.head = None\n\n def delete(self, node):\n \"\"\"Cannot delete the head of the SLL\"\"\"\n trav1 = self.head\n if trav1.next is not None:\n trav2 = trav1.next\n else:\n if trav1.data == node:\n self.head = trav1.next\n return print('Deleted', node)\n else:\n return print(node, 'not in list to be deleted')\n while trav2.next is not None:\n if trav2.data == node:\n trav1.next = trav2.next\n return print('Deleted', node)\n else:\n trav1 = trav1.next\n trav2 = trav2.next\n else:\n if trav2.data == node:\n trav1.next = trav2.next\n return print('Deleted', node)\n else:\n return print(node, 'not in list to be deleted')\n\n def insert(self, node):\n trav = self.head\n while trav.next is not None:\n trav = trav.next\n trav.next = Node(node)\n\n def search(self, node):\n trav = self.head\n p = 0\n while trav.data != node and trav.next is not None:\n p = p + 1\n trav = trav.next\n if trav.data == node:\n print(node, 'is in position', p)\n else:\n print(node, 'was not found')\n\n def show(self):\n trav = self.head\n while trav.next is not None:\n print(trav.data, '- ', end='') # no new lines are printed\n trav = trav.next\n print(trav.data) # prints last node and a new line\n\n def sort(self, direction='descending'):\n \"\"\"Set direction to ascending to get ascending order and to descending to get descending order.\"\"\"\n if direction == 'descending':\n direction = '<'\n elif direction == 'ascending':\n direction = '>'\n swap_needed = True\n while swap_needed:\n trav1 = self.head\n if trav1.next is not None:\n trav2 = trav1.next\n else:\n return print('Because linked list has exactly one node list cannot'\n ' be sorted')\n while trav2.next is not None or str(trav1.data) + direction + str(trav2.data):\n if eval(str(trav1.data) + direction + str(trav2.data)): # either a >, or a < comparison is made\n # if trav1.data > trav2.data:\n temp = trav1.data\n trav1.data = trav2.data\n trav2.data = temp\n swap_needed = True\n break\n else:\n swap_needed = False\n trav1 = trav1.next\n if trav2.next is not None:\n trav2 = trav2.next\n else:\n break\n\n\nllist = SLL()\nllist.head = Node(8)\nllist.insert(3)\nllist.insert(7)\nllist.show()\nllist.search(3)\nllist.delete(7)\nllist.show()\nllist.search(10)\nllist.insert(7)\nllist.insert(2)\nllist.sort()\nllist.show()\nllist.sort('ascending')\nllist.show()\n","repo_name":"dbarthelmeh/Singly-Linked-List","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14156483090","text":"import time\nfrom adafruit_circuitplayground import cp\n\ncp.pixels.brightness = 0.2 # Adjust overall brightness as desired, between 0 and 1\n\n\ndef color_amount(accel_component):\n \"\"\"Convert acceleration component (x, y, or z) to color amount (r, g, or b)\"\"\"\n standard_gravity = 9.81 # Acceleration (m/s²) due to gravity at the earth’s surface\n accel_magnitude = abs(accel_component) # Ignore the direction\n constrained_accel = min(accel_magnitude, standard_gravity) # Constrain values\n normalized_accel = constrained_accel / standard_gravity # Convert to 0–1\n return round(normalized_accel * 255) # Convert to 0–255\n\n\ndef format_acceleration():\n return \", \".join((\"{:>6.2f}\".format(axis_value) for axis_value in acceleration))\n\n\ndef format_rgb():\n return \", \".join((\"{:>3d}\".format(rgb_amount) for rgb_amount in rgb_amounts))\n\n\ndef log_values():\n print(\"({}) ==> ({})\".format(format_acceleration(), format_rgb()))\n\n\nwhile True:\n acceleration = cp.acceleration\n rgb_amounts = [color_amount(axis_value) for axis_value in acceleration]\n cp.pixels.fill(rgb_amounts)\n log_values()\n time.sleep(0.1)\n","repo_name":"adafruit/Adafruit_CircuitPython_CircuitPlayground","sub_path":"examples/circuitplayground_advanced_examples/circuitplayground_acceleration_mapping_neopixels.py","file_name":"circuitplayground_acceleration_mapping_neopixels.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"22"} +{"seq_id":"27896925162","text":"import telebot\nimport yt_dlp\nfrom telebot import types\n\nTOKEN = '5718397874:AAF09k95kIaD0W5rRSgmNa1gtwKs56WzIAU'\nbot = telebot.TeleBot(TOKEN)\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, \"Hi, please send url to stream it\")\n@bot.message_handler(commands=['sites'])\ndef sites_command_handler(message):\n bot.reply_to(message, f\"`Here is` [supported sites](https://ytdl-org.github.io/youtube-dl/supportedsites.html)\", disable_web_page_preview=True)\n@bot.message_handler(func=lambda message: True)\ndef process_video(message):\n try:\n video_url = message.text\n ydl_opts = {'format': 'best'}\n\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n info_dict = ydl.extract_info(video_url, download=False)\n stream_url = info_dict['url']\n\n # إنشاء رابط قابل للنقر باستخدام HTML\n markup = types.InlineKeyboardMarkup()\n btn_watch = types.InlineKeyboardButton(text='Watch', url=stream_url)\n markup.add(btn_watch)\n\n bot.reply_to(message, \"Here is streaming link : \", reply_markup=markup)\n\n except Exception as e:\n bot.reply_to(message, f\"Error : {str(e)}\")\n\nbot.infinity_polling()\n","repo_name":"iraqx/pyrobom","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13596392811","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\"\"\"\n\nclass Solution:\n def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n # O(N) complexity, O(N) space\n # We'll be doing this with a cache (dict) and a previous pointer to hold the previous new node, in one pass\n # We'll be using the original nodes as keys, since their values are not guarenteed to be unique\n\n # Hash the old node with the corresponding new node\n # During every iteration, do the following:\n # 1) Check if the current original node has a corresponding new node\n # 2) If step 1 is false, create a new node (without copying the next and random pointers), and store in cache\n # 3) Assign the previous new node's next properter to this new node\n # 4) Access the random pointer of the current original node, check if that node has a corresponding new node\n # 5) If step 4 is false, create a new node (for the node at the random pointer), and store in cache\n\n cache = dict()\n previousNewNode = None\n ptr = head # don't change head, we need it if we want to return the head of the new list\n\n while ptr:\n # declaring these variables here so they'll be accessible outside the if statements\n newNode = randomNewNode = None \n if ptr in cache:\n newNode = cache[ptr]\n else:\n newNode = Node(ptr.val)\n cache[ptr] = newNode # associate the original node with the new node\n\n # update the previousNewNode's next value as long as it's not nullish\n if previousNewNode:\n previousNewNode.next = newNode\n previousNewNode = newNode\n\n # Fetch the random new node or create it as long it's not nullish\n if ptr.random in cache:\n randomNewNode = cache[ptr.random]\n elif ptr.random: # null check\n randomNewNode = Node(ptr.random.val)\n cache[ptr.random] = randomNewNode\n\n newNode.random = randomNewNode # update the current newNode's random property \n ptr = ptr.next\n\n return cache[head] if head else None","repo_name":"captnw/leetcodeResponses","sub_path":"responses/q138_CopyListRandomPointer/copyRandomList.py","file_name":"copyRandomList.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14897279668","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\nstyle.use('fivethirtyeight')\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1)\n\n# graph_data_ = open('example.txt', 'r').read()\n# lines_ = graph_data_.split('\\n')\n\ndef animate(i):\n graph_data = open('trajectory_new.csv', 'r').read()\n lines = graph_data.split('\\n')\n # xs = []\n ys = []\n for line in lines:\n # for k in range(i):\n if len(line) > 1:\n # x, y = lines[k].split(',')\n linedata = line.split(',')\n # xs.append(float(x))\n ys.append(float(linedata[2]))\n ax1.clear()\n ax1.plot(ys)\n plt.xlim(0, len(lines))\n plt.ylim(0.0, 1.0)\n\nframes = 5000\nani = animation.FuncAnimation(fig, animate, interval=1000, frames = 5000)\nplt.show()","repo_name":"wpiHWzhao/Baysian-Inference-Shared-Autonomous","sub_path":"plot_result.py","file_name":"plot_result.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"72102257336","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\ndef convert_int(s):\n if isinstance(s, int):\n return s\n if not s:\n return 0\n return int(s.strip().replace(',', ''))\n\n\nclass UniversityItem(scrapy.Item):\n\n name = scrapy.Field()\n rank = scrapy.Field(serializer=convert_int)\n country = scrapy.Field()\n state = scrapy.Field()\n city = scrapy.Field()\n undergraduate_num = scrapy.Field()\n postgraduate_num = scrapy.Field()\n website = scrapy.Field()\n\n\nif __name__ == '__main__':\n u = UniversityItem(name='哈佛大学', rank=1)\n u['country'] = '美国'\n u['state'] = '马萨诸塞州'\n print(u)\n print(u['name'])\n\n # 将会打印出['country', 'state', 'name'],不包含未设置值的字段\n print(u.keys())\n # 打印出所有定义过的字段名称\n print(u.fields.keys())\n # 打印出所有的fields及其序列化函数\n print(u.fields)\n # 判断某个item对象是否包含指定字段\n print('undergraduate_num' in u.fields)\n # 判断某个字段是否设置了值\n print('name' in u)\n print('undergraduate_num' in u)\n\n # 复制另外一个Item对象的值\n u2 = UniversityItem(u)\n u2['undergraduate_num'] = 2345\n print(u2)\n print(u)\n\n # 将Item对象转换为字典对象\n u_dict = dict(u)\n print(type(u_dict))\n # 从一个字典对象中创建item对象\n u3 = UniversityItem(u_dict)\n print(u3)\n\n # 如果设置一个未定义的字段,则会抛出KeyError异常\n u4 = UniversityItem({'unknow': 123})\n","repo_name":"guyecode/qianmu","sub_path":"qianmu/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"22"} +{"seq_id":"2297104474","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nimport redis\nfrom utils.geetest import GeetestLib\nfrom django.http import HttpResponse\nimport json\n\n# Create your views here.\n\npc_geetest_id = \"64936e8e1ad53dad8bbee6f96224e7d0\"\npc_geetest_key = \"8322ed330d370a704a77d8205c94d20f\"\nCONN = redis.Redis(host='127.0.0.1') # 前提自己安装上redis并配置好可以连接\n\nclass AuthView(APIView):\n def get(self, request):\n return render(request, \"index.html\")\n\n\nclass GtView(APIView):\n def get(self, request):\n user_id = 'test'\n gt = GeetestLib(pc_geetest_id, pc_geetest_key)\n status = gt.pre_process(user_id)\n # request.session[gt.GT_STATUS_SESSION_KEY] = status\n # request.session[\"user_id\"] = user_id\n CONN.set(gt.GT_STATUS_SESSION_KEY, status)\n CONN.set(\"user_id\", user_id)\n response_str = gt.get_response_str()\n return HttpResponse(response_str)\n\n def post(self, request):\n gt = GeetestLib(pc_geetest_id, pc_geetest_key)\n challenge = request.data.get(gt.FN_CHALLENGE, '')\n validate = request.data.get(gt.FN_VALIDATE, '')\n seccode = request.data.get(gt.FN_SECCODE, '')\n # status = request.session[gt.GT_STATUS_SESSION_KEY]\n # user_id = request.session[\"user_id\"]\n status = CONN.get(gt.GT_STATUS_SESSION_KEY)\n user_id = CONN.get(\"user_id\")\n if status:\n result = gt.success_validate(challenge, validate, seccode, user_id)\n else:\n result = gt.failback_validate(challenge, validate, seccode)\n result = {\"status\": \"success\"} if result else {\"status\": \"fail\"}\n return HttpResponse(json.dumps(result))\n","repo_name":"Eeyhan/My-way-of-programming","sub_path":"极验验证码验证/LoginAuth/generic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"23972995355","text":"import datetime\n\nfrom rest_framework import serializers\nfrom reviews.models import Categories, Comments, Genre, Review, Title, User\n\n\nclass CommentsSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n slug_field='username', read_only=True,\n default=serializers.CurrentUserDefault()\n )\n\n class Meta:\n model = Comments\n fields = ('id', 'text', 'author', 'reviews', 'pub_date')\n read_only_fields = ('reviews', 'author')\n\n\nclass CategoriesSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Categories\n fields = ('name', 'slug',)\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Genre\n fields = ('name', 'slug',)\n\n\nclass TitlesSerializer(serializers.ModelSerializer):\n category = serializers.SlugRelatedField(\n slug_field='slug', queryset=Categories.objects.all()\n )\n genre = serializers.SlugRelatedField(\n many=True, slug_field='slug', queryset=Genre.objects.all()\n )\n\n class Meta:\n model = Title\n fields = (\n 'id', 'name', 'year', 'genre', 'category', 'description'\n )\n\n def validate_year(self, value):\n \"\"\"Валидация года выпуска произведения.\"\"\"\n current_year = datetime.date.today().year\n if not 0 <= value <= current_year:\n raise serializers.ValidationError(\n \"Проверьте год создания произведения\"\n \"(не может быть больше текущего).\"\n )\n return value\n\n\nclass TitlesReadSerializer(serializers.ModelSerializer):\n category = CategoriesSerializer(read_only=True)\n genre = GenreSerializer(many=True, read_only=True)\n rating = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Title\n fields = (\n 'id', 'name', 'year', 'genre', 'category', 'description', 'rating'\n )\n\n\nclass ReviewsSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n slug_field='username',\n read_only=True,\n )\n\n class Meta:\n model = Review\n exclude = ('title',)\n\n def validate(self, attrs):\n is_exist = Review.objects.filter(\n author=self.context['request'].user,\n title=self.context['view'].kwargs.get('title_id')).exists()\n if is_exist and self.context['request'].method == 'POST':\n raise serializers.ValidationError(\n 'Ваш отзыв на это название уже существует')\n return attrs\n\n def validate_score(self, value):\n if not 1 <= value <= 10:\n raise serializers.ValidationError(\n 'Оценкой может быть целое число в диапазоне от 1 до 10.'\n )\n return value\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = (\n 'username', 'email', 'first_name', 'last_name', 'bio', 'role',\n )\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = (\n 'username', 'email', 'first_name', 'last_name', 'bio', 'role',\n )\n\n def validate_username(self, value):\n if value == 'me':\n raise serializers.ValidationError(\n 'Имя пользователя \"me\" не разрешено.'\n )\n return value\n\n\nclass SignupSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('username', 'email',)\n\n def validate_username(self, value):\n if value == 'me':\n raise serializers.ValidationError(\n 'Имя пользователя \"me\" не разрешено.'\n )\n return value\n\n def create(self, validated_data):\n user = User.objects.create(\n username=self.validated_data['username'],\n email=self.validated_data['email'],\n )\n return user\n\n\nclass TokenSerializer(serializers.Serializer):\n username = serializers.CharField(required=True)\n confirmation_code = serializers.CharField(required=True)\n\n class Meta:\n model = User\n fields = ('username', 'confirmation_code')\n","repo_name":"idmitrievpython/yamdb_final","sub_path":"api_yamdb/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12973760025","text":"import socket\nfrom threading import Thread\nfrom datetime import datetime\nfrom colorama import Fore, init, Back\nimport re\n\nHOST = '127.0.0.1'\nPORT = 4443\n\ninit()\n\nserver_color = Fore.GREEN\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen()\n\nprint('Aguardando conexão de um cliente')\n\ndef listen_clients(c):\n while True:\n try:\n message = c.recv(1024).decode()\n except Exception as e:\n print(f'[!] Error: {e}')\n else:\n regex = re.search('(?:^|\\W)sair(?:$|\\W)', message)\n if regex:\n print('Fechando conexão')\n s.close()\n break\n else:\n print(message)\n\n\t\t\ndef enviar_message(conn):\n while True:\n msg = input()\n date_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n msg = f\"{server_color}[{date_now}] Server <> {msg}{Fore.RESET}\"\n print(msg)\n conn.send(msg.encode())\n\t\nwhile True:\n client_socket, ender = s.accept()\n \n print('Conectado em', ender)\n\n t1 = Thread(target=listen_clients, args=(client_socket,))\n t2 = Thread(target= enviar_message, args=(client_socket,))\n\n t1.daemon = True\n t2.daemon = True\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n","repo_name":"Layravbf/UFLA","sub_path":"Sistemas Distribuidos/TP1 - sockets_layra_giovanna/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35032042648","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 17 08:52:07 2019\n\n@author: Student\n\"\"\"\n\ni = 0\nbalance = 484\nannualInterestRate = 0.2\nmonthlyPaymentRate = 0.04\n#newB = 0\nwhile i < 12:\n minPay = balance * monthlyPaymentRate\n unPay = balance - minPay\n Interest = unPay * annualInterestRate/12\n balance = unPay + Interest\n #print(\"mouth \",i+1,'remaining new balance= ',round(balance,2))\n i += 1\n \n ","repo_name":"rose1027/Mit6001.X","sub_path":"projects/project2/creditcardinterest-pset2.py","file_name":"creditcardinterest-pset2.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21652840763","text":"#!/usr/bin/env python2.7\n\n'''\nVersion 0.13\nSpiders sections of J1 or Exchanges looking for pdfs and reports on\nthe PDF's location throughout the section.\n\nBy design, this script will not spider links offsite or the entirety\nof those sites. Future versions may support spidering whole sites.\n'''\n\nimport os\nimport re\nimport csv\nimport sys\nimport logging\nimport urlparse\nimport lxml\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import create_engine, Column, Integer, String, Boolean, \\\n ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship, backref\n\n\n# Setting up logging\nlogging.basicConfig(filename='spider.log', filemode='w',\n format='%(levelname)s: %(message)s',\n level=logging.INFO)\n\n\n# Setting up the database and database classes\nengine = create_engine('sqlite:///database.db')\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\n\nclass SpiderUrl(Base):\n '''List of urls for the spider'''\n __tablename__ = 'urls'\n id = Column(Integer, primary_key=True)\n url = Column(String, unique=True)\n visited = Column(Boolean, default=False)\n\n def __init__(self, url, visited=False):\n self.url = url\n self.visited = visited\n\n\nclass Pdf(Base):\n '''The pdf files themselves'''\n __tablename__ = 'pdfs'\n id = Column(Integer, primary_key=True)\n url = Column(String, unique=True)\n\n def __init__(self, url):\n self.url = url\n\n\nclass PageUrl(Base):\n '''Pages on which the pdf is linked'''\n __tablename__ = 'page_urls'\n id = Column(Integer, primary_key=True)\n url = Column(String)\n pdf_id = Column(Integer, ForeignKey('pdfs.id'))\n pdf_url = relationship('Pdf', backref=backref('page_urls', order_by=id))\n\n def __init__(self, url):\n self.url = url\n\n\nBase.metadata.create_all(engine)\n\n\nclass Url(object):\n '''Class that makes it simple to get a urls base and path quickly'''\n def __init__(self, url):\n self.url = url\n self.base = 'http://{0}'.format(urlparse.urlparse(url).netloc)\n self.path = urlparse.urlparse(url).path\n\n\n# Setting globals\ntry:\n start = Url(sys.argv[1])\nexcept IndexError:\n print('You must enter a starting point, like http://www.example.com/start/index.html')\n start = Url(raw_input('Enter a starting point: '))\nif 'http://' not in start.url:\n start = Url(raw_input('The starting point must be a valid URL. Please enter a starting point: '))\n\ntry:\n html_flag = sys.argv[2].lower()\nexcept IndexError:\n print(\"Do your site's page links end in .html\")\n html_flag = raw_input(\"Enter Yes or No: \").lower()\nif html_flag not in ['yes', 'no']:\n html_flag = raw_input(\"Do your site's page URLs end in .html? You must enter Yes or No: \")\n\n\ndef get_pdfs(soup, address):\n '''\n Grabs the pdfs on a page and saves them to the db if they're not already\n there. If it is already there, it records the page on which it's links.\n '''\n diff_pdfs = set([urlparse.urljoin(start.base, link.get('href')) for\n link in soup.find_all('a', href=re.compile('\\.pdf'))])\n for pdf in diff_pdfs:\n if not session.query(Pdf).filter(\n Pdf.url==pdf).first():\n pdf = Pdf(pdf)\n pdf.page_urls.append(PageUrl(address))\n print('Adding PDF: {0}'.format(pdf.url))\n logging.info('Adding PDF: %s', pdf.url)\n session.add(pdf)\n else:\n pdf = session.query(Pdf).filter(\n Pdf.url==pdf).first()\n if address in [i.url for i in pdf.page_urls]:\n pass\n else:\n pdf.page_urls.append(PageUrl(address))\n session.add(pdf)\n session.commit()\n\n\ndef visited(address):\n '''Marks the pages as visited after being spidered'''\n not_visited = session.query(SpiderUrl).filter(\n SpiderUrl.url==address).first\n if not_visited() is not None:\n url = not_visited()\n url.visited = True\n\n\ndef spider(soup, address):\n '''\n Grabs all the urls on a page then checks if they're in the section it's\n supposed to spider. If so, it looks at the db to see if it's already\n there. If it's not, it saves it to the db to be visited later. Once it's\n finished with the page, it marks it as visited.\n '''\n webpage_extensions = ['.html', '']\n get_pdfs(soup, address)\n # Looks for all links that don't start with # and prepends\n # the scheme and netloc to them.\n diff_links = set([urlparse.urljoin(start.base, link.get('href')) for\n link in soup.find_all('a', href=re.compile(\n r'^(?!#)'))])\n for link in diff_links:\n link = link.strip()\n if 'cms' in link or 'staging' in link:\n logging.info('Found link to cms or staging: %s', link)\n pass\n elif start.base not in link:\n pass\n elif os.path.splitext(link)[1] not in webpage_extensions:\n pass\n elif urlparse.urlparse(start.url).path.split('/')[1] in link:\n # Prevent the spider from entering redirect hell when the\n # the link doesn't end with / because it will never mark\n # /visited as /visited/ in the db\n if html_flag == 'no':\n if link[-1:] != '/':\n link = link+'/'\n if not session.query(SpiderUrl).filter(\n SpiderUrl.url==link).first():\n url = SpiderUrl(link)\n logging.debug('Adding Page: %s', url.url)\n session.add(url)\n visited(address)\n session.commit()\n\n\ndef main():\n '''The function that makes it all happen.'''\n r = requests.get(start.url)\n while True:\n logging.debug('Checking %s', r.url)\n soup = BeautifulSoup(r.text, 'lxml')\n spider(soup, r.url)\n not_visited = session.query(SpiderUrl).filter(\n SpiderUrl.visited==False).first\n if not not_visited():\n break\n r = requests.get(not_visited().url)\n if r.status_code == 404:\n logging.info('Found broken link: %s', r.url)\n pass\n with open('output.csv', 'wb') as f:\n writer = csv.writer(f)\n records = session.query(Pdf).join(PageUrl).all()\n for record in records:\n writer.writerow([record.url])\n for entry in record.page_urls:\n writer.writerow(['', entry.url])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NathanKleekamp/pdf-scraper","sub_path":"spider/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26502824589","text":"from P049 import plot_decision_regions\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\n\n\ndf_wine = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\", header=None)\nX, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\n\nX_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.3, random_state=0)\n\nsc = StandardScaler()\nX_train_std = sc.fit_transform(X_train)\nX_test_std = sc.fit_transform(X_test)\n#----------------------------------------------\n\nlda = LDA(n_components=2)\nX_train_lda = lda.fit_transform(X_train_std, y_train)\n\nlr = LogisticRegression()\nlr.fit(X_train_lda, y_train)\n#plot_decision_regions(X_train_lda, y_train, classifier=lr)\n#plt.xlabel('LD1')\n#plt.ylabel('LD2')\n#plt.legend(loc='lower left')\n#plt.show()\n\nX_test_lda = lda.fit_transform(X_test_std, y_test)\nlr.fit(X_test_lda, y_test)\nplot_decision_regions(X_test_lda, y_test, classifier=lr)\nplt.xlabel('LD1')\nplt.ylabel('LD2')\nplt.legend(loc='lower left')\nplt.show()\n\n\n\n\n","repo_name":"nk7260ynpa/Python_Machine_Learning_black_book","sub_path":"P139.py","file_name":"P139.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40858429144","text":"# accounts.forms.py\nfrom django import forms\n\nfrom .models import Teacher\nfrom datetime import date\n# from .models import User\nfrom course.models import Subject\n\n# datetimepicker\nfrom bootstrap_datepicker_plus import DateTimePickerInput, DatePickerInput\n\n\nclass TeacherUpdateForm(forms.ModelForm):\n # since = forms.DateField(widget=forms.SelectDateWidget(empty_label=\"Nothing\"))\n _today = date.today()\n _years = [x for x in range(_today.year, 1950, -1)]\n # since = forms.DateField(\n # widget=forms.SelectDateWidget(\n # years=_years,\n # # empty_label=(\"Choose Year\", \"Choose Month\", \"Choose Day\"),\n # ),\n # required=False\n # )\n\n since = forms.DateField(widget=DatePickerInput())\n\n class Meta:\n model = Teacher\n fields = (\n 'since',\n 'courses',\n 'seeking_job',\n 'cv_file',\n 'current_workplaces',\n 'past_workplaces',\n # 'active',\n )\n\n def __init__(self, *args, **kwargs):\n super(TeacherUpdateForm, self).__init__(*args, **kwargs)\n self.fields['courses'].widget = forms.widgets.CheckboxSelectMultiple()\n self.fields[\"courses\"].queryset = Subject.objects.all()\n\n def save(self, commit=True):\n teacher = super(TeacherUpdateForm, self).save(commit=False)\n\n if commit:\n teacher.save()\n return teacher\n","repo_name":"cseai/OpenEduQA","sub_path":"src/teacher/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13282028646","text":"# cleans errors when scraping data and adds IDs to all of the movies \r\n# for the database\r\n\r\nSOURCE_FILE = \"movies.txt\"\r\nDEST_FILE = \"movies.csv\"\r\n\r\ndata = \"\"\r\nwith open(SOURCE_FILE, \"r\") as f:\r\n data = str(f.read())\r\n f.close()\r\n\r\nentries = data.split(\"\\n\")\r\n\r\nnewEntries = []\r\n\r\nfor i in range(len(entries)):\r\n newEntries.append(str(entries[i]) + \",\" + str(i))\r\n\r\nwith open(DEST_FILE, \"w\") as f:\r\n for entry in newEntries:\r\n if len(entry) > 100: # a proper entry is always longer than 100 characters\r\n f.write(entry + \"\\n\")\r\n f.close()","repo_name":"JoshBecker2/jmovie","sub_path":"cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70562800696","text":"import socket\nimport pickle\nimport random\nimport os\nimport time\n\n# Cria o socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \ns.connect(('localhost', 12397))\n\ndef abrirConexao():\n try:\n # Tenta se conectar ao servidor\n s.connect(('localhost', 12397))\n msg = \"Ola servidor!\\n\"\n # Envia mensagem codificada em bytes ao servidor\n s.send(msg.encode('ascii')) \n except Exception as erro:\n print(str(erro))\n\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef fazerJogada():\n input1 = input('Digite as coordenadas da primeira peca(x,y): ')\n s.send(input1.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid)\n\n while not isValid:\n print('Coordenada invalida!')\n input1 = input('Digite as coordenadas da primeira peca(x,y): ')\n s.send(input1.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid) \n\n coordenadasTxt = input1.split(',')\n x = int(coordenadasTxt[0]) + 2\n y = int(coordenadasTxt[1]) + 2 \n\n input2 = input('Digite as coordenadas da segunda peca(x,y): ')\n s.send(input2.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid)\n\n while not isValid:\n print('Coordenada invalida!')\n input2 = input('Digite as coordenadas da segunda peca(x,y): ')\n s.send(input2.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid) \n\n coordenadasTxt2 = input2.split(',')\n x2 = int(coordenadasTxt2[0]) + 2\n y2 = int(coordenadasTxt2[1]) + 2 \n \n jogadas = [[x, y], [x2, y2]]\n\n s.send(pickle.dumps(jogadas))\n cls()\n txt = s.recv(1024)\n print(txt.decode('ascii'))\n \n\ndef jogar():\n print('Bem vindo ao jogo da memoria!!')\n print('Em qual dificuldade deseja jogar?')\n print('a) Facil (8 duplas)')\n print('b) Medio (12 duplas)')\n print('c) Dificil (20 duplas)')\n \n dificuldade = input('Entre com opcao: ')\n while dificuldade != 'a' and dificuldade != 'b' and dificuldade != 'c':\n print('Opcao invalida! Entre apenas com a letra a, b ou c.')\n dificuldade = input('Entre com opcao: ')\n\n s.send(dificuldade.encode('ascii'))\n \n cls()\n\n tabuleiro = s.recv(1024).decode('ascii')\n while tabuleiro != 'O jogo acabou':\n print(tabuleiro)\n fazerJogada()\n time.sleep(5)\n cls()\n tabuleiro = s.recv(1024).decode('ascii')\n \n cls()\n\n print('Obrigado por jogar nosso jogo da memoria!!')\n\njogar()","repo_name":"gcarvs/jogo-da-memoria","sub_path":"quadro.py","file_name":"quadro.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13940150603","text":"def check_pairs(file_path):\n contained_total = 0\n overlap_total = 0\n with open(file_path) as f:\n for line in f:\n left, right = line.rstrip('\\n').split(',')\n left = [int(char) for char in left.split('-') if not char == '-']\n right = [int(char) for char in right.split('-') if not char == '-']\n if is_fully_contained(left, right):\n overlap_total += 1\n contained_total += 1\n elif does_overlap(left, right):\n overlap_total += 1\n print(contained_total)\n print(overlap_total)\n\ndef is_fully_contained(range1, range2):\n return ((range1[0] >= range2[0] and range1[1] <= range2[1]) or (range2[0] >= range1[0] and range2[1] <= range1[1]))\n\ndef does_overlap(range1, range2):\n return (\n range1[0] >= range2[0] and range1[0] <= range2[1] or\n range1[1] >= range2[0] and range1[1] <= range2[1] or\n range2[0] >= range1[0] and range2[0] <= range1[1] or\n range2[1] >= range1[0] and range2[1] <= range1[1]\n )\n\n\ndef main():\n check_pairs('test_input.txt')\n check_pairs('input.txt')\n\nif __name__ == '__main__':\n main()\n","repo_name":"mibriggs/AdventOfCode2022","sub_path":"Day4/fully_contained_pair.py","file_name":"fully_contained_pair.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"42888039615","text":"import importlib\nimport json\nimport pdb\nimport re\nimport time\nimport threading\n\nimport lib\n\nfrom mitmproxy import http\nfrom urllib.parse import urlparse\n\nfrom lib.agent_api import AgentApi\nfrom lib.hashed_request_decorator import HashedRequestDecorator\nfrom lib.joined_request import JoinedRequest\nfrom lib.logger import Logger\nfrom lib.mitmproxy_request_adapter import MitmproxyRequestAdapter\nfrom lib.mitmproxy_response_adapter import MitmproxyResponseAdapter\nfrom lib.proxy_request import ProxyRequest\nfrom lib.settings import Settings\nfrom lib.scenarios_api import ScenariosApi\n\n# mitmproxy only hot reloads the main script, manually hot reload lib\nimportlib.reload(lib.hashed_request_decorator)\nimportlib.reload(lib.joined_request)\nimportlib.reload(lib.logger)\nimportlib.reload(lib.mitmproxy_request_adapter)\nimportlib.reload(lib.mitmproxy_response_adapter)\nimportlib.reload(lib.proxy_request)\nimportlib.reload(lib.settings)\nimportlib.reload(lib.scenarios_api)\n\nLOG_ID = 'record'\n\nAGENT_STATUSES = {\n 'REQUESTS_MODIFIED': 'requests-modified'\n}\n\nMOCK_POLICY = {\n 'ALL': 'all',\n 'NONE': 'none',\n 'FOUND': 'found',\n}\n\nRECORD_POLICY = {\n 'NONE': 'none',\n 'ALL': 'all',\n 'NOT_FOUND': 'not_found',\n}\n\nMODE = {\n 'MOCK': 'mock',\n 'NONE': 'none',\n 'RECORD': 'record',\n}\n\nCUSTOM_RESPONSE_CODES = {\n 'NOT_FOUND': 499,\n 'IGNORE_COMPONENTS': 498,\n}\n\nCUSTOM_HEADERS = {\n 'MOCK_POLICY': 'X-Mock-Policy',\n 'DO_PROXY': 'X-Do-Proxy',\n 'PROXY_MODE': 'X-Proxy-Mode',\n 'RECORD_POLICY': 'X-Record-Policy',\n 'RESPONSE_LATENCY': 'X-Response-Latency',\n 'SERVICE_URL': 'X-Service-Url',\n}\n\ndef request(flow):\n request = flow.request\n\n __disable_web_cache(request)\n\n settings = Settings.instance()\n mode = __get_proxy_mode(request.headers, settings)\n\n Logger.instance().debug(f\"Proxy Mode: {mode}\")\n\n if mode == MODE['NONE']:\n pass\n elif mode == MODE['RECORD']:\n __handle_record(request, settings)\n elif mode == MODE['MOCK']:\n __handle_mock(flow, settings)\n else:\n return __bad_request(\n flow,\n \"Valid env MODE: %s, %s, Got: %s\" % (MODE['RECORD'], MODE['MOCK'], mode)\n )\n\ndef response(flow):\n settings = Settings.instance()\n request = flow.request\n\n mode = __get_proxy_mode(request.headers, settings)\n\n if mode != MODE['RECORD']:\n return False\n\n __disable_transfer_encoding(flow.response)\n\n active_mode_settings = settings.active_mode_settings\n\n api = ScenariosApi(\n settings.api_url, settings.api_key\n )\n\n if active_mode_settings.get('enabled') and __allowed_request(active_mode_settings, request):\n upload_policy = __get_record_policy(request.headers, active_mode_settings)\n else:\n # If the request path does not match accepted paths, do not record\n upload_policy = RECORD_POLICY['NONE']\n\n Logger.instance().debug(f\"Upload Policy: {upload_policy}\")\n\n if upload_policy == RECORD_POLICY['ALL']:\n thread = threading.Thread(target=__upload_request, args=(flow, api, settings))\n thread.start()\n #__upload_request(flow, api, settings)\n elif upload_policy == RECORD_POLICY['NOT_FOUND']:\n res = __eval_request(request, api)\n\n if res.status_code == CUSTOM_RESPONSE_CODES['NOT_FOUND']:\n thread = threading.Thread(target=__upload_request, args=(flow, api, settings))\n thread.start()\n #__upload_request(flow, api, settings)\n elif upload_policy == RECORD_POLICY['NONE']:\n pass\n else:\n return __bad_request(\n flow,\n \"Valid env RECORD_POLICY: %s, %s, %s, Got: %s\" %\n [RECORD_POLICY['ALL'], RECORD_POLICY['NOT_FOUND'], RECORD_POLICY['NONE'], upload_policy]\n )\n\n### PRIVATE\n\n###\n#\n# @param request [mitmproxy.net.http.request.Request]\n# @param settings [Dict]\n#\ndef __handle_mock(flow, settings):\n start_time = time.time()\n\n request = flow.request\n active_mode_settings = settings.active_mode_settings\n service_url = __get_service_url(request, active_mode_settings)\n\n api = ScenariosApi(\n settings.api_url, settings.api_key\n )\n\n if active_mode_settings.get('enabled') and __allowed_request(active_mode_settings, request):\n mock_policy = __get_mock_policy(request.headers, active_mode_settings)\n else:\n # If the request path does not match accepted paths, do not mock\n mock_policy = MOCK_POLICY['NONE']\n\n if mock_policy == MOCK_POLICY['NONE']:\n return __reverse_proxy(request, service_url, {})\n elif mock_policy == MOCK_POLICY['ALL']:\n res = __eval_request(request, api, active_mode_settings)\n\n if res.status_code == CUSTOM_RESPONSE_CODES['IGNORE_COMPONENTS']:\n res = __eval_request(request, api, active_mode_settings, res.content)\n\n __simulate_latency(res.headers.get(CUSTOM_HEADERS['RESPONSE_LATENCY']), start_time)\n elif mock_policy == MOCK_POLICY['FOUND']:\n res = __eval_request(request, api, active_mode_settings)\n\n if res.status_code == CUSTOM_RESPONSE_CODES['NOT_FOUND']:\n return __reverse_proxy(request, service_url, get_options())\n else:\n __simulate_latency(res.headers.get(CUSTOM_HEADERS['RESPONSE_LATENCY']), start_time)\n else:\n return __bad_request(\n flow,\n \"Valid env MOCK_POLICY: %s, %s, %s, Got: %s\" %\n [MOCK_POLICY['ALL'], MOCK_POLICY['FOUND'], MOCK_POLICY['NONE'], mock_policy]\n )\n\n return __pass_on(flow, res)\n\ndef __handle_record(request, settings):\n active_mode_settings = settings.active_mode_settings\n service_url = __get_service_url(request, active_mode_settings)\n\n #\n # Try forwarding the request to the service specified by Settings.service_url\n #\n if not service_url:\n raise Exception('config service_url is not set')\n\n __reverse_proxy(request, service_url, {})\n\n### API Access\n\ndef __reverse_proxy(request, service_url, options = {}):\n uri = urlparse(service_url)\n\n #request.scheme = uri.scheme\n #request.host = uri.hostname\n #request.port = uri.port\n\n###\n#\n# Upon receiving a response, create the request in API for future use\n#\n# @param api [ScenariosApi]\n# @param settings [Settings.mode.mock | Settings.mode.record]\n# @param res [Net::HTTP::Response]\n#\ndef __upload_request(flow, api, settings):\n active_mode_settings = settings.active_mode_settings\n service_url = __get_service_url(flow.request, active_mode_settings)\n request = MitmproxyRequestAdapter(flow.request)\n proxy_request = ProxyRequest(request, service_url)\n response = MitmproxyResponseAdapter(flow.response)\n\n joined_request = JoinedRequest(proxy_request).with_response(response)\n\n Logger.instance().info(f\"Uploading {proxy_request.url()}\")\n\n res = api.request_create(\n active_mode_settings.get('project_key'),\n joined_request.build(),\n {\n 'importer': 'gor',\n 'scenario_key': active_mode_settings.get('scenario_key'),\n }\n\n )\n\n if res.status_code == 201:\n agent_url = settings.agent_url\n\n if not agent_url:\n Logger.instance().warn('Settings.agent_url not configured')\n else:\n api = AgentApi(agent_url)\n api.update_status(AGENT_STATUSES['REQUESTS_MODIFIED'], active_mode_settings.get('project_key'))\n\n return res\n\n\n###\n#\n# @param api [ScenariosApi]\n# @param settings [Settings.mode.mock | Settings.mode.record]\n# @param ignored_components_json [String] JSON string\n#\ndef __eval_request(request, api, settings, ignored_components_json = None):\n ignored_components = []\n\n if ignored_components_json:\n try:\n ignored_components = json.loads(ignored_components_json)\n except:\n pass\n\n query_params = __build_query_params(request, ignored_components)\n\n return api.request_response(\n settings.get('project_key'), query_params\n )\n\n### Helpers\n\n###\n#\n# Return response headers, body, and status code\n#\ndef __pass_on(flow, res):\n headers = {}\n for key, value in res.headers.items():\n headers[key.capitalize()] = value\n\n flow.response = http.HTTPResponse.make(\n res.status_code, res.content, headers,\n )\n\ndef __bad_request(flow, message):\n flow.response = http.HTTPResponse.make(\n 400, # (optional) status code\n message,\n {'Content-Type': 'text/plain'} # (optional) headers\n )\n\n return False\n\ndef __allowed_request(active_mode_settings, request):\n if __include(request, active_mode_settings.get('include_patterns')):\n return True\n\n return __exclude(request, active_mode_settings.get('exclude_patterns'))\n\n###\n#\n# @param patterns [Array]\n#\ndef __include(request, patterns):\n if not patterns:\n return True\n\n if len(patterns) == 0:\n return True\n\n for pattern in patterns:\n if re.match(pattern, request.url):\n return True\n\n return False\n\ndef __exclude(request, patterns):\n if not patterns:\n return False\n\n for pattern in patterns:\n if re.match(pattern, request.url):\n return True\n\n return False\n\n###\n#\n# Formats request into parameters expected by scenarios api\n#\n# @param request [lib.mitmproxy_request_adapter.MitmproxyRequestAdapter]\n# @param ignored_components [Array]\n#\n# @return [Hash] query parameters to pass to scenarios api\n#\ndef __build_query_params(request, ignored_components = []):\n request = MitmproxyRequestAdapter(request)\n hashed_request = HashedRequestDecorator(request).with_ignored_components(ignored_components)\n\n query_params_hash = hashed_request.query_params_hash()\n body_params_hash = hashed_request.body_params_hash()\n body_text_hash = hashed_request.body_text_hash()\n\n query_params = {}\n query_params['host'] = request.host\n query_params['path'] = request.path\n query_params['port'] = request.port\n query_params['method'] = request.method\n\n if len(query_params_hash) > 0:\n query_params['query_params_hash'] = query_params_hash\n\n if len(body_params_hash) > 0:\n query_params['body_params_hash'] = body_params_hash\n\n if len(body_text_hash) > 0:\n query_params['body_text_hash'] = body_text_hash\n\n if len(ignored_components) > 0:\n query_params['retry'] = 1\n\n return query_params\n\n###\n#\n# Try to simulate expected response latency\n#\n# wait_time (seconds) = expected_latency - estimated_rtt_network_latency - api_latency\n#\n# expected_latency = provided value\n# estimated_rtt_network_latency = 15ms\n# api_latency = current_time - start_time of this request\n#\ndef __simulate_latency(expected_latency, start_time):\n if not expected_latency:\n return 0\n\n estimated_rtt_network_latency = 0.015 # seconds\n api_latency = (time.time() - start_time)\n expected_latency = float(expected_latency) / 1000\n\n wait_time = expected_latency - estimated_rtt_network_latency - api_latency\n\n logger.instance().debug(f\"{LOG_ID}:Expected latency: {expected_latency}\")\n logger.instance().debug(f\"{LOG_ID}:API latency: {api_latency}\")\n logger.instance().debug(f\"{LOG_ID}:Wait time: {wait_time}\")\n\n if wait_time > 0:\n time.sleep(wait_time)\n\n return wait_time\n\n### Setters\n\ndef __disable_transfer_encoding(response):\n if 'Transfer-Encoding' in response.headers:\n # Without deleting this header, causes caller to stall\n del response.headers['Transfer-Encoding']\n\ndef __disable_web_cache(request):\n request.headers['CACHE-CONTROL'] = 'no-cache'\n\n if 'IF-NONE-MATCH' in request.headers:\n del request.headers['IF-NONE-MATCH']\n\n### Getters\n\ndef __get_proxy_mode(headers, settings):\n access_control_header = 'Access-Control-Request-Headers'\n do_proxy_header = CUSTOM_HEADERS['DO_PROXY']\n\n if access_control_header in headers and do_proxy_header.lower() in headers[access_control_header]:\n return MODE['NONE']\n elif do_proxy_header in headers:\n return MODE['NONE']\n elif CUSTOM_HEADERS['PROXY_MODE'] in headers:\n return headers[CUSTOM_HEADERS['PROXY_MODE']]\n else:\n return settings.active_mode\n\ndef __get_mock_policy(headers, settings):\n if CUSTOM_HEADERS['MOCK_POLICY'] in headers:\n return headers[CUSTOM_HEADERS['MOCK_POLICY']]\n else:\n return settings.get('policy')\n\ndef __get_record_policy(headers, settings):\n if CUSTOM_HEADERS['RECORD_POLICY'] in headers:\n return headers[CUSTOM_HEADERS['RECORD_POLICY']]\n else:\n return settings.get('policy')\n\ndef __get_service_url(request, settings):\n service_url = request.headers.get(CUSTOM_HEADERS['SERVICE_URL'])\n\n if service_url:\n return service_url\n else:\n if settings.get('service_url') and len(settings.get('service_url')) > 0:\n return settings.get('service_url')\n\n return f\"{request.scheme}://{request.host}:{request.port}\"\n\n","repo_name":"Jvlythical/scenarios-proxy","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":12908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30912560419","text":"\nfrom django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\ndefault_app_config = 'leonardo_module_blog.BlogConfig'\n\n\nclass Default(object):\n\n optgroup = ('Blog')\n\n @property\n def apps(self):\n return [\n 'leonardo_module_blog',\n 'elephantblog',\n\n ]\n\n @property\n def widgets(self):\n return [\n 'leonardo_module_blog.models.BlogCategoriesWidget',\n 'leonardo_module_blog.models.RecentBlogPostsWidget',\n ]\n\n @property\n def plugins(self):\n return [\n ('elephantblog.urls', 'Blog entries'),\n ]\n\n config = {\n 'BLOG_PAGINATE_BY': (10, _('Blog Entries Pagination')),\n 'BLOG_SHOW_NEXT_PREV': (True, _('Show next & prev under post detail')),\n 'DISQUS_COMMENTS': (False, _('Enable Disqus comments')),\n 'DISQUS_SHORTNAME': ('michaelkuty', _('Disqus shortname identificator.')),\n\n }\n\n navigation_extensions = [\n 'elephantblog.navigation_extensions.treeinfo',\n ]\n\n absolute_url_overrides = {\n 'elephantblog.entry': 'leonardo_module_blog.overrides.elephantblog_entry_url_app',\n 'elephantblog.categorytranslation':\n 'leonardo_module_blog.overrides.elephantblog_categorytranslation_url_app',\n }\n\n\nclass BlogConfig(AppConfig, Default):\n name = 'leonardo_module_blog'\n verbose_name = (\"Blog\")\n\ndefault = Default()\n","repo_name":"leonardo-modules/leonardo-module-blog","sub_path":"leonardo_module_blog/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"12327909861","text":"## High-pass filters.\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nfrom scipy import ndimage\n\ndef make_rectimg_fft():\n arr = np.random.rand(1024,1024) \n arr = arr>0.9999 \n ker = np.zeros_like(arr) \n ker[:40,:20] = 1. \n farr = np.fft.fftn(arr) \n fker = np.fft.fftn(ker) \n return np.abs(np.fft.ifftn(farr*fker)) \n \ndef my_imshow(arr):\n f,ax = plt.subplots()\n ax.imshow(arr, cmap=cm.gray, interpolation='none')\n plt.show()\n\n\n#High Pass Filter.\nimg = make_rectimg_fft()\nmy_imshow(img)\n\nker = np.array([[-1, -1, -1, -1, -1],\n [-1, 1, 2, 1, -1],\n [-1, 2, 4, 2, -1],\n [-1, 1, 2, 1, -1],\n [-1, -1, -1, -1, -1]])\nmy_imshow(ker)\nmy_imshow(ndimage.convolve(img, ker))\n\n\n## High Pass Filter using different kernel.\nfrom PIL import Image\n \nimg2 = Image.open('/home/asawari/Desktop/Lab/ComputationalImagingTools/FilterImages/all_PNG/lena.png')\ndata = np.array(img2, dtype=float)\nplt.imshow(data, cmap=cm.gray)\n\nker2 = np.array([[-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]])\nmy_imshow(ker2)\nmy_imshow(ndimage.convolve(data, ker2))\n","repo_name":"asawaric/Newbie","sub_path":"CompImgTools/CIT_PythonCodes/HPF.py","file_name":"HPF.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38839070439","text":"from typing import Union\n\nfrom sqlalchemy import all_, and_, select\nfrom sqlalchemy.exc import NoResultFound\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import selectinload\n\nfrom app.database.database_helper import Base\nfrom app.helpers.exceptions_helper import GenericNotFoundException\nfrom app.models.cart_model import Cart\nfrom app.models.cupom_model import Cupom\nfrom app.models.item_model import Item\nfrom app.repository.base_repository import BaseRepository\nfrom app.repository.item_repository import ItemRepository\n\n\nclass CartRepository(BaseRepository):\n def __init__(self, session: AsyncSession, model: Base):\n super().__init__(session, Cart)\n self.item_repository = ItemRepository(session, Item)\n\n async def create(self, cart: Cart) -> Union[Cart, None]:\n try:\n db_cart = await self.get_cart_by_user_id(cart.user_id)\n db_cart.cupoms_id = cart.cupoms_id\n self.session.add(db_cart)\n await self.session.commit()\n await self.session.refresh(db_cart)\n return db_cart\n except GenericNotFoundException:\n self.session.add(cart)\n await self.session.commit()\n return cart\n\n async def get_by_id(self, cart_id: int) -> Union[Base, None]:\n\n stmt = (\n select(self.model)\n .where(self.model.id == cart_id)\n .options(selectinload(Cart.items))\n )\n stream = await self.session.execute(stmt)\n result = stream.scalars().first()\n if result:\n return result\n\n async def update(self, cart: Cart) -> Union[Cart, None]:\n return await self.create_cart(cart)\n\n async def clean_cart(self, user_id: int) -> bool:\n result = await self.session.execute(\n select(self.model).where(self.model.user_id == user_id)\n )\n delete_cart = result.fetchone()\n\n # Cart found\n if delete_cart:\n delete_cart = delete_cart[0]\n await self.item_repository.delete_all_items_by_cart_id(delete_cart.id)\n await self.session.commit()\n await self.session.delete(delete_cart)\n await self.session.commit()\n return True\n\n # Object not found in database, delete not necessary\n return True\n\n async def get_cart_by_user_id(self, user_id: int) -> Union[Base, None]:\n try:\n stmt = (\n select(Cart)\n .join(Item, isouter=True)\n .join(Cupom, isouter=True)\n .where(Cart.user_id == user_id and Cart.finish_at is None)\n .options(selectinload(Cart.items), selectinload(Cart.cupoms))\n )\n stream = await self.session.execute(stmt)\n result = stream.scalars().first()\n if result:\n return result\n raise GenericNotFoundException(message=\"Cart not found\")\n except NoResultFound:\n raise GenericNotFoundException(message=\"Cart not found\")\n\n async def get_all(self) -> list:\n stmt = (\n select(Cart).where(Cart.finish_at is None).options(selectinload(Cart.items))\n )\n stream = await self.session.execute(stmt)\n return stream.scalars().all()\n","repo_name":"valdineidossantos/cart-api-example","sub_path":"app/repository/cart_repository.py","file_name":"cart_repository.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42941212932","text":"#coding:utf-8\n'''\n小易有一个长度为n的数字数组a1, a2, …, an。\n问你是否能用这n个数字构成一个环(首尾连接),使得环中的每一个数字都小于它相邻的两个数字的和(每个数字都必须使用并且每个数字只能使用一次)。\n'''\ndef check(h):\n n = len(h)\n h.sort()\n if h[n - 1] < h[n - 2] + h[0]:\n return True\n elif h[n - 1] < h[n - 2] + h[n - 3]:\n return True\n else:\n return False\n\nt = int(input())\nfor _ in range(t):\n n = int(input())\n h = list(map(int, input().split()))\n res = check(h)\n if res:\n print('YES')\n else:\n print('NO')","repo_name":"BoatInTheRiver/codes_algorithm","sub_path":"nowcoder/netease/数字圆环.py","file_name":"数字圆环.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"42728385335","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 14 23:11:32 2019\r\n\r\n@author: Shyam\r\nLets split the QA files into train and test data\r\nWe will split data into 80-20%.\r\n\r\nSince we are doing baseline method, we will not downsample\r\nArguments - downsample = True / False\r\n\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef F_downsample(downsample):\r\n pass\r\n\r\n\"\"\"\r\nFile reading, where we will take question - answer file (individual segment), and SoftmaxIndex file\r\n1. We will filter out data which are not in softmaxindex file using pandas inbuilt isin function\r\n2. We will split by y variable and Qtype.\r\n\"\"\"\r\ndef read_file(filename):\r\n return pd.read_csv(filename)\r\n\r\ndef Generate_answer_list(df):\r\n #df = read_file(filename)\r\n answer_list = np.array(df['answer'])\r\n return answer_list\r\n\r\ndef Train_Test_Split(df, answer_list):\r\n df_subset = df[df['answer'].isin(answer_list)]\r\n train, test = train_test_split(df_subset, test_size=0.2)\r\n return train, test\r\n\r\n\"\"\"main program starts below\"\"\"\r\n#generating answer list\r\nfilename = 'SoftmaxIndex.csv'\r\ndf_SoftmaxIndex = read_file(filename)\r\nanswer_list = Generate_answer_list(df_SoftmaxIndex)\r\n\r\n#generating train test split\r\nfilename = 'QA_Individual_segments.csv'\r\ndf_QA_segments = read_file(filename)\r\ntrainData, testData = Train_Test_Split(df_QA_segments, answer_list)\r\n\r\n#writing to files\r\ntrainData.to_csv('train_QA.csv', index = False)\r\ntestData.to_csv('test_QA.csv', index = False)\r\n\r\n","repo_name":"shyam1692/Video-Question-Answering","sub_path":"QA Generation/SplitSegment_train_validation.py","file_name":"SplitSegment_train_validation.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32951728460","text":"# ©Xiler - Arthurdw\n\nfrom datetime import datetime\nfrom enum import Enum\n\ncodes = list(map(lambda i: f\"\\033[{i}m\",\n [0, 2, 4, 5, 7, 8, 21, 22, 24, 25, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 42, 43, 44, 45,\n 46, 47, 49, 90, 91, 92, 93, 94, 95, 96, 97, 100, 101, 102, 103, 104, 105, 106, 107]))\n\n\nclass Formats(Enum):\n r\"\"\"\n Enum for console formats/styling codes.\n\n Example:\n print(f\"{Formats.underline.value}Underline text{Formats.default.value}\")\n\n NOTE\n --------\n Some CLI's may not support this formatting.\n \"\"\"\n default = \"\\033[0m\\033[21m\\033[22m\\033[24m\\033[25m\\033[27m\\033[28m\"\n dim = \"\\033[2m\"\n underline = \"\\033[4m\"\n blink = \"\\033[5m\"\n inverted = \"\\033[7m\"\n hidden = \"\\033[8m\"\n\n\nclass Colors(Enum):\n r\"\"\"\n Enum for console color codes.\n\n Example:\n print(f\"{Colors.yellow.value}Yellow text{Colors.default.value}\")\n\n NOTE\n --------\n Some CLI's may not support colors.\n \"\"\"\n default = \"\\033[39m\"\n black = \"\\033[30m\"\n red = \"\\033[31m\"\n green = \"\\033[32m\"\n yellow = \"\\033[33m\"\n blue = \"\\033[34m\"\n magenta = \"\\033[35m\"\n cyan = \"\\033[36m\"\n light_gray = \"\\033[37m\"\n dark_gray = \"\\033[90m\"\n light_red = \"\\033[91m\"\n light_green = \"\\033[92m\"\n light_yellow = \"\\033[93m\"\n light_blue = \"\\033[94m\"\n light_magenta = \"\\033[95m\"\n light_cyan = \"\\033[96m\"\n white = \"\\033[97m\"\n\n\nclass Backgrounds(Enum):\n r\"\"\"\n Enum for console background color codes.\n\n Example:\n print(f\"{Backgrounds.red.value}This text has a red background{Backgrounds.default.value}\")\n\n NOTE\n --------\n Some CLI's may not support background colors.\n \"\"\"\n default = \"\\033[49m\"\n black = \"\\033[40m\"\n red = \"\\033[41m\"\n green = \"\\033[42m\"\n yellow = \"\\033[43m\"\n blue = \"\\033[44m\"\n magenta = \"\\033[45m\"\n cyan = \"\\033[46m\"\n light_gray = \"\\033[47m\"\n dark_gray = \"\\033[100m\"\n light_red = \"\\033[101m\"\n light_green = \"\\033[102m\"\n light_yellow = \"\\033[103m\"\n light_blue = \"\\033[104m\"\n light_magenta = \"\\033[105m\"\n light_cyan = \"\\033[106m\"\n white = \"\\033[107m\"\n\n\nclass Prettier:\n r\"\"\"\n UtilsX its solution for easily formatting your consoles. Prettier\n can make your programs look more professional with almost no effort!\n\n Parameters\n ------------\n datetime_format: :class:`str`\n The datetime format that your entered datetime object will take.\n The default format is `[%y-%d-%m %H:%M:%S] `.\n default_text_format: :class:`str`\n The default way text will be formatted in a print. This can be a\n color, format or background. (or combined)\n colors_enabled: :class:`bool`\n If colors should be enabled in the console. If false it will strip\n all color codes from the message.\n auto_strip_message: :class:`bool`\n If the pretty printer should automatically apply the python .strip()\n method to the content.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.datetime_format = \\\n str(kwargs.get(\"datetime_format\") or\n f\"{Formats.default.value + Colors.dark_gray.value + Backgrounds.default.value}[\"\n f\"{Colors.light_green.value}%y-%d-%m %H:%M:%S{Colors.dark_gray.value}]{Colors.default.value} \")\n\n self.default_text_format = \\\n str(kwargs.get(\"default_text_format\") or Formats.default.value + Colors.default.value +\n Backgrounds.default.value)\n\n # 'x if x is not None else `default`' -> Cheat code to check if a x is passed and if its not None (undefined)\n self.colors_enabled = bool(kwargs.get(\"colors_enabled\") if kwargs.get(\"colors_enabled\") is not None else True)\n self.auto_strip_message = \\\n bool(kwargs.get(\"auto_strip_message\") if kwargs.get(\"auto_strip_message\") is not None else False)\n\n @staticmethod\n def clear_colors(msg: str):\n r\"\"\"\n Clears all known color codes from a given message.\n\n Parameters\n ------------\n msg: :class:`str`\n The message that is the target.\n\n Returns\n ------------\n :class:`str`\n A color code stripped string.\n \"\"\"\n for code in codes:\n msg = msg.replace(code, \"\")\n return msg\n\n def print(self, message: str, time: datetime = None) -> None:\n r\"\"\"\n Pretty prints a given message.\n\n Parameters\n ------------\n message: :class:`str`\n The message that must be pretty printed\n time: :class:`datetime`\n The printed datetime object. (Optional)\n \"\"\"\n print(self.format(message, time))\n\n def format(self, message: str, time: datetime = None) -> str:\n r\"\"\"\n Formats a message, this method is also called in the\n Prettier print statement!\n\n Parameters\n ------------\n message: :class:`str`\n The message that must be formatted\n time: :class:`datetime`\n The printed datetime object. (Optional)\n\n Returns\n ------------\n :class:`str`\n A formatted string.\n \"\"\"\n data = str((self.format_timestamp(time) if time is not None else '') + self.default_text_format +\n (message.strip() if self.auto_strip_message else message))\n return data if self.colors_enabled else self.clear_colors(data)\n\n def format_timestamp(self, time: datetime) -> str:\n r\"\"\"\n Formats a datetime object, this method is also called in the\n Prettier format statement!\n\n Parameters\n ------------\n time: :class:`datetime`\n The datetime object that must be formatted\n\n Returns\n ------------\n :class:`str`\n A formatted datetime object.\n \"\"\"\n formatted = time.strftime(self.datetime_format)\n return formatted if self.colors_enabled else self.clear_colors(formatted)\n","repo_name":"0x5ubt13/my_python_journey","sub_path":"projects/discord_bot/venv/Lib/site-packages/utilsx/console/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"74115515576","text":"from pathlib import Path\n\nclass AudioFile:\n ext: str\n \n def __init__(self, filepath: Path) -> None:\n if not filepath.suffix == self.ext:\n raise ValueError(\"Invalid file format\")\n self.filepath = filepath\n \nclass MP3File(AudioFile):\n ext = \".mp3\"\n \n def play(self) -> None:\n print(f\"playing {self.filepath} as mp3\")\n \n \nclass WavFile(AudioFile):\n ext = \".wav\"\n \n def play(self) -> None:\n print(f\"playing {self.filepath} as wav\")\n \n\nclass OggFile(AudioFile):\n ext = \".ogg\"\n \n def play(self) -> None:\n print(f\"playing {self.filepath} as ogg\")\n\np_1 = MP3File(Path(\"Heart of the Sunrise.mp3\"))\np_1.play()\n\np_2 = WavFile(Path(\"my piano playing file.wav\"))\np_2.play()\n\np_3 = OggFile(Path(\"my music instruments file.ogg\"))\np_3.play()\n\n# p_4 = MP3File(Path(\"wrong file.mov\"))\n# p_4.play()\n\n\nclass FileChat:\n def __init__(self, filepath: Path) -> None:\n if not filepath.suffix == \".flac\":\n raise ValueError(\"Not a .flac file\")\n self.filepath = filepath\n \n def play(self) -> None:\n print(f\"playing {self.filepath} as falc\")\n\n\nwrong_chat_file = FileChat(Path(\"wrong audio file.flac\"))\nwrong_chat_file.play()\n","repo_name":"easywaldo/python_lab_advanced","sub_path":"python_oo/ch03/polymorphism_sample.py","file_name":"polymorphism_sample.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1311959139","text":"T = int(input())\narr = []\narr2 = []\nfor i in range(T):\n n = int(input())\n arr.append(n)\nprint(arr)\ndef testoore(n):\n for j in arr:\n \n for i in range(1, max(arr)+1):\n if j%i == 0:\n arr2.append(i)\n print(arr2)\n if len(arr2)>2 or len(arr2)==1 :\n print(\"Not Prime\")\n else:\n print(\"Prime\")\n arr2.clear() \nif __name__ == \"__main__\":\n testoore(n)\n","repo_name":"justdave001/Personal-Sols-to-HackerRank-and-LeetCode-problems-","sub_path":"Running time and complexity.py","file_name":"Running time and complexity.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72322768057","text":"import sys\nimport json\n\ndef handle(event):\n body = event.get('body')\n if body is None:\n return 400, \"missing body\"\n\n if 'superSecretData=' not in body:\n return 400, \"missing superSecretData\"\n\n return 200, \"OK\" \n\ndef handler(event, context):\n statusCode, responseBody = handle(event)\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": statusCode,\n \"headers\": {},\n \"multiValueHeaders\": {},\n \"body\": json.dumps({'message': responseBody})\n }","repo_name":"RhinoSecurityLabs/cloudgoat","sub_path":"scenarios/cicd/assets/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"22"} +{"seq_id":"35640827225","text":"# -*- coding: utf-8 -*-\r\n#########################################################\r\n# python\r\nimport os, sys, traceback, re, json, threading, time, shutil\r\nfrom datetime import datetime\r\nimport urllib\r\n# third-party\r\nimport requests\r\n# third-party\r\nfrom flask import request, render_template, jsonify, redirect, send_file\r\nfrom sqlalchemy import or_, and_, func, not_, desc\r\nimport lxml.html\r\nfrom lxml import etree as ET\r\n\r\n# sjva 공용\r\nfrom framework import db, scheduler, path_data, socketio, SystemModelSetting, app\r\nfrom framework.util import Util\r\nfrom framework.common.util import headers\r\nfrom plugin import LogicModuleBase, default_route_socketio\r\n# 패키지\r\nfrom .plugin import P\r\nlogger = P.logger\r\npackage_name = P.package_name\r\nModelSetting = P.ModelSetting\r\n#########################################################\r\nfrom .process_plex import ProcessPlex, plex_default_vod, plex_default_series\r\nfrom .process_wavve import ProcessWavve, wavve_default_live, wavve_default_vod, wavve_default_series\r\nfrom .process_tving import ProcessTving, tving_default_live, tving_default_vod, tving_default_series\r\nfrom .process_sstv import ProcessSstv\r\nfrom .process_spotv import ProcessSpotv\r\n\r\nsource_list = [ProcessPlex, ProcessWavve, ProcessTving, ProcessSpotv, ProcessSstv]\r\n\r\n@P.blueprint.route('/get.php', methods=['GET'])\r\ndef get_php():\r\n logger.debug('>> get.php : %s', request.args)\r\n return jsonify('')\r\n\r\n@P.blueprint.route('/xmltv.php', methods=['GET'])\r\ndef xmltv_php():\r\n logger.debug('>> xmltv.php : %s', request.args)\r\n root = ET.Element('tv')\r\n root.set('generator-info-name', SystemModelSetting.get('ddns'))\r\n\r\n for source in source_list:\r\n tmp = source.get_live_channel_list()\r\n if tmp is None:\r\n continue\r\n for key, channel in tmp.items():\r\n channel_tag = ET.SubElement(root, 'channel') \r\n channel_tag.set('id', '%s' % key)\r\n icon_tag = ET.SubElement(channel_tag, 'icon')\r\n icon_tag.set('src', channel['icon'])\r\n display_name_tag = ET.SubElement(channel_tag, 'display-name') \r\n display_name_tag.text = channel['name']\r\n\r\n for program in channel['list']:\r\n program_tag = ET.SubElement(root, 'programme')\r\n program_tag.set('start', program['start_time'].strftime('%Y%m%d%H%M%S') + ' +0900')\r\n program_tag.set('stop', program['end_time'].strftime('%Y%m%d%H%M%S') + ' +0900')\r\n program_tag.set('channel', '%s' % key)\r\n title_tag = ET.SubElement(program_tag, 'title')\r\n title_tag.set('lang', 'ko')\r\n title_tag.text = program['title']\r\n if 'desc' in program:\r\n desc_tag = ET.SubElement(program_tag, 'desc')\r\n desc_tag.text = program['desc']\r\n if 'icon' in program:\r\n icon_tag = ET.SubElement(program_tag, 'icon')\r\n icon_tag.set('src', program['icon'])\r\n\r\n return app.response_class(ET.tostring(root, pretty_print=True, xml_declaration=True, encoding=\"utf-8\"), mimetype='application/xml')\r\n \r\n\r\n@P.blueprint.route('/player_api.php')\r\ndef player_api_php(): \r\n logger.debug('>> player_api.php : %s', request.args)\r\n action = request.args.get('action')\r\n output = []\r\n index = 1\r\n if action == 'get_live_categories':\r\n for source in source_list:\r\n data = source.get_live_categories()\r\n if data is not None:\r\n output += data\r\n elif action == 'get_live_streams':\r\n for source in source_list:\r\n data = source.get_live_streams(category_id=request.args.get('category_id'))\r\n if data is None or len(data) == 0:\r\n continue\r\n for item in data:\r\n entity = item\r\n entity['num'] = index\r\n index += 1\r\n output.append(entity)\r\n elif action == 'get_vod_categories':\r\n for source in source_list:\r\n data = source.get_vod_categories()\r\n if data is not None:\r\n output += data\r\n elif action == 'get_vod_streams':\r\n for source in source_list:\r\n data = source.get_vod_streams(category_id=request.args.get('category_id'))\r\n if data is None or len(data) == 0:\r\n continue\r\n for item in data:\r\n entity = item\r\n entity['num'] = index\r\n index += 1\r\n output.append(entity)\r\n elif action == 'get_vod_info':\r\n vod_id = request.args.get('vod_id')\r\n output = source_list[int(vod_id)%10].get_vod_info(vod_id)\r\n elif action == 'get_series_categories':\r\n for source in source_list:\r\n data = source.get_series_categories()\r\n if data is not None:\r\n output += data\r\n elif action == 'get_series':\r\n for source in source_list:\r\n data = source.get_series(category_id=request.args.get('category_id'))\r\n if data is None or len(data) == 0:\r\n continue\r\n for item in data:\r\n entity = item\r\n entity['num'] = index\r\n index += 1\r\n output.append(entity)\r\n elif request.args.get('action') == 'get_series_info':\r\n series_id = request.args.get('series_id')\r\n output = source_list[int(series_id[-1])].get_series_info(series_id)\r\n else:\r\n output = {\"user_info\":{\"username\":ModelSetting.get('user'),\"password\":ModelSetting.get('pass'),\"message\":\"\",\"auth\":1,\"status\":\"Active\",\"exp_date\":\"1632734599\",\"is_trial\":\"0\",\"active_cons\":\"1\",\"created_at\":\"1585304571\",\"max_connections\":\"10\",\"allowed_output_formats\":[\"m3u8\"]},\"server_info\":{\"url\":SystemModelSetting.get('ddns'),\"port\":\"\",\"https_port\":\"\",\"server_protocol\":\"http\",\"rtmp_port\":\"\",\"timezone\":\"UTC\",\"timestamp_now\":int(time.time()),\"time_now\":datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\"process\":True}}\r\n \r\n return jsonify(output)\r\n\r\n\r\ndef redirect_streaming_url(content_type, path):\r\n #logger.debug('>> CONTENT : %s, PATH : %s, ags : %s', content_type, path, request.args)\r\n tmp = path.split('/')[-1].split('.')\r\n xc_id = tmp[0]\r\n url = source_list[int(xc_id)%10].get_streaming_url(xc_id, content_type, extension=tmp[1])\r\n if type(url) == type({}):\r\n return jsonify(url)\r\n return redirect(url)\r\n \r\n\r\n@P.blueprint.route('/movie/')\r\ndef movie(path):\r\n return redirect_streaming_url('vod', path)\r\n\r\n@P.blueprint.route('/series/')\r\ndef series(path):\r\n return redirect_streaming_url('series', path)\r\n\r\n@P.blueprint.route('/live/')\r\ndef live(path):\r\n return redirect_streaming_url('live', path)\r\n\r\n\r\n@P.blueprint.route('/img', methods=['GET', 'POST'])\r\ndef img():\r\n from PIL import Image\r\n image_url = urllib.parse.unquote_plus(request.args.get('url'))\r\n im = Image.open(requests.get(image_url, stream=True).raw)\r\n width, height = im.size\r\n new_height = height\r\n new_width = int(height * 1.78)\r\n #new_image = Image.new('RGBA',(new_width, new_height), (0,0,0, 0))\r\n new_image = Image.new('RGBA',(new_width, new_height), (0,0,0,0))\r\n new_image.paste(im, (int((new_width - width)/2), 0))\r\n filename = os.path.join(path_data, 'tmp', f'proxy_{str(time.time())}.png')\r\n new_image.save(filename)\r\n #return send_file(filename, mimetype='image/jpeg')\r\n return send_file(filename, mimetype='image/png')\r\n\r\n\r\nclass LogicXC(LogicModuleBase):\r\n db_default = {\r\n 'db_version' : '1',\r\n 'xc_auto_start' : 'False',\r\n 'xc_interval' : '10',\r\n\r\n 'use_auth' : 'False',\r\n 'user' : 'user',\r\n 'pass' : 'pass',\r\n 'default_frequency' : '1',\r\n 'default_max_count' : '20',\r\n 'drm_include' : 'False',\r\n 'drm_notify' : 'True',\r\n\r\n 'plex_use' : 'False',\r\n 'plex_server' : '',\r\n 'plex_token' : '',\r\n 'plex_vod' : plex_default_vod,\r\n 'plex_series' : plex_default_series,\r\n 'plex_all_container' : 'False',\r\n\r\n 'wavve_use' : 'True',\r\n 'wavve_quality' : 'HD', \r\n 'wavve_is_adult' : 'False', \r\n 'wavve_live' : wavve_default_live, \r\n 'wavve_vod' : wavve_default_vod, \r\n 'wavve_series' : wavve_default_series, \r\n\r\n 'tving_use' : 'True',\r\n 'tving_quality' : 'HD', \r\n 'tving_is_adult' : 'False', \r\n 'tving_live' : tving_default_live, \r\n 'tving_vod' : tving_default_vod, \r\n 'tving_series' : tving_default_series, \r\n\r\n 'sstv_use' : 'True',\r\n 'sstv_only_kor' : 'True',\r\n 'sstv_group_only_country' : 'True',\r\n \r\n 'spotv_use' : 'False',\r\n 'spotv_pk' : '',\r\n 'spotv_username' : '',\r\n 'spotv_password' : '',\r\n 'spotv_quality' : '',\r\n }\r\n\r\n\r\n def __init__(self, P):\r\n super(LogicXC, self).__init__(P, 'base', scheduler_desc=u'tivimate 항목 생성')\r\n self.name = 'xc'\r\n\r\n def process_menu(self, sub, req):\r\n arg = P.ModelSetting.to_dict()\r\n arg['sub'] = self.name\r\n if sub in ['base']:\r\n job_id = '%s_%s' % (self.P.package_name, self.name)\r\n arg['scheduler'] = str(scheduler.is_include(job_id))\r\n arg['is_running'] = str(scheduler.is_running(job_id))\r\n arg['scheduler_count'] = u'%s 회 실행' % P.scheduler_count\r\n arg['tivimate_url'] = '{}/{}'.format(SystemModelSetting.get('ddns'), P.package_name)\r\n return render_template('{package_name}_{module_name}_{sub}.html'.format(package_name=P.package_name, module_name=self.name, sub=sub), arg=arg)\r\n return render_template('sample.html', title='%s - %s' % (P.package_name, sub))\r\n\r\n def process_ajax(self, sub, req):\r\n try:\r\n if sub == 'all_load':\r\n def func():\r\n ProcessSstv.scheduler_function(mode='force')\r\n ProcessSpotv.scheduler_function(mode='force')\r\n ProcessWavve.scheduler_function(mode='force')\r\n ProcessTving.scheduler_function(mode='force')\r\n ProcessPlex.scheduler_function(mode='force')\r\n socketio.emit(\"notify\", data = {'type':'success', 'msg' : u'아이템 로딩 완료'}, namespace='/framework', broadcast=True) \r\n t = threading.Thread(target=func, args=())\r\n t.daemon = True\r\n t.start()\r\n return jsonify(True)\r\n except Exception as e: \r\n P.logger.error('Exception:%s', e)\r\n P.logger.error(traceback.format_exc())\r\n return jsonify({'ret':'exception', 'log':str(e)})\r\n\r\n def reset_db(self):\r\n from .process_wavve import ModelWavveMap\r\n db.session.query(ModelWavveMap).delete()\r\n from .process_tving import ModelTvingMap\r\n db.session.query(ModelTvingMap).delete()\r\n db.session.commit()\r\n return True\r\n \r\n\r\n #########################################################\r\n\r\n \r\n def scheduler_function(self):\r\n try:\r\n mode = 'force' if (P.scheduler_count % 50) == 0 else 'scheduler'\r\n ProcessSstv.scheduler_function(mode=mode)\r\n ProcessSpotv.scheduler_function(mode=mode)\r\n ProcessWavve.scheduler_function(mode=mode)\r\n ProcessTving.scheduler_function(mode=mode)\r\n ProcessPlex.scheduler_function(mode=mode)\r\n logger.debug('scheduler_function end..')\r\n except Exception as e: \r\n P.logger.error('Exception:%s', e)\r\n P.logger.error(traceback.format_exc())\r\n finally:\r\n P.scheduler_count += 1\r\n \r\n","repo_name":"soju6jan/tivimate","sub_path":"logic_xc.py","file_name":"logic_xc.py","file_ext":"py","file_size_in_byte":11765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"33467748382","text":"# encoding: utf-8\nimport pandas as pd\n\n# leggo il file excel e lo carico in un dataframe\nf=pd.read_excel('resources/f.xlsx')\n\n# leggo file csv e lo carico in un dataframe\np=pd.read_csv('resources/p.csv', sep=';')\n\n\n# stampo le prime righe 4 righe di f\n#print(f.head(4))\n\n# stampo le ultime righe 4 righe di p\n#print(p.tail(4))\n\n\n# ESERCIZIO Carica il file pf.csv e verifica che ha 12 righe e 4 colonne. (per verificare la dimensione del gile usa .shape)\n# -------------------------------------------------------------------------------------- your code here!!!\npf=pd.read_csv('resources/pf.csv', sep='|')\nprint(pf.shape)\n\n\n\n# ESERCIZIO Salva il dataframe f in formato csv ( usa la funzione to_csv):\n# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html\n# -------------------------------------------------------------------------------------- your code here!!!\nf.to_csv('pippo.csv')","repo_name":"RiccardoNizzolo/corso-python","sub_path":"day5/lezioni/l1-pandasRead.py","file_name":"l1-pandasRead.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26704440184","text":"# -*- coding: utf-8 -*-\n\nfrom random import randint\nimport sys\n\nclass TreeNode:\n\n def __init__(self, user_data):\n self.data = user_data\n self.leftchild = None # 指向左子節點\n self.rightchild = None # 指向右子節點\n self.parent = None # 指向父母節點\n\n def insert(self, user_data):\n \"\"\"\n 新增節點\n :param user_data:\n :return:\n \"\"\"\n if self.data == user_data: # 避免重複資料\n return False\n elif self.data > user_data: # 節點資料大於新資料,新資料往左邊子樹節點走\n if self.leftchild:\n return self.leftchild.insert(user_data)\n else:\n self.leftchild = TreeNode(user_data)\n self.leftchild.parent = self # 設定新節點的parent\n return True\n elif self.data < user_data: # 節點資料小於新資料,新資料往右邊子樹節點走\n if self.rightchild:\n return self.rightchild.insert(user_data)\n else:\n self.rightchild = TreeNode(user_data)\n self.rightchild.parent = self # 設定新節點的parent\n return True\n\n def find(self, find_data):\n \"\"\"\n 尋找節點\n 有找到返回該節點\n 沒找到返回False\n :param find_data:\n :return:\n \"\"\"\n if self.data == find_data:\n return self\n elif self.data > find_data and self.leftchild: # 節點資料大於被搜尋資料,被搜尋資料往左邊子樹節點走\n return self.leftchild.find(find_data)\n elif self.data < find_data and self.rightchild: # 節點資料小於被搜尋資料,被搜尋資料往右邊子樹節點走\n return self.rightchild.find(find_data)\n else:\n return False\n\n def preorder(self):\n \"\"\"\n 前序走訪\n :return:\n \"\"\"\n print(str(self.data))\n if self.leftchild:\n self.leftchild.preorder()\n if self.rightchild:\n self.rightchild.preorder()\n\n def inorder(self):\n \"\"\"\n 中序走訪\n :return:\n \"\"\"\n if self.leftchild:\n self.leftchild.inorder()\n print(str(self.data))\n if self.rightchild:\n self.rightchild.inorder()\n\n def postorder(self):\n \"\"\"\n 後序走訪\n :return:\n \"\"\"\n if self.leftchild:\n self.leftchild.postorder()\n if self.rightchild:\n self.rightchild.postorder()\n print(str(self.data))\n\n def get_height(self):\n \"\"\"\n 計算樹的高度\n :return:\n \"\"\"\n if self.leftchild and self.rightchild:\n return 1 + max(self.leftchild.get_height(), self.rightchild.get_height())\n elif self.leftchild:\n return 1 + self.leftchild.get_height()\n elif self.rightchild:\n return 1 + self.rightchild.get_height()\n else:\n return 1\n\n def get_height_second(self):\n l_height = 0\n r_height = 0\n # Compute the depth of each subtree\n if self.leftchild:\n l_height = self.leftchild.get_height()\n elif self.rightchild:\n r_height = self.rightchild.get_height()\n\n # Use the larger one\n if (l_height > r_height):\n return l_height + 1\n else:\n return r_height + 1\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def insert(self, user_data):\n if self.root is None: # 第一次建立節點->成為根節點\n self.root = TreeNode(user_data)\n return True\n else:\n return self.root.insert(user_data)\n\n def find(self, find_data):\n if self.root is None: # 沒有任何節點\n return False\n else:\n return self.root.find(find_data)\n\n def preorder(self):\n\n if self.root:\n print('Pre-Order')\n self.root.preorder()\n else:\n return False\n\n def inorder(self):\n\n if self.root:\n print('In-Order')\n self.root.inorder()\n else:\n return False\n\n def postorder(self):\n\n if self.root:\n print('Post-Order')\n self.root.postorder()\n else:\n return False\n\n def get_height(self):\n if self.root:\n return self.root.get_height()\n else:\n return 0\n\n def get_height_second(self):\n if self.root:\n return self.root.get_height_second()\n else:\n return 0\n\n def get_num_of_child(self, begin_node):\n \"\"\"\n 返回begin_node有幾個子節點\n :param begin_node:\n :return:\n \"\"\"\n num_of_child = 0\n if begin_node.leftchild:\n num_of_child += 1\n if begin_node.rightchild:\n num_of_child += 1\n\n return num_of_child\n\n def min_value_node(self, begin_node = None):\n \"\"\"\n begin_node的子樹中,有最小值的節點(the leftmost leaf node)\n :param begin_node:\n :return:\n \"\"\"\n if begin_node is None:\n curr_node = self.root\n else:\n curr_node = begin_node\n\n while curr_node.leftchild is not None:\n curr_node = curr_node.leftchild\n\n return curr_node\n\n def remove_value(self, del_data):\n \"\"\"\n 透過值找到該節點然後移除節點\n :param del_data:\n :return:\n \"\"\"\n return self.remove_node(self.find(del_data))\n\n def remove_node(self, node_to_del):\n \"\"\"\n 移除節點\n :param node_to_del:\n :return:\n \"\"\"\n if node_to_del is False:\n return 'Not Found'\n else:\n node_parent = node_to_del.parent\n num_of_child = self.get_num_of_child(node_to_del)\n\n # situation 1 : the node to be deleted has no child (leaf node)\n if num_of_child == 0:\n if node_parent is not None: # the node to be deleted has parent node\n if node_parent.leftchild == node_to_del:\n node_parent.leftchild = None\n else:\n node_parent.rightchild = None\n else: # the node to be deleted has no parent node == root node\n self.root = None # the tree has only root node, delete root node means delete the tree\n\n # situation 2 : the node to be deleted has only one child\n if num_of_child == 1:\n if node_to_del.leftchild is not None:\n child = node_to_del.leftchild\n else:\n child = node_to_del.rightchild\n\n if node_parent is not None:\n if node_parent.leftchild == node_to_del:\n node_parent.leftchild = child\n else:\n node_parent.rightchild = child\n else: # root node\n self.root = child\n\n child.parent = node_parent\n\n # situation 3 : the node to be deleted has both left child and right child\n if num_of_child == 2:\n # get the inorder successor node (smallest in the right subtree) of the node to be deleted\n successor_node = self.min_value_node(node_to_del.rightchild)\n node_to_del.data = successor_node.data\n self.remove_node(successor_node)\n\n return True\n\ndef main():\n myTree = BinarySearchTree()\n\n number_list = []\n for i in range(0, 10):\n rand_number = randint(1, 100)\n number_list.append(rand_number)\n myTree.insert(rand_number)\n\n # number_list = [88, 7, 30, 37, 26, 53, 18, 5, 77, 80]\n # for i in number_list:\n # myTree.insert(i)\n\n print(number_list)\n myTree.preorder()\n myTree.inorder()\n myTree.postorder()\n print('Height:', myTree.get_height())\n # print('Height:', myTree.get_height_second())\n\n del_data = number_list[randint(0, 9)]\n # del_data = 30\n print('Find?', del_data, myTree.find(del_data))\n print('Delete:', del_data, myTree.remove_value(del_data))\n myTree.inorder()\n\nif __name__ == '__main__':\n main()\n","repo_name":"hohh0115/Data-Structures-Practices-with-Python","sub_path":"Tree/Binary Search Tree/linked_implementation.py","file_name":"linked_implementation.py","file_ext":"py","file_size_in_byte":8344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24119228963","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport requests\nfrom zhilian2.items import XiciItem\nfrom scrapy.conf import settings\nimport pymysql\nimport time\n\n\nclass XiciSpider(scrapy.Spider):\n name = 'xici'\n allowed_domains = ['xicidaili.com']\n start_urls = ['https://www.xicidaili.com/wt/1']\n\n #基础网址\n base_url = 'https://www.xicidaili.com/wt/'\n start = 1\n def parse(self, response):\n #xpath提取数据节点列表\n node_list = response.xpath('//tr[@class=\"odd\"]|//tr[@class=\"\"]')\n\n for node in node_list:\n #实例化item类\n item1 = XiciItem()\n #将ip地址数据赋值给item\n item1['ip'] = node.xpath('./td[2]/text()').extract_first()\n #将端口数据赋值给port\n item1['port'] = node.xpath('./td[3]/text()').extract_first()\n #将速度数据赋值给speed\n item1['speed'] = node.xpath('./td[7]/div/@title').extract_first()\n #将类型数据赋值给proxy_type\n item1['proxy_type'] = node.xpath('./td[6]/text()').extract_first()\n\n proxies = {\n \"http\":item1['ip']+':'+item1['port']\n }\n try:\n if requests.get('http://www.baidu.com',proxies=proxies,timeout=2).status_code == 200:\n if requests.get('http://www.hao123.com',proxies=proxies,timeout=2).status_code == 200:\n print('prase_成功的ip地址:{}'.format(item1['ip']+':'+item1['port']))\n yield item1\n else:\n print('parse_失败的IP地址:{}'.format(item1['ip'] + ':' + item1['port']))\n except:\n print('parse_失败的IP地址:{}'.format(item1['ip']+':'+item1['port']))\n\n self.start += 1\n #构造下一页链接,爬取20页\n if self.start <= 20:\n next_page = self.base_url + str(self.start)\n try:\n yield scrapy.Request(url=next_page,callback=self.parse)\n except:\n print('西刺代理数据爬取完成!')\n\n","repo_name":"tangjinlong8888/LearningLibring","sub_path":"DataJobs/zhilian21/zhilian2/spiders/xici.py","file_name":"xici.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4845010427","text":"from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import fftpack\nimport urllib3\nimport IPython, time\nimport os\n\n\ndef get_matrix_image(url):\n im = Image.open(url)\n np_im = np.array(im)\n return np_im\n \ndef cal(matrix):\n hist = {}\n for i in range(0, matrix.shape[0]):\n for j in range(0, matrix.shape[1]):\n for k in range(0, matrix.shape[2]):\n hist[matrix[i][j][k]] = hist.get(matrix[i][j][k],0) + 1\n return hist\ndef getHigh(url):\n return get_matrix_image(url).shape[0]\ndef getWeight(url):\n return get_matrix_image(url).shape[1]\ndef getShape(url):\n return get_matrix_image(url).shape[2]\ndef sortFreq (vector) :\n value = vector.keys()\n tuples = []\n for i in value :\n tuples.append((vector[i],i))\n tuples.sort()\n return tuples\n\ndef getKey(tuple):\n return tuple[0]\n\ndef getValue(tuple):\n return tuple[1]\n\ndef buildTree(vector):\n while len(vector) > 1:\n lowestTwo = tuple(vector[0:2])\n theRest = vector[2:]\n sumPro = lowestTwo[0][0] + lowestTwo[1][0]\n vector = theRest + [(sumPro, lowestTwo)]\n sorted(vector, key = getKey)\n return vector[0]\n\ndef Tree(tree):\n a = 3\n a = np.dtype('uint8').type(a)\n p = tree[1]\n if type(p) == type(a): \n return p\n else:\n return (Tree(p[0]), Tree(p[1]))\ncode= {}\ndef assignCodes(n, pat = ''):\n a = 3\n a = np.dtype('uint8').type(a)\n \n if type(n) == type(a):\n code[n] = pat\n else:\n assignCodes(n[0], pat+\"0\")\n assignCodes(n[1], pat+\"1\")\n \n\ndef pad_encoded_text(encoded_text):\n extra_padding = 8 - len(encoded_text) % 8\n for i in range(extra_padding):\n encoded_text += \"0\"\n\n padded_info = \"{0:08b}\".format(extra_padding)\n encoded_text = padded_info + encoded_text\n return encoded_text\n\ndef get_byte_array(padded_encoded_text):\n if(len(padded_encoded_text) % 8 != 0):\n print(\"Encoded text not padded properly\")\n exit(0)\n\n b = bytearray()\n for i in range(0, len(padded_encoded_text), 8):\n byte = padded_encoded_text[i:i+8]\n b.append(int(byte, 2))\n return b\n\ndef encode(code, vector):\n file = open(\"text_code.txt\",\"w+\")\n for i in range(0, vector.shape[0]):\n for j in range(0, vector.shape[1]):\n for k in range(0, vector.shape[2]):\n file.write(code[vector[i][j][k]])\n file.close()\n \ndef read(path):\n file = open(\"text_code.txt\", \"r\")\n t = pad_encoded_text(file.read())\n file.close()\n b = get_byte_array(t)\n file_name_out = path + \"_hm.bin\"\n file = open(file_name_out,\"wb\")\n file.write(bytes(b))\n file.close()\n return file_name_out\n \ndef read_file(filename):\n file = open(filename, 'rb')\n bit_string = \"\"\n byte = file.read()\n for i in byte:\n bits = bin(i)[2:].rjust(8, '0')\n bit_string += bits\n # loại bỏ các ký tự fix size ở cuối của string \n fixed_size = bit_string[:8]\n fixed_size = int(fixed_size, 2)\n encoded_text = bit_string[8:] \n encoded_text = encoded_text[:-1*fixed_size]\n # trả về string nhị phân\n return encoded_text\n pass\n\ndef decode(tree, str, path):\n a = 3\n high = getHigh(path[0:len(path)- 7])\n weight = getWeight(path[0:len(path) - 7])\n shape = getShape(path[0:len(path) - 7])\n output = np.zeros((high,weight,shape))\n output = np.uint8(output)\n k =0\n j = 0\n n = 0\n p = tree\n for i in str:\n if j == weight:\n j = 0\n k+=1\n if n == shape:\n j+=1\n n = 0\n if i == '0': p = p[0]\n else: p = p[1]\n if type(p) == type(a):\n p = np.dtype('uint8').type(p)\n output[k][j][n] = p\n n+=1\n p = tree\n return output\n\ndef hm_compression(path):\n print(\"Waiting ...\")\n matrix = get_matrix_image(path)\n hist = cal(matrix)\n sorted_hist = sortFreq(hist)\n tree = buildTree(sorted_hist)\n trim = Tree(tree)\n assignCodes(trim)\n file = open(path + \"_hmtree.txt\", \"w\")\n file.write(str(trim))\n file.close()\n encode(code,matrix)\n t = read(path)\n os.remove(\"text_code.txt\")\n return t\n\n##print(t)\ndef hm_decompression(path):\n print(\"Waiting ...\")\n file = open(path[0:len(path) - 7] + \"_hmtree.txt\",\"r\")\n trim = file.read()\n file.close()\n trim = eval(trim)\n bit_string = read_file(path)\n# print(path)\n ot = decode(trim, bit_string, path)\n ot = np.array(ot)\n new_im = Image.fromarray(ot)\n file_image = path.replace(\".bin\",\"\") + \"_decode.bmp\"\n new_im.save(file_image)\n new_im.show() \n return file_image\n#p = decompression(\"flying.bmp_hm.bin\")\n#print(p)","repo_name":"thientrang2106/huffman_encode_decode_image","sub_path":"nén/huffmanNew.py","file_name":"huffmanNew.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72338534776","text":"def pentlist(init,end):\n arr=[]\n for i in range(init,end):\n arr.append((i*(3*i-1))/2)\n return arr\n\n## I just moved the range untill i got an answer through brute force not fance but it gives you the answer\ndef main():\n init=1000\n end=3000\n lst=pentlist(init,end)\n for i in range(0,len(lst)):\n #print(i)\n for j in range(0,i):\n S=lst[i]+lst[j]\n D=lst[i]-lst[j]\n if S in lst and D in lst and j!=0:\n print(i,j,S,D)\n break\n return\nmain()\n","repo_name":"J0eG1bson/PythonClass-Final","sub_path":"prblm44.py","file_name":"prblm44.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29441687251","text":"#!/usr/bin/env python3\r\n# Author: Chiedozie Enworom\r\n# Owner: Burke IT Consulting\r\n\r\nfrom flask import Flask, jsonify, request\r\nimport database\r\nimport reputation\r\n\r\n# creates the app\r\napp = Flask(__name__)\r\n\r\n# @return: a home page\r\n@app.route('/', methods=['GET']) # **************************************\r\ndef index():\r\n return jsonify(\"IdentiCaller\")\r\n\r\n\r\n# @param: a phone number\r\n# @return: the reputation for the phone number\r\n@app.route('/reputation/', methods=['GET'])\r\ndef get_reputation(phone): # **************************************\r\n hdr = get_headers()\r\n\r\n if header_check() and is_phone(phone):\r\n print('Header Check is GOOOOOOD')\r\n database.add_calls(cid=hdr.get('Client-Id'), pn=phone)\r\n return jsonify(reputation.get_rep(phone))\r\n\r\n return \"Please check your headers.\"\r\n\r\n\r\n# @params: the url of the request\r\n# @return: the headers for client id and api token\r\ndef get_headers(): # **************************************\r\n arr = []\r\n\r\n for i in request.headers:\r\n if 'Client-Id' in i or 'Api-Token' in i:\r\n arr.append(i)\r\n\r\n hdr = dict(arr)\r\n\r\n return hdr\r\n\r\n\r\n# @params: a phone number\r\n# @return: true or false for the number format\r\ndef is_phone(pn):\r\n # print(str(pn))\r\n spn = str(pn)\r\n if len(spn) == 10:\r\n for i in spn:\r\n if int(i) <= 9:\r\n # print(True)\r\n return True\r\n elif len(spn) == 11 or len(spn) == 12:\r\n if spn[0] == '1' or spn[:2] == '+1':\r\n # print(True)\r\n return True\r\n # print(False)\r\n return False\r\n\r\n\r\n# @return: if headers are correct, true\r\ndef header_check(): # **************************************\r\n hdr = get_headers()\r\n\r\n conn = database.open_conn()\r\n curs = conn.cursor()\r\n curs.execute(\"SELECT user_id, client_token FROM clients\")\r\n row = curs.fetchall()\r\n\r\n for i in row:\r\n if int(i[0]) == int(hdr.get('Client-Id')) and i[1] == hdr.get('Api-Token'):\r\n conn.close()\r\n print('header true')\r\n return True\r\n\r\n print('header false')\r\n conn.close()\r\n return False\r\n\r\n\r\n# @return: the api token, client id and number of remaining calls\r\n@app.route('/license', methods=['GET'])\r\ndef get_license(): # **************************************\r\n hdr = get_headers()\r\n\r\n if header_check():\r\n conn = database.open_conn()\r\n curs = conn.cursor()\r\n rows = curs.execute(\"SELECT * FROM license WHERE user_id = %s\" % hdr.get('Client-Id'))\r\n conn.close()\r\n return rows\r\n\r\n return \"Please check that your Client-Id and Api-Token are correct\"\r\n\r\n\r\n# @return: data records\r\n@app.route('/records', methods=['GET'])\r\ndef get_records(): # **************************************\r\n hdr = get_headers()\r\n\r\n conn = database.open_conn()\r\n curs = conn.cursor()\r\n curs.execute(\"SELECT * FROM client\")\r\n rows = curs.fetchall()\r\n if header_check():\r\n for i in rows:\r\n if i[0] == hdr.get('Client-Id'):\r\n conn.close()\r\n return jsonify(i)\r\n\r\n conn.close()\r\n return \"Please check that your Client-Id and Api-Token are correct\"\r\n\r\n\r\n# @return: warnings regarding their api call usage\r\ndef warning(): # **************************************\r\n hdr = get_headers()\r\n\r\n conn = database.open_conn()\r\n curs = conn.cursor()\r\n curs.execute(\"SELECT id FROM client\")\r\n rows = curs.fetchall()\r\n\r\n for i in rows:\r\n if i == hdr.get('Client-Id'):\r\n curs.execute(\"SELECT calls FROM calls WHERE id = %s\" % i)\r\n num = curs.fetchone()\r\n if num >= 0:\r\n conn.close()\r\n return \"CALL LIMIT EXCEEDED\"\r\n elif num >= 5:\r\n conn.close()\r\n return \"APPROACHING CALL LIMIT\"\r\n\r\n return \"OK\"\r\n\r\n\r\nif __name__ == '__main__':\r\n # app.debug = True\r\n app.run()\r\n","repo_name":"dozie07/bitcon","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"11978723651","text":"\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize as VecNormalise_\nfrom stable_baselines3.common.vec_env import (DummyVecEnv, SubprocVecEnv, VecEnvWrapper)\nimport torch\nimport gym\nimport numpy as np\n\ndef list_to_tensor(list):\n return torch.stack(list)\n\ndef make_env(rank, sites, update_obs,string_seed, action_space, environ,\n urls=None,input_tags=None, transitions=None, seed=0, login=None, features=None,\n transition_tags=None, context=None, parent_tag=None, sink=None, source=None):\n def _thunk():\n environment = environ\n if input_tags is None and transitions is None and context is None:\n env = environment(action_space, 1, sites=sites, string_seed=string_seed, update_obs=update_obs)\n elif context or source:\n env = environment(action_space, update_obs=update_obs, string_seed=string_seed, context=context, rank=rank,\n parent_tag=parent_tag, transition_tags=transition_tags, transitions=transitions, sites=sites, urls=urls, source=source, sink=sink, features=features, input_tags=input_tags)\n else:\n env = environment(action_space, sites=sites, update_obs=update_obs, string_seed=string_seed, urls=urls,\n input_tags=input_tags, transitions=transitions, login=login, features=features, transition_tags=transition_tags, rank=rank)\n\n\n env.seed(seed+rank)\n return env\n return _thunk()\n\n\ndef make_envs_as_vec(seed, processes, gamma, sites, env, action_space, urls=None, input_tags=None, \n transitions=None, login=None, features=None, transition_tags=None,\n context=None, parent_tag=None, sink=None, source=None):\n if processes > 1:\n envs = SubprocVecEnv([lambda: make_env(parent_tag=parent_tag,context=context, action_space=action_space,rank=i, sites=sites, environ=env, string_seed=seed, urls=urls,input_tags=input_tags, transitions=transitions, login=login, features=features, transition_tags=transition_tags, update_obs=False, source=source, sink=sink) for i in range(processes)],\n start_method='spawn')\n else:\n envs = DummyVecEnv([lambda: make_env(parent_tag=parent_tag,context=context, action_space=action_space, rank=0, sites=sites, environ=env,string_seed=seed,urls=urls, input_tags=input_tags, transitions=transitions, login=login, features=features, transition_tags=transition_tags, update_obs=True, source=source, sink=sink)])\n\n\n if len(envs.observation_space.shape) == 1:\n envs = VecNormalise(envs, gamma=gamma)\n if processes > 1:\n envs = VecPyTorch(envs)\n for i in range(processes):\n envs.set_attr('rank', i, i)\n\n else:\n envs = VecPyTorchSingle(envs)\n\n return envs\n\nclass StepLimitMask(gym.Wrapper):\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n if done and self.env._max_episode_steps == self.env._elapsed_steps:\n info['bad_transition'] = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv):\n super(VecPyTorch, self).__init__(venv)\n\n def reset(self):\n observation = self.venv.reset()\n observation_decoded = np.ndarray(observation.shape)\n\n for x in range(observation.shape[0]):\n for y in range(observation.shape[1] - 1):\n observation_decoded[x][y] = \\\n self.venv.env_method('add_observation_to_states',\n observation[x][y])[0]\n observation_decoded[x][-1] = observation[x][-1]\n observation_decoded = torch.from_numpy(observation_decoded).float()\n return observation_decoded\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n actions = actions.squeeze(1)\n #actions = actions.numpy()\n try:\n self.venv.step_async(actions)\n except RuntimeError as e:\n self.venv.step_async(actions)\n\n def step_wait(self):\n observations, reward, done, info = self.venv.step_wait()\n #observations = observations[:,0]\n observations_decoded = np.ndarray(observations.shape)\n for x in range(observations.shape[0]):\n if np.count_nonzero(observations[x] == observations[x][0]) != len(observations[x]):\n for y in range(observations.shape[1]):\n new_obs = self.venv.env_method('add_observation_to_states',\n observations[x][y])[0]\n if type(new_obs) != int:\n while type(new_obs) != int:\n new_obs = self.venv.env_method('add_observation_to_states',\n observations[x][y])[0]\n observations_decoded[x][y] = new_obs\n else:\n new_obs = self.venv.env_method('add_observation_to_states',\n observations[x][0])[0]\n if type(new_obs) != int:\n while type(new_obs) != int:\n new_obs = self.venv.env_method('add_observation_to_states',\n observations[x][0])[0]\n observations_decoded[x][0] = observations_decoded[x][1] = new_obs\n\n observations_decoded = torch.from_numpy(observations_decoded).float()\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return observations_decoded, reward, done, info\n\n\nclass VecBasePyTorch(VecEnvWrapper):\n def __init__(self, venv):\n super(VecBasePyTorch, self).__init__(venv)\n def reset(self):\n observation = self.venv.reset()\n #observation = observation[0]\n observation_decoded = np.ndarray(observation.shape)\n\n for x in range(observation.shape[0]):\n if np.count_nonzero(observation[x] == observation[x][0]) != len(observation[x]):\n for y in range(observation.shape[1]):\n observation_decoded[x][y] = \\\n self.venv.env_method('add_observation_to_states',\n observation[x][y])[0]\n else:\n observation_decoded[x][0] = observation_decoded[x][1] = \\\n self.venv.env_method('add_observation_to_states',\n observation[x][0])[0]\n observation = torch.from_numpy(observation).float()\n return observation\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n actions = actions.squeeze(1)\n #actions = actions.numpy()\n self.venv.step_async(actions)\n\n def step_wait(self):\n observations, reward, done, info = self.venv.step_wait()\n #observations = observations[0]\n observations_decoded = np.ndarray(observations.shape)\n\n for x in range(observations.shape[0]):\n if np.count_nonzero(observations[x] == observations[x][0]) != len(observations[x]):\n for y in range(observations.shape[1]):\n observations_decoded[x][y] = \\\n self.venv.env_method('add_observation_to_states',\n observations[x][y])[0]\n else:\n observations_decoded[x][0] = observations_decoded[x][1] = \\\n self.venv.env_method('add_observation_to_states',\n observations[x][0])[0]\n\n observations_decoded = torch.from_numpy(observations_decoded).float()\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return observations_decoded, reward, done, info\n\n\n\n\nclass VecPyTorchSingle(VecEnvWrapper):\n def __init__(self, venv):\n super(VecPyTorchSingle, self).__init__(venv)\n\n def reset(self):\n observation = self.venv.reset()[0]\n observation = torch.from_numpy(observation).float()\n return observation\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n actions = actions.squeeze(1)\n self.venv.step_async(actions)\n\n def step_wait(self):\n observations, reward, done, info = self.venv.step_wait()\n observations = observations[0]\n observations = torch.from_numpy(observations).float()\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return observations, reward, done, info\n\nclass VecNormalise(VecNormalise_):\n def __init__(self, *args, **kwargs):\n super(VecNormalise, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs, update=True):\n if self.obs_rms:\n if self.training and update:\n self.obs_rms.update(obs)\n obs = np.clip((obs - self.obs_rms.mean) / np.sqrt(self.obs_rms.var + self.epsilon),\n -self.clipob, self.clipob)\n return obs\n else:\n return obs\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n","repo_name":"ICL-ml4csec/HAXSS","sub_path":"env/environ_utils.py","file_name":"environ_utils.py","file_ext":"py","file_size_in_byte":9282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"42547717043","text":"N, M = map(int, input().split())\nlessons = list(map(int, input().split()))\nl = max(lessons)\nr = sum(lessons)\nm = (l + r) // 2\nans = r\n\n\ndef is_possible(sz):\n cnt = 1\n bluray = 0\n for lesson in lessons:\n if bluray + lesson <= sz:\n bluray += lesson\n else:\n cnt += 1\n bluray = lesson\n\n return cnt <= M\n\n\nwhile l <= r:\n if is_possible(m):\n ans = m\n r = m - 1\n else:\n l = m + 1\n\n m = (l + r) // 2\n\nprint(ans)\n","repo_name":"ydh0213/coding-test-book","sub_path":"PART 2/Practice 07/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"32171749337","text":"from weasyprint import HTML, CSS\n\n\nclass PdfGenerator:\n\n def __init__(self, main_html, header_html=None, footer_html=None,\n base_url=None, side_margin=2, extra_vertical_margin=30,\n stylesheets=None, page_orientation='portrait'):\n self.main_html = main_html\n self.header_html = header_html\n self.footer_html = footer_html\n self.base_url = base_url\n self.side_margin = side_margin\n self.extra_vertical_margin = extra_vertical_margin\n self.stylesheets = stylesheets or []\n self.page_orientation = page_orientation\n\n @staticmethod\n def get_element(boxes, element):\n for box in boxes:\n if box.element_tag == element:\n return box\n return PdfGenerator.get_element(box.all_children(), element)\n\n def render_html(self):\n if self.header_html:\n header_body, header_height = self._compute_overlay_element(\n 'header')\n else:\n header_body, header_height = None, 0\n\n if self.footer_html:\n footer_body, footer_height = self._compute_overlay_element(\n 'footer')\n else:\n footer_body, footer_height = None, 0\n\n margins = '{header_size}px {side_margin} {footer_size}px {side_margin}'.format(\n header_size=header_height + self.extra_vertical_margin,\n footer_size=footer_height + self.extra_vertical_margin,\n side_margin='{}cm'.format(self.side_margin),\n )\n content_print_layout = ('@page {size: A4 %s; margin: %s;}' %\n (self.page_orientation,\n margins)\n )\n stylesheets = [CSS(string=content_print_layout)]\n for sheet in self.stylesheets:\n stylesheets.append(CSS(string=sheet or ''))\n\n html = HTML(\n string=self.main_html,\n base_url=self.base_url,\n )\n main_doc = html.render(stylesheets=stylesheets)\n\n if self.header_html or self.footer_html:\n self._apply_overlay_on_main(main_doc, header_body, footer_body)\n\n return main_doc\n\n def _compute_overlay_element(self, element: str):\n overlay_layout = (\n '@page {size: A4 %s; margin: 0;}' % self.page_orientation +\n '\\nheader {position: fixed; width: 100%; top: 0;}' +\n '\\nfooter {position: fixed; width: 100%; bottom: 0;}')\n stylesheets = [CSS(string=overlay_layout)]\n for sheet in self.stylesheets:\n stylesheets.append(CSS(string=sheet or ''))\n\n html = HTML(\n string=getattr(self, '{}_html'.format(element)),\n base_url=self.base_url,\n )\n element_doc = html.render(stylesheets=stylesheets)\n element_page = element_doc.pages[0]\n element_body = PdfGenerator.get_element(\n element_page._page_box.all_children(), 'body')\n element_body = element_body.copy_with_children(\n element_body.all_children())\n element_html = PdfGenerator.get_element(\n element_page._page_box.all_children(), element)\n\n if element == 'header':\n element_height = element_html.height\n if element == 'footer':\n element_height = element_page.height - element_html.position_y\n\n return element_body, element_height\n\n def _apply_overlay_on_main(self, main_doc,\n header_body=None, footer_body=None):\n for page in main_doc.pages:\n page_body = PdfGenerator.get_element(\n page._page_box.all_children(), 'body')\n if header_body:\n page_body.children += header_body.all_children()\n if footer_body:\n page_body.children += footer_body.all_children()\n","repo_name":"Kalenis/kalenislims","sub_path":"lims_report_html/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"17264963699","text":"from openpyxl import *\nimport models\n\nbase = load_workbook('Users.xlsx')\n\nbase.active\n\nusers = base['Users']\n\n\ndef get_users(users):\n\n output = []\n\n i = 0\n\n j = 0\n\n fields = ['id','fullname','hard_skills','soft_skills','character']\n\n for row in users.rows:\n \n user = models.User\n \n data = {\n 'id': '',\n 'fullname': '',\n 'hard_skills': '',\n 'soft_skills': '',\n 'character': ''\n }\n\n for cell in row:\n data[fields[j]] = cell.value\n j += 1\n \n output.append(data)\n j = 0\n i += 1\n \n base.close()\n\n return output\n\ndef register_user(user: models.User):\n\n row=users.max_row+1\n\n users[row][0].value=user.id\n\n users[row][1].value=user.fullname\n\n users[row][2].value=' '.join(user.soft_skills)\n\n users[row][3].value=' '.join(user.hard_skills)\n\n users[row][4].value=' '.join(user.character)\n\n base.save('Users.xlsx')\n\ndef get_user_by_uid(id, users):\n\n users = get_users(users)\n\n for i in range(1, len(users)):\n if users[i]['id'] == id:\n return users[i]\n \n return \"Данного пользователя нет в базе (\"\n \n","repo_name":"RoboDJLex/Case5_Bot","sub_path":"flat_file_pattern.py","file_name":"flat_file_pattern.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40352429561","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n\nimport pyrealsense2 as rs\nimport numpy as np\nimport gol\n\nimport os\nimport cv2\n\nfrom opts import opts\nfrom detectors.detector_factory import detector_factory\n\nimage_ext = ['jpg', 'jpeg', 'png', 'webp']\nvideo_ext = ['mp4', 'mov', 'avi', 'mkv']\ntime_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']\n\n\ndef demo(opt):\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str\n opt.debug = max(opt.debug, 1)\n Detector = detector_factory[opt.task]\n detector = Detector(opt)\n\n if opt.demo == 'webcam' or \\\n opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:\n cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)\n detector.pause = False\n while True:\n _, img = cam.read()\n #print(img.shape)\n #cv2.imshow('input', img)\n ret = detector.run(img)\n time_str = ''\n for stat in time_stats:\n time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])\n print(time_str)\n if cv2.waitKey(1) == 27:\n return \n\n elif opt.demo== '435':\n \n # Configure depth and color streams\n pipeline = rs.pipeline()\n # 创建 config 对象:\n config = rs.config()\n config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n # Start streaming\n profile=pipeline.start(config)\n\n # Getting the depth sensor's depth scale (see rs-align example for explanation)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n print(\"Depth Scale is: \" , depth_scale)\n\n # We will be removing the background of objects more than \n # clipping_distance_in_meters meters away\n clipping_distance_in_meters = 1 #meters\n clipping_distance = clipping_distance_in_meters / depth_scale\n\n align_to = rs.stream.color\n align = rs.align(align_to)\n \n i=0\n timeF=30\n while True:\n # Wait for a coherent pair of frames(一对连贯的帧): depth and color\n frames = pipeline.wait_for_frames()\n\n aligned_frames = align.process(frames) \n\n aligned_depth_frame = aligned_frames.get_depth_frame()\n gol.set_value('aligned_depth_frame',aligned_depth_frame) #定义跨模块全局变量\n color_frame = aligned_frames.get_color_frame()\n\n\n# # Intrinsics & Extrinsics\n# #深度相机内参矩阵\n# depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics\n# #RGB相机内参矩阵\n# color_intrin = color_frame.profile.as_video_stream_profile().intrinsics\n# # 外参矩阵-深度图相对于彩色图像的外参RT\n# depth_to_color_extrin = aligned_depth_frame.profile.get_extrinsics_to(color_frame.profile)\n# print(\"内参 ppx,ppy\",depth_intrin.ppx, ':', depth_intrin.ppy)\n# print(\"内参矩阵\",depth_intrin)\n\n if not aligned_depth_frame or not color_frame:\n continue\n\n color_image = np.asanyarray(color_frame.get_data())\n #global depth_image\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n\n # Remove background - Set pixels further than clipping_distance to grey\n grey_color = 153\n depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels\n bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n # Stack both images horizontally\n images = np.hstack((bg_removed, depth_colormap))\n\n# #imwrite depth_image color_iamge\n# if i%timeF==0:\n# cv2.imwrite('./mydata/savefig/rgb/image_r_{}.png'.format(str(i).zfill(5)), color_image)\n# cv2.imwrite('./mydata/savefig/depth/image_d_{}.png'.format(str(i).zfill(5)), depth_colormap)\n# cv2.imwrite('./mydata/savefig/depth/images_stack_{}.png'.format(str(i).zfill(5)), images)\n# np.savetxt(\"./mydata/savefig/depth_csv/depth_image_{}.csv\".format(str(i).zfill(5)),depth_image,fmt=\"%d\",delimiter=\",\")\n# i+=30\n# \n #Show images\n cv2.namedWindow('Remove Background', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Remove Background', images)\n\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', color_image) \n\n# # 通过对齐后的深度图,对齐原始RGB:color_frame,保存彩色点云\n# pc = rs.pointcloud()\n# pc.map_to(color_frame)\n# points = pc.calculate(aligned_depth_frame)\n# points.export_to_ply('./out.ply', color_frame)\n# #pcd = read_point_cloud(file_path)\n# # Visualize PLY\n# #draw_geometries([pcd])\n\n\n ret = detector.run(color_image)\n time_str = '' \n for stat in time_stats:\n time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])\n print(time_str)\n if cv2.waitKey(1) & 0xff == ord('q'):\n return \n\n\n else:\n if os.path.isdir(opt.demo):\n image_names = []\n ls = os.listdir(opt.demo)\n for file_name in sorted(ls):\n ext = file_name[file_name.rfind('.') + 1:].lower()\n if ext in image_ext:\n image_names.append(os.path.join(opt.demo, file_name))\n else:\n image_names = [opt.demo]\n \n for (image_name) in image_names:\n ret = detector.run(image_name)\n time_str = ''\n for stat in time_stats:\n time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])\n print(time_str)\n\nif __name__ == '__main__':\n\n gol._init()#先必须在主模块初始化(只在Main模块需要一次即可)\n# gol.set_value('depth_image',depth_image) #定义跨模块全局变量\n opt = opts().init()\n demo(opt)\n","repo_name":"donghang941114/ubt_projects","sub_path":"CenterNet/src/mydemo.py","file_name":"mydemo.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10986086751","text":"# -*- coding: utf-8 -*-\n# @Author : LG\n\n\"\"\"\n执行用时:56 ms, 在所有 Python3 提交中击败了85.56% 的用户\n内存消耗:13.8 MB, 在所有 Python3 提交中击败了5.23% 的用户\n\n解题思路:\n 集合 去重\n\"\"\"\nclass Solution:\n def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:\n nums1 = set(nums1)\n nums2 = set(nums2)\n if len(nums1) > len(nums2):\n nums2, nums1 = nums1, nums2\n return [i for i in nums1 if i in nums2]","repo_name":"yatengLG/leetcode-python","sub_path":"question_bank/intersection-of-two-arrays/intersection-of-two-arrays.py","file_name":"intersection-of-two-arrays.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"zh","doc_type":"code","stars":10,"dataset":"github-code","pt":"22"} +{"seq_id":"70538870777","text":"\"\"\"\nGiven a unsorted array with integers, find the median of it. \n\nA median is the middle number of the array after it is sorted. \n\nIf there are even numbers in the array, return the N/2-th number after sorted.\n\nExample\nGiven [4, 5, 1, 2, 3], return 3\n\nGiven [7, 9, 4, 5], return 5\n\nChallenge\nO(n) time.\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param nums: A list of integers.\n @return: An integer denotes the middle number of the array.\n \"\"\"\n def median(self, nums):\n # write your code here\n n = len(nums)\n return self.kthLargestElement((n - 1) / 2 + 1, nums)\n \n def kthLargestElement(self, k, nums):\n from random import randint\n left,right = 0, len(nums) - 1\n while left <= right:\n pivot_idx = randint(left, right)\n new_pivot_idx = self.partition(left, right, pivot_idx, nums)\n if new_pivot_idx == k - 1:\n return nums[new_pivot_idx]\n elif new_pivot_idx > k - 1:\n right = new_pivot_idx -1\n else:\n left = new_pivot_idx + 1\n\n def partition(self, left, right, pivot_idx, nums):\n pivot = nums[pivot_idx]\n nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]\n store_idx = left\n for i in range(left, right):\n if nums[i] < pivot:\n nums[i], nums[store_idx] = nums[store_idx], nums[i]\n store_idx += 1\n nums[right], nums[store_idx] = nums[store_idx], nums[right]\n return store_idx\n","repo_name":"AnthonyNeu/LintCode","sub_path":"Python/Median.py","file_name":"Median.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"35501872669","text":"from portage import os, _encodings\nfrom portage.const import USER_CONFIG_PATH\nfrom portage.tests import TestCase\nfrom portage.tests.resolver.ResolverPlayground import ResolverPlayground\nfrom portage.dep import ExtendedAtomDict\nfrom portage.util import ensure_dirs\n\n\nclass ProfileDefaultEAPITestCase(TestCase):\n def testProfileDefaultEAPI(self):\n repo_configs = {\n \"test_repo\": {\n \"layout.conf\": (\n \"profile-formats = profile-default-eapi\",\n \"profile_eapi_when_unspecified = 5\",\n ),\n }\n }\n\n profiles = (\n (\n \"\",\n {\n \"package.mask\": (\"sys-libs/A:1\",),\n \"package.use\": (\"sys-libs/A:1 flag\",),\n },\n ),\n (\n \"default/linux\",\n {\n \"package.mask\": (\"sys-libs/B:1\",),\n \"package.use\": (\"sys-libs/B:1 flag\",),\n \"package.keywords\": (\"sys-libs/B:1 x86\",),\n },\n ),\n (\n \"default/linux/x86\",\n {\n \"package.mask\": (\"sys-libs/C:1\",),\n \"package.use\": (\"sys-libs/C:1 flag\",),\n \"package.keywords\": (\"sys-libs/C:1 x86\",),\n \"parent\": (\"..\",),\n },\n ),\n )\n\n user_profile = {\n \"package.mask\": (\"sys-libs/D:1\",),\n \"package.use\": (\"sys-libs/D:1 flag\",),\n \"package.keywords\": (\"sys-libs/D:1 x86\",),\n }\n\n test_cases = (\n (\n lambda x: x._mask_manager._pmaskdict,\n {\n \"sys-libs/A\": (\"sys-libs/A:1::test_repo\",),\n \"sys-libs/B\": (\"sys-libs/B:1\",),\n \"sys-libs/C\": (\"sys-libs/C:1\",),\n \"sys-libs/D\": (\"sys-libs/D:1\",),\n },\n ),\n (\n lambda x: x._use_manager._repo_puse_dict,\n {\"test_repo\": {\"sys-libs/A\": {\"sys-libs/A:1\": (\"flag\",)}}},\n ),\n (\n lambda x: x._use_manager._pkgprofileuse,\n (\n {\"sys-libs/B\": {\"sys-libs/B:1\": \"flag\"}},\n {\"sys-libs/C\": {\"sys-libs/C:1\": \"flag\"}},\n {},\n {\"sys-libs/D\": {\"sys-libs/D:1\": \"flag\"}},\n ),\n ),\n (\n lambda x: x._keywords_manager._pkeywords_list,\n (\n {\"sys-libs/B\": {\"sys-libs/B:1\": [\"x86\"]}},\n {\"sys-libs/C\": {\"sys-libs/C:1\": [\"x86\"]}},\n {\"sys-libs/D\": {\"sys-libs/D:1\": [\"x86\"]}},\n ),\n ),\n )\n\n playground = ResolverPlayground(debug=False, repo_configs=repo_configs)\n try:\n repo_dir = playground.settings.repositories.get_location_for_name(\n \"test_repo\"\n )\n profile_root = os.path.join(repo_dir, \"profiles\")\n profile_info = [\n (os.path.join(profile_root, p), data) for p, data in profiles\n ]\n profile_info.append(\n (\n os.path.join(playground.eroot, USER_CONFIG_PATH, \"profile\"),\n user_profile,\n )\n )\n\n for prof_path, data in profile_info:\n ensure_dirs(prof_path)\n for k, v in data.items():\n with open(\n os.path.join(prof_path, k),\n mode=\"w\",\n encoding=_encodings[\"repo.content\"],\n ) as f:\n for line in v:\n f.write(f\"{line}\\n\")\n\n # The config must be reloaded in order to account\n # for the above profile customizations.\n playground.reload_config()\n\n for fn, expected in test_cases:\n result = self._translate_result(fn(playground.settings))\n self.assertEqual(result, expected)\n\n finally:\n playground.cleanup()\n\n @staticmethod\n def _translate_result(result):\n if isinstance(result, ExtendedAtomDict):\n result = dict(result.items())\n elif isinstance(result, tuple):\n result = tuple(dict(x.items()) for x in result)\n return result\n","repo_name":"gentoo/portage","sub_path":"lib/portage/tests/resolver/test_profile_default_eapi.py","file_name":"test_profile_default_eapi.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":507,"dataset":"github-code","pt":"22"} +{"seq_id":"16594992711","text":"\"\"\"\nJoins Pixtream data packets into a Stream\n\"\"\"\n\nfrom itertools import takewhile, count\n\nfrom pixtream.util.event import Event\n\n__all__ = ['Joiner']\n\nclass Joiner(object):\n\n def __init__(self):\n\n self.on_data_joined = Event()\n self.on_end_join = Event()\n\n self._buffer = bytes()\n self._current_sequence = 0\n self._packets = {}\n self.sequences = set()\n\n def push_packet(self, packet):\n self._packets[packet.sequence] = packet.data\n self._join_buffer()\n self._update_sequences()\n\n def end_join(self):\n self.on_end_join.call(self)\n\n def pop_stream(self):\n buffer = self._buffer\n self._buffer = bytes()\n return buffer\n\n def _update_sequences(self):\n self.sequences = set(self._packets.keys())\n\n def _join_buffer(self):\n sequences = takewhile(lambda seq: seq in self._packets,\n count(self._current_sequence))\n\n sequences = list(sequences)\n\n if len(sequences) > 0:\n self._buffer += ''.join(self._packets[seq] for seq in sequences)\n\n for sequence in sequences:\n del self._packets[sequence]\n\n self._current_sequence = sequences[-1] + 1\n\n self.on_data_joined.call(self)\n\n\n","repo_name":"ceronman/pixtream","sub_path":"src/pixtream/peer/joiner.py","file_name":"joiner.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71531334803","text":"import json\n\nfrom models import Network, Subnet\n\n\nclass SubnetController:\n\n def __init__(self, network_id):\n self.network_id = network_id\n\n\n def allocate_subnet(self, additional_mask_bits, name):\n from .rest_controller import RestController\n rest = RestController()\n import ipaddress as ip\n net = rest.get_instance(resource='network', resource_id=self.network_id)\n network = Network(**net)\n if type(net) is None:\n pass\n else:\n used_sbns = list(map(lambda x: ip.IPv4Network(x.cidr), network.subnets))\n n = ip.IPv4Network(network.cidr)\n psns = list(n.subnets(int(additional_mask_bits)))\n\n for sbn in used_sbns:\n psns = list(filter(lambda x: not sbn.overlaps(x), psns))\n\n subnet_cidr = str(psns[0].compressed)\n\n return subnet_cidr\n\n","repo_name":"gnydick/orch","sub_path":"controllers/subnets.py","file_name":"subnets.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15023239072","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 6 11:49:26 2017\n\n@author: Krzysztof Stasiowski\nWEAIiIB - Informatyka\nWDI\nliczenie liczb pierwszych metodą sita Eratostenesa\n\"\"\"\n\nimport time\nimport math\nimport sys\n\ndef pierwsze(n):\n #deklaracja tablicy, pokazującej czy dana liczba jest pierwsza\n pierwsze = [False]*2 + [True]*(n-2)\n #Deklaracja tablicy do przechowywania końcowych liczb pierwszych \n ppierwsze =[0]*n\n ppi=0;# licznik liczb pierwszych\n d=math.sqrt(n) #obliczenie pierwiastka z n, musimy sprawdzić jedynie liczbymniejsze od tego pierwiastka\n\n for (i,p) in enumerate(pierwsze):#przechodzimy przez listę liczbpierwszych (i - sprawdzana liczba, p - czy jest pierwsza)\n if(i>d):\n break #zakończenie sprawdzania jeżeli i > pierwiastka z n\n if(not p):\n continue # pominięcie jeśeli i nie jest liczbą pierwszą\n ppierwsze[ppi]=i #dodanie liczby pierwszej do listy liczb pierwszych\n ppi+=1;#zwiększenie licznika liczb pierwszych\n for delete in range(i*i,n,i):\n pierwsze[delete]=False#usunięcie wszyskich wielokrotności liczby pierwszej\n \n return ppierwsze[0:ppi] #zwracamy wszyskie liczby pierwsze\n\n\nn = (int)(sys.argv[1]) #pobranie liczby sprawdzanych liczb jako argument programu\n\n#pobieramy czas przed wykonaniem funkcji\nstart_time = time.time()\np=pierwsze(n)\nprint(\"{} {}\".format(n,((time.time() - start_time))))\n#wyświetlamy różnicę czasu\n","repo_name":"Shinigami072/AGH-Python-Excercises","sub_path":"lab5/zad5-d-d.py","file_name":"zad5-d-d.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8599680473","text":"import re\nimport sys\n\nimport petl as etl\n\nimport connect_to_db\n\n\ndef transform(filename):\n\n table1 = (\n etl\n .fromcsv(filename)\n )\n\n # Create restaurants table data\n table2 = etl.rename(\n table1,\n {\n 'CAMIS': 'camis',\n 'DBA': 'name',\n 'BORO': 'boro',\n 'BUILDING': 'building',\n 'STREET': 'street',\n 'ZIPCODE': 'zipcode',\n 'PHONE': 'phone',\n 'CUISINE DESCRIPTION': 'cuisine_description',\n 'INSPECTION DATE': 'inspection_date',\n 'ACTION': 'action',\n 'VIOLATION CODE': 'violation_code',\n 'VIOLATION DESCRIPTION': 'violation_description',\n 'CRITICAL FLAG': 'critical_flag',\n 'SCORE': 'score',\n 'GRADE': 'grade',\n 'GRADE DATE': 'grade_date',\n 'RECORD DATE': 'record_date',\n 'INSPECTION TYPE': 'inspection_type'\n }\n )\n\n table3 = etl.convert(\n table2, {\n 'phone': lambda v: convert_phone(v),\n 'zipcode': lambda v: convert_zipcode(v)\n }\n )\n\n return table3\n\n\ndef convert_phone(phone):\n try:\n return int(re.sub(r'-|_|\\(|\\)|\\s', '', phone))\n except:\n return 0\n\ndef convert_zipcode(zip_code):\n try:\n return int(zip_code)\n except:\n return 0\n\n\ndef main():\n table = transform('DOHMH_New_York_City_Restaurant_Inspection_Results_sample.csv')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"roviedo/nyc_restaurant_task","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16531995517","text":"# perf.py - performance test routines\n'''helper extension to measure performance'''\n\nfrom mercurial import cmdutil, scmutil, util, commands, obsolete\nfrom mercurial import repoview, branchmap, merge, copies\nimport time, os, sys\nimport functools\n\ncmdtable = {}\ncommand = cmdutil.command(cmdtable)\n\ndef gettimer(ui, opts=None):\n \"\"\"return a timer function and formatter: (timer, formatter)\n\n This functions exist to gather the creation of formatter in a single\n place instead of duplicating it in all performance command.\"\"\"\n\n # enforce an idle period before execution to counteract power management\n time.sleep(ui.configint(\"perf\", \"presleep\", 1))\n\n if opts is None:\n opts = {}\n # redirect all to stderr\n ui = ui.copy()\n ui.fout = ui.ferr\n # get a formatter\n fm = ui.formatter('perf', opts)\n return functools.partial(_timer, fm), fm\n\ndef _timer(fm, func, title=None):\n results = []\n begin = time.time()\n count = 0\n while True:\n ostart = os.times()\n cstart = time.time()\n r = func()\n cstop = time.time()\n ostop = os.times()\n count += 1\n a, b = ostart, ostop\n results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))\n if cstop - begin > 3 and count >= 100:\n break\n if cstop - begin > 10 and count >= 3:\n break\n\n fm.startitem()\n\n if title:\n fm.write('title', '! %s\\n', title)\n if r:\n fm.write('result', '! result: %s\\n', r)\n m = min(results)\n fm.plain('!')\n fm.write('wall', ' wall %f', m[0])\n fm.write('comb', ' comb %f', m[1] + m[2])\n fm.write('user', ' user %f', m[1])\n fm.write('sys', ' sys %f', m[2])\n fm.write('count', ' (best of %d)', count)\n fm.plain('\\n')\n\n@command('perfwalk')\ndef perfwalk(ui, repo, *pats):\n timer, fm = gettimer(ui)\n try:\n m = scmutil.match(repo[None], pats, {})\n timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))\n except Exception:\n try:\n m = scmutil.match(repo[None], pats, {})\n timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))\n except Exception:\n timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))\n fm.end()\n\n@command('perfannotate')\ndef perfannotate(ui, repo, f):\n timer, fm = gettimer(ui)\n fc = repo['.'][f]\n timer(lambda: len(fc.annotate(True)))\n fm.end()\n\n@command('perfstatus',\n [('u', 'unknown', False,\n 'ask status to look for unknown files')])\ndef perfstatus(ui, repo, **opts):\n #m = match.always(repo.root, repo.getcwd())\n #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,\n # False))))\n timer, fm = gettimer(ui)\n timer(lambda: sum(map(len, repo.status(**opts))))\n fm.end()\n\n@command('perfaddremove')\ndef perfaddremove(ui, repo):\n timer, fm = gettimer(ui)\n try:\n oldquiet = repo.ui.quiet\n repo.ui.quiet = True\n matcher = scmutil.match(repo[None])\n timer(lambda: scmutil.addremove(repo, matcher, \"\", dry_run=True))\n finally:\n repo.ui.quiet = oldquiet\n fm.end()\n\ndef clearcaches(cl):\n # behave somewhat consistently across internal API changes\n if util.safehasattr(cl, 'clearcaches'):\n cl.clearcaches()\n elif util.safehasattr(cl, '_nodecache'):\n from mercurial.node import nullid, nullrev\n cl._nodecache = {nullid: nullrev}\n cl._nodepos = None\n\n@command('perfheads')\ndef perfheads(ui, repo):\n timer, fm = gettimer(ui)\n cl = repo.changelog\n def d():\n len(cl.headrevs())\n clearcaches(cl)\n timer(d)\n fm.end()\n\n@command('perftags')\ndef perftags(ui, repo):\n import mercurial.changelog\n import mercurial.manifest\n timer, fm = gettimer(ui)\n def t():\n repo.changelog = mercurial.changelog.changelog(repo.svfs)\n repo.manifest = mercurial.manifest.manifest(repo.svfs)\n repo._tags = None\n return len(repo.tags())\n timer(t)\n fm.end()\n\n@command('perfancestors')\ndef perfancestors(ui, repo):\n timer, fm = gettimer(ui)\n heads = repo.changelog.headrevs()\n def d():\n for a in repo.changelog.ancestors(heads):\n pass\n timer(d)\n fm.end()\n\n@command('perfancestorset')\ndef perfancestorset(ui, repo, revset):\n timer, fm = gettimer(ui)\n revs = repo.revs(revset)\n heads = repo.changelog.headrevs()\n def d():\n s = repo.changelog.ancestors(heads)\n for rev in revs:\n rev in s\n timer(d)\n fm.end()\n\n@command('perfdirs')\ndef perfdirs(ui, repo):\n timer, fm = gettimer(ui)\n dirstate = repo.dirstate\n 'a' in dirstate\n def d():\n dirstate.dirs()\n del dirstate._dirs\n timer(d)\n fm.end()\n\n@command('perfdirstate')\ndef perfdirstate(ui, repo):\n timer, fm = gettimer(ui)\n \"a\" in repo.dirstate\n def d():\n repo.dirstate.invalidate()\n \"a\" in repo.dirstate\n timer(d)\n fm.end()\n\n@command('perfdirstatedirs')\ndef perfdirstatedirs(ui, repo):\n timer, fm = gettimer(ui)\n \"a\" in repo.dirstate\n def d():\n \"a\" in repo.dirstate._dirs\n del repo.dirstate._dirs\n timer(d)\n fm.end()\n\n@command('perfdirstatefoldmap')\ndef perffoldmap(ui, repo):\n timer, fm = gettimer(ui)\n dirstate = repo.dirstate\n 'a' in dirstate\n def d():\n dirstate._foldmap.get('a')\n del dirstate._foldmap\n del dirstate._dirs\n timer(d)\n fm.end()\n\n@command('perfdirstatewrite')\ndef perfdirstatewrite(ui, repo):\n timer, fm = gettimer(ui)\n ds = repo.dirstate\n \"a\" in ds\n def d():\n ds._dirty = True\n ds.write()\n timer(d)\n fm.end()\n\n@command('perfmergecalculate',\n [('r', 'rev', '.', 'rev to merge against')])\ndef perfmergecalculate(ui, repo, rev):\n timer, fm = gettimer(ui)\n wctx = repo[None]\n rctx = scmutil.revsingle(repo, rev, rev)\n ancestor = wctx.ancestor(rctx)\n # we don't want working dir files to be stat'd in the benchmark, so prime\n # that cache\n wctx.dirty()\n def d():\n # acceptremote is True because we don't want prompts in the middle of\n # our benchmark\n merge.calculateupdates(repo, wctx, rctx, ancestor, False, False, False,\n acceptremote=True)\n timer(d)\n fm.end()\n\n@command('perfpathcopies', [], \"REV REV\")\ndef perfpathcopies(ui, repo, rev1, rev2):\n timer, fm = gettimer(ui)\n ctx1 = scmutil.revsingle(repo, rev1, rev1)\n ctx2 = scmutil.revsingle(repo, rev2, rev2)\n def d():\n copies.pathcopies(ctx1, ctx2)\n timer(d)\n fm.end()\n\n@command('perfmanifest', [], 'REV')\ndef perfmanifest(ui, repo, rev):\n timer, fm = gettimer(ui)\n ctx = scmutil.revsingle(repo, rev, rev)\n t = ctx.manifestnode()\n def d():\n repo.manifest._mancache.clear()\n repo.manifest._cache = None\n repo.manifest.read(t)\n timer(d)\n fm.end()\n\n@command('perfchangeset')\ndef perfchangeset(ui, repo, rev):\n timer, fm = gettimer(ui)\n n = repo[rev].node()\n def d():\n repo.changelog.read(n)\n #repo.changelog._cache = None\n timer(d)\n fm.end()\n\n@command('perfindex')\ndef perfindex(ui, repo):\n import mercurial.revlog\n timer, fm = gettimer(ui)\n mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg\n n = repo[\"tip\"].node()\n def d():\n cl = mercurial.revlog.revlog(repo.svfs, \"00changelog.i\")\n cl.rev(n)\n timer(d)\n fm.end()\n\n@command('perfstartup')\ndef perfstartup(ui, repo):\n timer, fm = gettimer(ui)\n cmd = sys.argv[0]\n def d():\n os.system(\"HGRCPATH= %s version -q > /dev/null\" % cmd)\n timer(d)\n fm.end()\n\n@command('perfparents')\ndef perfparents(ui, repo):\n timer, fm = gettimer(ui)\n nl = [repo.changelog.node(i) for i in xrange(1000)]\n def d():\n for n in nl:\n repo.changelog.parents(n)\n timer(d)\n fm.end()\n\n@command('perfctxfiles')\ndef perfparents(ui, repo, x):\n x = int(x)\n timer, fm = gettimer(ui)\n def d():\n len(repo[x].files())\n timer(d)\n fm.end()\n\n@command('perfrawfiles')\ndef perfparents(ui, repo, x):\n x = int(x)\n timer, fm = gettimer(ui)\n cl = repo.changelog\n def d():\n len(cl.read(x)[3])\n timer(d)\n fm.end()\n\n@command('perflookup')\ndef perflookup(ui, repo, rev):\n timer, fm = gettimer(ui)\n timer(lambda: len(repo.lookup(rev)))\n fm.end()\n\n@command('perfrevrange')\ndef perfrevrange(ui, repo, *specs):\n timer, fm = gettimer(ui)\n revrange = scmutil.revrange\n timer(lambda: len(revrange(repo, specs)))\n fm.end()\n\n@command('perfnodelookup')\ndef perfnodelookup(ui, repo, rev):\n timer, fm = gettimer(ui)\n import mercurial.revlog\n mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg\n n = repo[rev].node()\n cl = mercurial.revlog.revlog(repo.svfs, \"00changelog.i\")\n def d():\n cl.rev(n)\n clearcaches(cl)\n timer(d)\n fm.end()\n\n@command('perflog',\n [('', 'rename', False, 'ask log to follow renames')])\ndef perflog(ui, repo, **opts):\n timer, fm = gettimer(ui)\n ui.pushbuffer()\n timer(lambda: commands.log(ui, repo, rev=[], date='', user='',\n copies=opts.get('rename')))\n ui.popbuffer()\n fm.end()\n\n@command('perfmoonwalk')\ndef perfmoonwalk(ui, repo):\n \"\"\"benchmark walking the changelog backwards\n\n This also loads the changelog data for each revision in the changelog.\n \"\"\"\n timer, fm = gettimer(ui)\n def moonwalk():\n for i in xrange(len(repo), -1, -1):\n ctx = repo[i]\n ctx.branch() # read changelog data (in addition to the index)\n timer(moonwalk)\n fm.end()\n\n@command('perftemplating')\ndef perftemplating(ui, repo):\n timer, fm = gettimer(ui)\n ui.pushbuffer()\n timer(lambda: commands.log(ui, repo, rev=[], date='', user='',\n template='{date|shortdate} [{rev}:{node|short}]'\n ' {author|person}: {desc|firstline}\\n'))\n ui.popbuffer()\n fm.end()\n\n@command('perfcca')\ndef perfcca(ui, repo):\n timer, fm = gettimer(ui)\n timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))\n fm.end()\n\n@command('perffncacheload')\ndef perffncacheload(ui, repo):\n timer, fm = gettimer(ui)\n s = repo.store\n def d():\n s.fncache._load()\n timer(d)\n fm.end()\n\n@command('perffncachewrite')\ndef perffncachewrite(ui, repo):\n timer, fm = gettimer(ui)\n s = repo.store\n s.fncache._load()\n def d():\n s.fncache._dirty = True\n s.fncache.write()\n timer(d)\n fm.end()\n\n@command('perffncacheencode')\ndef perffncacheencode(ui, repo):\n timer, fm = gettimer(ui)\n s = repo.store\n s.fncache._load()\n def d():\n for p in s.fncache.entries:\n s.encode(p)\n timer(d)\n fm.end()\n\n@command('perfdiffwd')\ndef perfdiffwd(ui, repo):\n \"\"\"Profile diff of working directory changes\"\"\"\n timer, fm = gettimer(ui)\n options = {\n 'w': 'ignore_all_space',\n 'b': 'ignore_space_change',\n 'B': 'ignore_blank_lines',\n }\n\n for diffopt in ('', 'w', 'b', 'B', 'wB'):\n opts = dict((options[c], '1') for c in diffopt)\n def d():\n ui.pushbuffer()\n commands.diff(ui, repo, **opts)\n ui.popbuffer()\n title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')\n timer(d, title)\n fm.end()\n\n@command('perfrevlog',\n [('d', 'dist', 100, 'distance between the revisions')],\n \"[INDEXFILE]\")\ndef perfrevlog(ui, repo, file_, **opts):\n timer, fm = gettimer(ui)\n from mercurial import revlog\n dist = opts['dist']\n def d():\n r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)\n for x in xrange(0, len(r), dist):\n r.revision(r.node(x))\n\n timer(d)\n fm.end()\n\n@command('perfrevset',\n [('C', 'clear', False, 'clear volatile cache between each call.')],\n \"REVSET\")\ndef perfrevset(ui, repo, expr, clear=False):\n \"\"\"benchmark the execution time of a revset\n\n Use the --clean option if need to evaluate the impact of build volatile\n revisions set cache on the revset execution. Volatile cache hold filtered\n and obsolete related cache.\"\"\"\n timer, fm = gettimer(ui)\n def d():\n if clear:\n repo.invalidatevolatilesets()\n for r in repo.revs(expr): pass\n timer(d)\n fm.end()\n\n@command('perfvolatilesets')\ndef perfvolatilesets(ui, repo, *names):\n \"\"\"benchmark the computation of various volatile set\n\n Volatile set computes element related to filtering and obsolescence.\"\"\"\n timer, fm = gettimer(ui)\n repo = repo.unfiltered()\n\n def getobs(name):\n def d():\n repo.invalidatevolatilesets()\n obsolete.getrevs(repo, name)\n return d\n\n allobs = sorted(obsolete.cachefuncs)\n if names:\n allobs = [n for n in allobs if n in names]\n\n for name in allobs:\n timer(getobs(name), title=name)\n\n def getfiltered(name):\n def d():\n repo.invalidatevolatilesets()\n repoview.filterrevs(repo, name)\n return d\n\n allfilter = sorted(repoview.filtertable)\n if names:\n allfilter = [n for n in allfilter if n in names]\n\n for name in allfilter:\n timer(getfiltered(name), title=name)\n fm.end()\n\n@command('perfbranchmap',\n [('f', 'full', False,\n 'Includes build time of subset'),\n ])\ndef perfbranchmap(ui, repo, full=False):\n \"\"\"benchmark the update of a branchmap\n\n This benchmarks the full repo.branchmap() call with read and write disabled\n \"\"\"\n timer, fm = gettimer(ui)\n def getbranchmap(filtername):\n \"\"\"generate a benchmark function for the filtername\"\"\"\n if filtername is None:\n view = repo\n else:\n view = repo.filtered(filtername)\n def d():\n if full:\n view._branchcaches.clear()\n else:\n view._branchcaches.pop(filtername, None)\n view.branchmap()\n return d\n # add filter in smaller subset to bigger subset\n possiblefilters = set(repoview.filtertable)\n allfilters = []\n while possiblefilters:\n for name in possiblefilters:\n subset = branchmap.subsettable.get(name)\n if subset not in possiblefilters:\n break\n else:\n assert False, 'subset cycle %s!' % possiblefilters\n allfilters.append(name)\n possiblefilters.remove(name)\n\n # warm the cache\n if not full:\n for name in allfilters:\n repo.filtered(name).branchmap()\n # add unfiltered\n allfilters.append(None)\n oldread = branchmap.read\n oldwrite = branchmap.branchcache.write\n try:\n branchmap.read = lambda repo: None\n branchmap.write = lambda repo: None\n for name in allfilters:\n timer(getbranchmap(name), title=str(name))\n finally:\n branchmap.read = oldread\n branchmap.branchcache.write = oldwrite\n fm.end()\n\n@command('perfloadmarkers')\ndef perfloadmarkers(ui, repo):\n \"\"\"benchmark the time to parse the on-disk markers for a repo\n\n Result is the number of markers in the repo.\"\"\"\n timer, fm = gettimer(ui)\n timer(lambda: len(obsolete.obsstore(repo.svfs)))\n fm.end()\n","repo_name":"steen-lund/mercurial-for-amigaos4","sub_path":"contrib/perf.py","file_name":"perf.py","file_ext":"py","file_size_in_byte":15330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"3331614065","text":"from collections import deque\n\nclass TreeNode:\n def __init__(self,val):\n self.val = val\n self.left, self.right = None,None\ndef TopView(root):\n result = []\n if root is None:\n return result\n mp = {}\n queue = deque()\n queue.append((root,0))\n while queue: \n currentNode,hd = queue.popleft()\n if hd not in mp:\n mp[hd] = currentNode.val\n if currentNode.left:\n queue.append((currentNode.left,hd-1))\n if currentNode.right:\n queue.append((currentNode.right,hd+1))\n mp = sorted(mp.items())\n for _,v in mp:\n result.append(v)\n return result\n\ndef main():\n root = TreeNode(2)\n root.left = TreeNode(4)\n root.right = TreeNode(6)\n root.left.left = TreeNode(7)\n root.left.right = TreeNode(9)\n root.right.left = TreeNode(10)\n root.right.right = TreeNode(15)\n root.left.left.left= TreeNode(18)\n print(\"Top order traversal: \"+str(TopView(root)))\nmain()","repo_name":"agnik2019/Pythonista","sub_path":"data_structure_algorithm/graph_Tree/top_view_binary_tree.py","file_name":"top_view_binary_tree.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22986515943","text":"\nclass AdaboostClassifier:\n\n def __init__(self, n_estimators, learning_rate, algorithm, max_depth,\n random_state=None):\n self.n_estimators = n_estimators\n self.learning_rate = learning_rate\n self.algorithm = algorithm\n self.random_state = random_state\n self.max_depth = max_depth\n self.estimator = None\n\n def fit(self, X, Y, sample_weight=None):\n import sklearn.tree\n\n self.n_estimators = int(self.n_estimators)\n self.learning_rate = float(self.learning_rate)\n self.max_depth = int(self.max_depth)\n base_estimator = sklearn.tree.DecisionTreeClassifier(max_depth=self.max_depth)\n\n estimator = sklearn.ensemble.AdaBoostClassifier(\n base_estimator=base_estimator,\n n_estimators=self.n_estimators,\n learning_rate=self.learning_rate,\n algorithm=self.algorithm,\n random_state=self.random_state\n )\n\n estimator.fit(X, Y, sample_weight=sample_weight)\n\n self.estimator = estimator\n return self\n\n def predict(self, X):\n if self.estimator is None:\n raise NotImplementedError\n return self.estimator.predict(X)","repo_name":"yunx-z/lite-bo","sub_path":"litebo/model/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"12657265801","text":"import requests\nfrom bs4 import BeautifulSoup\n\nURL = 'https://www.kfc.ru/restaurants'\nHEADERS = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.160 YaBrowser/22.5.1.985 Yowser/2.5 Safari/537.36',\n 'accept': '*/*'\n}\n\n\ndef get_html(url, params=None):\n r = requests.get(url, headers=HEADERS, params=params)\n return r\n\n\ndef get_content(html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n items = soup.find_all('div', class_='Mujm2VkJ7g')\n print(items)\n # print(soup.find_all('div', {\"class\": \"Mujm2VkJ7g\"}))\n\n\n# def get_content(html):\n# soup = BeautifulSoup(html, 'html.parser')\n# items = soup.find_all('class', class_='Mujm2VkJ7g')\n# items = soup.find_all('div', attrs={'id':'Mujm2VkJ7g'})\n# restaurants = []\n# for item in items:\n# restaurants.append({\n# 'title': item.find('div', class_='Mujm2VkJ7g').get_text(strip=True)\n# })\n#\n# print(restaurants)\n\n\ndef parse():\n html = get_html(URL)\n if html.status_code == 200:\n get_content(html.text)\n else:\n print('Error')\n\n\nparse()\n","repo_name":"Maksim-Lukashyk-1996/Test_Project","sub_path":"kfc.py","file_name":"kfc.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13716057785","text":"# N, M = map(int,input().split())\n\n# map_ = [list(map(int,input().split())) for _ in range(N)]\n\ndef check(i,j,k,l):\n for row in range(i,j+1):\n for col in range(k,l+1):\n if map_[row][col]<= 0 :\n return False\n return True\n\ndef main():\n max_size = 0\n for i in range(N):\n for j in range(i,N):\n for k in range(M):\n for l in range(k,M):\n if check(i,j,k,l):\n max_size = max(max_size,(j-i+1)*(l-k+1))\n if max_size :\n print(max_size)\n else :\n print(-1)\nmap_ = [\n [1,2,3,4],\n [5,6,7,8],\n [9,10,11,12],\n [13,14,15,16]\n]\n\nprint(map_[1:3][2:3])","repo_name":"SeongSuKim95/Python_practice","sub_path":"Implementation_practice/양의정수사각형.py","file_name":"양의정수사각형.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13315196760","text":"\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import SignUpForm\n\ndef home_view(request):\n user = request.user\n uc_form = SignUpForm()\n registration_error = False\n\n if 'user_registration' in request.POST:\n uc_form = SignUpForm(request.POST)\n if uc_form.is_valid():\n uc_form.save()\n new_user = authenticate(username=uc_form.cleaned_data['username'],\n password=uc_form.cleaned_data['password1'],\n )\n login(request, new_user)\n return redirect(\"/timeline/\")\n else: \n registration_error = True \n\n context = {\n 'r_error': registration_error,\n 'uc_form': uc_form,\n 'user': user,\n }\n\n if request.user.is_authenticated:\n return redirect('posts:main-post-view') \n else:\n return render(request, 'main/home.html', context)\n\n\n","repo_name":"j0a0vieira/PAP-Project-Django","sub_path":"src/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19401951062","text":"# coding=gbk\n\"\"\"\nauthor(作者): Channing Xie(谢琛)\ntime(时间): 2020/4/8 8:58\nfilename(文件名): test4.py\nfunction description(功能描述):\n 我们需要读写二进制数据,比如图像,声音文件等。使用open()函数的rb和wb模式就可以实现对二进制数据的读写\n 将文本写入二进制文件:binary_file.write(text.encode(\"utf-8\"))\n 从二进制文件中读取数据并转化为文本:binary_file.read().decode(\"utf-8\")\n...\n\"\"\"\nwith open(\"binary.bin\", 'ab') as file:\n file.write(b'Hello World!\\n')\n file.write(\"Hello World!\\n\".encode(\"utf-8\"))\nwith open(\"binary.bin\", 'rb') as file:\n data = file.read()\n for d in data:\n print(d)\n","repo_name":"XieChen10983/python_cookbook","sub_path":"第5章 文件和IO/5.4 读写二进制数据/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3041731597","text":"from lexical_analyzer.constant import *\nfrom lexical_analyzer.identifier import *\nfrom lexical_analyzer.label import *\nfrom lexical_analyzer.lexem import *\nfrom lexical_analyzer.symbol_classes import *\nfrom lexical_analyzer.table_tokens import *\nfrom lexical_analyzer.lexical_exeptions import *\n\n\nclass LexicalAnalyzer(object):\n\n def __init__(self, program_text):\n self.ch = ''\n self.lex = ''\n self.state = 1\n self.has_to_read = True\n self.current_line = 1\n self.table_tokens = TableTokens()\n self.program_text = program_text\n self.collection_records_lexem = []\n self.collection_records_idn = []\n self.collection_records_con = []\n self.collection_records_label = []\n self.errors = LexicalExeptions()\n\n self.is_goto = False\n self.index_labels_without_declareted = 5\n\n def __next_char(self):\n if len(self.program_text) > 0:\n self.ch = self.program_text[0]\n if len(self.program_text) >= 2:\n self.program_text = self.program_text[1:]\n else:\n self.program_text = ''\n else:\n self.ch = ''\n\n\n def __lexemic_growth(self):\n self.lex += self.ch\n\n def __lexemic_growth_and_read_next_char(self):\n self.__lexemic_growth()\n self.__next_char()\n\n def which_line(self, symbol):\n if symbol == '\\n':\n self.current_line = self.current_line + 1\n\n def has_errors(self):\n if len(self.errors.get_errors()) > 0:\n return True\n return False\n\n def get_errors(self):\n return self.errors.get_errors()\n\n def get_output_lexems(self):\n return self.collection_records_lexem\n\n def get_id_table(self):\n return self.collection_records_idn\n\n def get_constants_table(self):\n return self.collection_records_con\n\n def get_labels_table(self):\n return self.collection_records_label\n\n def __err_for_not_defined_labels(self, all_labels_in_used_but_not_declareted):\n count = len(all_labels_in_used_but_not_declareted)\n print(count)\n if count > 0:\n for i in range(count):\n self.errors.add_exeption('You use not defined label {label} on line {line}'.format(\n label=all_labels_in_used_but_not_declareted[i],\n line=Lexem.get_number_line_for_lexem(self.collection_records_lexem, all_labels_in_used_but_not_declareted[i])))\n\n def __label_in_err_labels(self, label, err_labels):\n count = len(err_labels)\n if count > 0:\n for i in range(count):\n if label == err_labels[i]:\n return True\n return False\n\n\n def run(self):\n all_labels_in_used_but_not_declareted = []\n while len(self.program_text) >= 0:\n if self.state == 1:\n if self.has_to_read:\n self.__next_char()\n while SymbolClasses.white_separator(self.ch):\n self.__next_char()\n self.lex = ''\n if SymbolClasses.letter(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 2\n elif SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 3\n elif SymbolClasses.plus(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 6\n elif SymbolClasses.dot(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 4\n elif SymbolClasses.single_character_splitters(self.ch):\n self.__lexemic_growth()\n self.has_to_read = True\n\n if (self.ch == '\\n'):\n Lexem.add_lex(self.collection_records_lexem, self.current_line, '\\\\n', self.table_tokens.get_code(self.lex))\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n\n self.which_line(self.ch)\n\n self.state = 1\n elif SymbolClasses.less(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 7\n elif SymbolClasses.more(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 8\n elif SymbolClasses.equally(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 9\n elif SymbolClasses.exclamation(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 10\n elif SymbolClasses.dollar(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 11\n else:\n if self.ch:\n self.errors.add_exeption(\"Your symbol '{symbol}' on line {line} is not valid\".format(\n symbol = self.ch, line = self.current_line))\n break\n\n elif self.state == 2:\n if SymbolClasses.letter(self.ch) or SymbolClasses.number(self.ch):\n self.state = 2\n self.__lexemic_growth_and_read_next_char()\n else:\n if not self.table_tokens.get_code(self.lex):\n code_idn = Identifier.find_idn(self.lex, self.collection_records_idn)\n if not code_idn:\n current_code_idn = len(self.collection_records_idn)\n type_idn = Lexem.find_type_idn(self.collection_records_lexem)\n if type_idn:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 100, current_code_idn + 1)\n Identifier.add_idn(self.collection_records_idn, current_code_idn + 1, self.lex, type_idn)\n else:\n self.errors.add_exeption(\"You use not defined identificator {id} on line {line}\".format(\n id = self.lex, line = self.current_line))\n break\n\n else:\n type_idn = Lexem.find_type_idn(self.collection_records_lexem)\n if type_idn:\n self.errors.add_exeption('You duplicate variable {var} on line {line}'.format(\n var = self.lex, line = self.current_line))\n break\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 100, code_idn)\n else:\n if self.lex == 'goto':\n self.is_goto = True\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = False\n self.state = 1\n\n\n elif self.state == 3:\n if SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 3\n elif SymbolClasses.dot(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 5\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 101, code_con = len(self.collection_records_con) + 1)\n Constant.add_con(self.collection_records_con, len(self.collection_records_con) + 1, self.lex, Constant.type_con(self.lex))\n self.state = 1\n self.has_to_read = False\n\n\n elif self.state == 4:\n if SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 5\n else:\n self.errors.add_exeption('You have not entered the fractional part of the constant. line = {line}'.format(\n line = self.current_line))\n break\n\n\n elif self.state == 5:\n if SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 5\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 101, code_con = len(self.collection_records_con) + 1)\n Constant.add_con(self.collection_records_con, len(self.collection_records_con) + 1, self.lex, Constant.type_con(self.lex))\n self.state = 1\n self.has_to_read = False\n\n\n elif self.state == 6:\n if self.collection_records_lexem[len(self.collection_records_lexem) - 1].code_lexem == 101 or self.collection_records_lexem[len(self.collection_records_lexem) - 1].code_lexem == 100 or self.collection_records_lexem[len(self.collection_records_lexem) - 1].code_lexem == 25:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.state = 1\n self.has_to_read = False\n elif SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 3\n elif SymbolClasses.dot(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 4\n else:\n self.errors.add_exeption('You have not entered a constant on line {line}'.format(line = self.current_line))\n break\n\n\n elif self.state == 7:\n if SymbolClasses.less(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n elif SymbolClasses.equally(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = False\n self.state = 1\n\n\n elif self.state == 8:\n if SymbolClasses.more(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n elif SymbolClasses.equally(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = False\n self.state = 1\n\n elif self.state == 9:\n if SymbolClasses.equally(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = False\n self.state = 1\n\n elif self.state == 10:\n if SymbolClasses.equally(self.ch):\n self.__lexemic_growth()\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, self.table_tokens.get_code(self.lex))\n self.has_to_read = True\n self.state = 1\n else:\n self.errors.add_exeption('Error. You must enter != on line {line}'.format(line = self.current_line))\n break\n\n elif self.state == 11:\n if SymbolClasses.letter(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 12\n else:\n self.errors.add_exeption('Error label')\n break\n\n elif self.state == 12:\n if SymbolClasses.letter(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 12\n elif SymbolClasses.number(self.ch):\n self.__lexemic_growth_and_read_next_char()\n self.state = 12\n else:\n code_label = Label.find_label(self.lex, self.collection_records_label)\n if (not code_label or code_label == 0) and not self.is_goto and self.ch == ':':\n if len(all_labels_in_used_but_not_declareted) > 0:\n if self.__label_in_err_labels(self.lex, all_labels_in_used_but_not_declareted):\n self.index_labels_without_declareted = all_labels_in_used_but_not_declareted.index(self.lex)\n if self.index_labels_without_declareted >= 0:\n index_label = Label.find_not_defined_label(all_labels_in_used_but_not_declareted.pop(self.index_labels_without_declareted), self.collection_records_label)\n self.collection_records_label[index_label].code = index_label + 1\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 102,\n code_label = index_label + 1)\n Lexem.set_code_for_label(self.lex, index_label + 1, self.collection_records_lexem)\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 102,\n code_label = len(self.collection_records_label) + 1)\n Label.add_label(self.collection_records_label, len(self.collection_records_label) + 1, self.lex)\n self.index_labels_without_declareted = -1\n elif not code_label and code_label != 0 and (self.is_goto or not self.ch == ':'):\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 102,\n code_label = len(self.collection_records_label) + 1)\n Label.add_label(self.collection_records_label, 0, self.lex)\n all_labels_in_used_but_not_declareted.append(self.lex)\n self.is_goto = False\n elif code_label and not self.is_goto and self.ch == ':':\n self.errors.add_exeption('You duplicate label {label} on line {line}'.format(label = self.lex, line = self.current_line))\n break\n else:\n Lexem.add_lex(self.collection_records_lexem, self.current_line, self.lex, 102, code_label = code_label)\n self.is_goto = False\n self.has_to_read = False\n self.state = 1\n\n else:\n self.errors.add_exeption('Error')\n self.__err_for_not_defined_labels(all_labels_in_used_but_not_declareted)\n\n\n def show_output_table(self):\n Lexem.show_lexes(self.collection_records_lexem)\n Identifier.show_idn(self.collection_records_idn)\n Constant.show_con(self.collection_records_con)\n Label.show_label(self.collection_records_label)","repo_name":"smartTigerCode98/translator_desctop","sub_path":"lexical_analyzer/lexical_analyzer.py","file_name":"lexical_analyzer.py","file_ext":"py","file_size_in_byte":16384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15717756727","text":"import logging\n\nimport pandas as pd\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.economy import wsj_model\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=E1137\n@log_start_end(log=logger)\ndef currencies_command():\n \"\"\"Currencies overview [Wall St. Journal]\"\"\"\n\n # Debug user input\n if imps.DEBUG:\n logger.debug(\"econ-currencies\")\n\n # Retrieve data\n df = wsj_model.global_currencies()\n df = df.fillna(\"\")\n\n # Check for argument\n if df.empty:\n raise Exception(\"No available data found\")\n\n df[\"Last Price\"] = pd.to_numeric(df[\"Last\"].astype(float))\n df[\"Change\"] = pd.to_numeric(df[\"Chng\"].astype(float))\n df[\"%Chng\"] = pd.to_numeric(df[\"%Chng\"].astype(float))\n\n # Debug user output\n if imps.DEBUG:\n logger.debug(df.to_string())\n\n formats = {\n \"Last Price\": \"${:.2f}\",\n \"Change\": \"${:.2f}\",\n \"%Chng\": \"{:.2f}%\",\n }\n for col, value in formats.items():\n df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640\n\n df[\"Change\"] = df.apply(lambda x: f\"{x['Change']} ({x['%Chng']})\", axis=1)\n\n df.set_index(\" \", inplace=True)\n\n font_color = [\"white\"] * 2 + [\n [\"#e4003a\" if boolv else \"#00ACFF\" for boolv in df[\"%Chng\"].str.contains(\"-\")]\n ]\n df = df.drop(columns=[\"Last\", \"Chng\", \"%Chng\"])\n fig = imps.plot_df(\n df,\n fig_size=(620, (40 + (40 * len(df.index)))),\n col_width=[4.2, 2.4, 3],\n tbl_header=imps.PLT_TBL_HEADER,\n tbl_cells=imps.PLT_TBL_CELLS,\n font=imps.PLT_TBL_FONT,\n row_fill_color=imps.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n fig.update_traces(\n cells=(\n dict(\n align=[\"center\", \"right\"],\n font=dict(color=font_color),\n )\n )\n )\n imagefile = imps.save_image(\"econ-currencies.png\", fig)\n return {\n \"title\": \"Economy: [WSJ] Currencies\",\n \"imagefile\": imagefile,\n }\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"bots/economy/currencies.py","file_name":"currencies.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74308058960","text":"import multiprocessing\nimport time\nstart_time = time.time()\n\n\ndef read_rows(file_name=\"practice\", rows=2, start_column=1, end_column=2):\n from openpyxl import load_workbook\n workbook = load_workbook(filename=file_name)\n workbook.sheetnames\n sheet = workbook.active\n\n lists = []\n n = start_column - 1\n while (n <= (end_column)):\n n += 1\n cell = (sheet.cell(row=rows, column=n))\n lists.append(cell.value)\n return lists\n\n\ndef read_table(file_name=\"practice\", start_row=1, end_row=2, start_column=1, end_column=2):\n lists = []\n n = start_row - 1\n end_row -= 1\n while (n <= (end_row)):\n n += 1\n lists.append(read_rows(file_name, n, start_column, end_column))\n return lists\n\n\ndef protein_values(file_protei=\"file\"):\n file_protein = file_protei + \".xlsx\"\n return (read_table(file_protein, 24, 31, 3, 13))\n\n\ndef ABS_values(file_ab=\"file\"):\n file_abs = file_ab + \".xlsx\"\n return (read_table(file_abs, 24, 31, 3, 13))\n\n\ndef Atpase(abs=0, protein=1):\n x = (abs / 0.2431) * 1000\n w = x / protein\n v = w / 0.2\n o = v / 60\n Atpase = o * 4.5\n return Atpase\n\n\ndef carbonyl(abs=0):\n car = (abs * 0.18) / 132000 # car = carbonyl\n return car\n\n\ndef MDA(abs=0, protein=1):\n a = abs * 3\n b = 1.56 * 100000 * 0.4 * protein\n MDA = a / b\n return MDA\n\n\ndef H2O2(abs=0, protein=1):\n a = 2.499 - abs\n b = 0.3175 * protein\n H2O2 = a / b\n return H2O2\n\n\ndef sulphy(abs=0):\n a = 1 - (abs / 14.150)\n b = (1.5 * a) / 1000\n c = (b * 1000) / 0.2\n return c\n\n\ndef column_result_Atpase(ABS_file=\"file\", protein_file=\"file\", r=0):\n lists = []\n n = -1\n while (n < 12):\n n += 1\n lists.append(Atpase((ABS_values()[r][n]), (protein_values()[r][n])))\n if (n == 11):\n break\n\n return lists\n\n\ndef table_result_Atpase(ABS_file=\"file\", protein_file=\"file\"):\n lists = []\n lists.append(column_result_Atpase(0))\n lists.append(column_result_Atpase(1))\n lists.append(column_result_Atpase(2))\n lists.append(column_result_Atpase(3))\n lists.append(column_result_Atpase(4))\n lists.append(column_result_Atpase(5))\n lists.append(column_result_Atpase(6))\n lists.append(column_result_Atpase(7))\n\n return lists\n\n\ndef column_result_sulphy(ABS_file=\"file\", r=0):\n lists = []\n n = -1\n while (n < 12):\n n += 1\n lists.append(sulphy(ABS_values(ABS_file)[r][n]))\n if (n == 11):\n break\n\n return lists\n\n\ndef table_result_sulphy(ABS_file=\"file\"):\n lists = []\n lists.append(column_result_sulphy(ABS_file, 0))\n lists.append(column_result_sulphy(ABS_file, 1))\n lists.append(column_result_sulphy(ABS_file, 2))\n lists.append(column_result_sulphy(ABS_file, 3))\n lists.append(column_result_sulphy(ABS_file, 4))\n lists.append(column_result_sulphy(ABS_file, 5))\n lists.append(column_result_sulphy(ABS_file, 6))\n lists.append(column_result_sulphy(ABS_file, 7))\n\n\ndef column_result_carbonyl(ABS_file=\"file\", r=0):\n lists = []\n n = -1\n while (n < 12):\n n += 1\n lists.append(carbonyl(ABS_values(ABS_file)[r][n]))\n if (n == 11):\n break\n\n return lists\n\n\ndef table_result_carbonyl(ABS_file=\"file\"):\n lists = []\n lists.append(column_result_carbonyl(ABS_file, 0))\n lists.append(column_result_carbonyl(ABS_file, 1))\n lists.append(column_result_carbonyl(ABS_file, 2))\n lists.append(column_result_carbonyl(ABS_file, 3))\n lists.append(column_result_carbonyl(ABS_file, 4))\n lists.append(column_result_carbonyl(ABS_file, 5))\n lists.append(column_result_carbonyl(ABS_file, 6))\n lists.append(column_result_carbonyl(ABS_file, 7))\n\n return lists\n\n\ndef column_result_H2O2(ABS_file=\"file\", protein_file=\"file\", r=0):\n lists = []\n n = -1\n while (n < 12):\n n += 1\n lists.append(H2O2((ABS_values(ABS_file)[r][n]), (protein_values(protein_file)[r][n])))\n if (n == 11):\n break\n\n return lists\n\n\ndef table_result_H2O2(ABS_file=\"file\", protein_file=\"file\"):\n lists = []\n lists.append(column_result_H2O2(ABS_file, protein_file, 0))\n lists.append(column_result_H2O2(ABS_file, protein_file, 1))\n lists.append(column_result_H2O2(ABS_file, protein_file, 2))\n lists.append(column_result_H2O2(ABS_file, protein_file, 3))\n lists.append(column_result_H2O2(ABS_file, protein_file, 4))\n lists.append(column_result_H2O2(ABS_file, protein_file, 5))\n lists.append(column_result_H2O2(ABS_file, protein_file, 6))\n lists.append(column_result_H2O2(ABS_file, protein_file, 7))\n\n return lists\n\n\ndef column_result_MDA(ABS_file=\"file\", protein_file=\"file\", r=0):\n lists = []\n n = -1\n while (n < 12):\n n += 1\n lists.append(MDA((ABS_values(ABS_file)[r][n]), (protein_values(protein_file)[r][n])))\n if (n == 11):\n break\n\n return lists\n\n\ndef table_result_MDA(ABS_file=\"file\", protein_file=\"file\"):\n lists = []\n lists.append(column_result_MDA(ABS_file, protein_file, 0))\n lists.append(column_result_MDA(ABS_file, protein_file, 1))\n lists.append(column_result_MDA(ABS_file, protein_file, 2))\n lists.append(column_result_MDA(ABS_file, protein_file, 3))\n lists.append(column_result_MDA(ABS_file, protein_file, 4))\n lists.append(column_result_MDA(ABS_file, protein_file, 5))\n lists.append(column_result_MDA(ABS_file, protein_file, 6))\n lists.append(column_result_MDA(ABS_file, protein_file, 7))\n\n return lists\n\n\n\nselect = int(input('''\nEnter 1 for a \"sulphy\" table' \nEnter 2 for a \"Carbonyl\" table' \nEnter 3 for a \"Atpase\" table\nEnter 4 for a \"MDA\" table\nEnter 5 for a \"H2O2\" table\n '''))\n\nif (select <= 2):\n ABS_file = input(\"Enter file containing ABS values: \")\n\n\nelse:\n if (select > 2):\n ABS_file = input(\"Enter file containing ABS values: \")\n protein_file = input(\"Enter file containing protein values: \")\n\ndef selection():\n if (select == 1):\n return table_result_sulphy(ABS_file)\n elif (select == 2):\n return table_result_carbonyl(ABS_file)\n\n elif (select == 3):\n return table_result_Atpase(ABS_file, protein_file)\n elif (select == 4):\n return table_result_MDA(ABS_file, protein_file)\n elif (select == 5):\n return table_result_H2O2(ABS_file, protein_file)\n\n\n\n\nfrom openpyxl import Workbook\nworkbook = Workbook()\nsheet = workbook.active\ndef result_1():\n sheet[\"C24\"] = selection[0][0]\n sheet[\"D24\"] = selection[0][1]\n sheet[\"E24\"] = selection[0][2]\n sheet[\"F24\"] = selection[0][3]\n sheet[\"G24\"] = selection[0][4]\n sheet[\"H24\"] = selection[0][5]\n sheet[\"I24\"] = selection[0][6]\n sheet[\"J24\"] = selection[0][7]\n sheet[\"K24\"] = selection[0][8]\n sheet[\"L24\"] = selection[0][9]\n sheet[\"M24\"] = selection[0][10]\n sheet[\"N24\"] = selection[0][11]\n\ndef result_2():\n sheet[\"C25\"] = selection[1][0]\n sheet[\"D25\"] = selection[1][1]\n sheet[\"E25\"] = selection[1][2]\n sheet[\"F25\"] = selection[1][3]\n sheet[\"G25\"] = selection[1][4]\n sheet[\"H25\"] = selection[1][5]\n sheet[\"I25\"] = selection[1][6]\n sheet[\"J25\"] = selection[1][7]\n sheet[\"K25\"] = selection[1][8]\n sheet[\"L25\"] = selection[1][9]\n sheet[\"M25\"] = selection[1][10]\n sheet[\"N25\"] = selection[1][11]\n\ndef result_3():\n sheet[\"C26\"] = selection[2][0]\n sheet[\"D26\"] = selection[2][1]\n sheet[\"E26\"] = selection[2][2]\n sheet[\"F26\"] = selection[2][3]\n sheet[\"G26\"] = selection[2][4]\n sheet[\"H26\"] = selection[2][5]\n sheet[\"I26\"] = selection[2][6]\n sheet[\"J26\"] = selection[2][7]\n sheet[\"K26\"] = selection[2][8]\n sheet[\"L26\"] = selection[2][9]\n sheet[\"M26\"] = selection[2][10]\n sheet[\"N26\"] = selection[2][11]\n\ndef result_4():\n sheet[\"C27\"] = selection[3][0]\n sheet[\"D27\"] = selection[3][1]\n sheet[\"E27\"] = selection[3][2]\n sheet[\"F27\"] = selection[3][3]\n sheet[\"G27\"] = selection[3][4]\n sheet[\"H27\"] = selection[3][5]\n sheet[\"I27\"] = selection[3][6]\n sheet[\"J27\"] = selection[3][7]\n sheet[\"K27\"] = selection[3][8]\n sheet[\"L27\"] = selection[3][9]\n sheet[\"M27\"] = selection[3][10]\n sheet[\"N27\"] = selection[3][11]\n\ndef result_5():\n sheet[\"C26\"] = selection[4][0]\n sheet[\"D26\"] = selection[4][1]\n sheet[\"E26\"] = selection[4][2]\n sheet[\"F26\"] = selection[4][3]\n sheet[\"G26\"] = selection[4][4]\n sheet[\"H26\"] = selection[4][5]\n sheet[\"I26\"] = selection[4][6]\n sheet[\"J26\"] = selection[4][7]\n sheet[\"K26\"] = selection[4][8]\n sheet[\"L26\"] = selection[4][9]\n sheet[\"M26\"] = selection[4][10]\n sheet[\"N26\"] = selection[4][11]\ndef result_6():\n sheet[\"C27\"] = selection[5][0]\n sheet[\"D27\"] = selection[5][1]\n sheet[\"E27\"] = selection[5][2]\n sheet[\"F27\"] = selection[5][3]\n sheet[\"G27\"] = selection[5][4]\n sheet[\"H27\"] = selection[5][5]\n sheet[\"I27\"] = selection[5][6]\n sheet[\"J27\"] = selection[5][7]\n sheet[\"K27\"] = selection[5][8]\n sheet[\"L27\"] = selection[5][9]\n sheet[\"M27\"] = selection[5][10]\n sheet[\"N27\"] = selection[5][11]\n\n\ndef result_7():\n\n sheet[\"C28\"] = selection[6][0]\n sheet[\"D28\"] = selection[6][1]\n sheet[\"E28\"] = selection[6][2]\n sheet[\"F28\"] = selection[6][3]\n sheet[\"G28\"] = selection[6][4]\n sheet[\"H28\"] = selection[6][5]\n sheet[\"I28\"] = selection[6][6]\n sheet[\"J28\"] = selection[6][7]\n sheet[\"K28\"] = selection[6][8]\n sheet[\"L28\"] = selection[6][9]\n sheet[\"M28\"] = selection[6][10]\n sheet[\"N28\"] = selection[6][11]\ndef result_8():\n sheet[\"C29\"] = selection[7][0]\n sheet[\"D29\"] = selection[7][1]\n sheet[\"E29\"] = selection[7][2]\n sheet[\"F29\"] = selection[7][3]\n sheet[\"G29\"] = selection[7][4]\n sheet[\"H29\"] = selection[7][5]\n sheet[\"I29\"] = selection[7][6]\n sheet[\"J29\"] = selection[7][7]\n sheet[\"K29\"] = selection[7][8]\n sheet[\"L29\"] = selection[7][9]\n sheet[\"M29\"] = selection[7][10]\n sheet[\"N29\"] = selection[7][11]\n\n\n\np1 = multiprocessing.Process(target=result_1())\np2 = multiprocessing.Process(target=result_2())\np3 = multiprocessing.Process(target=result_3())\np4 = multiprocessing.Process(target=result_4())\np5 = multiprocessing.Process(target=result_5())\np6 = multiprocessing.Process(target=result_6())\np7 = multiprocessing.Process(target=result_7())\np8 = multiprocessing.Process(target=result_8())\n\np1.start()\np2.start()\np3.start()\np4.start()\np5.start()\np6.start()\np7.start()\np8.start()\n\np1.join()\np2.join()\np3.join()\np4.join()\np5.join()\np6.join()\np7.join()\np8.join()\n\n\nworkbook.save(filename=\"result.xlsx\")\nprint(\"--- %s seconds ---\" % (time.time() - start_time))","repo_name":"Ayomikun-Adekoya/my-","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":10605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6559955691","text":"import matplotlib.pyplot as plt\n\nfrom transform_images import unscale\n\n\ndef view_dataset(images):\n # plot emojis in order\n lines = 12\n f, axarr = plt.subplots(lines, lines, sharex=True, sharey=True, figsize=(12, 12))\n for i in range(lines ** 2):\n a = axarr[i % lines, i // lines]\n img = images[i]\n a.axis(\"off\")\n a.imshow(img)\n plt.subplots_adjust(wspace=0, hspace=0)\n\n\ndef view_samples(epoch, samples, nrows, ncols, figsize=(5, 5)):\n fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,\n sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.axis('off')\n img = unscale(img)\n im = ax.imshow(img, aspect='equal')\n\n plt.subplots_adjust(wspace=0, hspace=0)\n return fig, axes\n\n\ndef view_epoch_samples(samples, figsize=(5, 5)):\n epochs = len(samples)\n ncols = 12\n nrows = epochs // ncols\n fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,\n sharey=True, sharex=True)\n print(len(samples))\n for ax, s in zip(axes.flatten(), samples):\n ax.axis('off')\n img = s[3]\n img = unscale(img)\n im = ax.imshow(img, aspect='equal')\n\n plt.subplots_adjust(wspace=0, hspace=0)\n return fig, axes\n\n\ndef view_losses(losses):\n plt.subplots()\n plt.plot(losses.T[0], label='Discriminator', alpha=0.5)\n plt.plot(losses.T[1], label='Generator', alpha=0.5)\n plt.title(\"Training Losses\")\n plt.legend()\n","repo_name":"Kyksi/DCGAN-image-generation","sub_path":"plot_images.py","file_name":"plot_images.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27985038970","text":"# 左子树加和\n# 给定一个二叉树, 返回所有左叶子的和\n# 带标签的递归, 设置标签side标示搜寻的是左子树还是右子树\nimport sys\nsys.path.append(\"E:/code/packages\")\nfrom AVLTree import *\n\ndef sumLeftLeaves(root, side=''):\n if not root:\n return 0\n # 如果不是叶子节点, 则返回两子树的和\n elif root.left or root.right:\n return sumLeftLeaves(root.left,'l') + sumLeftLeaves(root.right, 'r')\n # 如果是叶子节点, 则仅在其为左节点时加和\n elif side == 'l':\n return root.value\n # 右节点则加0\n else:\n return 0\nif __name__ == \"__main__\":\n tree = AVLTree([1, 2, 3, 4, 5, 6, 7, 8, 9])\n print(sumLeftLeaves(tree.root))\n","repo_name":"Canadasunyan/codes","sub_path":"029-左叶节点加和.py","file_name":"029-左叶节点加和.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"26586938368","text":"import importlib\nimport os\nimport re\nimport shutil\nimport sys\nimport traceback\nimport warnings\nimport webbrowser\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Union\n\nimport click\nimport pkg_resources\nimport yaml\nfrom cookiecutter.main import cookiecutter\n\nimport kedro.config.default_logger # noqa\nfrom kedro import __version__ as version\nfrom kedro.cli.utils import CommandCollection, KedroCliError\nfrom kedro.context import load_context\n\nKEDRO_PATH = os.path.dirname(kedro.__file__)\nTEMPLATE_PATH = os.path.join(KEDRO_PATH, \"template\")\nCONTEXT_SETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\n_VERBOSE = True\n\nLOGO = r\"\"\"\n _ _\n| | _____ __| |_ __ ___\n| |/ / _ \\/ _` | '__/ _ \\\n| < __/ (_| | | | (_) |\n|_|\\_\\___|\\__,_|_| \\___/\nv{}\n\"\"\".format(\n version\n)\n\n\n@click.group(context_settings=CONTEXT_SETTINGS, name=\"Kedro\")\n@click.version_option(version, \"--version\", \"-V\", help=\"Show version and exit\")\n@click.option(\n \"--verbose\",\n \"-v\",\n is_flag=True,\n help=\"See extensive logging and error stack traces.\",\n)\ndef cli(verbose):\n \"\"\"Kedro is a CLI for creating and using Kedro projects\n For more information, type ``kedro info``.\n\n When inside a Kedro project (created with `kedro new`) commands from\n the project's `kedro_cli.py` file will also be available here.\n \"\"\"\n global _VERBOSE # pylint: disable=global-statement\n _VERBOSE = verbose\n\n\nENTRY_POINT_GROUPS = {\n \"global\": \"kedro.global_commands\",\n \"project\": \"kedro.project_commands\",\n \"init\": \"kedro.init\",\n \"line_magic\": \"kedro.line_magic\",\n}\n\n\n@cli.command()\ndef info():\n \"\"\"Get more information about kedro.\n \"\"\"\n click.secho(LOGO, fg=\"green\")\n click.echo(\n \"kedro allows teams to create analytics\\n\"\n \"projects. It is developed as part of\\n\"\n \"the Kedro initiative at QuantumBlack.\"\n )\n\n plugin_versions = {}\n plugin_hooks = defaultdict(set)\n for hook, group in ENTRY_POINT_GROUPS.items():\n for entry_point in pkg_resources.iter_entry_points(group=group):\n module_name = entry_point.module_name.split(\".\")[0]\n plugin_version = pkg_resources.get_distribution(module_name).version\n plugin_versions[module_name] = plugin_version\n plugin_hooks[module_name].add(hook)\n\n click.echo()\n if plugin_versions:\n click.echo(\"Installed plugins:\")\n for plugin_name, plugin_version in sorted(plugin_versions.items()):\n hooks = \",\".join(sorted(plugin_hooks[plugin_name]))\n click.echo(\"{}: {} (hooks:{})\".format(plugin_name, plugin_version, hooks))\n else:\n click.echo(\"No plugins installed\")\n\n\n@cli.command(short_help=\"Create a new kedro project.\")\n@click.option(\n \"--config\",\n \"-c\",\n type=click.Path(exists=True),\n help=\"Non-interactive mode, using a configuration yaml file.\",\n)\ndef new(config):\n \"\"\"Create a new kedro project, either interactively or from a\n configuration file.\n\n Create projects according to the Kedro default project template. This\n template is ideal for analytics projects and comes with a data\n architecture, folders for notebooks, configuration, source code, etc.\n\n \\b\n ``kedro new``\n Create a new project interactively.\n\n \\b\n You will have to provide four choices:\n * ``Project Name`` - name of the project, not to be confused with name of\n the project folder.\n * ``Repository Name`` - intended name of your project folder.\n * ``Package Name`` - intended name of your Python package.\n * ``Generate Example Pipeline`` - yes/no to generating an example pipeline\n in your project.\n\n \\b\n ``kedro new --config ``\n ``kedro new -c ``\n Create a new project from configuration.\n\n * ``config.yml`` - The configuration YAML must contain at the top level\n the above parameters (project_name, repo_name,\n python_package, include_example) and output_dir - the\n parent directory for the new project directory.\n \"\"\"\n _create_project(config, _VERBOSE)\n\n\n@cli.command(short_help=\"See the kedro API docs and introductory tutorial.\")\ndef docs():\n \"\"\"Display the API docs and introductory tutorial in the browser,\n using the packaged HTML doc files.\"\"\"\n index_path = \"file://\" + os.path.realpath(\n os.path.join(\n os.path.realpath(__file__), os.pardir, os.pardir, \"html\", \"index.html\"\n )\n )\n click.echo(\"Opening \" + index_path)\n webbrowser.open(index_path)\n\n\ndef _clean_pycache(project_path):\n # Since template is part of the Kedro package __pycache__ is generated.\n # This method recursively cleans all __pycache__ folders.\n to_delete = [\n filename.resolve()\n for filename in project_path.rglob(\"**/*\")\n if str(filename).endswith(\"__pycache__\")\n ]\n\n for file in to_delete: # pragma: no cover\n shutil.rmtree(str(file))\n\n\ndef _create_project(config_path: str, verbose: bool):\n \"\"\"Implementation of the kedro new cli command.\n\n Args:\n config_path: In non-interactive mode, the path of the config.yml which\n should contain the project_name, output_dir and repo_name.\n verbose: Extensive debug terminal logs.\n \"\"\"\n try:\n if config_path:\n config = _parse_config(config_path, verbose)\n config = _check_config_ok(config_path, config)\n else:\n config = _get_config_from_prompts()\n config.setdefault(\"kedro_version\", version)\n\n result_path = Path(\n cookiecutter(\n TEMPLATE_PATH,\n output_dir=config[\"output_dir\"],\n no_input=True,\n extra_context=config,\n )\n )\n\n if not config[\"include_example\"]:\n (result_path / \"data\" / \"01_raw\" / \"iris.csv\").unlink()\n\n pipelines_dir = result_path / \"src\" / config[\"python_package\"] / \"pipelines\"\n\n for dir_path in [\n pipelines_dir / \"data_engineering\",\n pipelines_dir / \"data_science\",\n ]:\n shutil.rmtree(str(dir_path))\n\n _clean_pycache(result_path)\n _print_kedro_new_success_message(result_path)\n except click.exceptions.Abort: # pragma: no cover\n _handle_exception(\"User interrupt.\")\n # we don't want the user to see a stack trace on the cli\n except Exception: # pylint: disable=broad-except\n _handle_exception(\"Failed to generate project.\")\n\n\ndef _get_config_from_prompts() -> Dict:\n \"\"\"Ask user to provide necessary inputs.\n\n Returns:\n Resulting config dictionary.\n\n \"\"\"\n\n def _get_user_input(\n text: str,\n default: Any = None,\n assert_or_check_funcs: Union[Callable, List[Callable]] = None,\n ) -> Any:\n \"\"\"Get user input and validate it.\n\n Args:\n text: Text to display in command line prompt.\n default: Default value for the input.\n assert_or_check_funcs: List of functions to apply to user input.\n Value is overridden by function output if the latter is\n not None.\n\n Returns:\n Processed user value.\n\n \"\"\"\n if callable(assert_or_check_funcs):\n assert_or_check_funcs = [assert_or_check_funcs]\n else:\n assert_or_check_funcs = assert_or_check_funcs or []\n while True:\n try:\n value = click.prompt(text, default=default)\n for _func in assert_or_check_funcs:\n _func(value)\n except KedroCliError as exc:\n click.secho(str(exc), fg=\"red\", err=True)\n else:\n break\n return value\n\n # set output directory to the current directory\n output_dir = os.path.abspath(os.path.curdir)\n\n # get project name\n project_name_prompt = _get_prompt_text(\n \"Project Name:\",\n \"Please enter a human readable name for your new project.\",\n \"Spaces and punctuation are allowed.\",\n )\n\n project_name = _get_user_input(project_name_prompt, default=\"New Kedro Project\")\n\n normalized_project_name = re.sub(r\"[^\\w-]+\", \"-\", project_name).lower().strip(\"-\")\n\n # get repo name\n repo_name_prompt = _get_prompt_text(\n \"Repository Name:\",\n \"Please enter a directory name for your new project repository.\",\n \"Alphanumeric characters, hyphens and underscores are allowed.\",\n \"Lowercase is recommended.\",\n )\n repo_name = _get_user_input(\n repo_name_prompt, normalized_project_name, _assert_repo_name_ok\n )\n\n # get python package_name\n default_pkg_name = normalized_project_name.replace(\"-\", \"_\")\n pkg_name_prompt = _get_prompt_text(\n \"Python Package Name:\",\n \"Please enter a valid Python package name for your project package.\",\n \"Alphanumeric characters and underscores are allowed.\",\n \"Lowercase is recommended. Package name must start with a letter \"\n \"or underscore.\",\n )\n python_package = _get_user_input(\n pkg_name_prompt, default_pkg_name, _assert_pkg_name_ok\n )\n\n # option for whether iris example code is included in the project\n code_example_prompt = _get_prompt_text(\n \"Generate Example Pipeline:\",\n \"Do you want to generate an example pipeline in your project?\",\n \"Good for first-time users. (default=N)\",\n )\n include_example = click.confirm(code_example_prompt, default=False)\n\n return {\n \"output_dir\": output_dir,\n \"project_name\": project_name,\n \"repo_name\": repo_name,\n \"python_package\": python_package,\n \"include_example\": include_example,\n }\n\n\ndef _parse_config(config_path: str, verbose: bool) -> Dict:\n \"\"\"Parse the config YAML from its path.\n\n Args:\n config_path: The path of the config.yml file.\n verbose: Print the config contents.\n\n Raises:\n Exception: If the file cannot be parsed.\n\n Returns:\n The config as a dictionary.\n\n \"\"\"\n try:\n with open(config_path, \"r\") as config_file:\n config = yaml.safe_load(config_file)\n\n if verbose:\n click.echo(config_path + \":\")\n click.echo(yaml.dump(config, default_flow_style=False))\n\n return config\n\n except Exception as exc:\n click.secho(\"Failed to parse \" + config_path, fg=\"red\", err=True)\n _show_example_config()\n raise exc\n\n\ndef _check_config_ok(config_path: str, config: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Check that the configuration file contains all needed variables.\n\n Args:\n config_path: The path of the config file.\n config: The config as a dictionary.\n\n Returns:\n Config dictionary.\n\n Raises:\n KedroCliError: If the config file is empty or does not contain all\n keys from template/cookiecutter.json and output_dir.\n\n \"\"\"\n if config is None:\n _show_example_config()\n raise KedroCliError(config_path + \" is empty\")\n\n required_in_config = _get_default_config().keys()\n\n for var in required_in_config:\n if var not in config:\n click.echo(\"\\n\" + config_path + \":\")\n click.echo(yaml.dump(config, default_flow_style=False))\n _show_example_config()\n\n raise KedroCliError(\"[\" + var + \"] not found in \" + config_path)\n\n config[\"output_dir\"] = _fix_user_path(config[\"output_dir\"])\n _assert_output_dir_ok(config[\"output_dir\"])\n _assert_repo_name_ok(config[\"repo_name\"])\n _assert_pkg_name_ok(config[\"python_package\"])\n _assert_include_example_ok(config[\"include_example\"])\n return config\n\n\ndef _get_default_config():\n default_config_path = os.path.join(TEMPLATE_PATH, \"default_config.yml\")\n with open(default_config_path) as default_config_file:\n default_config = yaml.safe_load(default_config_file)\n return default_config\n\n\ndef _assert_output_dir_ok(output_dir: str):\n \"\"\"Check that output directory exists.\n\n Args:\n output_dir: Output directory path.\n\n Raises:\n KedroCliError: If the output directory does not exist.\n\n \"\"\"\n if not os.path.exists(output_dir):\n message = (\n \"`{}` is not a valid output directory. \"\n \"It must be a relative or absolute path \"\n \"to an existing directory.\".format(output_dir)\n )\n raise KedroCliError(message)\n\n\ndef _assert_pkg_name_ok(pkg_name: str):\n \"\"\"Check that python package name is in line with PEP8 requirements.\n\n Args:\n pkg_name: Candidate Python package name.\n\n Raises:\n KedroCliError: If package name violates the requirements.\n \"\"\"\n\n base_message = \"`{}` is not a valid Python package name.\".format(pkg_name)\n if not re.match(r\"^[a-zA-Z_]\", pkg_name):\n message = base_message + \" It must start with a letter or underscore.\"\n raise KedroCliError(message)\n if len(pkg_name) < 2:\n message = base_message + \" It must be at least 2 characters long.\"\n raise KedroCliError(message)\n if not re.match(r\"^\\w+$\", pkg_name[1:]):\n message = (\n base_message + \" It must contain only letters, \"\n \"digits, and/or underscores.\"\n )\n raise KedroCliError(message)\n\n\ndef _assert_repo_name_ok(repo_name):\n if not re.match(r\"^\\w+(-*\\w+)*$\", repo_name):\n message = (\n \"`{}` is not a valid repository name. It must contain \"\n \"only word symbols and/or hyphens, must also start and \"\n \"end with alphanumeric symbol.\".format(repo_name)\n )\n raise KedroCliError(message)\n\n\ndef _assert_include_example_ok(include_example):\n if not isinstance(include_example, bool):\n message = (\n \"`{}` value for `include_example` is invalid. It must be a boolean value \"\n \"True or False.\".format(include_example)\n )\n raise KedroCliError(message)\n\n\ndef _fix_user_path(output_dir):\n output_dir = output_dir or \"\"\n output_dir = os.path.expanduser(output_dir)\n\n result = os.path.abspath(output_dir)\n return result\n\n\ndef _show_example_config():\n click.secho(\"Example of valid config.yml:\")\n default_config = _get_default_config()\n for key, value in default_config.items():\n click.secho(\n click.style(key + \": \", bold=True, fg=\"yellow\")\n + click.style(str(value), fg=\"cyan\")\n )\n click.echo(\"\")\n\n\ndef _print_kedro_new_success_message(result):\n click.secho(\n \"Change directory to the project generated in \" + str(result.resolve()),\n fg=\"green\",\n )\n click.secho(\n \"A best-practice setup includes initialising git and creating \"\n \"a virtual environment before running `kedro install` to install \"\n \"project-specific dependencies. Refer to the Kedro documentation: \"\n \"https://kedro.readthedocs.io/\"\n )\n\n\ndef _get_prompt_text(title, *text):\n title = title.strip().title()\n title = click.style(title + \"\\n\" + \"=\" * len(title), bold=True)\n prompt_text = [title] + list(text)\n return \"\\n\".join(str(x).strip() for x in prompt_text) + \"\\n\"\n\n\ndef get_project_context(key: str = \"context\", **kwargs) -> Any:\n \"\"\"Gets the context value from context associated with the key.\n\n Args:\n key: Optional key to get associated value from Kedro context.\n Supported keys are \"verbose\" and \"context\", and it defaults to \"context\".\n kwargs: Optional custom arguments defined by users, which will be passed into\n the constructor of the projects KedroContext subclass.\n\n Returns:\n Requested value from Kedro context dictionary or the default if the key\n was not found.\n\n Raises:\n KedroCliError: When the key is not found and the default value was not\n specified.\n \"\"\"\n\n def _deprecation_msg(key):\n msg_dict = {\n \"get_config\": [\"config_loader\", \"ConfigLoader\"],\n \"create_catalog\": [\"catalog\", \"DataCatalog\"],\n \"create_pipeline\": [\"pipeline\", \"Pipeline\"],\n \"template_version\": [\"project_version\", None],\n \"project_name\": [\"project_name\", None],\n \"project_path\": [\"project_path\", None],\n }\n attr, obj_name = msg_dict[key]\n msg = '`get_project_context(\"{}\")` is now deprecated. '.format(key)\n if obj_name:\n msg += (\n \"This is still returning a function that returns `{}` \"\n \"instance, however passed arguments have no effect anymore \"\n \"since Kedro 0.15.0. \".format(obj_name)\n )\n msg += (\n \"Please get `KedroContext` instance by calling `get_project_context()` \"\n \"and use its `{}` attribute.\".format(attr)\n )\n\n return msg\n\n context = load_context(Path.cwd(), **kwargs)\n # Dictionary to be compatible with existing Plugins. Future plugins should\n # retrieve necessary Kedro project properties from context\n value = {\n \"context\": context,\n \"get_config\": lambda project_path, env=None, **kw: context.config_loader,\n \"create_catalog\": lambda config, **kw: context.catalog,\n \"create_pipeline\": lambda **kw: context.pipeline,\n \"template_version\": context.project_version,\n \"project_name\": context.project_name,\n \"project_path\": context.project_path,\n \"verbose\": _VERBOSE,\n }[key]\n\n if key not in (\"verbose\", \"context\"):\n warnings.warn(_deprecation_msg(key), DeprecationWarning)\n\n return deepcopy(value)\n\n\ndef load_entry_points(name: str) -> List[str]:\n \"\"\"Load package entry point commands.\n\n Args:\n name: The key value specified in ENTRY_POINT_GROUPS.\n\n Raises:\n Exception: If loading an entry point failed.\n\n Returns:\n List of entry point commands.\n\n \"\"\"\n entry_points = pkg_resources.iter_entry_points(group=ENTRY_POINT_GROUPS[name])\n entry_point_commands = []\n for entry_point in entry_points:\n try:\n entry_point_commands.append(entry_point.load())\n except Exception: # pylint: disable=broad-except\n _handle_exception(\n \"Loading {} commands from {}\".format(name, str(entry_point)), end=False\n )\n return entry_point_commands\n\n\ndef _init_plugins():\n group = ENTRY_POINT_GROUPS[\"init\"]\n for entry_point in pkg_resources.iter_entry_points(group=group):\n try:\n init_hook = entry_point.load()\n init_hook()\n except Exception: # pylint: disable=broad-except\n _handle_exception(\"Initializing {}\".format(str(entry_point)), end=False)\n\n\ndef main(): # pragma: no cover\n \"\"\"Main entry point, look for a `kedro_cli.py` and if found add its\n commands to `kedro`'s then invoke the cli.\n \"\"\"\n _init_plugins()\n\n global_groups = [cli]\n global_groups.extend(load_entry_points(\"global\"))\n project_groups = []\n\n # load project commands from kedro_cli.py\n path = Path.cwd()\n kedro_cli_path = path / \"kedro_cli.py\"\n\n if kedro_cli_path.exists():\n try:\n sys.path.append(str(path))\n kedro_cli = importlib.import_module(\"kedro_cli\")\n project_groups.extend(load_entry_points(\"project\"))\n project_groups.append(kedro_cli.cli)\n except Exception: # pylint: disable=broad-except\n _handle_exception(\n \"Cannot load commands from {}\".format(str(kedro_cli_path))\n )\n CommandCollection(\n (\"Global commands\", global_groups),\n (\"Project specific commands\", project_groups),\n )()\n\n\ndef _handle_exception(msg, end=True):\n \"\"\"Pretty print the current exception then exit.\"\"\"\n if _VERBOSE:\n click.secho(traceback.format_exc(), nl=False, fg=\"yellow\")\n else:\n etype, value, _ = sys.exc_info()\n click.secho(\n \"\".join(*traceback.format_exception_only(etype, value))\n + \"Run with --verbose to see the full exception\",\n fg=\"yellow\",\n )\n if end:\n raise KedroCliError(msg)\n click.secho(\"Error: \" + msg, fg=\"red\") # pragma: no cover\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main()\n","repo_name":"matbarPL/stance-tagger-kedro","sub_path":"Lib/site-packages/kedro/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":20278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6740607952","text":"# encoding: utf-8\nimport cv2\nimport time\ncap=cv2.VideoCapture(0)\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\");\ni=0\n\nwhile(1):\n ret,frame = cap.read()\n image=frame\n start = time.time()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n for (x, y, w, h) in faces:\n # Create rectangle around faces\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 3)\n end=time.time()\n fps=1/(end-start)\n fps=round(fps,2)\n cv2.putText(image,'FPS:{}'.format(fps),(15,30),cv2.FONT_ITALIC,0.8,(0,0,255),3)\n k=cv2.waitKey(1)\n if k==27: #按下ESC退出窗口\n break\n elif k==ord('s'): #按下s保存图片\n cv2.imwrite('./'+str(i)+'.jpg',frame)\n i+=1\n cv2.imshow(\"capture\", image)\ncap.release()","repo_name":"yuchen02/Face-Dect-on-RaspberryPi","sub_path":"02Face-Dect-Realtime.py","file_name":"02Face-Dect-Realtime.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41136806740","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport LMOptimizer\nimport Regression\n\nplt.ion()\nfigure, axes = plt.subplots(1, 3, sharey=False)\ndata_axes = axes[0]\nJ_axes = axes[1]\ndJ_axes = axes[2]\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('Usage: python ' + sys.argv[0] + ' wi=?? r=?? x0=?? x1=?? m=?? alpha=??')\n sys.exit(1)\n # end if\n args = {'w': {}, 'b': 0, 'alpha': 1.0, 'r': 0.0, 'x0': -1.0, 'x1': 1.0, 'm': 20}\n eps = 1e-8\n for a in sys.argv:\n v = a.split('=')\n if len(v) == 2:\n if v[0][0] == 'w':\n if v[0] == 'w0':\n args['b'] = v[1]\n else:\n args['w'][int(v[0][1:])] = v[1]\n # end if\n else:\n args[v[0]] = v[1]\n # end if\n # end if\n # end for\n\n # -- Build input objects from arguments\n args['w'] = sorted(args['w'].items(), reverse=True)\n w = np.zeros((1, args['w'][0][0]))\n for e in args['w']:\n w[0, e[0] - 1] = float(e[1])\n # end for\n b = float(args['b'])\n r = float(args['r'])\n m = int(args['m'])\n alpha = float(args['alpha'])\n x0 = float(args['x0'])\n x1 = float(args['x1'])\n eps = 1e-8\n n = w.shape[1]\n\n # -- Create data\n X = np.matrix(\n [((x1 - x0) * float(i) / float(m - 1)) + x0 for i in range(m)]\n ).T\n for i in range(n - 1):\n X = np.append(X, np.power(X[:, 0], i + 2), axis=1)\n # end for\n Y = (X @ w.T) + b\n X += np.random.randn(m, n) * r\n Y += np.random.randn(m, 1) * r\n\n data_axes.scatter([X[:, 0]], [Y], color='red', marker='+')\n\n # Solve regression\n cost_function = Regression.MSECost(X, Y)\n lm_regression, iterations = LMOptimizer.levenber_marquardt(\n cost_function,\n alpha=alpha)\n\n print('=================================================================')\n print('Levenberg Marquardt descent : ' + str(lm_regression))\n print('Number of iterations : ' + str(iterations))\n print('=================================================================')\n\n plt.ioff()\n\n vX = np.ones((m, 1))\n vX = np.append(\n vX,\n np.matrix(\n [((x1 - x0) * float(i) / float(m - 1)) + x0 for i in range(m)]\n ).T,\n axis=1\n )\n for i in range(n - 1):\n vX = np.append(vX, np.power(vX[:, 1], i + 2), axis=1)\n # end for\n\n g_vY = vX @ lm_regression.T\n data_axes.plot(vX[:, 1], g_vY, color='green')\n\n plt.show()","repo_name":"VargasM/Machine_Learning","sub_path":"taller1_LM/leo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42651357605","text":"import argparse\nfrom ec2_instance import *\nimport json\nimport os\n\n\ndef run_instance(config, user_data):\n instance = Instance(config, user_data)\n instance_creation_response = instance.create()\n\n\ndef terminate_instance(config):\n instance = Instance(config)\n response = instance.terminate_instance()\n return response\n\ndef create_s3Bucket():\n \"\"\"[TO DO]\"\"\"\n pass\n\n\nif __name__ == \"__main__\":\n config_path = os.path.join(os.getcwd(), \"configs\")\n parser = argparse.ArgumentParser(description=\"AWS EC2 and S3\")\n parser.add_argument(\"-c\", \"--config\", default=os.path.join(config_path, \"configs.json\"), metavar=\"Config\", type=str)\n parser.add_argument(\"-u\", \"--userdata\", default=os.path.join(config_path, \"user-data\"), metavar=\"User Data\", type=str)\n args = parser.parse_args()\n\n #run_instance(args.config, args.userdata)\n terminate_instance(args.config)\n","repo_name":"bngom/AWS-Python-Automatic-Provisionning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33465561575","text":"# Step 1 - Open Function - Filename as an argument\n# 'with' keyword closes a file once it is finished being accessed\nwith open('pi_digits.txt') as file_object:\n contents = file_object.read()\nprint(contents.rstrip())\n\n# File Paths - Relative (Relative to where the running program file is located)\nwith open('text_files/pi_digits_v2.txt') as file_object:\n contents = file_object.read()\nprint(contents)\n\n# You can use absolute paths as well as relative paths\n\n# Reading Line by Line\nfilename = 'pi_digits.txt'\n\nwith open(filename) as file_object:\n for line in file_object:\n print(line.rstrip())\n\n# Making a List of LInes from a File\nwith open(filename) as file_object:\n lines = file_object.readlines()\n\nfor line in lines:\n print(line.rstrip())\n\n# Working with a File's Contents\npi_string = \"\"\nfor line in lines:\n pi_string += line.rstrip().lstrip()\n\nprint(pi_string)\nprint(len(pi_string))\n\n\n# Does your birthday appear in the first 1 million digits of pie\nmillionpi = 'ResourceFiles/chapter_10/pi_million_digits.txt'\nwith open(millionpi) as file_object:\n lines = file_object.readlines()\n\nmil_pi_string = \"\"\n\nfor line in lines:\n mil_pi_string += line.strip()\n\nbirthday = input(\"Enter your birthday, in the form mmddyy: \")\nif birthday in mil_pi_string:\n print('Your birthday appears in the first million digits of pi!')\nelse:\n print('Your birthday does not appear in the first million digits of pi')\n","repo_name":"TheCoderHero/PythonCrashCourse2","sub_path":"language/10a_reading.py","file_name":"10a_reading.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21291719173","text":"from django.http import HttpResponse\nimport requests\nfrom .models import Meter, Archive, Alert\nimport json\nfrom django.utils import timezone\nfrom datetime import timedelta\nimport time\n\nultraSecretApiKey = '9e70c43d19034a0cbd246eb2444c40d7' #for Azure\n\n#reset the tables\nMeter.objects.all().delete()\nArchive.objects.all().delete()\nAlert.objects.all().delete()\n\nmockapiurl = 'http://localhost:8000/ofc' #not in use\n\napiurl = 'https://westeurope.api.cognitive.microsoft.com/vision/v3.2/read/analyze/'\n\n#This one big view contains most of the api logic\ndef bigApiView(request):\n #If this is gonna be the first entry, get the time for later use\n if Archive.objects.exists() == False:\n global starttime\n starttime = timezone.now()\n\n timeval = timezone.now() # used to that archive and meter match dates\n\n #1ST REQUEST\n #get the image url from request params\n url = request.GET.get('url')\n data = {'url': url}\n #set custom headers\n headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': ultraSecretApiKey}\n requrl = apiurl\n #send json request to azure api\n r = requests.post(requrl, json=data, headers=headers)\n #get the link that we need to see results in json data\n enigma = r.headers\n print(enigma)\n enigmalink = enigma['Operation-Location']\n\n time.sleep(5) #ensure that azure has time to process, since ratelimiting and slow connection\n\n #SECOND REQUEST\n headers = {'Ocp-Apim-Subscription-Key': ultraSecretApiKey}\n r = requests.get(enigmalink, headers=headers)\n\n #debugging info\n print(r.json())\n print(r.json()['analyzeResult']['readResults'][0]['lines'][0]['text'])\n\n #get digit from json data response\n number = int(r.json()['analyzeResult']['readResults'][0]['lines'][0]['text'])\n\n if Meter.objects.exists():\n oldnum = Meter.objects.latest('time').value\n\n #find difference\n delta = number - oldnum\n print(delta) \n\n value = delta\n else:\n value = number\n \n #save digit to database along with time of creation\n water = Meter.objects.create(value=value, time=timeval)\n #archive it as well\n archive = Archive.objects.create(value=value, time=timeval)\n water.save\n archive.save\n\n\n #get all the meter values\n values = Meter.objects.values_list('value')\n\n #Hacky fix for tuple issue in database entries\n realvals = []\n for i in values:\n realvals.append(i[0])\n \n print(realvals)\n\n #if it has been over 24 hours since the first entry and delta hasnt been 0 in the last 24 hours, trigger alarm\n if (0 not in realvals) & (timezone.now() - starttime >= timedelta(hours=24)) :\n Alert.objects.create(tragedy=archive.id)\n #Show current alerts since uptime in HttpResponse\n if Alert.objects.exists():\n badlist = ''\n for record in Alert.objects.all():\n epicid = record.tragedy\n badlist = badlist + Archive.objects.get(pk=epicid).time.strftime('%c') + ' '\n \n #response based on cases of leaks\n resptext = 'Leaks have been noted on the following dates: ' + badlist\n else:\n resptext = 'No leaks have been detected during system uptime.'\n return HttpResponse(resptext)","repo_name":"EmbyOne/Meter-Monitoring-System","sub_path":"monitor/scope/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10964559094","text":"#!/usr/bin/env python3\n\nimport sys\nimport requests\nimport hashlib\nimport random\nimport re\nimport functools\nimport time\nimport json\nimport pathlib\nimport base64\nimport traceback\n\nfrom user_agents import USER_AGENTS\nimport code_gen\nimport fraud_detector\n\nOK, CORRUPT, MUMBLE, DOWN, CHECKER_ERROR = 101, 102, 103, 104, 110\n\nPORT = 10000\nTIMEOUT = 10\n\nSCRIPT_PATH = pathlib.Path(__file__).parent\nUSERS = json.load(open(SCRIPT_PATH / \"users.json\"))\n\n\ndef gen_rule_name():\n ABC = \"\".join(chr(i) for i in range(33, 127) if chr(i) != \"/\")\n name = \"\".join(random.choice(ABC) for i in range(random.randrange(6, 10)))\n\n if random.random() < 0.1:\n return name + \".py\"\n if random.random() < 0.1:\n return \"select * from \" + name\n if random.random() < 0.1:\n return \"' union select * from \" + name\n if random.random() < 0.1:\n return \"echo $\" + name\n if random.random() < 0.1:\n return \"`echo\" + name + \"`\"\n if random.random() < 0.1:\n return name + \".txt\"\n return name\n\n\ndef create_session():\n s = requests.Session()\n\n # add timeouts\n s.get = functools.partial(s.get, timeout=TIMEOUT)\n s.post = functools.partial(s.post, timeout=TIMEOUT)\n s.headers[\"User-Agent\"] = random.choice(USER_AGENTS)\n return s\n\n\ndef call_get_rules_api(session, host):\n url = \"http://%s:%d/rules\" % (host, PORT)\n ans = session.get(url)\n if ans.status_code != 200:\n return None\n ans_obj = ans.json()\n if type(ans_obj) is not list or any(type(o) != str for o in ans_obj):\n return None\n return ans_obj\n\n\ndef call_add_rule_api(session, host, name, code):\n url = \"http://%s:%d/addrule\" % (host, PORT)\n ans = session.post(url, data=json.dumps({\"name\": name, \"code\": code}))\n if ans.status_code != 200:\n return None\n ans_obj = ans.json()\n if type(ans_obj) is not str:\n return None\n return ans_obj\n\n\ndef call_check_user_api(session, host, rules, user):\n url = \"http://%s:%d/checkuser\" % (host, PORT)\n ans = session.post(url, data=json.dumps({\"rules\": rules, \"user\": user}))\n if ans.status_code != 200:\n return None\n ans_obj = ans.json()\n if type(ans_obj) is not list or any(type(o) != int for o in ans_obj):\n return None\n return ans_obj\n\n\ndef verdict(exit_code, public=\"\", private=\"\"):\n if public:\n print(public)\n if private:\n print(private, file=sys.stderr)\n sys.exit(exit_code)\n\n\ndef info():\n verdict(OK, \"vulns: 1:1:2\")\n\n\ndef check(host):\n s = create_session()\n\n rule1_name = gen_rule_name()\n rule2_name = gen_rule_name()\n rule1 = code_gen.gen_empty_check()\n rule2 = code_gen.gen_rand_check()\n\n base_url = \"http://%s:%d\" % (host, PORT)\n\n for rule_name, rule in [rule1_name, rule1], [rule2_name, rule2]:\n ans = call_add_rule_api(s, host, name=rule_name, code=rule)\n if ans is None or not ans.startswith(\"ok:\"):\n verdict(MUMBLE, \"Failed to add rule\",\n \"Failed to add rule: %s %s\" % (rule_name, ans))\n\n ans = call_get_rules_api(s, host)\n if ans is None or rule1_name not in ans or rule2_name not in ans:\n verdict(MUMBLE, \"Bad rule list\", \"Bad rule list: no new rules\")\n\n user_idxs = random.sample(range(len(USERS)), 3)\n\n rules_seq = [random.choice([rule1_name, rule2_name]) for i in range(random.randint(32, 64))]\n\n ans = call_check_user_api(s, host, rules=rules_seq, user=user_idxs[0])\n if ans is None or len(set(ans)) in [1, 2]:\n verdict(MUMBLE, \"Check failed\", \"Bad random test\")\n\n for user_idx in user_idxs:\n ans = call_check_user_api(s, host, user=user_idx, rules=[rule1_name])\n\n expected = fraud_detector.run_rules([rule1], USERS[user_idx])\n if ans is None or expected != ans:\n verdict(MUMBLE, \"Check failed\", \"Bad interpreter test\")\n verdict(OK)\n\n\ndef put(host, flag_id, flag, vuln):\n s = create_session()\n\n rule_name = gen_rule_name()\n if int(vuln) == 1:\n rule = code_gen.gen_vuln1_check(flag)\n elif int(vuln) == 2:\n rule = code_gen.gen_vuln2_check(flag)\n else:\n rule = code_gen.gen_vuln3_check(flag)\n\n ans = call_add_rule_api(s, host, name=rule_name, code=rule)\n if ans is None or not ans.startswith(\"ok:\"):\n verdict(MUMBLE, \"Failed to add rule\",\n \"Failed to add rule: %s %s\" % (rule_name, ans))\n\n user_idxs = random.sample(range(len(USERS)), 8)\n\n get_help_data = []\n for user_idx in user_idxs:\n expected = fraud_detector.run_rules([rule], USERS[user_idx])\n get_help_data.append([user_idx, expected])\n flag_id = base64.b64encode(json.dumps([rule_name, get_help_data]).encode()).decode()\n verdict(OK, flag_id)\n\n\ndef get(host, flag_id, flag, vuln):\n s = create_session()\n\n try:\n rule_name, get_help_data = json.loads(base64.b64decode(flag_id))\n except Exception:\n verdict(MUMBLE, \"Bad flag id\", \"Bad flag_id: %s\" % traceback.format_exc())\n\n ans = call_get_rules_api(s, host)\n if ans is None or rule_name not in ans:\n verdict(MUMBLE, \"Bad rule list\", \"Bad rule list: no new rules\")\n\n for user_idx, expected in random.sample(get_help_data, 4):\n ans = call_check_user_api(s, host, rules=[rule_name], user=user_idx)\n if ans is None:\n verdict(MUMBLE, \"Check failed\")\n if ans != expected:\n verdict(MUMBLE, \"No such flag\")\n verdict(OK)\n\n\ndef main(args):\n CMD_MAPPING = {\n \"info\": (info, 0),\n \"check\": (check, 1),\n \"put\": (put, 4),\n \"get\": (get, 4),\n }\n\n if not args:\n verdict(CHECKER_ERROR, \"No args\", \"No args\")\n\n cmd, args = args[0], args[1:]\n if cmd not in CMD_MAPPING:\n verdict(CHECKER_ERROR, \"Checker error\", \"Wrong command %s\" % cmd)\n\n handler, args_count = CMD_MAPPING[cmd]\n if len(args) != args_count:\n verdict(CHECKER_ERROR, \"Checker error\", \"Wrong args count for %s\" % cmd)\n\n try:\n handler(*args)\n except requests.RequestException as E:\n verdict(DOWN, \"Connect error\", \"Connect error: %s\" % E)\n except json.decoder.JSONDecodeError as E:\n verdict(MUMBLE, \"Json decode error\", \"Json decode error: %s\" % traceback.format_exc())\n except Exception as E:\n verdict(CHECKER_ERROR, \"Checker error\", \"Checker error: %s\" % traceback.format_exc())\n verdict(CHECKER_ERROR, \"Checker error\", \"No verdict\")\n\n\nif __name__ == \"__main__\":\n main(args=sys.argv[1:])\n","repo_name":"HITB-CyberWeek/proctf-2019","sub_path":"checkers/fraud_detector/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"17481998605","text":"import calendar\nimport time\n\nfrom nose.tools import eq_\n\nfrom receipts.receipts import Receipt\nfrom mkt.receipts.utils import reissue_receipt, sign\nfrom mkt.receipts.tests.test_verify import ReceiptTest\n\n\nclass TestReissue(ReceiptTest):\n\n def test_expired(self):\n receipt_data = self.sample_app_receipt()\n curr_time = calendar.timegm(time.gmtime())\n receipt_data['iat'] = curr_time - 1000\n receipt_data['nbf'] = curr_time - 1000\n receipt_data['exp'] = curr_time\n receipt = sign(receipt_data)\n old = Receipt(receipt).receipt_decoded()\n new = Receipt(reissue_receipt(receipt)).receipt_decoded()\n for greater in ['exp', 'iat', 'nbf']:\n assert new[greater] > old[greater], (\n '{0} for new: {1} should be greater than old: {2}'.format(\n greater, new[greater], old[greater]))\n\n for same in ['product', 'detail', 'iss', 'reissue', 'typ', 'user',\n 'verify']:\n eq_(new[same], old[same], (\n '{0} for new: {1} should be the same as old: {2}'.format(\n greater, new[same], old[same])))\n","repo_name":"mozilla/zamboni","sub_path":"mkt/receipts/tests/test_utils_.py","file_name":"test_utils_.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"3"} +{"seq_id":"10110795967","text":"import derpibooru_dl\n\n\ndef artists_at_top(query_list):\n \"\"\"Put anything with the string \"artist\" at the top of the list\"\"\"\n put_at_top = []\n put_at_bottom = []\n for query in query_list:\n if \"artist\".lower() in query.lower():\n put_at_top.append(query)\n else:\n put_at_bottom.append(query)\n output_list = put_at_top + put_at_bottom\n return output_list\n\n\ndef main():\n input_list_path = \"config\\\\to_sort.txt\"\n output_list_path = \"config\\\\artists_at_top.txt\"\n input_list = derpibooru_dl.import_list(input_list_path)\n artists_at_top_list = artists_at_top(input_list)\n derpibooru_dl.append_list(artists_at_top_list, output_list_path, initial_text=\"# Artists at the top.\\n\",overwrite=True)\n\nif __name__ == '__main__':\n main()\n","repo_name":"liamwhite/Derpibooru-dl","sub_path":"sort_dl_list.py","file_name":"sort_dl_list.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"} +{"seq_id":"72944677522","text":"TESTS = int(input(\"TESTE = \"))\n\nMAXN = int(input(\"MAXN = \"))\n\nMAXV = int(input(\"MAXV = \"))\n\nfrom random import randint\nfrom subprocess import call\n\nfor _ in range(TESTS):\n V = None\n if _ == TESTS - 1:\n V = MAXN\n else:\n V = randint(3, MAXN)\n call(\"python gen.py \" + str(V) + \" \" + str(MAXV) + \" > input\", shell=True)\n if call(\"./main < input > output\", shell=True) != 0:\n print(str(_ + 1) + \": Wrong\")\n break\n if call(\"diff output ok\", shell=True) != 0:\n print(str(_ + 1) + \": Aiurea\")\n break\n print(str(_ + 1) + \": OK\")\n","repo_name":"adrian-budau/work","sub_path":"ACM/2013/Warm-up/J/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"70320858001","text":"from math import sqrt\n\nline1 = input().split(\" \")\nline2 = input().split(\" \")\n\nx1, y1 = line1\nx2, y2 = line2\n\ndistance = sqrt((float(x2) - float(x1))**2 + (float(y2) - float(y1))**2)\n\nprint(f\"{distance:.4f}\")","repo_name":"falcao-g/beecrowd","sub_path":"Python/1-iniciante/1015.py","file_name":"1015.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"71877938963","text":"while True:\n\n # Prompting user for the number of faces\n while True:\n try:\n face = int(input(\"\\nPlease enter your desired number of faces: [1 - 23]: \"))\n assert 0 < face <= 23\n break\n # If the value is NOT within range\n except AssertionError:\n print(\n \"The value you entered is out of range! Please pick a number from 1 to 23.\"\n )\n # If the value is NOT a number\n except ValueError:\n print(\n \"The value you entered is invalid! Please pick a number from 1 to 23.\"\n )\n\n # Importing random integers\n import random\n\n # Rolling the dice\n print(\"\\nYou chose: \", face)\n print(\"You rolled: \", random.randint(1, face))\n\n # Prompting user to continue or end\n resume = input(\n \"\\nIf you would like to start over, please type 'yes'. Otherwise, press any key to exit. \"\n )\n if resume == \"yes\":\n continue\n else:\n break\n\nprint(\"\\nThe end. Bye now!\")\n\n# Yay success! :-)\n# Diana Jean\n\n# Code source/s:\n# Stack overflow: https://stackoverflow.com/questions/41832613/python-input-validation-how-to-limit-user-input-to-a-specific-range-of-integers\n","repo_name":"deetuquib/portfolio","sub_path":"cst8279/labs/lab04/Tuquib_041043852_rollADice.py","file_name":"Tuquib_041043852_rollADice.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17272140940","text":"from setuptools import setup\nfrom setuptools.extension import Extension\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ModuleNotFoundError:\n USE_CYTHON = False\n\next = '.pyx' if USE_CYTHON else '.c'\nextensions = [Extension('stochastictoolkit._PDE', ['stochastictoolkit/_PDE' + ext])]\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\nsetup(name='stochastictoolkit',\n version='0.1',\n description='An ever expanding toolkit to build stochastic simulations in python',\n url='http://github.com/ulido/stochastictoolkit',\n author='Ulrich Dobramysl',\n author_email='ulrich.dobramysl@gmail.com',\n license='MIT',\n packages=['stochastictoolkit'],\n ext_modules = extensions,\n install_requires=[\n 'cython',\n 'numpy',\n 'randomgen',\n 'tqdm',\n 'pandas',\n 'shapely',\n 'tables',\n 'quadtree @ https://github.com/ulido/quadtree/tarball/master',\n ],\n test_suite='pytest',\n zip_safe=False)\n","repo_name":"ulido/stochastictoolkit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14415000485","text":"from matplotlib import pyplot as plt\r\nimport pandas as pd\r\nimport math as ma\r\ndf = pd.read_csv(r\"C:\\Users\\Amiya Kumar\\Desktop\\python\\csvfile\\abc.csv\")\r\nprint(df)\r\na=len(df['age'])\r\nsumx=0\r\nsumy=0\r\nsumxy=0\r\nsumx2=0\r\nfor i in range(0,a,1):\r\n sumx=sumx+df['age'][i]\r\nprint(\"sumx is \",sumx)\r\nfor i in range(0,a,1):\r\n sumy=sumy+df['glucose'][i]\r\nprint(\"sumy is\",sumy)\r\nxy=[]\r\nfor i in range(0,a,1):\r\n su=(df['age'][i]*df['glucose'][i])\r\n xy.append(su)\r\nprint(\"xy is\",xy)\r\nfor i in range(0,a,1):\r\n sumxy=sumxy+xy[i]\r\nprint(\"sumxy is\",sumxy)\r\nc=1\r\nx2=[]\r\nfor i in range(0,a,1):\r\n c=pow(df['age'][i],2)\r\n x2.append(c)\r\nprint(\"x2 is\",x2)\r\nfor i in range(0,a,1):\r\n sumx2=sumx2+x2[i]\r\nprint(\"sumx2 is\",sumx2)\r\nxc=1\r\n#for i in range(0,a,1):\r\n #xc=(xc*(df['age'][i]*df['glucose'][i]))\r\n#print(\"xc is\",xc)\r\np=((sumy*sumx2)-(sumx*sumxy))\r\nq=((a*(sumx2))-(pow(sumx,2)))\r\nfd=p/q#a\r\nprint(\"a value is\",fd)\r\nrt=((a*sumxy)-(sumx*sumy))\r\ntr=((a*sumx2)-(pow(sumx,2)))\r\nff=rt/tr#b\r\nprint(\"b value is\",ff)\r\nag=int(input(\"enter your age\"))\r\npast=0\r\nfor i in range(0,a,1):\r\n if(ag==df['age'][i]):\r\n past=(df['glucose'][i])\r\n pre_age=(df['age'][i])\r\nsg=((ff*ag)+fd)\r\nprint(\"your predicted sugar level is\",sg)\r\nif(sg>120):\r\n print(\"dibetic patient\")\r\nelif(sg<90):\r\n print(\"lowsugar\")\r\nelse:\r\n print(\"perfectly fine\")\t\r\nimport csv\r\nwith open(r\"C:\\Users\\Amiya Kumar\\Desktop\\python\\csvfile\\abc.csv\",'a') as newFile:\r\n newFileWriter = csv.writer(newFile)\r\n newFileWriter.writerow([ag,sg])\r\nlag=[]\r\nfor i in range(0,a,1):\r\n while(ag==df['age'][i]):\r\n lag.append(ag)\r\nprint(\"no. of times yoyr entered agre present data\",lag)\r\n#for ag in df['age']:\r\n #sugar=(sg/past)\r\n #error_per=sugar*100\r\n #if(error_per<=1):\r\n #acc_per=1-error_per\r\n #else:\r\n #acc_per=1+error_per\r\n #print(\"accuracy of the prediction is\",acc_per)\r\n\r\n#plt.plot(45,df['glucose'],label=\"predict\",color=\"green\",linewidth=5)\r\n#plt.plot(45,past,label=\"past\",color='red',linewidth=5)\r\n#plt.xlabel('glucose')\r\n#plt.ylabel('age')\r\n#plt.title('Information')\r\n#plt.show()\r\n","repo_name":"oxygenFullstack/python","sub_path":"agevsglucose prediction (1).py","file_name":"agevsglucose prediction (1).py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9899590624","text":"# modulo=1000000007\n# factorialTable=list()\n# # factorialTable.append(1)\n# # factorialTable.append(1)\n# def factorial(n): \n# if n < 0: \n# return 0\n# elif n == 0 or n == 1: \n# factorialTable.append(1)\n# else: \n# fact = 1\n# while(n>1): \n# fact *= n\n# n -= 1\n# factorialTable.append(fact)\n\n# factorialTable=list()\n# factorial(10)\n# print(factorialTable)\n# import sys\n# sys.stdout = open(\"/home/nav/code/python/algo/test.txt\", \"w\")\n# F=[-1]*10001\n# fact=1\n# for i in range(0,10001):\n# if i==0:\n# F[i]=1\n# else:\n# fact=fact*i\n# F[i]=fact\n\n# print(F)\n# sys.stdout.close()\nmodulo=1000000007\n\ndef ncr(n,k):\n m=0\n if k==0:\n m=1\n if k==1:\n m=n\n if k>=2:\n num,dem,op1,op2=1,1,k,n\n while(op1>=1):\n \n num*=op2\n \n \n dem*=op1\n \n op1-=1\n op2-=1\n m=num//dem\n return m%modulo\n\nprint(ncr(9,9))\n","repo_name":"NavalPangtey/Competitive-programming","sub_path":"python/algo/fact2.py","file_name":"fact2.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35882741784","text":"import os\nimport time\nimport logging\nimport argparse\nimport datetime\nfrom io import BytesIO\n\nfrom upload import GooglePhotos\nfrom camera import take_picture, TIME_FORMAT\n\nlogger = logging.getLogger(__name__)\n\nFILENAME_FORMAT = '%Y-%b-%dT%I:%M%p.jpg'\nSTART_WORK = 7 # 7am\nEND_WORK = 20 # 8pm\n\nHERE = os.path.dirname(__file__)\n\ndef time_until(next_timeslot):\n delta = next_timeslot - datetime.datetime.now()\n return delta - datetime.timedelta(microseconds=delta.microseconds)\n\ndef get_next_timeslot(minutes, include_wkends = False):\n now = datetime.datetime.now()\n \n start_today = now.replace(hour=START_WORK, \n minute=0, \n second=0, \n microsecond=0)\n end_today = now.replace(hour=END_WORK, \n minute=0, \n second=0, \n microsecond=0)\n\n if not (start_today <= now < end_today):\n # move to next day - no longer working\n next_target = now + datetime.timedelta(days=1)\n next_target = next_target.replace(hour=START_WORK, \n minute=0, \n second=0, \n microsecond=0)\n\n # weekday() returns 0-6 for mon-sun\n if not include_wkends and next_target.weekday() >= 5:\n # move to the next monday\n delta = 7 - next_target.weekday()\n next_target = next_target + datetime.timedelta(days=delta)\n\n return next_target\n\n \n # still working - find the next interval\n delta = datetime.timedelta(minutes=minutes)\n return now + (datetime.datetime.min - now) % delta\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--elapse', type=int, \n default=30, help='minutes to wait between shots')\n parser.add_argument('--secret', type=str, default='client_secret.json')\n parser.add_argument('--album', type=str, default='', \n help='album to save to')\n parser.add_argument('--weekends', action=\"store_true\", default = False,\n help=\"flag to also capture on weekends, default False\")\n parser.add_argument('-v', '--verbose',\n dest='verbose',\n action='count',\n default=0,\n help='Give more output, additive up to 3 times.')\n parser.add_argument('-q', '--quiet',\n dest='quiet',\n action='count',\n default=0,\n help='Give less output, additive up to 3 times, '\n 'corresponding to WARNING, ERROR, and CRITICAL '\n 'logging levels')\n\n args = parser.parse_args()\n \n verbosity = args.verbose - args.quiet\n\n # compute verbosity\n if verbosity >= 1:\n loglevel = logging.DEBUG\n elif verbosity == -1:\n loglevel = logging.WARNING\n elif verbosity == -2:\n loglevel = logging.ERROR\n elif verbosity <= -3:\n loglevel = logging.CRITICAL\n else:\n loglevel = logging.INFO\n\n # configure logger\n logging.basicConfig(level=loglevel)\n\n # login\n gp = GooglePhotos(args.secret)\n\n # find the desired album\n albums = gp.get_albums()\n \n if args.album:\n if args.album not in albums:\n album = gp.create_album(args.album)\n else:\n album = albums[args.album]\n else:\n album = None\n \n while True:\n # take snapshot\n buffer = BytesIO()\n filename = time.strftime(FILENAME_FORMAT)\n take_picture(stream = buffer)\n \n # go back to beginning so uploader reads it all\n buffer.seek(0)\n\n try:\n gp.upload_photo(album, filename, buffer)\n except Exception:\n # write to file in case i still want it\n with open(os.path.join(HERE, 'photos', filename), 'wb') as f:\n buffer.seek(0)\n f.write(buffer.read())\n\n # set next target\n next_timeslot = get_next_timeslot(args.elapse, args.weekends)\n logger.info('Next capture: %s' % next_timeslot.strftime(TIME_FORMAT))\n\n while datetime.datetime.now() < next_timeslot:\n delta = (next_timeslot - datetime.datetime.now()).total_seconds()\n if delta > 5:\n sleeptime = round(delta/2,2)\n else:\n sleeptime = 1\n\n logger.info('Time until next picture: %s, sleeping - %ss' \n % (time_until(next_timeslot),sleeptime))\n time.sleep(sleeptime)\n\nif __name__ == '__main__':\n main()","repo_name":"simingy/rpi_timelapse_camera","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22568664863","text":"\"\"\"\nYou are given some integer as input, (i.e. ... -3, -2, -1, 0, 1, 2 ,3, ...)\n\nConvert the integer you are given to a string. Do not make use of the built-in\n\"str\" function.\n\nExample\n-------\n Input : 123\n Output : \"123\"\n\"\"\"\n\ndef int_to_str(input_int):\n if input_int < 0:\n is_negative = True\n input_int *= -1\n else: \n is_negative = False\n\n output_str = []\n while input_int > 0:\n output_str.append(chr(ord('0') + input_int % 10))\n input_int //= 10\n \n return ''.join(output_str[::-1])\n\nif __name__ == \"__main__\":\n input_int = int(input('Integer: '))\n print(int_to_str(input_int))\n","repo_name":"acekun141/Python","sub_path":"algorithms/int_to_str.py","file_name":"int_to_str.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21680206212","text":"import torch\nimport torch.nn as nn\nfrom Hw4.utils import PrintLayerShape\n\n\nclass UpsamplingDepthToSpace(nn.Module):\n def __init__(self, block_size=2):\n super(UpsamplingDepthToSpace, self).__init__()\n self.block_size = block_size\n self.block_size_sq = int(block_size ** 2)\n\n def forward(self, x):\n # bunch of dimension stuff\n out = x.permute(0, 2, 3, 1)\n (bs, or_height, or_width, or_channels) = out.shape\n up_height = int(or_height * self.block_size)\n up_width = int(or_width * self.block_size)\n up_channels = int(or_channels / self.block_size_sq)\n out_expanded = out.reshape(bs, or_height, or_width, self.block_size_sq, up_channels) # 4 copies\n split = out_expanded.split(self.block_size, dim=3) # split in 2\n stack = [x.reshape(bs, or_height, up_width, up_channels) for x in split] # reshape to double h and w\n out = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4).reshape(\n bs, up_height, up_width, up_channels) # Stack, transpose, and reshape to [N, H, W, C]\n out = out.permute(0, 3, 1, 2)\n return out.contiguous() # to easy backprop\n\n\nclass UpsampleConv2d(nn.Module):\n def __init__(self, c_in, c_out, ks=3, padding=1):\n super(UpsampleConv2d, self).__init__()\n self.c_in = c_in\n self.c_out = c_out\n self.kernel_size = ks\n self.padding = padding\n\n self.conv = nn.Conv2d(c_in, c_out, kernel_size=ks, stride=1, padding=padding)\n self.depth_to_space = UpsamplingDepthToSpace(2)\n\n def forward(self, x):\n x = torch.cat([x, x, x, x], dim=1) # Prep for upsampling method.\n x = self.depth_to_space(x) # special upsampling method.\n x = self.conv(x)\n return x\n\n\nclass UpResnetBlock(nn.Module):\n def __init__(self, c_in, filters=128):\n super(UpResnetBlock, self).__init__()\n self.layers = nn.Sequential(\n nn.BatchNorm2d(c_in),\n nn.ReLU(),\n nn.Conv2d(c_in, filters, kernel_size=3, padding=1),\n nn.BatchNorm2d(filters),\n nn.ReLU(),\n UpsampleConv2d(filters, filters, ks=3, padding=1),\n )\n self.upsample_x = UpsampleConv2d(c_in, filters, ks=1, padding=0)\n\n def forward(self, x):\n res = self.layers(x)\n x = self.upsample_x(x)\n return res + x\n\n\nclass Generator(nn.Module):\n def __init__(self, noise_dim=128, n_filters=128):\n super(Generator, self).__init__()\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.noise_dim = noise_dim\n self.filters = n_filters\n self.dense_init = nn.Linear(noise_dim, 4 * 4 * n_filters)\n self.layers = nn.Sequential(\n UpResnetBlock(c_in=n_filters, filters=n_filters),\n UpResnetBlock(c_in=n_filters, filters=n_filters),\n UpResnetBlock(c_in=n_filters, filters=n_filters),\n nn.BatchNorm2d(n_filters),\n nn.ReLU(),\n nn.Conv2d(n_filters, 3, kernel_size=3, padding=1),\n nn.Tanh()\n )\n\n def forward(self, bs):\n z = torch.randn(bs, self.noise_dim).to(self.device)\n out = self.dense_init(z)\n out = out.reshape(-1, 128, 4, 4)\n out = self.layers(out)\n return out\n\n\nclass DownsamplingSpaceToDepth(nn.Module):\n def __init__(self, block_size=2):\n super(DownsamplingSpaceToDepth, self).__init__()\n self.block_size = block_size\n self.block_size_sq = int(block_size ** 2)\n\n def forward(self, x):\n # bunch of dimension stuff\n out = x.permute(0, 2, 3, 1)\n (bs, or_height, or_width, or_channels) = out.shape\n down_height = int(or_height / self.block_size)\n down_channels = int(or_channels * self.block_size_sq)\n split = x.split(self.block_size, dim=2)\n stack = [x.reshape(bs, down_height, down_channels) for x in split]\n output = torch.stack(stack, dim=1)\n output = output.permute(0, 3, 2, 1)\n return output.contiguous()\n\n\nclass Downsample_Conv2d(nn.Module):\n def __init__(self, c_in, c_out, ks=3, stride=1, padding=1):\n super(Downsample_Conv2d, self).__init__()\n self.c_in = c_in\n self.c_out = c_out\n self.kernel_size = ks\n self.padding = padding\n\n self.conv = nn.Conv2d(c_in, c_out, kernel_size=ks, stride=stride, padding=padding, bias=True)\n self.space_to_depth = DownsamplingSpaceToDepth(2)\n\n def forward(self, x):\n x = self.space_to_depth(x)\n x = sum(x.chunk(4, dim=1)) / 4.0\n x = self.conv(x)\n return x\n\n\nclass DownResnetBlock(nn.Module):\n def __init__(self, c_in, filters=128):\n super(DownResnetBlock, self).__init__()\n self.c_in = c_in\n self.filters = filters\n\n self.layers = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(c_in, filters, kernel_size=3, padding=1),\n nn.ReLU(),\n Downsample_Conv2d(filters, filters, ks=3, padding=1)\n )\n self.downsample_x = Downsample_Conv2d(c_in, filters, ks=1, padding=0)\n\n def forward(self, x):\n res = self.layers(x)\n x = self.downsample_x(x)\n return res + x\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, c_in, filters=128):\n super(ResnetBlock, self).__init__()\n self.c_in = c_in\n self.filters = filters\n\n self.layers = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(c_in, filters, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)\n )\n\n def forward(self, x):\n res = self.layers(x)\n return res + x\n\n\nclass Discriminator(nn.Module):\n def __init__(self, filters=128):\n super(Discriminator, self).__init__()\n\n self.layers = nn.Sequential(\n DownResnetBlock(3, filters=filters),\n DownResnetBlock(filters, filters),\n ResnetBlock(filters, filters),\n ResnetBlock(filters, filters),\n nn.ReLU(),\n )\n self.fc = nn.Linear(filters, 1)\n\n def forward(self, x):\n x = self.layers(x)\n print(x.shape)\n x = torch.sum(x, dim=[2, 3]) # TODO: WHY ARE THESE SUMMED?\n print(x.shape)\n x = self.fc(x)\n return x\n","repo_name":"JohanYe/CS294-158","sub_path":"Hw4/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"40286296149","text":"'''\nHow to program in Python - Chapter 14\nCreate, read, update and delete records in database\n'''\n\nfrom tkinter import *\nfrom tkinter.messagebox import *\nimport Pmw\nimport psycopg2\n\n\nclass Film(Frame):\n \"\"\"GUI Database Address Book Frame\"\"\"\n\n def __init__(self):\n\n Frame.__init__(self)\n Pmw.initialise()\n self.pack(expand=YES, fill=BOTH)\n self.master.title(\"Films for rental.\")\n\n self.buttons = Pmw.ButtonBox(self, padx=0)\n self.buttons.grid(columns=2)\n self.buttons.add(\"Find\", command=self.find_film)\n self.buttons.add(\"Add\", command=self.add_film)\n self.buttons.add(\"Update\", command=self.update_film)\n self.buttons.add(\"Clear\", command=self.clear_contents)\n self.buttons.add(\"Help\", command=self.help, width=14)\n self.buttons.alignbuttons()\n\n # list of fields in an address record\n fields = [\"film_id\", \"title\", \"description\", \"release_year\",\n \"language_id\", \"rental_duration\", \"rental_rate\", \"length\",\n \"replacement_cost\", \"rating\", \"last_update\", \"special_features\", \"fulltext\"]\n\n # dictionary with Entry components for values, keyed by\n # corresponding addresses table field names\n self.entries = {}\n self.IDEntry = StringVar()\n self.IDEntry.set(\"\")\n\n # create entries for each field\n for i in range(len(fields)):\n label = Label(self, text=fields[i] + \":\")\n label.grid(row=i + 1, column=0)\n entry = Entry(self, name=fields[i].lower(), font=\"Courier 12\")\n entry.grid(row=i + 1, column=1, sticky=W+E+N+S, padx=5)\n\n # user cannot type in ID field\n if fields[i] == \"film_id\":\n entry.config(state=DISABLED,\n textvariable=self.IDEntry, bg=\"gray\")\n\n # add entry field to dictionary\n key = fields[i].replace(\" \", \" \")\n #key = key.upper()\n self.entries[key] = entry\n\n def add_film(self):\n \"\"\"Add film record to database\"\"\"\n\n if self.entries[\"title\"].get() != \" \" and self.entries[\"description\"].get() != \" \":\n # create INSERT query command\n query = \"\"\"INSERT INTO film (title, description, release_year, \n language_id, rental_duration, rental_rate, length,\n replacement_cost, rating, last_update, special_features, fulltext)\n VALUES (\"\"\" + \"'%s', \" * 12 % \\\n (\n self.entries[\"title\"].get(),\n self.entries[\"description\"].get(),\n self.entries[\"release_year\"].get(),\n self.entries[\"language_id\"].get(),\n self.entries[\"rental_duration\"].get(),\n self.entries[\"rental_rate\"].get(),\n self.entries[\"length\"].get(),\n self.entries[\"replacement_cost\"].get(),\n self.entries[\"rating\"].get(),\n self.entries[\"last_update\"].get(),\n self.entries[\"especial_features\"].get(),\n self.entries[\"fulltext\"].get()\n )\n query = query[:-2] + \")\"\n\n # open connection, retrieve cursor and execute query\n try:\n conn = psycopg2.connect(\n \"dbname=dvdrental user=postgres password=root\")\n cursor = conn.cursor()\n cursor.execute(query)\n except psycopg2.OperationalError as error:\n error_message = \"Error %d: \\n%s\" % (error[0], error[1])\n showerror(\"Error\", error_message)\n else:\n cursor.close()\n conn.close()\n self.clear_contents()\n else:\n showwarning(\"Missing fields\", \"Please enter name\")\n\n def find_film(self):\n \"\"\"Query database for address record and display results\"\"\"\n\n if self.entries[\"title\"].get() != \" \":\n # create SELECT query\n query = \"SELECT * FROM film \" + \\\n \"WHERE title = '\" + \\\n self.entries[\"title\"].get() + \"'\"\n\n # open connection, retrieve cursor and execute query\n try:\n conn = psycopg2.connect(\n \"dbname=dvdrental user=postgres password=root\")\n cursor = conn.cursor()\n cursor.execute(query)\n except psycopg2.OperationalError as error:\n error_message = \"Error %d: \\n%s\" % (error[0], error[1])\n showerror(\"Error\", error_message)\n self.clear_contents()\n else:\n results = cursor.fetchall()\n fields = cursor.description\n\n if not results:\n showinfo(\"not found\", \"nonexisting records\")\n else:\n self.clear_contents()\n\n # display results\n for i in range(len(fields)):\n if fields[i][0] == \"film_id\":\n self.IDEntry.set(str(results[0][i]))\n else:\n self.entries[fields[i][0]].insert(\n INSERT, str(results[0][i]))\n\n cursor.close()\n conn.close()\n\n else:\n showwarning(\"Missing fields\", \"Please enter last name\")\n\n def update_film(self):\n \"\"\"Update address record in database\"\"\"\n\n if self.entries[\"film_id\"].get():\n\n # create UPDATE query command\n entry_items = self.entries.items()\n query = \"UPDATE film SET\"\n\n for key, value in entry_items:\n\n if key != \"film_id\":\n print(\" %s='%s',\" % (key, value.get().replace(\"'\", \"\\'\")))\n query += \" %s='%s',\" % (key,\n value.get().replace(\"'\", \"\\'\"))\n\n query = query[:-1] + \" WHERE film_id =\" + self.IDEntry.get()\n\n # open connection, retrieve cursor and execute query\n try:\n conn = psycopg2.connect(\n \"dbname=dvdrental user=postgres password=root\")\n cursor = conn.cursor()\n cursor.execute(query)\n except psycopg2.OperationalError as error:\n error_message = \"Error %d: \\n%s\" % (error[0], error[1])\n showerror(\"Error\", error_message)\n self.clear_contents()\n else:\n showinfo(\"database updated\", \"Database Updated.\")\n cursor.close()\n conn.close()\n\n else:\n showwarning(\"No ID specified\", \"\"\"\n You may only update an existing record.\n Use Find to locate the record,\n then modify the information and press Update.\"\"\")\n\n def clear_contents(self):\n \"\"\"Clear GUI panel\"\"\"\n\n for entry in self.entries.values():\n entry.delete(0, END)\n\n self.IDEntry.set(\" \")\n\n def help(self):\n \"Display help message to user\"\n\n showinfo(\"Help\", \"\"\"Click Find to locate a record.\n Click Add to insert a new record.\n Click Update to update the information in a record.\n Click Clear to empty the Entry fields.\\n\"\"\")\n\n\ndef main():\n '''Main Function'''\n Film().mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wesinalves/100daysofcodev2","sub_path":"codigos/cap14/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35674683730","text":"import datetime\n\n#class definitions\nclass timerange:\n def __init__(self, starttime, endtime):\n self.starttime = starttime\n self.endtime = endtime\n\nclass person:\n def __init__(self, name, notfreetimes):\n self.name = name\n self.notfreetimes = notfreetimes\n\nclass timerangemarks:\n def __init__(self, starttime, endtime, notfree):\n self.starttime = starttime\n self.endtime = endtime\n self.notfree = notfree\n\nmastertimeranges = []\npeople = []\ndailyTimeRange = None\nrawfinal = []\nfinal = None\n\ndef addnewperson():\n name = input(\"New person name: \")\n notfreetimes = []\n addmoretimes = 'y'\n while addmoretimes[0].lower() != 'n':\n date = input(\"busy date: \").strip()\n starttime = input(\"busy starting from what time on that date: \").strip()\n endtime = input(\"busy till what time on that date: \").strip()\n starttime = datetime.datetime.strptime(date + \" \" + starttime, \"%d/%m/%Y %H:%M\")\n endtime = datetime.datetime.strptime(date + \" \" + endtime, \"%d/%m/%Y %H:%M\")\n notfreetimes.append(timerange(starttime, endtime))\n addmoretimes = input(\"add more busy times?\\n(y/n)\\n\").strip()\n #if addmoretimes[0].lower() == 'n':\n # break\n people.append(person(name, notfreetimes))\n \ndef gettimerange():\n startingdate = datetime.datetime.strptime(input(\"find free times starting what date?\\n\").strip(), \"%d/%m/%Y\")\n endingdate = datetime.datetime.strptime(input(\"find free times ending what date?\\n\").strip(), \"%d/%m/%Y\")\n startingtime = datetime.datetime.strptime(input(\"starting on what time everyday?\\n\").strip(), \"%H:%M\")\n endingtime = datetime.datetime.strptime(input(\"ending on what time everyday?\\n\").strip(), \"%H:%M\")\n global dailyTimeRange\n dailyTimeRange = timerange(startingtime.time(), endingtime.time())\n mastertimeranges.clear()\n i = startingdate\n while i != endingdate:\n starttime = datetime.datetime.combine(i.date(), startingtime.time())\n endtime = datetime.datetime.combine(i.date(), endingtime.time())\n mastertimeranges.append(timerange(starttime, endtime))\n i += datetime.timedelta(days = 1)\n\ndef time_in_range(start, end, x):\n return start <= x <= end\n\ndef calculate():\n criticalpoints = []\n for i in people:\n for j in range(len(i.notfreetimes)):\n if i.notfreetimes[j].starttime.time() >= dailyTimeRange.endtime or i.notfreetimes[j].endtime.time() <= dailyTimeRange.starttime:\n continue\n elif i.notfreetimes[j].starttime.time() < dailyTimeRange.starttime:\n i.notfreetimes[j].starttime = datetime.datetime.combine(i.notfreetimes[j].starttime.date(), dailyTimeRange.starttime)\n if i.notfreetimes[j].endtime.time() > dailyTimeRange.endtime:\n i.notfreetimes[j].endtime = datetime.datetime.combine(i.notfreetimes[j].endtime.date(), dailyTimeRange.endtime) \n if i.notfreetimes[j].starttime not in criticalpoints: \n criticalpoints.append(i.notfreetimes[j].starttime)\n if i.notfreetimes[j].endtime not in criticalpoints:\n criticalpoints.append(i.notfreetimes[j].endtime)\n for i in mastertimeranges:\n if i.starttime not in criticalpoints: \n criticalpoints.append(i.starttime)\n if i.endtime not in criticalpoints:\n criticalpoints.append(i.endtime)\n criticalpoints.sort()\n for i in range(1, len(criticalpoints)):\n if criticalpoints[i-1].date() == criticalpoints[i].date():\n notfree = 0\n starttime = criticalpoints[i-1]\n endtime = criticalpoints[i]\n for j in people:\n for k in j.notfreetimes:\n if time_in_range(k.starttime, k.endtime, starttime) and time_in_range(k.starttime, k.endtime, endtime):\n notfree += 1\n rawfinal.append(timerangemarks(starttime, endtime, notfree))\n global final\n final = sorted(rawfinal, key=lambda timerange: timerange.notfree)\n\ndef showsessions():\n for i in final:\n print(i.starttime, i.endtime, i.notfree)\n \n\n\ndef main():\n print(\"1. Add new person\\n2. Set time range\\n3. Calculate free times\\n4. Show not free times\")\n choice = input(\"Please enter your selection: \")\n if choice[0] == \"1\":\n addnewperson()\n main()\n elif choice[0] == \"2\":\n gettimerange()\n main()\n elif choice[0] == \"3\":\n if len(people) > 0:\n try:\n calculate()\n main()\n except:\n print(\"Please complete step 1 and step 2 before calculating free times.\")\n main()\n else:\n print(\"An error has occured, please try again.\")\n main()\n elif choice[0] == \"4\":\n showsessions()\n main()\n\nif __name__ == \"__main__\":\n print(\"***Welcome to Free Time Finder***\\n\\n\")\n main()","repo_name":"CheeXueyi/free-time-finder","sub_path":"v0/0.3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70270418002","text":"# Importing necessary libraries\nimport pickle\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nos.chdir(\"../\")\n# Defining final data path\nparent_dir = os.getcwd()\ndirectory = \"\\\\data\\\\Gold\\\\data.parquet\"\nfinal_data_path = parent_dir + directory\n# Defining log paths\nlogs = \"\\\\data\\\\logs\"\nfinal_log_path = parent_dir + logs\n\n\n\n # This file takes the features as moving average and rolling median of adj_close and target as Volume.\n # The data is split and 20% is taken as test while 80% is taken as train.\n # We apply Random Forrest Regression to predict the target.\n # This function also stores the model results, log files and predicted values to the specific log folder\n # and returns 3 values, predicted volume, moving average used and rolling median used.\n # These values are later used for deployment in main file.\n\ndata_gold = pd.read_parquet(final_data_path)\ndata_gold[\"Date\"] = pd.to_datetime(data_gold[\"Date\"])\ndata_gold.set_index(\"Date\", inplace=True)\n\n# Remove rows with NaN values\ndata_gold.dropna(inplace=True)\n\n# Select features and target\nfeatures = [\"vol_moving_avg\", \"adj_close_rolling_med\"]\ntarget = \"Volume\"\n\nX = data_gold[features]\ny = data_gold[target]\n\n# Split data into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=42\n)\n\n# Create a RandomForestRegressor model\nmodel = RandomForestRegressor(n_estimators=10, random_state=42)\n\n# Train the model\nmodel.fit(X_train, y_train)\n\n# Make predictions on test data\ny_pred = model.predict(X_test)\n\n# # predict test values\n#\n# test_values = model.predict([[mov_avg, roll_med]])\n\n# Calculate the Mean Absolute Error and Mean Squared Error\nmae = mean_absolute_error(y_test, y_pred)\nmse = mean_squared_error(y_test, y_pred)\n\n# making log directory to store log files\nif not os.path.exists(final_log_path):\n os.makedirs(final_log_path)\nfilename = parent_dir + \"\\\\randomforestmodel.pkl\"\npickle.dump(model, open(filename, \"wb\"))\nwith open(final_log_path + \"\\\\error_logs.txt\", \"w\") as f:\n f.write(f\"mean_absolute_error = {mae}, mean_squared_error {mse}\")\n# adding predicted values to dataframe\ny_pred_df = pd.DataFrame(y_pred).reset_index(drop=True)\ny_pred_df.columns = [\"Predicted\"]\n# adding test values to dataframe\ny_test_df = pd.DataFrame(y_test).reset_index(drop=True)\ny_test_df.columns = [\"Actual\"]\n# Concatenating both predicted and actual test values\nconcat_df = pd.concat([y_test_df, y_pred_df], axis=1)\nconcat_df.to_csv(final_log_path + \"\\\\testdata_predicted_values.csv\")\n\n# volume_predictor(102, 24)","repo_name":"VaibhavStClair/VolumePrediction","sub_path":"Utilities/volume_predictor.py","file_name":"volume_predictor.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41028257335","text":"import numpy as np\nfrom openpnm.models.phase.mixtures import mixing_rule\nfrom openpnm.models.phase import _phasedocs\n\n\n__all__ = [\n 'gas_mixture_yweighted',\n 'gas_pure_TRC',\n 'liquid_pure_rp',\n 'liquid_mixture_xweighted',\n]\n\n\n@_phasedocs\ndef liquid_pure_rp(\n phase,\n T='pore.temperature',\n Tc='param.critical_temperature',\n omega='param.acentric_factor',\n Cpg='pore.heat_capacity_gas',\n):\n r\"\"\"\n\n Parameters\n ----------\n %(phase)s\n %(T)s\n %(Tc)s\n %(omega)s\n %(Cpg)s\n\n \"\"\"\n # Rowlinson and Poling\n T = phase[T]\n Tc = phase[Tc]\n omega = phase[omega]\n Cpgm = phase[Cpg]\n Tr = T/Tc\n if np.any(Tr > 1):\n raise Exception('Cannot calculate liquid property of fluid above'\n + 'its critical temperature')\n R = 8.314462618\n lhs = 1.586 + 0.49/(1-Tr) \\\n + omega*(4.2775 + 6.3*((1-Tr)**(1/3))/Tr + 0.4355/(1-Tr))\n Cp = lhs*R + Cpgm\n\n return Cp\n\n\n@_phasedocs\ndef gas_pure_TRC(\n phase,\n T='pore.temperature',\n a=[],\n):\n r\"\"\"\n\n Parameters\n ----------\n %(phase)s\n %(T)s\n a : list\n The coefficients to use (see notes for form of equation). If not\n given the ``phase['param.CAS']`` is used to lookup the values from\n ``chemicals.heat_capacity.TRC_gas_data``\n\n Returns\n -------\n\n \"\"\"\n # TRCCp\n from chemicals.heat_capacity import TRC_gas_data\n T = phase[T]\n if len(a) == 0:\n c = TRC_gas_data.loc[phase.params['CAS']]\n a = list(c[3:11])\n R = 8.314462618\n y = np.zeros_like(T)\n temp = (T - a[7])/(T + a[6])\n mask = T > a[7]\n y[mask] = temp[mask]\n Cp = R*(a[0] + (a[1]/(T**2))*np.exp(-a[1]/T) + a[3]*(y**2)\n + (a[4] - a[5]/((T - a[7])**2))*(y**8))\n return Cp\n\n\n@_phasedocs\ndef gas_mixture_yweighted(\n phase,\n Cps='pore.heat_capacity.*',\n):\n r\"\"\"\n Uses a linearly mole fraction weighted average\n\n Parameters\n ----------\n %(phase)s\n %(Cps)s\n\n Returns\n -------\n\n \"\"\"\n Cpmix = mixing_rule(phase=phase, prop=Cps, mode='linear')\n return Cpmix\n\n\n@_phasedocs\ndef liquid_mixture_xweighted(\n phase,\n Cps='pore.heat_capacity.*',\n):\n r\"\"\"\n Uses a linearly mole fraction weighted average\n\n Parameters\n ----------\n %(phase)s\n %(Cps)s\n\n Returns\n -------\n\n \"\"\"\n Cpmix = mixing_rule(phase=phase, prop=Cps, mode='linear')\n return Cpmix\n","repo_name":"PMEAL/OpenPNM","sub_path":"openpnm/models/phase/heat_capacity/_funcs.py","file_name":"_funcs.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"3"} +{"seq_id":"34750257232","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nfrom scipy import ndimage\r\nimport numpy as np\r\nimport math\r\nfrom skimage.metrics import peak_signal_noise_ratio, mean_squared_error\r\n\r\n# Manipulate channels\r\ndef get_greyscale_image(img):\r\n return img # For grayscale images, no need to modify the image\r\n\r\n# Transformations\r\ndef reduce(img, factor):\r\n result = np.zeros((img.shape[0] // factor, img.shape[1] // factor))\r\n \r\n #creates an empty array result with dimensions equal to the original image divided by the factor.\r\n # iterates through the smaller image (result) and calculates the mean value of each factor x factor block in the original image,\r\n #assigning this mean value to the corresponding pixel in the resulting downscaled image.\r\n for i in range(result.shape[0]):\r\n for j in range(result.shape[1]):\r\n result[i,j] = np.mean(img[i*factor:(i+1)*factor,j*factor:(j+1)*factor])\r\n return result\r\n\r\ndef rotate(img, angle):\r\n return ndimage.rotate(img, angle, reshape=False)\r\n\r\ndef flip(img, direction):\r\n return img[::direction,:]\r\n\r\ndef apply_transformation(img, direction, angle, contrast=1.0, brightness=0.0):\r\n return contrast*rotate(flip(img, direction), angle) + brightness\r\n\r\n# Contrast and brightness\r\ndef find_contrast_and_brightness1(D, S):\r\n # Fix the contrast and only fit the brightness\r\n contrast = 0.75\r\n brightness = (np.sum(D - contrast*S)) / D.size\r\n return contrast, brightness\r\n\r\ndef find_contrast_and_brightness2(D, S):\r\n # Fit the contrast and the brightness\r\n A = np.concatenate((np.ones((S.size, 1)), np.reshape(S, (S.size, 1))), axis=1)\r\n b = np.reshape(D, (D.size,))\r\n x, _, _, _ = np.linalg.lstsq(A, b)\r\n return x[1], x[0]\r\n\r\n# Compression for grayscale images\r\ndef generate_all_transformed_blocks(img, source_size, destination_size, step):\r\n factor = source_size // destination_size\r\n transformed_blocks = []\r\n for k in range((img.shape[0] - source_size) // step + 1):\r\n for l in range((img.shape[1] - source_size) // step + 1):\r\n # Extract the source block and reduce it to the shape of a destination block\r\n S = reduce(img[k*step:k*step+source_size,l*step:l*step+source_size], factor)\r\n # Generate all possible transformed blocks\r\n for direction, angle in candidates:\r\n transformed_blocks.append((k, l, direction, angle, apply_transformation(S, direction, angle)))\r\n return transformed_blocks\r\n\r\ndef compress(img, source_size, destination_size, step):\r\n transformations = []\r\n transformed_blocks = generate_all_transformed_blocks(img, source_size, destination_size, step)\r\n i_count = img.shape[0] // destination_size\r\n j_count = img.shape[1] // destination_size\r\n for i in range(i_count):\r\n transformations.append([])\r\n for j in range(j_count):\r\n #print(\"{}/{} ; {}/{}\".format(i, i_count, j, j_count))\r\n transformations[i].append(None)\r\n min_d = float('inf')\r\n # Extract the destination block\r\n D = img[i*destination_size:(i+1)*destination_size,j*destination_size:(j+1)*destination_size]\r\n # Test all possible transformations and take the best one\r\n for k, l, direction, angle, S in transformed_blocks:\r\n contrast, brightness = find_contrast_and_brightness2(D, S)\r\n S = contrast*S + brightness\r\n d = np.sum(np.square(D - S))\r\n if d < min_d:\r\n min_d = d\r\n transformations[i][j] = (k, l, direction, angle, contrast, brightness)\r\n return transformations\r\n\r\ndef decompress(transformations, source_size, destination_size, step, nb_iter=8):\r\n factor = source_size // destination_size\r\n height = len(transformations) * destination_size\r\n width = len(transformations[0]) * destination_size\r\n iterations = [np.random.randint(0, 256, (height, width))]\r\n cur_img = np.zeros((height, width))\r\n for i_iter in range(nb_iter):\r\n print(i_iter)\r\n for i in range(len(transformations)):\r\n for j in range(len(transformations[i])):\r\n # Apply transform\r\n k, l, flip, angle, contrast, brightness = transformations[i][j]\r\n S = reduce(iterations[-1][k*step:k*step+source_size,l*step:l*step+source_size], factor)\r\n D = apply_transformation(S, flip, angle, contrast, brightness)\r\n cur_img[i*destination_size:(i+1)*destination_size,j*destination_size:(j+1)*destination_size] = D\r\n iterations.append(cur_img)\r\n cur_img = np.zeros((height, width))\r\n return iterations\r\n\r\n# Parameters\r\ndirections = [1, -1]\r\nangles = [0, 90, 180, 270]\r\ncandidates = [[direction, angle] for direction in directions for angle in angles]\r\n\r\n# Plot\r\ndef plot_iterations(iterations, target=None):\r\n # Configure plot\r\n plt.figure()\r\n nb_row = math.ceil(np.sqrt(len(iterations)))\r\n nb_cols = nb_row\r\n # Plot\r\n for i, img in enumerate(iterations):\r\n plt.subplot(nb_row, nb_cols, i+1)\r\n plt.imshow(img, cmap='gray', vmin=0, vmax=255, interpolation='none')\r\n if target is None:\r\n plt.title(str(i))\r\n else:\r\n # Display the RMSE\r\n plt.title(\"{} ({:.2f})\".format(i, np.sqrt(np.mean(np.square(target - img)))))\r\n frame = plt.gca()\r\n frame.axes.get_xaxis().set_visible(False)\r\n frame.axes.get_yaxis().set_visible(False)\r\n plt.tight_layout()\r\n\r\n# Tests\r\ndef test_greyscale():\r\n #reading image\r\n img = mpimg.imread('image1.jpg') \r\n img = get_greyscale_image(img)\r\n img = reduce(img, 4)\r\n plt.figure()\r\n plt.imshow(img, cmap='gray', interpolation='none')\r\n transformations = compress(img, 8, 4, 8)\r\n iterations = decompress(transformations, 8, 4, 8)\r\n plot_iterations(iterations, img)\r\n plt.show()\r\n \r\n # Calculate PSNR and MSE\r\n psnr = peak_signal_noise_ratio(img, iterations[-1], data_range=255)\r\n mse = mean_squared_error(img, iterations[-1])\r\n\r\n # Print the PSNR and MSE values\r\n print(f\"PSNR: {psnr:.2f} dB\")\r\n print(f\"MSE: {mse:.2f}\")\r\n\r\nif __name__ == '__main__':\r\n test_greyscale()\r\n","repo_name":"Jenisa-Merlin/Pixel-Puzzles","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36793430519","text":"import pandas as pd\nfrom sklearn import tree\nfrom sklearn.model_selection import train_test_split, cross_val_score\nimport seaborn\nimport matplotlib.pyplot as plt\n\n\n# Повторим обработку данных из прошлого урока:\ntitanic = pd.read_csv('titanic.csv')\nfeatures = titanic.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Survived'], axis=1)\nresult = titanic.Survived\nfeatures = pd.get_dummies(features)\nfeatures = features.fillna({'Age': features.Age.median()})\n\ntrain_features, test_features, train_result, test_result = train_test_split(features,\n result,\n test_size=0.33,\n random_state=42)\n\n# Поиграемся с критериями, влияющими на качество нашей модели:\ntrain_and_test_scores = pd.DataFrame(columns=['max_depth', 'train_score', 'test_score'])\nmax_depth_values = range(1, 101)\n\nfor max_depth in max_depth_values:\n classifier_tree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=max_depth)\n classifier_tree.fit(train_features, train_result)\n train_score = classifier_tree.score(train_features, train_result)\n test_score = classifier_tree.score(test_features, test_result)\n\n temp_dataframe = pd.DataFrame({'max_depth': [max_depth], 'train_score': [train_score], 'test_score': [test_score]})\n train_and_test_scores = pd.concat([train_and_test_scores, temp_dataframe])\n\nprint(train_and_test_scores, train_and_test_scores.isnull().sum())\n\n\"\"\"Отрисуем наш датафрейм, чтобы наглядно увидеть лучшее значение глубины дерева для точности модели.\nДля удобства отрисовки, объединим с помощью пандаса значения тестовых и тренировочных данных в одну колонку, а также\nсделаем для них группировку.\"\"\"\ntrain_and_test_scores_long = pd.melt(\n train_and_test_scores,\n id_vars=['max_depth'], # основной индекс\n value_vars=['train_score', 'test_score'], # столбцы для объединения\n var_name='set_type', # название столбца классификации\n value_name='score' # название столба со значениями\n)\nplot = seaborn.lineplot(\n data=train_and_test_scores_long,\n x=train_and_test_scores_long.max_depth,\n y=train_and_test_scores_long.score,\n hue=train_and_test_scores_long.set_type\n)\nplot.set_xticks(range(1, 101))\nplt.xticks(rotation=-90)\nplt.show()\n\n\n\"\"\"Однако наши модели все еще переобучены, ведь мы используем один и тот же набор данных для их тренировки и \nтестирования. \nДля решения данной проблемы необходимо разделить набор данных как и ранее, однако тренировочный набор \nследует тоже разделить, например, на 5 мини-тренировочных наборов данных. Допустим, 1 набор данных будет выступать \nтестовым. Тогда мы обучим модель на 2, 3, 4 и 5 наборах, а потом тестируем на том самом 1 наборе данных. Так мы делаем\nдля каждого мини-наборе данных, чтобы каждый из наборов был и в обучении, и в тесте. А далее, например, можно усреднить \nточность модели всех 5 случаев. А только потом мы будем скармливать моделям тестовые данные.\n\nТакой процесс называется кросс-валидацией.\"\"\"\n\n# Проверим вышесказанное на модели, с глубиной дерева равной 3:\nclassifier_tree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=3)\ncross_score = cross_val_score(classifier_tree, train_features, train_result, cv=5) # cv=5 - делим на 5 наборов данных\n\n# Точность, которую показал классификатор. Сначала обучился на 4, протестил 5. Потом на 1-3, 5 и показал 4. И так далее:\nprint(cross_score)\naverage_cross_score = cross_score.mean()\nprint(average_cross_score)\n\n# Теперь с этими знаниями попробуем снова провести эксперимент с глубиной дерева:\nnew_train_and_test_scores = pd.DataFrame(columns=['max_depth', 'train_score', 'test_score'])\nnew_max_depth_values = range(1, 101)\n\nfor max_depth in new_max_depth_values:\n classifier_tree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=max_depth)\n classifier_tree.fit(train_features, train_result)\n train_score = classifier_tree.score(train_features, train_result)\n test_score = classifier_tree.score(test_features, test_result)\n\n average_cross_score = cross_val_score(classifier_tree, train_features, train_result, cv=5).mean()\n\n temp_dataframe = pd.DataFrame({'max_depth': [max_depth],\n 'train_score': [train_score],\n 'test_score': [test_score],\n 'avg_cross_val_score': [average_cross_score]})\n new_train_and_test_scores = pd.concat([new_train_and_test_scores, temp_dataframe])\n\nprint(new_train_and_test_scores, new_train_and_test_scores.isnull().sum())\n\n# Отрисуем новые значения моделей:\nnew_train_and_test_scores_long = pd.melt(\n new_train_and_test_scores,\n id_vars=['max_depth'],\n value_vars=['train_score', 'test_score', 'avg_cross_val_score'],\n var_name='set_type',\n value_name='score'\n)\nnew_plot = seaborn.lineplot(\n data=new_train_and_test_scores_long,\n x=new_train_and_test_scores_long.max_depth,\n y=new_train_and_test_scores_long.score,\n hue=new_train_and_test_scores_long.set_type\n)\nnew_plot.set_xticks(range(1, 101))\nplt.xticks(rotation=-90)\nplt.show()\n\n\"\"\"Видим, что на самом деле наилучшая точность при кросс-валидации. \nТакже стоит отметить, что данные мешаются каждый раз с новым зерном выборке для кросс-валидации.\"\"\"\ncheck = new_train_and_test_scores_long.query('set_type==\"avg_cross_val_score\"').\\\n sort_values(by=['score'], ascending=False).head(10)\nprint(check)\n\n# Получим динамический лучший классификатор для теста на валидационных (test_features, test_result) данных:\nbest_max_depth = check['max_depth'].iloc[0]\nprint(best_max_depth)\nbest_clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=best_max_depth)\nbest_avg_cross_val_test_data = cross_val_score(best_clf, test_features, test_result, cv=5).mean()\nprint(best_avg_cross_val_test_data)\n","repo_name":"DKhorkov/neural_networks","sub_path":"Stepik_Learning/Introduction_to_Data_Science_and_Machine_Learning/Chapter_2/lesson_2.4_training_retraining_undertraining_crossvalidation.py","file_name":"lesson_2.4_training_retraining_undertraining_crossvalidation.py","file_ext":"py","file_size_in_byte":7330,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11096871705","text":"#!/usr/bin/env python\r\n__author__ = 'victor'\r\n\r\nimport sys\r\nimport re\r\n\r\n\r\ndef read_input(file):\r\n for line in file:\r\n yield line\r\n\r\n\r\ndef main():\r\n parts = [\r\n r'(?P\\S+)',\r\n r'(?P\\S+)',\r\n r'(?P\\S\\S)\\S+',\r\n r'\\S+',\r\n r'\\S+',\r\n r'(?P\\S+)',\r\n r'.+',\r\n ]\r\n pattern = re.compile(r'\\s+'.join(parts)+r'\\s*\\Z')\r\n\r\n data = read_input(sys.stdin)\r\n for line in data:\r\n res = pattern.match(str(line)).groupdict()\r\n sys.stdout.write('%s\\t\\t%d\\n' % (res[\"month\"] + '\\t' + res[\"day\"] + '\\t' + res[\"hour\"] + '\\t' + res[\"ip\"], 1))\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"VPlazaM/BigData-STIC","sub_path":"scripts/mapper/mapper2.py","file_name":"mapper2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19705915511","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 16 15:50:15 2020\n\n@author: MrHossein\n\"\"\"\n\nimport preparing_data\nfrom torchvision import transforms\nfrom PIL import ImageDraw\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n\ndef draw_lines(image, joints_coordinate, name, save=False):\n \"\"\"\n 0. Right ankle\n 1. Right knee\n 2. Right hip\n 3. Left hip\n 4. Left knee\n 5. Left ankle\n 6. Right wrist\n 7. Right elbow\n 8. Right shoulder\n 9. Left shoulder\n 10. Left elbow\n 11. Left wrist\n 12. Neck\n 13. Head top\n \"\"\"\n left_foot = [(joints_coordinate[0][5], joints_coordinate[1][5]), (joints_coordinate[0][4], joints_coordinate[1][4])]\n right_foot = [(joints_coordinate[0][0], joints_coordinate[1][0]),\n (joints_coordinate[0][1], joints_coordinate[1][1])]\n left_hip = [(joints_coordinate[0][4], joints_coordinate[1][4]), (joints_coordinate[0][3], joints_coordinate[1][3])]\n right_hip = [(joints_coordinate[0][1], joints_coordinate[1][1]), (joints_coordinate[0][2], joints_coordinate[1][2])]\n left_hand = [(joints_coordinate[0][11], joints_coordinate[1][11]),\n (joints_coordinate[0][10], joints_coordinate[1][10])]\n right_hand = [(joints_coordinate[0][6], joints_coordinate[1][6]),\n (joints_coordinate[0][7], joints_coordinate[1][7])]\n left_arm = [(joints_coordinate[0][10], joints_coordinate[1][10]),\n (joints_coordinate[0][9], joints_coordinate[1][9])]\n right_arm = [(joints_coordinate[0][7], joints_coordinate[1][7]), (joints_coordinate[0][8], joints_coordinate[1][8])]\n body = [(joints_coordinate[0][12], joints_coordinate[1][12]), (\n (joints_coordinate[0][3] + joints_coordinate[0][2]) / 2, (joints_coordinate[1][3] + joints_coordinate[1][2]) / 2)]\n head = [(joints_coordinate[0][13], joints_coordinate[1][13]), (joints_coordinate[0][12], joints_coordinate[1][12])]\n\n d = ImageDraw.Draw(image)\n d.line(left_foot, fill='blue', width=2)\n d.line(right_foot, fill='blue', width=2)\n d.line(left_hip, fill='green', width=2)\n d.line(right_hip, fill='green', width=2)\n d.line(left_hand, fill='red', width=2)\n d.line(right_hand, fill='red', width=2)\n d.line(left_arm, fill='yellow', width=2)\n d.line(right_arm, fill='yellow', width=2)\n d.line(body, fill='brown', width=2)\n d.line(head, fill='pink', width=2)\n\n plt.imshow(image)\n if save:\n image.save(name)\n plt.show()\n\n\ndef PDJ_metric(predicted_joints, true_joints, limbs_name):\n \"\"\"\n 0. Right ankle\n 1. Right knee\n 2. Right hip\n 3. Left hip\n 4. Left knee\n 5. Left ankle\n 6. Right wrist\n 7. Right elbow\n 8. Right shoulder\n 9. Left shoulder\n 10. Left elbow\n 11. Left wrist\n 12. Neck\n 13. Head top\n \"\"\"\n # Calculate True Distance of each Limb\n body_distance = np.linalg.norm(true_joints[:, 2] - true_joints[:, 9])\n correct_parts = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n # Claculate Distance between True Joints and Predicted Joints in each Limb\n for i in range(14):\n joint_distance = np.linalg.norm(true_joints[:, i] - predicted_joints[:, i])\n if joint_distance <= (0.2 * body_distance):\n correct_parts[i] = 1\n\n return correct_parts\n\n\ndef PCP_metric(predicted_joints, true_joints, limbs_name):\n \"\"\"\n 0. Right ankle\n 1. Right knee\n 2. Right hip\n 3. Left hip\n 4. Left knee\n 5. Left ankle\n 6. Right wrist\n 7. Right elbow\n 8. Right shoulder\n 9. Left shoulder\n 10. Left elbow\n 11. Left wrist\n 12. Neck\n 13. Head top\n \"\"\"\n # Calculate True Distance of each Limb\n true_limb_len = dict()\n true_limb_len[limbs_name[0]] = np.linalg.norm(true_joints[:, 0] - true_joints[:, 1])\n true_limb_len[limbs_name[1]] = np.linalg.norm(true_joints[:, 1] - true_joints[:, 2])\n true_limb_len[limbs_name[2]] = np.linalg.norm(true_joints[:, 3] - true_joints[:, 4])\n true_limb_len[limbs_name[3]] = np.linalg.norm(true_joints[:, 4] - true_joints[:, 5])\n true_limb_len[limbs_name[4]] = np.linalg.norm(true_joints[:, 6] - true_joints[:, 7])\n true_limb_len[limbs_name[5]] = np.linalg.norm(true_joints[:, 7] - true_joints[:, 8])\n true_limb_len[limbs_name[6]] = np.linalg.norm(true_joints[:, 9] - true_joints[:, 10])\n true_limb_len[limbs_name[7]] = np.linalg.norm(true_joints[:, 10] - true_joints[:, 11])\n\n correct_parts = [0, 0, 0, 0, 0, 0, 0, 0]\n # Claculate Distance between True Joints and Predicted Joints in each Limb\n for i in range(8):\n if i == 2 or i == 3:\n j = i + 1\n elif i == 4 or i == 5:\n j = i + 2\n elif i == 6 or i == 7:\n j = i + 3\n else:\n j = i\n\n joint_distance1 = np.linalg.norm(true_joints[:, j] - predicted_joints[:, j])\n joint_distance2 = np.linalg.norm(true_joints[:, j + 1] - predicted_joints[:, j + 1])\n if joint_distance1 <= (true_limb_len[limbs_name[i]] / 2) and joint_distance2 <= (\n true_limb_len[limbs_name[i]] / 2):\n correct_parts[i] = 1\n\n return correct_parts\n\n\ndef correct_percentage(image_label, predicted_joint, true_joint, names, metric='PCP'):\n if (metric == 'PCP'):\n total_correct_percentage = [0, 0, 0, 0, 0, 0, 0, 0]\n else:\n total_correct_percentage = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n joint_temp = np.zeros((len(image_label), 28))\n index = 0\n for i in range(len(predicted_joint)):\n for j in range(len(predicted_joint[i])):\n joint_temp[index] = np.array(\n preparing_data.Inverse_coordinate_normalize(torch.tensor(predicted_joint[i][j])))\n index += 1\n\n for i in range(len(image_label)):\n orig_joint = true_joint[image_label[i]]\n\n pred_joint = torch.zeros((2, 14))\n temp = joint_temp[i]\n for i in range(14):\n pred_joint[0][i] = temp[2 * i]\n pred_joint[1][i] = temp[2 * i + 1]\n\n if (metric == 'PCP'):\n correct_part = PCP_metric(pred_joint, orig_joint, names)\n total_correct_percentage = np.array(total_correct_percentage) + np.array(correct_part)\n else:\n correct_part = PDJ_metric(pred_joint, orig_joint, names)\n total_correct_percentage = np.array(total_correct_percentage) + np.array(correct_part)\n\n return (np.array(total_correct_percentage) / len(image_label)) * 100.00\n\n\ndef draw_selected_image(image_index, predicted_joint, true_joints, image, label, batch_size, save=False,\n name1='out_image1.jpg', name2='true_image1.jpg'):\n if image_index > 299:\n image_index = 299\n\n invers_normalize = transforms.Normalize([-0.452 / 0.216, -0.445 / 0.201, -0.379 / 0.203],\n [1 / 0.216, 1 / 0.201, 1 / 0.203])\n r = int(image_index / batch_size)\n index = int(image_index % batch_size)\n trans1 = transforms.ToPILImage()\n\n input1 = predicted_joint[r][index].cpu()\n input2 = invers_normalize(image[r][index].cpu())\n input3 = label[image_index]\n\n joint_temp = preparing_data.Inverse_coordinate_normalize(input1).reshape(28)\n pred_joint = torch.zeros((2, 14))\n for i in range(14):\n pred_joint[0][i] = joint_temp[2 * i]\n pred_joint[1][i] = joint_temp[2 * i + 1]\n\n orig_image = trans1(input2)\n draw_lines(orig_image, pred_joint, name1, save)\n\n true_joint = preparing_data.Inverse_coordinate_normalize(true_joints[input3]).reshape(28)\n true_joint_2d = torch.zeros((2, 14))\n for i in range(14):\n true_joint_2d[0][i] = true_joint[2 * i]\n true_joint_2d[1][i] = true_joint[2 * i + 1]\n\n orig_image = trans1(input2)\n draw_lines(orig_image, true_joint_2d, name2, save)\n","repo_name":"HosseinPAI/DeepPose-Human-Pose-Estimation","sub_path":"drawing_and_metrics.py","file_name":"drawing_and_metrics.py","file_ext":"py","file_size_in_byte":7825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71191988217","text":"# Source : Image processing approach is derived based on the kats-vs-dogs machine learning tutorial\n# https://pythonprogramming.net/convolutional-neural-network-kats-vs-dogs-machine-learning-tutorial/\n\nimport cv2 # Opencv-python to work with images\nimport numpy as np # dealing with arrays and to store the data in arrays\nimport os # Support directory paths\nfrom random import shuffle # mixing up or currently ordered data that might lead our network astray in training.\nfrom tqdm import tqdm # smart percentage bar for tasks. \n\n# Source directories where Training and Test images are stored in EC2 instance\nTRAIN_DIR = '/home/ubuntu/src/datta_ms/Train'\nTEST_DIR = '/home/ubuntu/src/datta_ms/Test'\n\nIMG_SIZE = 200 # Image size \n\n# Preparing the label for dataset\ndef label_img(img):\n # print img --> debug\n # label name is being sourced from first three letters of image name\n word_label = img.split('_')[-2]\n # print(\"Word label: \", word_label) --> for debug\n\t\n\t#conversion to one-hot array [Car,Truck, Bike]\n \n if word_label == 'Bik': return [0,0,1]\n elif word_label == 'car': return [0,1,0]\n elif word_label == 'Tru': return [1,0,0]\n\n# Preparation of training data array \t\n\ndef create_train_data():\n training_data = []\n for img in tqdm(os.listdir(TRAIN_DIR)):\n label = label_img(img)\n # print(\"Label \", label) --> for debugging\n path = os.path.join(TRAIN_DIR,img)\n img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))\n training_data.append([np.array(img),np.array(label)])\n shuffle(training_data)\n np.save('train_data.npy', training_data)\n return training_data\n\n# Preparation of test data array\n\t\ndef process_test_data():\n testing_data = []\n for img in tqdm(os.listdir(TEST_DIR)):\n path = os.path.join(TEST_DIR,img)\n\t# Ensuring that image number is considered\n img_num = img.split('.')[0]\n img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))\n testing_data.append([np.array(img), img_num])\n \n np.save('test_data.npy', testing_data)\n return testing_data\n\t\ntrain_data = create_train_data()\nprint (\"Training and validation data is created\")\ntest_data = process_test_data()\nprint (\"Testing data is created\")\n","repo_name":"nsdatta/Masters_Project","sub_path":"ImageProcess.py","file_name":"ImageProcess.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74313686137","text":"# Задание 1.\n# Реализовать класс Matrix (матрица). Обеспечить перегрузку конструктора класса (метод init()),\n# который должен принимать данные (список списков) для формирования матрицы.\n# [[], [], []]\n# Следующий шаг — реализовать перегрузку метода str() для вывода матрицы в привычном виде.\n# Далее реализовать перегрузку метода add() для реализации операции\n# сложения двух объектов класса Matrix (двух матриц).\n# Результатом сложения должна быть новая матрица.\n# Подсказка: сложение элементов матриц выполнять поэлементно —\n# первый элемент первой строки первой матрицы складываем\n# с первым элементом первой строки второй матрицы и т.д.\n# Пример:\n# 1 2 3\n# 4 5 6\n# 7 8 9\n#\n# 1 2 3\n# 4 5 6\n# 7 8 9\n# Сумма матриц:\n# 2 4 6\n# 8 10 12\n# 14 16 18\n\n\nclass Matrix:\n def __init__(self, my_list):\n self.my_list = my_list\n\n def __str__(self):\n ans = '\\n'.join(map(str, self.my_list))\n ans = ans.replace(',', '').replace(']', '').replace('[', '')\n return ans\n\n def __add__(self, other):\n self.other = other\n for i in range(len(self.my_list)):\n for j in range(len(other.my_list[i])):\n self.my_list[i][j] = self.my_list[i][j] + other.my_list[i][j]\n return Matrix(self.my_list)\n\n\nmatrix_1 = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\nmatrix_2 = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n\nprint(matrix_1 + matrix_2)\n\n\n# Задание 2\n# Реализовать программу работы с органическими клетками, состоящими из ячеек.\n# Необходимо создать класс Клетка (Cell).\n# В его конструкторе инициализировать параметр (quantity),\n# соответствующий количеству ячеек клетки (целое число).\n# В классе должны быть реализованы методы перегрузки арифметических операторов:\n# сложение (add()),\n# вычитание (sub()),\n# умножение (mul()),\n# деление (truediv()).\n\n\nclass Cell:\n def __init__(self, quantity):\n self.quantity = quantity\n\n def __add__(self, other):\n return f'Сумма: {self.quantity + other.quantity}'\n\n def __sub__(self, other):\n sub = self.quantity - other.quantity\n if sub > 0:\n return f'Разность: {sub}'\n else:\n return 'Вы уничтожили клетку('\n\n def __mul__(self, other):\n return f'Произведение: {self.quantity * other.quantity}'\n\n def __truediv__(self, other):\n return f'Деление: {self.quantity // other.quantity}'\n\n def make_order(self, row):\n my_str = ''\n for i in range(int(self.quantity / row)):\n my_str += f'{\"^\" * row}\\n'\n my_str += f'{\"^\" * (self.quantity % row)}\\n'\n return my_str\n\n\nceil_1 = Cell(31)\nceil_2 = Cell(9)\nprint(ceil_1 + ceil_2)\nprint(ceil_1 - ceil_2)\nprint(ceil_1 * ceil_2)\nprint(ceil_1 / ceil_2)\nprint(ceil_1.make_order(5))\n\n\n# Задание 3.\n# Создайте собственный класс-исключение, обрабатывающий ситуацию деления на нуль.\n# Проверьте его работу на данных, вводимых пользователем. При вводе пользователем нуля\n# в качестве делителя программа должна корректно обработать эту ситуацию и не завершиться с ошибкой.\n\nclass Zero(Exception):\n def __init__(self, text):\n self.text = text\n\n\ndef share_on_zero():\n try:\n divisible = int(input('Введите числитель дроби: '))\n divider = int(input('Введите знаменатель дроби: '))\n if divider == 0:\n raise Zero('Вы еще не освоили выш. мат.!')\n except ValueError:\n return 'Укажите числовое значение!'\n except Zero as err:\n print(err)\n else:\n print(f'Ответ: {divisible / divider}')\n\n\nshare_on_zero()\n","repo_name":"DanyaIT/Python","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29933308880","text":"import pyxel\nfrom vector import Vector2\n\ndef update_list(list):\n for elem in list:\n elem.update()\n\ndef draw_list(list):\n for elem in list:\n elem.draw()\n\ndef detect_collision(r1, rd1, r2, rd2):\n return r1.x + rd1.x >= r2.x and r1.x <= r2.x + rd2.x and r1.y + rd1.y >= r2.y and r1.y <= r2.y + rd2.y\n\nclass Selectable:\n def __init__(self, x, y, w, h, color):\n self.position = Vector2(x, y) * 8\n self.size = Vector2(w, h) * 8\n self.color = color\n \n def check_collision(self, vector2_pos, vector2_size):\n return detect_collision(self.position, self.size, vector2_pos, vector2_size)\n\n def update(self):\n pass\n # if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON,0,0):\n # print(Vector2(pyxel.mouse_x, pyxel.mouse_y))\n # print(self.check_collision(Vector2(pyxel.mouse_x, pyxel.mouse_y), Vector2(0,0)))\n # print(detect_collision( self.position, self.size ))\n\n def draw(self):\n pyxel.rectb(self.position.x , self.position.y , self.size.x , self.size.y , self.color)\n\nclass Item():\n def __init__(self, item_type):\n self.no_sprite = [(0,0)]\n self.simple_arrow_sprite = [(8,0), (16,0), (24,0), (32,0)]\n self.double_arrow_sprite = [(40,0), (48,0), (56,0), (64,0),(72,0),(80,0)]\n self.pusher_sprite = [0,8]\n self.current_sprite_list = self.no_sprite\n self.current_sprite = self.current_sprite_list[0]\n self.index = 0\n self.set_type(item_type)\n\n def get_sprite(self, index):\n self.index += index\n self.current_sprite = self.current_sprite_list[self.index % len(self.current_sprite_list)]\n self.tilemap_index = (self.current_sprite[0] / 8, self.current_sprite[0] / 8)\n \n def get_timemap_index(self):\n self.tilemap_index = (self.current_sprite[0] / 8, self.current_sprite[0] / 8)\n return self.tilemap_index\n\n def set_type(self, item_type):\n \"\"\"Can be \"simple_arrow\", \"double_arrow\", \"pusher\" \"\"\"\n self.type = item_type\n self.index = 0\n if self.type == None:\n self.current_sprite_list = [(0,0), (0,0), (0,0), (0,0)]\n elif self.type == \"simple_arrow\":\n self.current_sprite_list = self.simple_arrow_sprite\n elif self.type == \"double_arrow\":\n self.current_sprite_list = self.double_arrow_sprite\n elif self.type == \"pusher\":\n self.current_sprite_list = self.pusher_sprite\n self.tilemap_index = (self.current_sprite[0] / 8, self.current_sprite[0] / 8)\n self.get_sprite(0)\n \n def draw(self):\n pyxel.blt(pyxel.mouse_x-8,pyxel.mouse_y-8, 0, self.current_sprite[0], self.current_sprite[1], 8, 8, 0)\n\n\nclass Inventory(Selectable):\n def __init__(self, x, y, w, h, color):\n super().__init__(x,y,w,h,color)\n self.slot = []\n self.selected_item = Item(None)\n self._last_pos = self.position.x + 4\n # for i in range(9):\n # self.slot.append(Selectable(self._last_pos + i, self.position.y+2, 4, self.size.y, pyxel.COLOR_PEACH))\n # self._last_pos = self._last_pos + i\n\n def update(self):\n super().update()\n if pyxel.btnp(pyxel.KEY_R):\n self.selected_item.get_sprite(1)\n elif pyxel.btnp(pyxel.KEY_A):\n self.selected_item.get_sprite(-1)\n elif pyxel.btnp(pyxel.KEY_E):\n self.selected_item.get_sprite(1)\n\n if pyxel.btnp(pyxel.KEY_1):\n self.selected_item.set_type(\"simple_arrow\")\n elif pyxel.btnp(pyxel.KEY_2):\n self.selected_item.set_type(\"double_arrow\")\n elif pyxel.btnp(pyxel.KEY_2):\n self.selected_item.set_type(\"pusher\")\n if pyxel.btnp(pyxel.MOUSE_LEFT_BUTTON):\n # print(pyxel.tilemap(0).get())\n # pyxel.tilemap(0).set(pyxel.mouse_x//8, pyxel.mouse_y//8, 2)\n # print(*self.selected_item.get_timemap_index(), pyxel.tilemap(0).get(0,0))\n # test = self.selected_item.get_timemap_index()\n test = (16,16)\n print( pyxel.image(0).get(test[0], test[1]) )\n print(pyxel.tilemap(0).refimg)\n pyxel.tilemap(0).set( pyxel.mouse_x//8, pyxel.mouse_y//8, pyxel.image(0).get(test[0], test[1]) )\n \n update_list(self.slot)\n\n def draw(self):\n super().draw()\n draw_list(self.slot)\n self.selected_item.draw()","repo_name":"Seubmarine/tower_defense","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15151955330","text":"import pathlib\n\nimport pandas as pd\nfrom faker import Faker\n\n\ndef write_csv(items, file):\n df = pd.DataFrame(data=items)\n df.to_csv(file, encoding=\"utf_8_sig\", index=False, header=False)\n\n\ndef write_sql(items, file):\n file.write(\"insert into customer(name,gender,id_num,phone_no) values\" + \",\".join([\"('{0}','{1}','{2}','{3}')\".format(t[\"name\"], t[\"gender\"], t[\"id_num\"], t[\"phone_no\"]) for t in items]) + \";\\n\")\n\n\ndef generate(total: int):\n fake: Faker = Faker(locale=\"zh-CN\")\n temp_dir = pathlib.Path(__file__).parent.joinpath(\"___temp\")\n csv_file = temp_dir.joinpath(\"customer.csv\")\n sql_file = temp_dir.joinpath(\"customer.sql\")\n data = []\n with open(sql_file, mode=\"a\", encoding=\"utf-8\", newline=\"\\n\") as s:\n with open(csv_file, mode=\"a\", encoding=\"utf-8\", newline=\"\\n\") as c:\n for _ in range(0, total):\n user = {\n \"name\": fake.name(),\n \"gender\": int(fake.boolean()),\n \"id_num\": fake.ssn(min_age=18, max_age=60),\n \"phone_no\": fake.phone_number(),\n }\n data.append(user)\n if data.__len__() >= 100:\n write_csv(data, c)\n write_sql(data, s)\n data.clear()\n\n if data.__len__() > 0:\n write_csv(data, c)\n write_sql(data, s)\n\n\nif __name__ == '__main__':\n generate(10)\n","repo_name":"czy21/script","sub_path":"test/mock/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"23823665213","text":"from flask import Flask, render_template, request, redirect, url_for\r\nfrom bokeh.embed import components\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.resources import INLINE\r\nfrom bokeh.models.widgets import Slider, Select\r\nfrom bokeh.models import CustomJS, ColumnDataSource\r\n#from bokeh.models.widgets.layouts import column\r\nfrom bokeh.layouts import column\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef main():\r\n\r\n\tx = [x*0.005 for x in range(0, 400)]\r\n\ty = x\r\n\r\n\tsource = ColumnDataSource(data=dict(x=x, y=y))\r\n\r\n\tplot = figure(plot_width=800, plot_height=400)\r\n\tplot.line('x', 'y', source=source, line_width=3, line_alpha=1500)\r\n\r\n\tcallback = CustomJS(args=dict(source=source), code=\"\"\"\r\n\t var data = source.data;\r\n\t var f = cb_obj.value\r\n\t x = data['x']\r\n\t y = data['y']\r\n\t for (i = 0; i < x.length; i++) {\r\n\t y[i] = Math.pow(x[i], f)\r\n\t }\r\n\t source.trigger('change');\r\n\t\"\"\")\r\n\r\n\tslider = Slider(start=0.0001, end=10, value=1, step=.0001, title=\"power\")\r\n\t# slider = Slider(start=0.1, end=4, value=1, step=.1, title=\"power\", callback=callback)\r\n\tslider.js_on_change('value', callback)\r\n\r\n\tlayout = column(slider, plot)\r\n\r\n\tscript, div = components( layout )\r\n\r\n\treturn render_template('0.html', js_resources = INLINE.render_js(), css_resources=INLINE.render_css(), script = script, div = div)\r\n\r\n\r\nif __name__ =='__main__':\r\n\tapp.run(debug=True, host='0.0.0.0')","repo_name":"ChrisHays1/My_Very_First_App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27596629795","text":"# a loop in a loop is called nested loop\r\n# whether it is a while in for in loop or for in while loop\r\n# here we are writing a simple logic for atm withdrawal\r\nprint(\"welcome To Ravi's Bank ATM\")\r\nbalance = 67.51\r\nchances = 3\r\nresponse = ('y')\r\nprint(\"please Enter to pin to avail services\")\r\nwhile chances >0:\r\n pin = int(input(\"enter PIN:\"))\r\n if pin == (1234) :\r\n print(\"welcome\")\r\n while response not in ('n','N','no','NO'):\r\n print(\"press 1 for balance \\n\")\r\n print(\"press 2 for withdrawal \\n\")\r\n print(\"press 3 for deposit \\n\")\r\n option = int(input(\"enetr option number\"))\r\n if option == 1:\r\n print(\"Balance in your account is: \",balance,'\\n')\r\n response = input(\"would like to go back\")\r\n if response in ('n','N','NO','no'):\r\n print(\"thank you\")\r\n break\r\n elif option == 2:\r\n amount = float(input(\"enter amoun to withdraw\"))\r\n balance = balance - amount\r\n print('you have successfully withdrawan', amount,'/n')\r\n print('your current balance', balance,'/n')\r\n break\r\n\r\n\r\n elif pin != (1234):\r\n print(\"you have enter in correct pin\")\r\n chances = chances -1\r\n if chances == 0:\r\n print(\"\\n sorry you card blocked for the day\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"rravitanneru/python","sub_path":"neeted loop.py","file_name":"neeted loop.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23481728720","text":"\"\"\"add class for comment\n\nRevision ID: 9e9ccc1feb83\nRevises: 790abf3366b5\nCreate Date: 2019-03-05 18:35:21.130131\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9e9ccc1feb83'\ndown_revision = '790abf3366b5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('comments', sa.Column('comment', sa.String(length=500), nullable=True))\n op.drop_column('comments', 'comments_sentences')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('comments', sa.Column('comments_sentences', sa.VARCHAR(length=500), autoincrement=False, nullable=True))\n op.drop_column('comments', 'comment')\n # ### end Alembic commands ###\n","repo_name":"Nyirabazungu/pitcher-app","sub_path":"migrations/versions/9e9ccc1feb83_add_class_for_comment.py","file_name":"9e9ccc1feb83_add_class_for_comment.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40811518239","text":"'''\r\n2021/6/1\r\n本代码获取era5的资料列表\r\n'''\r\nimport os\r\nimport subprocess\r\npath = '/data1/other_data/DataUpdate/ERA5/new-era5/hourly/'\r\nfor yyyy in range(1979,1980):\r\n path1 = path+str(yyyy)\r\n files = subprocess.check_output('ls -t /data1/other_data/DataUpdate/ERA5/new-era5/hourly/'+str(yyyy), shell=True)\r\n files = files.decode('utf-8')\r\n files = files.split('\\n')\r\n del files[0]\r\n del files[0]\r\n f = open('/data5/2019swh/data/'+str(yyyy)+'.txt','w+')\r\n for ss in files:\r\n f.write(ss)\r\n f.write('\\r\\n')\r\n f.close()\r\n","repo_name":"sunweihao2020/mycode","sub_path":"other/get-era5_file_list.py","file_name":"get-era5_file_list.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6317328301","text":"from django.db import migrations, models\nimport djgeojson.fields\n\ndef create_geom(apps, schema_editor):\n AirKoreaStations = apps.get_model('dashboard', 'AirKoreaStations')\n for station in AirKoreaStations.objects.all():\n station.geom = {'type': 'Point', 'coordinates': [station.dmy, station.dmx]}\n station.save()\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dashboard', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='airkoreastations',\n name='geom',\n field=djgeojson.fields.PointField(default=[37.4026616, 127.1010097]),\n ),\n migrations.AlterField(\n model_name='airkoreastations',\n name='dmx',\n field=models.DecimalField(blank=True, db_column='dmX', decimal_places=10, max_digits=15, null=True),\n ),\n migrations.AlterField(\n model_name='airkoreastations',\n name='dmy',\n field=models.DecimalField(blank=True, db_column='dmY', decimal_places=10, max_digits=15, null=True),\n ),\n migrations.RunPython(create_geom),\n ]","repo_name":"mkhoin/korea-air-pollution-dashboard","sub_path":"dashboard/migrations/0002_auto_20181210_1359.py","file_name":"0002_auto_20181210_1359.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9161841898","text":"class Error(Exception):\n \"\"\"Base class for TomlSection exceptions.\"\"\"\n def __init__(self, msg=''):\n self.message = msg\n Exception.__init__(self, msg)\n\n def __repr__(self):\n return self.message\n\n __str__ = __repr__\n\n\nclass NoSectionError(Error):\n \"\"\"Raised when TomlSection not exist.\"\"\"\n def __init__(self, sectionName):\n Error.__init__(self, 'No section: %r' % (sectionName, ))\n self.section = sectionName\n self.args = (sectionName, )\n\n\nclass SectionTypeError(Error):\n \"\"\"Raised when getting wrong type for TomlSection.\"\"\"\n def __init__(self, gettype, returntype):\n Error.__init__(self, 'Type Error: Return \"{}\", when get \"{}\".'.format(returntype, gettype))\n\n\nclass TomlSection(dict):\n \"\"\"A TomlSection means a dict object in toml. TomlSection is base on dict.\n\n Examples::\n\n sec = Section()\n if not sec.hasSec(\"sec1.sec11\"):\n sec.addSec(\"sec1.sec11\")\n sec11 = sec.getSec(\"sec1.sec11\")\n sec11.setValue(\"abc\")\n \"\"\"\n def __init__(self, other=()):\n super().__init__()\n self.update(other)\n\n def __contains__(self, item):\n return self.hasChild(item)\n\n def __getitem__(self, item):\n return self.getChild(item)\n\n def __setitem__(self, key, value):\n self.setChild(key, value)\n\n def __delitem__(self, key):\n self.rmChild(key)\n\n ##\n ## Child Item Operate\n ##\n def hasChild(self, childString):\n \"\"\"If child item exist return true, else false\n\n :param childString: \"childname.subchildname\" format path to ditermine child item\n \"\"\"\n childString = childString.strip(\". \\r\\n\\t\")\n if len(childString) < 1:\n return False\n childNames = childString.split(\".\")\n item = self\n for i in range(0, len(childNames)):\n if childNames[i] not in item.keys():\n return False\n item = item.get(childNames[i])\n return True\n\n def addChild(self, childString, obj=\"\"):\n \"\"\"Add child using format childname.subchildname string\n\n :param childString: name of child, childName shall be format \"childname.subchildname.subsub.childname\"\n :param obj: child tobe added, default is \"\"\n\n Example::\n\n self.addChild(\"child.key1\", \"value\") # aaa subsection of general section\n \"\"\"\n childString = childString.strip(\". \\r\\n\\t\")\n if len(childString) < 1:\n return None\n childNames = childString.split(\".\")\n item = self\n length = len(childNames)\n for i in range(0, length - 1):\n if childNames[i] in item.keys() and isinstance(item, dict):\n # item = TomlSection(item)\n if not isinstance(item.get(childNames[i]), dict):\n item.update({childNames[i]: TomlSection()})\n else:\n item.update({childNames[i]: TomlSection()})\n item = item.get(childNames[i])\n item.update({childNames[length - 1]: obj})\n return item.get(childNames[length - 1])\n\n def rmChild(self, childString):\n \"\"\"Remove child by format 'childname.subchildname.xxx'\n\n :returns: return the removed child, if not exist return None\n \"\"\"\n childString = childString.strip(\". \\r\\n\\t\")\n if len(childString) < 1:\n return None\n childNames = childString.split(\".\")\n item = self\n for name in childNames[:-1]:\n if name not in item.keys():\n return None\n item = item.get(name)\n if isinstance(item, dict):\n return item.pop(childNames[-1], None)\n return None\n\n def getChild(self, childString, addifnochild=True, defaultchild=\"\"):\n \"\"\"Get child by format 'childname.subchildname'\n\n :param childString: name of child, childName shall be format \"childname.subchildname.subsub.childname\"\n :param addifnochild: if child is not exist add the child\n :param defaultchild: if child not exist, add defaultchild as the child value\n \"\"\"\n childString = childString.strip(\". \\r\\n\\t\")\n if len(childString) < 1:\n return None\n childNames = childString.split(\".\")\n item = self\n for childname in childNames[:-1]:\n subitem = item.get(childname)\n if childname in item.keys() and isinstance(subitem, dict):\n # item.update({childname: TomlSection(subitem)})\n item = subitem\n elif addifnochild:\n item.update({childname: TomlSection()})\n item = item.get(childname)\n else:\n return None\n if childNames[-1] in item.keys():\n t = type(item.get(childNames[-1]))\n if t == dict and t != TomlSection:\n item.update({childNames[-1]: TomlSection(item.get(childNames[-1]))})\n return item.get(childNames[-1])\n elif addifnochild:\n item.update({childNames[-1]: defaultchild})\n return item.get(childNames[-1])\n else:\n return None\n\n def setChild(self, childString, value, addifnochild=True):\n \"\"\"Set value to child, if success return True else return False\n\n :param childString: name of child, childName shall be format \"childname.subchildname.subsub.childname\"\n :param value: value will be set to the child\n :param addifnochild: if child is not exist add the child\n \"\"\"\n if addifnochild or self.hasChild(childString):\n self.addChild(childString, value)\n return True\n return False\n\n def appendToChild(self, childString, obj):\n \"\"\"Append 'obj' to child, child indicated by 'name.subname' format, if it's not a list.\n if it's a list, obj will be appended. if it's a string or number, it will be converted to list.\n if it's a dict ,return false\n\n :param childString: \"name.subname\" format to get child\n :param obj: value to be appended\n :return: True if successed; False if child is not exist, or child is a dict\n \"\"\"\n if not self.hasChild(childString):\n self.addChild(childString)\n childString = childString.strip(\". \\r\\n\\t\")\n childNames = childString.split(\".\")\n item = self\n for childname in childNames[:-1]:\n subitem = item.get(childname)\n if childname in item and isinstance(subitem, dict):\n item = subitem\n else:\n item.update({childname: TomlSection()})\n item = item.get(childname)\n lastitem = item.get(childNames[-1])\n if not isinstance(lastitem, list):\n item.update({childNames[-1]: [lastitem]})\n item.get(childNames[-1]).append(obj)\n\n def insertToChild(self, childString, index, obj):\n \"\"\"Insert 'obj' to child at index position, child indicated by 'name.subname' format.\n if it's a list, obj will be inserted. if it's a string or number, it will be converted to list.\n if it's a dict ,return false\n\n :param childString: \"name.subname\" format to get child, child must be a list, if not a list, it will be covert to a list\n :param index: position to be inserted to the list\n :param obj: value to be inserted\n :return: True if insert successed; False if child is not exist, or child is a dict\n \"\"\"\n childString = childString.strip(\". \\r\\n\\t\")\n childNames = childString.split(\".\")\n item = self\n for childname in childNames[:-1]:\n subitem = item.get(childname)\n if childname in item.keys() and isinstance(subitem, dict):\n item = subitem\n else:\n item.update({childname: TomlSection()})\n item = item.get(childname)\n lastitem = item.get(childNames[-1])\n if not isinstance(lastitem, list):\n if lastitem is None:\n item.update({childNames[-1]: []})\n else:\n item.update({childNames[-1]: [lastitem]})\n item.get(childNames[-1]).insert(index, obj)\n\n ##\n ## Section Operate\n ##\n def hasSec(self, secString):\n \"\"\"If section exist and type is dict return true, else false\n\n :param secString: \"secname.subsecname\" format path to ditermine section\n \"\"\"\n secString = secString.strip(\". \\r\\n\\t\")\n if len(secString) < 1:\n return False\n secnames = secString.split(\".\")\n sec = self\n for i in range(0, len(secnames)):\n if secnames[i] not in sec.keys():\n return False\n sec = sec.get(secnames[i])\n if isinstance(sec, dict):\n return True\n return False\n\n def addSec(self, secString):\n \"\"\"Add section using format secname.subsecname string\n\n Example::\n\n self.addSec(\"general.subsection\") # add subsection of general section\n \"\"\"\n return self.addChild(secString, TomlSection())\n\n def rmSec(self, secString):\n \"\"\"Remove secname.subsecname sections if exist\"\"\"\n return self.rmChild(secString)\n\n def getSec(self, secString=None, addifnosec=True):\n \"\"\"Get section by secname.subsecname string\n\n Example::\n\n self.getSec() # get root section\n self.getSec(\"general.subsection\") # get the subsection of general section\n\n :param addifnotfound: if True, if section is not found, add it to toml\n \"\"\"\n secString = secString.strip(\". \\r\\n\\t\")\n if len(secString) < 1:\n return self\n sec = self.getChild(secString, addifnosec)\n if sec is None:\n raise NoSectionError(TomlSection)\n elif isinstance(sec, dict):\n sec.__class__ = TomlSection\n return sec\n elif not addifnosec:\n raise SectionTypeError(\"Section\", type(sec))\n else:\n return self.addSec(secString)\n","repo_name":"hustlei/QssStylesheetEditor","sub_path":"modules/tomlconfig/tomlconfig/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9995,"program_lang":"python","lang":"en","doc_type":"code","stars":1105,"dataset":"github-code","pt":"22"} +{"seq_id":"69953032056","text":"# _____дано условию\r\nimport numpy\r\n\r\na = 1\r\nb = 2\r\ne = 0.02\r\ndelta=0.015\r\n# __________________\r\ndef f(x):\r\n \"\"\"Исследуемая функция\"\"\"\r\n return x + 1 / x ** 2\r\n\r\ndef df(x):\r\n \"\"\"Производная функции 1ого порядка\"\"\"\r\n return 1 - 2/x**3\r\n\r\ndef d2f(x):\r\n \"\"\"Производная функции 2ого порядка\"\"\"\r\n return 6/x**4\r\n\r\ndef secant():\r\n global a, b\r\n\r\n f_a = f(a)\r\n f_b = f(b)\r\n\r\n df_a = df(a)\r\n df_b = df(b)\r\n\r\n # Воспользуемся вместо второй производной ее приближением\r\n d2_f_b = (df_b - df_a) / (b - a)\r\n\r\n while abs(a - b) > e:\r\n\r\n a = b\r\n b -= df_b / d2_f_b\r\n\r\n f_a = f_b\r\n f_b = f(b)\r\n\r\n df_a = df_b\r\n df_b = df(b)\r\n\r\n d2_f_b = (df_b - df_a) / (b - a)\r\n\r\n if f_a > f_b:\r\n xmin = b\r\n fmin = f_b\r\n else:\r\n xmin = a\r\n fmin = f_a\r\n\r\n print(\"Минимальное значение функция принимает в точке x = \", xmin)\r\n print(\"Значение функции в этой точке: f(x) = \", fmin)\r\n\r\nsecant()","repo_name":"EgoInc/Optimization-methods","sub_path":"Одномерная минимизация/Метод секущих.py","file_name":"Метод секущих.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23898901994","text":"# Atomic Swaps - Example for illustrative purposes only.\n\nimport smartpy as sp\n\nclass AtomicSwap(sp.Contract):\n def __init__(self, notional, epoch, hashedSecret, owner, counterparty):\n self.init(notional = notional,\n hashedSecret = hashedSecret,\n epoch = epoch,\n owner = owner,\n counterparty = counterparty)\n\n def checkAlive(self, identity):\n sp.verify(self.data.notional != sp.mutez(0))\n sp.verify(identity == sp.sender)\n\n def finish(self):\n self.data.notional = sp.mutez(0)\n\n # If the owner is satisfied with the conditions of the swap,\n # they may call allSigned in order to send the notional tez\n # to the counterparty.\n @sp.entry_point\n def allSigned(self, params):\n self.checkAlive(self.data.owner)\n sp.send(self.data.counterparty, self.data.notional)\n self.finish()\n\n # If the time period has expired, the owner may cancel\n # the swap and reclaim their notional amount.\n @sp.entry_point\n def cancelSwap(self, params):\n self.checkAlive(self.data.owner)\n sp.verify(self.data.epoch < sp.now)\n sp.send(self.data.owner, self.data.notional)\n self.finish()\n\n # If the counterparty has the hash secret, and the time period\n # has not expired, they may claim the tez.\n @sp.entry_point\n def knownSecret(self, params):\n self.checkAlive(self.data.counterparty)\n sp.verify(self.data.hashedSecret == sp.blake2b(params.secret))\n sp.send(self.data.counterparty, self.data.notional)\n self.finish()\n\n@sp.add_test(name = \"AtomicSwap1\")\ndef test():\n hashSecret = sp.blake2b(sp.bytes(\"0x12345678aabb\"))\n alice = sp.test_account(\"Alice\")\n bob = sp.test_account(\"Robert\")\n c1 = AtomicSwap(sp.mutez(12), sp.timestamp(50), hashSecret,\n alice.address,\n bob.address)\n scenario = sp.test_scenario()\n scenario.h1(\"Atomic Swap\")\n scenario += c1\n\n@sp.add_test(name = \"AtomicSwap2\")\ndef test():\n alice = sp.test_account(\"Alice\")\n bob = sp.test_account(\"Robert\")\n scenario = sp.test_scenario()\n scenario.h1(\"Atomic Swap\")\n\n # Here, two AtomicSwap contracts are created. One with Alice as the owner\n # and Bob as the counterparty, and the second with the identities reversed.\n # They are both secured with the same hash secret, so if the secret gets\n # revealed, then both swaps can happen.\n hashSecret = sp.blake2b(sp.bytes(\"0x12345678aabb\"))\n c1 = AtomicSwap(sp.mutez(12), sp.timestamp(50), hashSecret,\n alice.address,\n bob.address)\n c2 = AtomicSwap(sp.mutez(20), sp.timestamp(50), hashSecret,\n bob.address,\n alice.address)\n scenario.h1(\"c1\")\n scenario += c1\n scenario += c1.knownSecret(secret = sp.bytes(\"0x12345678aa\")).run(sender = bob, valid = False)\n scenario += c1.knownSecret(secret = sp.bytes(\"0x12345678aabb\")).run(sender = bob)\n scenario.h1(\"c2\")\n scenario += c2\n scenario.h2(\"C2.export()\")\n scenario.p(c2.export())\n","repo_name":"boltlabs-inc/libzkchannels","sub_path":"tezos-sandbox/smartpy_scripts/useful_examples/AtomicSwap.py","file_name":"AtomicSwap.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"22"} +{"seq_id":"33309363878","text":"PATH = \"C:\\\\Users\\\\Developer\\Desktop\\\\test_data\\\\test_analysis\\\\test1\\individual_estimates\\\\\"\nbase = 'parameters_00{}.sai.txt'\n\n\nwith open(PATH + base.format(1), 'r') as initial:\n lines = initial.read().splitlines()\n header = [line.split('=')[0].lstrip().rstrip() for line in lines]\n\nprint(\";\".join(header))\nfor i in [1, 2, 3, 5]:\n # Open file\n with open(PATH + base.format(i), 'r') as dataFile:\n # Read lines into a list\n lines = dataFile.read().splitlines()\n # Get dictionary with names and values\n header_and_values = {line.split('=')[0].rstrip().lstrip():\n line.split('=')[-1].lstrip().rstrip() for line in lines}\n print(header_and_values)\n values = ';'.join([header_and_values[p] for p in header])\n print(values)\n\n\n","repo_name":"stefanradev93/fast-dm-gui","sub_path":"gui/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"10475721119","text":"\ntexto = 'lado do triangulo: '\ntexto2 = 'Não forma um triângulo!'\ntexto3 = 'Forma um triângulo '\nl1 = float(input(f'Primeiro {texto}'))\nl2 = float(input(f'Segundo {texto}'))\nl3 = float(input(f'Terceiro {texto}'))\nif l1 < l2 + l3 and l2 < l1 + l3 and l3 < l1 + l2:\n if l1 == l2 == l3:\n print(f'{texto3} EQUILÁTERO!')\n elif l1 == l2 or l1 == l3 or l2 == l3:\n print(f'{texto3}ISÓCELES!')\n else:\n print(f'{texto3}SCALENO!')\nelse:\n print(texto2)\n","repo_name":"DanielEngSoft/CursoPython","sub_path":"ExerciciosCursoEmVideo/ex042.py","file_name":"ex042.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23650294794","text":"__author__ = 'feiyicheng'\n\nfrom Tkinter import *\nfrom tkFileDialog import askopenfilename, askopenfile\nimport tkMessageBox as box\nfrom rdkit import Chem\nfrom rdkit.Chem import Draw\nfrom PIL import Image\nfrom PIL import ImageTk\n\n\n\nclass MmFrame(Frame):\n\tdef __init__(self):\n\t\tFrame.__init__( self )\n\t\tself.padding = \"3 3 12 12\"\n\t\tself.pack()\n\t\tself.columnconfigure( 0, weight=1 )\n\t\tself.rowconfigure( 0, weight=1 )\n\t\tself.button = Button(self, Text = \"fdsafd\", width = 30, command=self._popup()).pack()\n\n\n\tdef _popup(self):\n\t\ttoplevel = Toplevel()\n\t\tent1 = Entry(self,state = 'readonly')\n\t\tvar1 = StringVar()\n\t\tvar1.set(\"fasdfdsf\")\n\t\tent1.config( textvariable=var1, relief='flat' )\n\n\n\n\nif __name__ == '__main__':\n\tMmFrame.mainloop()","repo_name":"fycisc/ADS","sub_path":"testToplevel.py","file_name":"testToplevel.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4324309902","text":"import math \n\na = float(input(\"Enter lower bound: \"))\nb = float(input(\"Enter upper bound: \"))\n\nn = 100 # number of intermediate points\n\nprecision = 10**4 # The number of decimal places we want the answer correct to\n\ndelx = abs((b - a)/n) # calculating step value\n\nx1 = a # assigning x1 to lower bound of interval\n# Taking two points with small increments from the lower bound as x2, x3\nx2 = x1 + delx \nx3 = x2 + delx\n\n# Calculate function value at any particular point\ndef function(x):\n \n # Taking a random unimodal function\n return x*x + 54/x\n\ndef exhaustive_search(x1, x2, x3):\n # x3 should be <= b, else we will be calculating values outside the specified interval\n while (x3 <= b):\n \n # checking if function changes signs in specified interval\n if(function(x1) >= function(x2) and function(x2) <= function(x3)):\n break\n \n else:\n x1 = x2\n x2 = x3\n x3 = x2 + delx\n \n if(x3 > b):\n print(\"No minimum exists in (a, b) or a boundary point (a or b) is the minimum point.\")\n \n else:\n print(\"Minimum point lies in the region : (\" + str((math.trunc(x1*precision))/precision) + \" , \" \n + str((math.trunc(x3*precision))/precision) + \")\")\n \nexhaustive_search(x1, x2, x3)","repo_name":"AkhilKas/Optimization-Techniques","sub_path":"ExhaustiveSearch.py","file_name":"ExhaustiveSearch.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31991914772","text":"# 指定区间\n# lr_scheduler.MultiStepLR()\n# Assuming optimizer uses lr = 0.05 for all groups\n# lr = 0.05 if epoch < 30\n# lr = 0.005 if 30 <= epoch < 80\n# lr = 0.0005 if epoch >= 80\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport yoloV3\nimport torch\n\nmodel = yoloV3.Yolov3(1)\noptimizer = optim.SGD(params=model.parameters(), lr=0.05)\n\nplt.figure()\ny=[]\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [30, 80], 0.1)\nfor epoch in range(100):\n scheduler.step()\n print(epoch, 'lr={:.6f}'.format(scheduler.get_lr()[0]))\n y.append(scheduler.get_lr()[0])\n\nplt.plot(y)\nplt.show()\n","repo_name":"UnstoppableCurry/Face-payment","sub_path":"yoloV3 人脸检测/lr/学习率动态调整.py","file_name":"学习率动态调整.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"27479485502","text":"import json\r\n\r\nfrom flask import Response\r\n\r\ndef resolve_error(error_object):\r\n if error_object[\"error\"][\"error\"]:\r\n message = error_object[\"error\"][\"message\"]\r\n response = Response(\r\n json.dumps({\"message\": message}),\r\n status=error_object[\"error\"][\"status\"],\r\n mimetype=\"application/json\"\r\n )\r\n return response\r\n # in the event that parsing the error fails return a parsing error\r\n error_message = {\"message\": \"error in evaulating failure state\"}\r\n response = Response(\r\n json.dumps(error_message), status=500, mimetype=\"application/json\"\r\n )\r\n return response\r\n\r\ndef build_error(boolean, msg, code):\r\n error = dict(error=dict(error=boolean, message=str(msg), status=int(code)))\r\n return error","repo_name":"dwilloug/python_cookbook","sub_path":"flask_CRUD/common/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3587419293","text":"\"\"\"\nCreated on Sun Aug 19 2019\n@author: Yan Qinghao\ntransforms\n\"\"\"\n# coding=utf-8\nfrom __future__ import absolute_import, print_function\n\nimport torchvision.transforms as transforms\nimport suanpan\nfrom suanpan.app.arguments import Float, Folder\nfrom suanpan.app import app\nfrom args import PytorchTransModel, PytorchDataset\nfrom utils import transImgSave, mkFolder\n\n\n@app.input(PytorchDataset(key=\"inputData\"))\n@app.input(PytorchTransModel(key=\"inputModel1\"))\n@app.input(PytorchTransModel(key=\"inputModel2\"))\n@app.input(PytorchTransModel(key=\"inputModel3\"))\n@app.input(PytorchTransModel(key=\"inputModel4\"))\n@app.input(PytorchTransModel(key=\"inputModel5\"))\n@app.param(Float(key=\"p\", default=0.5))\n@app.output(PytorchTransModel(key=\"outputModel\"))\n@app.output(Folder(key=\"outputData\"))\ndef SPRandomApply(context):\n \"\"\"\n Apply randomly a list of transformations with a given probability\n \"\"\"\n args = context.args\n transformLst = []\n for i in range(5):\n transform = getattr(args, \"inputModel{}\".format(i + 1))\n if transform:\n transformLst.append(transform)\n transformsAug = transforms.RandomApply(transformLst, p=args.p)\n folder = transImgSave(args.inputData, transformsAug) if args.inputData else mkFolder()\n return transformsAug, folder\n\n\nif __name__ == \"__main__\":\n suanpan.run(app)\n","repo_name":"yanqinghao/AiLab-Pytorch","sub_path":"components/docker/transform/SPRandomApply.py","file_name":"SPRandomApply.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5845763146","text":"from __future__ import annotations\n\nfrom typing import cast\n\nimport ctypes\nimport ctypes.util\nimport logging\nimport sys\nimport time\n\nfrom gi.repository import Gio\nfrom gi.repository import GLib\nfrom gi.repository import GObject\n\nfrom gajim.common import app\nfrom gajim.common.const import Display\nfrom gajim.common.const import IdleState\n\nlog = logging.getLogger('gajim.c.idle')\n\n\nclass IdleMonitor:\n def __init__(self):\n self._extended_away = False\n\n def get_idle_sec(self) -> int:\n raise NotImplementedError\n\n def set_extended_away(self, state: bool) -> None:\n self._extended_away = state\n\n def is_extended_away(self) -> bool:\n return self._extended_away\n\n\nclass DBusFreedesktop(IdleMonitor):\n\n def __init__(self) -> None:\n IdleMonitor.__init__(self)\n self._last_idle_time = 0\n\n log.debug('Connecting to org.freedesktop.ScreenSaver')\n self._dbus_proxy = Gio.DBusProxy.new_for_bus_sync(\n Gio.BusType.SESSION,\n Gio.DBusProxyFlags.NONE,\n None,\n 'org.freedesktop.ScreenSaver',\n '/org/freedesktop/ScreenSaver',\n 'org.freedesktop.ScreenSaver',\n None\n )\n log.debug('Connected')\n\n # Only the following call will trigger exceptions if the D-Bus\n # interface/method/... does not exist. Using the failing method\n # for class init to allow other idle monitors to be used on failure.\n self._get_idle_sec_fail()\n log.debug('Test successful')\n\n def _get_idle_sec_fail(self) -> int:\n (idle_time,) = cast(tuple[int], self._dbus_proxy.call_sync(\n 'GetSessionIdleTime',\n None,\n Gio.DBusCallFlags.NO_AUTO_START,\n -1,\n None))\n\n return idle_time // 1000\n\n def get_idle_sec(self) -> int:\n try:\n self._last_idle_time = self._get_idle_sec_fail()\n except GLib.Error as error:\n log.warning(\n 'org.freedesktop.ScreenSaver.GetSessionIdleTime() failed: %s',\n error)\n\n return self._last_idle_time\n\n\nclass DBusGnome(IdleMonitor):\n\n def __init__(self) -> None:\n IdleMonitor.__init__(self)\n self._last_idle_time = 0\n\n log.debug('Connecting to org.gnome.Mutter.IdleMonitor')\n self._dbus_proxy = Gio.DBusProxy.new_for_bus_sync(\n Gio.BusType.SESSION,\n Gio.DBusProxyFlags.NONE,\n None,\n 'org.gnome.Mutter.IdleMonitor',\n '/org/gnome/Mutter/IdleMonitor/Core',\n 'org.gnome.Mutter.IdleMonitor',\n None\n )\n log.debug('Connected')\n\n # Only the following call will trigger exceptions if the D-Bus\n # interface/method/... does not exist. Using the failing method\n # for class init to allow other idle monitors to be used on failure.\n self._get_idle_sec_fail()\n log.debug('Test successful')\n\n def _get_idle_sec_fail(self) -> int:\n (idle_time,) = cast(tuple[int], self._dbus_proxy.call_sync(\n 'GetIdletime',\n None,\n Gio.DBusCallFlags.NO_AUTO_START,\n -1,\n None))\n\n return idle_time // 1000\n\n def get_idle_sec(self) -> int:\n try:\n self._last_idle_time = self._get_idle_sec_fail()\n except GLib.Error as error:\n log.warning(\n 'org.gnome.Mutter.IdleMonitor.GetIdletime() failed: %s',\n error)\n\n return self._last_idle_time\n\n\nclass Xss(IdleMonitor):\n def __init__(self) -> None:\n IdleMonitor.__init__(self)\n\n class XScreenSaverInfo(ctypes.Structure):\n _fields_ = [\n ('window', ctypes.c_ulong),\n ('state', ctypes.c_int),\n ('kind', ctypes.c_int),\n ('til_or_since', ctypes.c_ulong),\n ('idle', ctypes.c_ulong),\n ('eventMask', ctypes.c_ulong)\n ]\n\n XScreenSaverInfo_p = ctypes.POINTER(XScreenSaverInfo)\n\n display_p = ctypes.c_void_p\n xid = ctypes.c_ulong\n c_int_p = ctypes.POINTER(ctypes.c_int)\n\n lib_x11_path = ctypes.util.find_library('X11')\n if lib_x11_path is None:\n raise OSError('libX11 could not be found.')\n\n lib_x11 = ctypes.cdll.LoadLibrary(lib_x11_path)\n lib_x11.XOpenDisplay.restype = display_p\n lib_x11.XOpenDisplay.argtypes = (ctypes.c_char_p,)\n lib_x11.XDefaultRootWindow.restype = xid\n lib_x11.XDefaultRootWindow.argtypes = (display_p,)\n\n lib_xss_path = ctypes.util.find_library('Xss')\n if lib_xss_path is None:\n raise OSError('libXss could not be found.')\n\n self._lib_xss = ctypes.cdll.LoadLibrary(lib_xss_path)\n self._lib_xss.XScreenSaverQueryExtension.argtypes = (\n display_p, c_int_p, c_int_p)\n self._lib_xss.XScreenSaverAllocInfo.restype = XScreenSaverInfo_p\n self._lib_xss.XScreenSaverQueryInfo.argtypes = (\n display_p, xid, XScreenSaverInfo_p)\n\n self._dpy_p = lib_x11.XOpenDisplay(None)\n if self._dpy_p is None:\n raise OSError('Could not open X Display.')\n\n _event_basep = ctypes.c_int()\n _error_basep = ctypes.c_int()\n extension = self._lib_xss.XScreenSaverQueryExtension(\n self._dpy_p, ctypes.byref(_event_basep), ctypes.byref(_error_basep))\n if extension == 0:\n raise OSError('XScreenSaver Extension not available on display.')\n\n self._xss_info_p = self._lib_xss.XScreenSaverAllocInfo()\n if self._xss_info_p is None:\n raise OSError('XScreenSaverAllocInfo: Out of Memory.')\n\n self.root_window = lib_x11.XDefaultRootWindow(self._dpy_p)\n\n def get_idle_sec(self) -> int:\n info = self._lib_xss.XScreenSaverQueryInfo(\n self._dpy_p, self.root_window, self._xss_info_p)\n if info == 0:\n return info\n return self._xss_info_p.contents.idle // 1000\n\n\nclass Windows(IdleMonitor):\n def __init__(self) -> None:\n IdleMonitor.__init__(self)\n self._OpenInputDesktop = ctypes.windll.user32.OpenInputDesktop\n self._CloseDesktop = ctypes.windll.user32.CloseDesktop\n self._SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoW\n self._GetTickCount = ctypes.windll.kernel32.GetTickCount\n self._GetLastInputInfo = ctypes.windll.user32.GetLastInputInfo\n\n self._locked_time = None\n\n class LastInputInfo(ctypes.Structure):\n _fields_ = [\n ('cbSize', ctypes.c_uint),\n ('dwTime', ctypes.c_uint)\n ]\n\n self._lastInputInfo = LastInputInfo()\n self._lastInputInfo.cbSize = ctypes.sizeof(self._lastInputInfo)\n\n def get_idle_sec(self) -> int:\n self._GetLastInputInfo(ctypes.byref(self._lastInputInfo))\n return int(self._GetTickCount() - self._lastInputInfo.dwTime) // 1000\n\n def set_extended_away(self, state: bool) -> None:\n raise NotImplementedError\n\n def is_extended_away(self) -> bool:\n # Check if Screen Saver is running\n # 0x72 is SPI_GETSCREENSAVERRUNNING\n saver_runing = ctypes.c_int(0)\n info = self._SystemParametersInfo(\n 0x72, 0, ctypes.byref(saver_runing), 0)\n if info and saver_runing.value:\n return True\n\n # Check if Screen is locked\n # Also a UAC prompt counts as locked\n # So just return True if we are more than 10 seconds locked\n desk = self._OpenInputDesktop(0, False, 0)\n unlocked = bool(desk)\n self._CloseDesktop(desk)\n\n if unlocked:\n self._locked_time = None\n return False\n\n if self._locked_time is None:\n self._locked_time = time.time()\n return False\n\n threshold = time.time() - 10\n if threshold > self._locked_time:\n return True\n return False\n\n\nclass IdleMonitorManager(GObject.Object):\n\n __gsignals__ = {\n 'state-changed': (\n GObject.SignalFlags.RUN_LAST | GObject.SignalFlags.ACTION,\n None,\n ()\n )}\n\n def __init__(self):\n GObject.Object.__init__(self)\n self.set_interval()\n self._state = IdleState.AWAKE\n self._idle_monitor = self._get_idle_monitor()\n\n if self.is_available():\n GLib.timeout_add_seconds(5, self._poll)\n\n def set_interval(self,\n away_interval: int = 60,\n xa_interval: int = 120) -> None:\n\n log.info('Set interval: away: %s, xa: %s',\n away_interval, xa_interval)\n self._away_interval = away_interval\n self._xa_interval = xa_interval\n\n def set_extended_away(self, state: bool) -> None:\n if self._idle_monitor is None:\n raise ValueError('No idle monitor available')\n\n self._idle_monitor.set_extended_away(state)\n\n def is_available(self) -> bool:\n return self._idle_monitor is not None\n\n @property\n def state(self) -> IdleState:\n if not self.is_available():\n return IdleState.UNKNOWN\n return self._state\n\n def is_xa(self) -> bool:\n return self.state == IdleState.XA\n\n def is_away(self) -> bool:\n return self.state == IdleState.AWAY\n\n def is_awake(self) -> bool:\n return self.state == IdleState.AWAKE\n\n def is_unknown(self) -> bool:\n return self.state == IdleState.UNKNOWN\n\n @staticmethod\n def _get_idle_monitor() -> IdleMonitor | None:\n if sys.platform == 'win32':\n return Windows()\n\n try:\n return DBusFreedesktop()\n except GLib.Error as error:\n log.info('Idle time via org.freedesktop.Screensaver '\n 'not available: %s', error)\n\n try:\n return DBusGnome()\n except GLib.Error as error:\n log.info('Idle time via org.gnome.Mutter.IdleMonitor '\n 'not available: %s', error)\n\n if app.is_display(Display.WAYLAND):\n return None\n\n try:\n return Xss()\n except OSError as error:\n log.info('Idle time via XScreenSaverInfo not available: %s', error)\n\n return None\n\n def get_idle_sec(self) -> int:\n if self._idle_monitor is None:\n raise ValueError('No idle monitor available')\n return self._idle_monitor.get_idle_sec()\n\n def _poll(self) -> bool:\n '''\n Check to see if we should change state\n '''\n assert self._idle_monitor is not None\n\n if self._idle_monitor.is_extended_away():\n log.info('Extended Away: Screensaver or Locked Screen')\n self._set_state(IdleState.XA)\n return True\n\n idle_time = self.get_idle_sec()\n\n # xa is stronger than away so check for xa first\n if idle_time > self._xa_interval:\n self._set_state(IdleState.XA)\n elif idle_time > self._away_interval:\n self._set_state(IdleState.AWAY)\n else:\n self._set_state(IdleState.AWAKE)\n return True\n\n def _set_state(self, state: IdleState) -> None:\n if self._state == state:\n return\n\n self._state = state\n log.info('State changed: %s', state)\n self.emit('state-changed')\n\n\nMonitor = IdleMonitorManager()\n","repo_name":"gajim/gajim","sub_path":"gajim/common/idle.py","file_name":"idle.py","file_ext":"py","file_size_in_byte":11364,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"22"} +{"seq_id":"41482576485","text":"# -*- coding: iso-8859-1 -*-\nimport sys\nimport time\nimport serial\nimport threading\nimport paho.mqtt.client as mqtt\n\nDEVICE='/dev/ttyACM0'\n# DEVICE= 'COM3'\nSPEED=115200\nTOPICS=[(\"/c0/eng\", 2),(\"/c0/servo\",2)]\nHOSTNAME= \"10.1.1.110\"\n# HOSTNAME=\"192.168.1.109\"\n# HOSTNAME= \"localhost\"\n\ndef open_serial(dev, speed, show_info=False):\n\tser = serial.Serial(dev, speed, timeout=1)\n\ttime.sleep(0.5)\n\tif show_info:\n\t\tprint ('\\nStatus: %s ' % (ser.isOpen()))\n\t\tprint ('Device: %s ' % (ser.name))\n\t\tprint ('Settings:\\n %s ' % (ser))\n\treturn ser\n\ndef read_serial(ser, stop):\n\twhile True:\n\t\ttopic = \"\"\n\t\trec = ser.readline()\n\t\tif rec != b'':\n\t\t\tmsg=rec.decode('utf-8')\n\t\t\tprint(msg)\n\t\t\tif msg[0] == \"t\":\n\t\t\t\tpay = msg[2:]\n\t\t\t\ttopic = \"/c0/temp\"\n\t\t\tif msg[0] == \"i\":\n\t\t\t\tpay = msg[2:]\n\t\t\t\ttopic = \"/c0/ir\"\n\t\t\tif msg[0] == \"u\":\n\t\t\t\tpay = msg[2:]\n\t\t\t\ttopic = \"/c0/ultra\"\n\t\t\tif msg[0] == \"a\":\n\t\t\t\tpay = msg[2:]\n\t\t\t\ttopic = \"/c0/acel\"\n\t\t\tif (topic != \"\"):\n\t\t\t\tclient.publish(topic, pay, 2, 0)\n\t\t\t#print (rec.decode('utf-8'))\n\t\tif stop():\n\t\t\tbreak\n\ndef on_connect(client, userdata, flags, rc):\n # O subscribe fica no on_connect pois, caso perca a conexão ele a renova\n # Lembrando que quando usado o #, você está falando que tudo que chegar após a barra do topico, será recebido\n\tprint(\"Conectou no Broker\")\n\tclient.subscribe(TOPICS)\n\t# client.subscribe(\"#\",0)\n\ndef on_subscribe(client, userdata, mid, granted_qos):\n\tprint(\"Inscrito em: \", TOPICS)\n\ndef on_message(client, userdata, msg):\n\tprint(\"Mensagem recebida: \")\n\tprint(msg.topic+\" - \"+str(msg.payload))\n\t# if(msg.topic == \"/c0/eng\"):\n\t\t# id=\"m,\"\n\t\t# if(msg.payload.decode()==\"s\"):\n\t\t\t# id=\"\"\n\t# if(msg.topic == \"/c0/servo\"):\n\t\t# id=\"v,\"\n\t# print(id + msg.payload.decode())\n\t# snd=id + msg.payload.decode() + \"\\n\"\n\tsnd=msg.payload.decode() + \"\\n\"\n\tprint(snd)\n\tser.write(snd.encode())\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tser = open_serial(DEVICE, SPEED, True)\n\t\tprint(\"Porta serial conectada.\")\n\texcept:\n\t\tprint(\"Erro ao conectar na porta serial.\")\n\t\tsys.exit()\n\tif len(sys.argv) == 2:\n\t\tDEVICE = sys.argv[1]\n\telif len(sys.argv) == 3:\n\t\tDEVICE = sys.argv[1]\n\t\tSPEED = sys.argv[2]\n\n\tstop=False\n\tthreading.Thread(target=read_serial, args =(ser, lambda : stop, )).start()\n\n\tclient = mqtt.Client()\n\tclient.on_connect = on_connect\n\tclient.on_subscribe = on_subscribe\n\tclient.on_message = on_message\n\t# Conecta no MQTT Broker, no meu caso, o Mosquitto\n\tclient.connect(HOSTNAME,1883,6000)\n\t# Inicia o loop\n\tclient.loop_forever()\n","repo_name":"LMicol/KontraTesla","sub_path":"Protocolo/mqttserial.py","file_name":"mqttserial.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39839634685","text":"import json, fire, os, re\nfrom googletrans import Translator\n\n\ndef translate_markdown(text, dest_language='pt'):\n # Regex expressions\n MD_CODE_REGEX='```[a-z]*\\n[\\s\\S]*?\\n```'\n CODE_REPLACEMENT_KW = 'xx_markdown_code_xx'\n\n MD_LINK_REGEX=\"\\[[^)]+\\)\"\n LINK_REPLACEMENT_KW = 'xx_markdown_link_xx'\n\n # Markdown tags\n END_LINE='\\n'\n IMG_PREFIX='!['\n HEADERS=['### ', '###', '## ', '##', '# ', '#'] # Should be from this order (bigger to smaller)\n\n # Inner function to replace tags from text from a source list\n def replace_from_list(tag, text, replacement_list):\n list_to_gen = lambda: [(x) for x in replacement_list]\n replacement_gen = list_to_gen()\n return re.sub(tag, lambda x: next(iter(replacement_gen)), text)\n\n # Create an instance of Tranlator\n translator = Translator()\n\n # Inner function for translation\n def translate(text):\n # Get all markdown links\n md_links = re.findall(MD_LINK_REGEX, text)\n\n # Get all markdown code blocks\n md_codes = re.findall(MD_CODE_REGEX, text)\n\n # Replace markdown links in text to markdown_link\n text = re.sub(MD_LINK_REGEX, LINK_REPLACEMENT_KW, text)\n\n # Replace links in markdown to tag markdown_link\n text = re.sub(MD_CODE_REGEX, CODE_REPLACEMENT_KW, text)\n\n # Translate text\n text = translator.translate(text, dest=dest_language).text\n\n # Replace tags to original link tags\n text = replace_from_list('[Xx]'+LINK_REPLACEMENT_KW[1:], text, md_links)\n\n # Replace code tags\n text = replace_from_list('[Xx]'+CODE_REPLACEMENT_KW[1:], text, md_codes)\n\n return text\n\n # Check if there are special Markdown tags\n if len(text)>=2:\n if text[-1:]==END_LINE:\n return translate(text)+'\\n'\n\n if text[:2]==IMG_PREFIX:\n return text\n\n for header in HEADERS:\n len_header=len(header)\n if text[:len_header]==header:\n return header + translate(text[len_header:])\n\n return translate(text)\n\n#export\ndef jupyter_translate(fname, language='pt', rename_source_file=False, print_translation=False):\n \"\"\"\n TODO:\n add dest_path: Destination folder in order to save the translated files.\n \"\"\"\n data_translated = json.load(open(fname, 'r'))\n\n skip_row=False\n for i, cell in enumerate(data_translated['cells']):\n for j, source in enumerate(cell['source']):\n if cell['cell_type']=='markdown':\n if source[:3]=='```':\n skip_row = not skip_row # Invert flag until I find next code block\n\n if not skip_row:\n if source not in ['```\\n', '```', '\\n'] and source[:4] != ' ëëë 2. '\\n' disappeared 3. image's links damaged\n data_translated['cells'][i]['source'][j] = \\\n translate_markdown(source, dest_language=language)\n if print_translation:\n print(data_translated['cells'][i]['source'][j])\n\n if rename_source_file:\n fname_bk = f\"{'.'.join(fname.split('.')[:-1])}_bk.ipynb\" # index.ipynb -> index_bk.ipynb\n\n os.rename(fname, fname_bk)\n print(f'{fname} has been renamed as {fname_bk}')\n\n open(fname,'w').write(json.dumps(data_translated))\n print(f'The {language} translation has been saved as {fname}')\n else:\n dest_fname = f\"{'.'.join(fname.split('.')[:-1])}_{language}.ipynb\" # any.name.ipynb -> any.name_pt.ipynb\n open(dest_fname,'w').write(json.dumps(data_translated))\n print(f'The {language} translation has been saved as {dest_fname}')\n\ndef markdown_translator(input_fpath, output_fpath, input_name_suffix=''):\n with open(input_fpath,'r') as f:\n content = f.readlines()\n content = ''.join(content)\n content_translated = translate_markdown(content)\n if input_name_suffix!='':\n new_input_name=f\"{'.'.join(input_fpath.split('.')[:-1])}{input_name_suffix}.md\"\n os.rename(input_fpath, new_input_name)\n with open(output_fpath, 'w') as f:\n f.write(content_translated)\n\n\nif __name__ == '__main__':\n fire.Fire(jupyter_translate)\n","repo_name":"WittmannF/jupyter-translate","sub_path":"jupyter_translate.py","file_name":"jupyter_translate.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"22"} +{"seq_id":"26365951925","text":"FTN_HTTP_MAGIC_NUM = 0xabcd9876\nFTN_HTTP_CMD_UPLOAD_SUPER4G_FILE = 1007\n#FTN_HTTP_CMD_UPLOAD_SUPER4G_FILE = 1\n\nFTN_UPLOAD_KEY_LEN = 304\nUPLOADFILE_RECV_ERR = -1004\nUPLOADFILE_SEND_ERR = -1003\nUPLOADFILE_CONNECT_ERR = -1002\nUPLOADFILE_SUCCESS = 1000\n\nHTTPSVRPORT = 80\nDATA_SIZE = 1024*1024\nSENDBUF = 16*1024*1024\n","repo_name":"P79N6A/backendProject","sub_path":"server/third_res/acloud/m_struct/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15999757255","text":"\"\"\"This module stores all client's network functions\"\"\"\n\nimport socket\nfrom time import sleep\nfrom Common import messageLib as mlib\nfrom Quartz import IPTYPE\n\n\ndef connectSocket(host: str, port: int) -> socket.socket:\n \"\"\"This function creates a socket and connect it to the host and port\"\"\"\n\n try:\n if IPTYPE == \"IPV6\":\n s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n s.connect((host, port, 0, 0))\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n s.setblocking(False)\n return s\n\n except:\n print(\"The host could not be reached\")\n sleep(3)\n print(\"Trying again!\")\n sleep(5)\n return connectSocket(host, port)\n\n\ndef receiveCMessage(s: socket.socket) -> mlib.Msg:\n \"\"\"This function returns a message sent from the server\"\"\"\n\n try:\n msg: mlib.Msg = mlib.Msg.decode(s.recv(4096))\n if msg:\n return msg\n\n else:\n print(\"CONNECTION WITH HOST WAS INTERRUPTED!\")\n sleep(1)\n print(\"RESTORING CONNECTION\")\n sleep(3)\n receiveCMessage(s)\n\n except:\n pass\n\n\ndef sendCMessage(s: socket.socket, msg: mlib.Msg) -> None:\n \"\"\"This function sends a message to the server\"\"\"\n\n try:\n s.sendall(msg.encode())\n except:\n print(\"Connection with host is unstable, trying to send message again...\")\n sleep(2)\n sendCMessage(s, msg)\n","repo_name":"clr-cera/Quartz","sub_path":"src/Quartz/ClientLib/clientWire.py","file_name":"clientWire.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"75572438775","text":"from django.shortcuts import render\nfrom .models import Casa, Cota\nfrom django.template import loader\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nimport locale\nimport re\nimport requests\n#from bs4 import BeautifulSoup\n\n\n# Create your views here.\nfrom imoveis.forms import CadastroImovel\n\n\ndef index(request):\n casas = Casa.objects.order_by('data_cadastro')\n template = loader.get_template('index.html')\n context = {\n 'casas': casas,\n }\n return HttpResponse(template.render(context, request))\n\ndef imovel(request, imovel_id):\n imovel = get_object_or_404(Casa, pk=imovel_id)\n cota = Cota.objects.filter(valor__range=(imovel.valor, imovel.valor*2))[0]\n return render(request, 'detalhe_imovel.html', {'imovel': imovel, 'cota':cota})\n\n\ndef cadastro_imovel(request):\n if request.method == 'POST': \n form = CadastroImovel(request.POST, request.FILES) \n if form.is_valid(): \n form.save() \n \n # Getting the current instance object to display in the template \n img_object = form.instance \n \n return render(request, 'cadastro.html', {'form': form, 'img_obj': img_object}) \n else: \n form = CadastroImovel() \n \n return render(request, 'cadastro.html', {'form': form}) \n\n\n\n\ndef imoveis_filter(request):\n casas = Casa.objects.all()\n\n dorms = request.POST.get('dorms')\n if int(dorms) > 0:\n casas = Casa.objects.filter(dormitorios = dorms)\n\n \n suites = request.POST.get('suites')\n if int(suites) > 0:\n casas = Casa.objects.filter(suites=suites)\n\n garagem = request.POST.get('garagem')\n if int(garagem) > 0:\n casas = Casa.objects.filter(vagas = garagem)\n\n \n area_servico = request.POST.get('area_servico')\n if area_servico == \"1\":\n casas = Casa.objects.filter(area_servico= True)\n else:\n area_servico = False\n\n piscina = request.POST.get('piscina')\n if piscina == \"1\":\n casas = Casa.objects.filter(piscina = True)\n\n\n# if piscina == \"1\":\n# casas = Casa.objects.filter(piscina = True)\n# else:\n# casas = Casa.objects.filter(piscina = False)\n\n\n\n churrasqueira = request.POST.get('churrasqueira')\n if churrasqueira == \"1\":\n casas = Casa.objects.filter(churrasqueira = True)\n\n area_gourmet = request.POST.get('area_gourmet')\n if area_gourmet == \"1\":\n casas = Casa.objects.filter(area_gourmet = True)\n\n values = [ 100000, 300000 ],\n\n fp_min = request.POST.get('fp_min')\n fp_max = request.POST.get('fp_max')\n\n #if fp_min != values[0] or fp_max != values[1]:\n # casas = Casa.objects.filter(valor__range=(fp_min, fp_max))\n\n\n \n \n \n \n \n template = loader.get_template('index.html')\n context = {\n 'casas': casas,\n }\n return HttpResponse(template.render(context, request))\n\n\n\n\n\n\n\ndef salvar_imovel(request):\n #query_dict = request.POST\n #print(query_dict)\n\n nome_exibicao = request.POST.get('nome_exibicao')\n endereco = request.POST.get('endereco')\n bairro = request.POST.get('bairro')\n cidade = request.POST.get('cidade')\n valor = request.POST.get('valor')\n m2_construido = request.POST.get('m2_construido')\n m2_total = request.POST.get('m2_total')\n dormitorios = request.POST.get('dormitorios')\n suites = request.POST.get('suites')\n vagas = request.POST.get('vagas')\n area_servico = request.POST.get('area_servico')\n \n if area_servico == \"on\":\n area_servico = True\n else:\n area_servico = False\n \n churrasqueira = request.POST.get('churrasqueira')\n \n if churrasqueira == 'on':\n churrasqueira = True\n else:\n churrasqueira = False\n\n piscina = request.POST.get('piscina')\n \n if piscina == 'on':\n piscina = True\n else:\n piscina = False\n \n area_gourmet = request.POST.get('area_gourmet')\n \n if area_gourmet == 'on':\n area_gourmet = True\n else:\n area_gourmet = False\n \n descricao = request.POST.get('descricao')\n foto1 = request.FILES.get('foto1')\n foto2 = request.FILES.get('foto2')\n foto3 = request.FILES.get('foto3')\n foto4 = request.FILES.get('foto4')\n lat = request.POST.get('lat')\n lng = request.POST.get('lng')\n\n Casa.objects.create(nome_exibicao=nome_exibicao, \n endereco=endereco, \n bairro=bairro,\n cidade=cidade,\n valor=valor, \n m2_construido=m2_construido,\n m2_total=m2_total,\n dormitorios=dormitorios,\n suites=suites,\n vagas=vagas,\n area_servico=area_servico,\n churrasqueira=churrasqueira,\n piscina=piscina,\n area_gourmet=area_gourmet,\n descricao=descricao,\n foto1=foto1,\n foto2=foto2,\n foto3=foto3,\n foto4=foto4,\n )\n\n return HttpResponseRedirect('/')\n\n\ndef update_agent(request):\n# class Cotas:\n# url = \"\"\n# carta = \"\"\n# credito = 0\n# entrada = 0\n# parcelas = \"\"\n# segmento = \"Imóveis\"\n# vencimento = \"\"\n# codigo = 0\n#\n#\n# locale.setlocale(locale.LC_MONETARY, \"pt_BR.UTF-8\")\n# headers = {\n# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',\n# }\n#\n# html_content = requests.get(\"https://contempladoschapeco.com.br/consorcio/imovel/\", headers=headers).text\n# soup = BeautifulSoup(html_content,features=\"html.parser\")\n# lista_maior = []\n# obj_list = []\n#\n# table = soup.find_all('table')\n# tds = soup.find_all('td')\n#\n# for a in tds:\n# data = a.contents\n# lista_maior.append(data)\n#\n# chunks = [lista_maior[x:x+6] for x in range(0, len(lista_maior), 6)]\n#\n# for a in chunks:\n# index = chunks.index(a)\n# obj = Cotas()\n# credito = int(re.sub('\\D','',a[0][0]))/100\n# entrada = (int(re.sub('\\D','',a[1][0]))/100) + (credito * 0.07)\n# try:\n# parcelas = a[2][0] + \" \" + a[5][0]\n# except:\n# parcelas = a[2][0]\n# finally:\n# administradora = a[3][0]\n# vencimento = \"Dia \" + a[4][0][0:2]\n#\n# obj.credito = credito\n# obj.carta = administradora\n# obj.entrada = entrada\n# obj.parcelas = parcelas\n# obj.vencimento = vencimento\n#\n# if administradora == \"Caixa\":\n# obj.url = \"https://www.contempladaaqui.com.br/wp-content/uploads/2021/05/caixa.png\"\n# elif administradora == \"Bradesco\":\n# obj.url = \"https://www.contempladaaqui.com.br/wp-content/uploads/2021/07/Bradesco.png\"\n# elif administradora == \"Itau\":\n# obj.url = \"https://www.contempladaaqui.com.br/wp-content/uploads/2021/07/Itau.png\"\n# elif administradora == \"Caixa | SX5\":\n# obj.url = \"https://www.contempladaaqui.com.br/wp-content/uploads/2021/05/caixa.png\"\n# else:\n# obj.url = \"\"\n#\n# obj.codigo = 12585 + index\n# obj.credito = credito\n# obj.entrada = entrada\n# obj_list.append(obj)\n#\n# for a in obj_list:\n# cota = Cota.objects.create(codigo = a.codigo, administradora = a.carta,\n# valor = a.credito, entrada = a.entrada, parcelas = a.parcelas, segmento = a.segmento, vencimento = a.vencimento, img = a.url )\n\n\n return HttpResponse(\"Dados inseridos!\")\n\n\n\n","repo_name":"alexafonsodossantos/dfsimoveis","sub_path":"imoveis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7342,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72391990777","text":"\"\"\"Classes to handle plotting during the training.\"\"\"\nfrom __future__ import print_function, division\nimport math\nimport cPickle as pickle\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nGROWTH_BY = 500\n\nclass History(object):\n def __init__(self):\n self.line_groups = OrderedDict()\n\n @staticmethod\n def from_string(s):\n return pickle.loads(s)\n\n def to_string(self):\n return pickle.dumps(self, protocol=-1)\n\n @staticmethod\n def load_from_filepath(fp):\n #return json.loads(open(, \"r\").read())\n with open(fp, \"r\") as f:\n history = pickle.load(f)\n return history\n\n def save_to_filepath(self, fp):\n with open(fp, \"w\") as f:\n pickle.dump(self, f, protocol=-1)\n\n def add_group(self, group_name, line_names, increasing=True):\n self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing)\n\n def add_value(self, group_name, line_name, x, y, average=False):\n self.line_groups[group_name].lines[line_name].append(x, y, average=average)\n\n def get_group_names(self):\n return list(self.line_groups.iterkeys())\n\n def get_groups_increasing(self):\n return [group.increasing for group in self.line_groups.itervalues()]\n\n def get_max_x(self):\n return max([group.get_max_x() for group in self.line_groups.itervalues()])\n\n def get_recent_average(self, group_name, line_name, nb_points):\n ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]\n return np.average(ys)\n\nclass LineGroup(object):\n def __init__(self, group_name, line_names, increasing=True):\n self.group_name = group_name\n self.lines = OrderedDict([(name, Line()) for name in line_names])\n self.increasing = increasing\n self.xlim = (None, None)\n\n def get_line_names(self):\n return list(self.lines.iterkeys())\n\n def get_line_xs(self):\n #return [line.xs for line in self.lines.itervalues()]\n \"\"\"\n for key, line in self.lines.items():\n if not hasattr(line, \"last_index\"):\n print(self.group_name, key, \"no last index\")\n else:\n print(self.group_name, key, \"OK\")\n print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes))\n \"\"\"\n return [line.get_xs() for line in self.lines.itervalues()]\n\n def get_line_ys(self):\n #return [line.ys for line in self.lines.itervalues()]\n return [line.get_ys() for line in self.lines.itervalues()]\n\n def get_max_x(self):\n #return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()])\n return max([np.maximum(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()])\n\n\"\"\"\nclass Line(object):\n def __init__(self, xs=None, ys=None, counts=None, datetimes=None):\n self.xs = xs if xs is not None else []\n self.ys = ys if ys is not None else []\n self.counts = counts if counts is not None else []\n self.datetimes = datetimes if datetimes is not None else []\n self.last_index = -1\n\n def append(self, x, y, average=False):\n # legacy (for loading from pickle)\n #if not hasattr(self, \"counts\"):\n # self.counts = [1] * len(self.xs)\n # ---\n\n if not average or len(self.xs) == 0 or self.xs[-1] != x:\n self.xs.append(x)\n self.ys.append(float(y)) # float to get rid of numpy\n self.counts.append(1)\n self.datetimes.append(time.time())\n else:\n count = self.counts[-1]\n self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1)\n self.counts[-1] += 1\n self.datetimes[-1] = time.time()\n\"\"\"\n\nclass Line(object):\n def __init__(self, xs=None, ys=None, counts=None, datetimes=None):\n zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)\n self.xs = xs if xs is not None else np.copy(zeros)\n self.ys = ys if ys is not None else zeros.astype(np.float32)\n self.counts = counts if counts is not None else zeros.astype(np.uint16)\n self.datetimes = datetimes if datetimes is not None else zeros.astype(np.uint64)\n self.last_index = -1\n\n # for legacy as functions, replace with properties\n def get_xs(self):\n # legacy\n if isinstance(self.xs, list):\n self._legacy_convert_from_list_to_np()\n\n return self.xs[0:self.last_index+1]\n\n def get_ys(self):\n return self.ys[0:self.last_index+1]\n\n def get_counts(self):\n return self.counts[0:self.last_index+1]\n\n def get_datetimes(self):\n return self.datetimes[0:self.last_index+1]\n\n def _legacy_convert_from_list_to_np(self):\n #print(\"is list!\")\n print(\"[plotting] Converting from list to numpy...\")\n self.last_index = len(self.xs) - 1\n self.xs = np.array(self.xs, dtype=np.int32)\n self.ys = np.array(self.ys, dtype=np.float32)\n self.counts = np.array(self.counts, dtype=np.uint16)\n self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64)\n\n def append(self, x, y, average=False):\n # legacy (for loading from pickle)\n #if not hasattr(self, \"counts\"):\n # self.counts = [1] * len(self.xs)\n # ---\n\n #legacy\n if isinstance(self.xs, list):\n self._legacy_convert_from_list_to_np()\n\n if (self.last_index+1) == self.xs.shape[0]:\n #print(\"growing from %d by %d...\" % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)\n zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY)\n self.xs = np.append(self.xs, np.copy(zeros))\n self.ys = np.append(self.ys, zeros.astype(np.float32))\n self.counts = np.append(self.counts, zeros.astype(np.uint16))\n self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64))\n #print(\"growing done\", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape)\n\n first_entry = (self.last_index == -1)\n if not average or first_entry or self.xs[self.last_index] != x:\n idx = self.last_index + 1\n self.xs[idx] = x\n self.ys[idx] = y\n self.counts[idx] = 1\n self.datetimes[idx] = int(time.time()*1000)\n self.last_index = idx\n else:\n idx = self.last_index\n count = self.counts[idx]\n self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1)\n self.counts[idx] = count + 1\n self.datetimes[idx] = int(time.time()*1000)\n\n #print(\"added\", x, y, average)\n #print(self.xs[self.last_index-10:self.last_index+10+1])\n #print(self.ys[self.last_index-10:self.last_index+10+1])\n #print(self.counts[self.last_index-10:self.last_index+10+1])\n #print(self.datetimes[self.last_index-10:self.last_index+10+1])\n\nclass LossPlotter(object):\n def __init__(self, titles, increasing, save_to_fp):\n assert len(titles) == len(increasing)\n n_plots = len(titles)\n self.titles = titles\n self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)])\n self.xlim = dict([(title, (None, None)) for title in titles])\n self.colors = [\"red\", \"blue\", \"cyan\", \"magenta\", \"orange\", \"black\"]\n\n self.nb_points_max = 500\n self.save_to_fp = save_to_fp\n self.start_batch_idx = 0\n self.autolimit_y = False\n self.autolimit_y_multiplier = 5\n\n #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))\n nrows = max(1, int(math.sqrt(n_plots)))\n ncols = int(math.ceil(n_plots / nrows))\n width = ncols * 10\n height = nrows * 10\n\n self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height))\n\n if nrows == 1 and ncols == 1:\n self.axes = [self.axes]\n else:\n self.axes = self.axes.flat\n\n title_to_ax = dict()\n for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):\n title_to_ax[title] = ax\n self.title_to_ax = title_to_ax\n\n self.fig.tight_layout()\n self.fig.subplots_adjust(left=0.05)\n\n def plot(self, history):\n for plot_idx, title in enumerate(self.titles):\n ax = self.title_to_ax[title]\n group_name = title\n group_increasing = self.increasing[title]\n group = history.line_groups[title]\n line_names = group.get_line_names()\n #print(\"getting line x/y...\", time.time())\n line_xs = group.get_line_xs()\n line_ys = group.get_line_ys()\n #print(\"getting line x/y FIN\", time.time())\n\n \"\"\"\n print(\"title\", title)\n print(\"line_names\", line_names)\n for i, xx in enumerate(line_xs):\n print(\"line_xs i: \", xx)\n for i, yy in enumerate(line_ys):\n print(\"line_ys i: \", yy)\n \"\"\"\n if any([len(xx) > 0 for xx in line_xs]):\n xs_min = min([min(xx) for xx in line_xs if len(xx) > 0])\n xs_max = max([max(xx) for xx in line_xs if len(xx) > 0])\n xlim = self.xlim[title]\n xlim = [\n max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1),\n xs_max+1 if xlim[1] is None else xlim[1]\n ]\n if xlim[0] < 0:\n xlim[0] = max(xs_max - abs(xlim[0]), 0)\n if xlim[1] < 0:\n xlim[1] = max(xs_max - abs(xlim[1]), 1)\n else:\n # none of the lines has any value, so just use dummy values\n # to avoid min/max of empty sequence errors\n xlim = [\n 0 if self.xlim[title][0] is None else self.xlim[title][0],\n 1 if self.xlim[title][1] is None else self.xlim[title][1]\n ]\n\n self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim)\n self.fig.savefig(self.save_to_fp)\n\n # this seems to be slow sometimes\n def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None):\n def _add_point(points_x, points_y, curr_sum, counter):\n points_x.append(batch_idx)\n y = curr_sum / counter\n if limit_y_min is not None and limit_y_max is not None:\n y = np.clip(y, limit_y_min, limit_y_max)\n elif limit_y_min is not None:\n y = max(y, limit_y_min)\n elif limit_y_max is not None:\n y = min(y, limit_y_max)\n points_y.append(y)\n\n nb_points = 0\n for i in range(len(line_x)):\n batch_idx = line_x[i]\n if xlim[0] <= batch_idx < xlim[1]:\n nb_points += 1\n\n point_every = max(1, int(nb_points / self.nb_points_max))\n points_x = []\n points_y = []\n curr_sum = 0\n counter = 0\n for i in range(len(line_x)):\n batch_idx = line_x[i]\n if xlim[0] <= batch_idx < xlim[1]:\n curr_sum += line_y[i]\n counter += 1\n if counter >= point_every:\n _add_point(points_x, points_y, curr_sum, counter)\n counter = 0\n curr_sum = 0\n if counter > 0:\n _add_point(points_x, points_y, curr_sum, counter)\n\n return points_x, points_y\n\n def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim):\n ax.cla()\n ax.grid()\n\n if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]):\n min_x = min([np.min(line_x) for line_x in line_xs])\n max_x = max([np.max(line_x) for line_x in line_xs])\n min_y = min([np.min(line_y) for line_y in line_ys])\n max_y = max([np.max(line_y) for line_y in line_ys])\n\n if group_increasing:\n if max_y > 0:\n limit_y_max = None\n limit_y_min = max_y / self.autolimit_y_multiplier\n if min_y > limit_y_min:\n limit_y_min = None\n else:\n if min_y > 0:\n limit_y_max = min_y * self.autolimit_y_multiplier\n limit_y_min = None\n if max_y < limit_y_max:\n limit_y_max = None\n\n if limit_y_min is not None:\n ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c=\"purple\")\n\n if limit_y_max is not None:\n ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c=\"purple\")\n\n # y achse range begrenzen\n yaxmin = min_y if limit_y_min is None else limit_y_min\n yaxmax = max_y if limit_y_max is None else limit_y_max\n yrange = yaxmax - yaxmin\n yaxmin = yaxmin - (0.05 * yrange)\n yaxmax = yaxmax + (0.05 * yrange)\n ax.set_ylim([yaxmin, yaxmax])\n else:\n limit_y_min = None\n limit_y_max = None\n\n for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors):\n #print(\"line to xy...\", time.time())\n x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max)\n #print(\"line to xy FIN\", time.time())\n #print(\"plotting ax...\", time.time())\n ax.plot(x, y, color=line_col, linewidth=1.0)\n #print(\"plotting ax FIN\", time.time())\n\n ax.set_title(group_name)\n","repo_name":"aleju/self-driving-truck","sub_path":"lib/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":13772,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"22"} +{"seq_id":"9488251879","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom tensorflow.python.keras.models import Sequential\r\nfrom tensorflow.python.keras.layers import Dense\r\nfrom tensorflow.python.keras.layers import LSTM\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\ncolumnas_factores = [3, 8, 13, 18, 23]\r\nfichero_factores = 'excel/factor_adecuaçao.xlsx'\r\n\r\n# specify the number of lag hours\r\nn_hours = 1 # hasta (t - n_hours)\r\nn_features = 4 # variables\r\nn_obs = n_hours * n_features\r\n\r\nventana = 24\r\n\r\n\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef to_stationary(ts):\r\n\r\n ts = pd.DataFrame(ts)\r\n ts_log = np.log(ts)\r\n moving_avg = ts_log.rolling(min_periods=1, center=True, window=ventana).mean()\r\n\r\n # quitando rolling mean\r\n ts_log_moving_avg_diff = ts_log - moving_avg\r\n ts_log_moving_avg_diff = ts_log_moving_avg_diff.dropna()\r\n\r\n plt.plot(ts)\r\n plt.plot(ts_log_moving_avg_diff, color='green')\r\n plt.plot(moving_avg, color='red')\r\n plt.title('Estacionaria')\r\n plt.show()\r\n\r\n return ts_log_moving_avg_diff.values\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n# convert series to supervised learning\r\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\r\n\r\n n_vars = 1 if type(data) is list else data.shape[1]\r\n df = pd.DataFrame(data)\r\n cols, names = list(), list()\r\n # input sequence (t-n, ... t-1)\r\n for i in range(n_in, 0, -1):\r\n cols.append(df.shift(i))\r\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\r\n\r\n # forecast sequence (t, t+1, ... t+n)\r\n for i in range(0, n_out):\r\n cols.append(df.shift(-i))\r\n if i == 0:\r\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\r\n else:\r\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\r\n # put it all together\r\n agg = pd.concat(cols, axis=1)\r\n agg.columns = names\r\n # drop rows with NaN values\r\n if dropnan:\r\n agg.dropna(inplace=True)\r\n\r\n return agg\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef plot_dataset(dataset):\r\n\r\n # specify columns to plot\r\n groups = np.arange(n_features)\r\n i = 1\r\n # plot each column\r\n plt.figure()\r\n for group in groups:\r\n plt.subplot(len(groups), 1, i)\r\n plt.plot(dataset[:, group])\r\n plt.title(group, y=0.5, loc='right')\r\n i += 1\r\n plt.show()\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef pre_proccesing_and_train(fichero_factores):\r\n\r\n dataframe_factores = pd.read_excel(fichero_factores, sheet_name='pasado')\r\n\r\n factores = dataframe_factores[['ENE18', 'MAR18', 'ABR18', 'MAY18']].values\r\n factores = factores.transpose()\r\n factores = factores.flatten()\r\n factores = factores[np.logical_not(np.isnan(factores))]\r\n\r\n temperaturas = dataframe_factores[['TEMP_ENE18', 'TEMP_MAR18', 'TEMP_ABR18', 'TEMP_MAY18']].values\r\n temperaturas = temperaturas.transpose()\r\n temperaturas = temperaturas.flatten()\r\n temperaturas = temperaturas[np.logical_not(np.isnan(temperaturas))]\r\n\r\n festivos = dataframe_factores[['FESTIVO_ENE18', 'FESTIVO_MAR18', 'FESTIVO_ABR18', 'FESTIVO_MAY18']].values\r\n festivos = festivos.transpose()\r\n festivos = festivos.flatten()\r\n festivos = festivos[np.logical_not(np.isnan(festivos))]\r\n\r\n demandas = dataframe_factores[['DEMANDA_ENE18', 'DEMANDA_MAR18', 'DEMANDA_ABR18', 'DEMANDA_MAY18']].values\r\n demandas = demandas.transpose()\r\n demandas = demandas.flatten()\r\n demandas = demandas[np.logical_not(np.isnan(demandas))]\r\n\r\n # lo vuelvo estacionario\r\n factores = to_stationary(factores)\r\n factores = factores.flatten()\r\n\r\n dataset = np.vstack((factores, temperaturas, festivos, demandas))\r\n dataset = dataset.transpose()\r\n\r\n values = dataset\r\n # integer encode direction (para strings, en este caso no hace falta)\r\n '''encoder = LabelEncoder()\r\n values[:, 3] = encoder.fit_transform(values[:, 3])'''\r\n # ensure all data is float\r\n values = values.astype('float32')\r\n # normalize features\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n scaled = scaler.fit_transform(values)\r\n # frame as supervised learning\r\n reframed = series_to_supervised(scaled, n_hours, 1)\r\n # drop columns we don't want to predict\r\n if n_hours == 1:\r\n reframed.drop(reframed.columns[[5, 6, 7]], axis=1, inplace=True)\r\n print(reframed.head())\r\n\r\n # split into train and test sets\r\n values = reframed.values\r\n n_train_hours = 2208 # 2208\r\n train = values[:n_train_hours, :]\r\n test = values[n_train_hours:, :]\r\n # split into input and outputs\r\n if n_hours == 1:\r\n train_X, train_y = train[:, :-1], train[:, -1]\r\n test_X, test_y = test[:, :-1], test[:, -1]\r\n # reshape input to be 3D [samples, timesteps, features]\r\n train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))\r\n test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))\r\n else:\r\n train_X, train_y = train[:, :n_obs], train[:, -n_features]\r\n test_X, test_y = test[:, :n_obs], test[:, -n_features]\r\n # reshape input to be 3D [samples, timesteps, features]\r\n train_X = train_X.reshape((train_X.shape[0], n_hours, n_features))\r\n test_X = test_X.reshape((test_X.shape[0], n_hours, n_features))\r\n\r\n print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)\r\n\r\n # design network\r\n model = Sequential()\r\n model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))\r\n model.add(Dense(1))\r\n model.compile(loss='mae', optimizer='adam')\r\n # fit network\r\n history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=0,\r\n shuffle=False)\r\n '''# plot history\r\n plt.plot(history.history['loss'], label='train')\r\n plt.plot(history.history['val_loss'], label='test')\r\n plt.legend()\r\n plt.show()'''\r\n\r\n # make a prediction\r\n yhat = model.predict(test_X)\r\n test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))\r\n # invert scaling for forecast\r\n inv_yhat = np.concatenate((yhat, test_X[:, 1:]), axis=1)\r\n inv_yhat = scaler.inverse_transform(inv_yhat)\r\n print(inv_yhat)\r\n inv_yhat = inv_yhat[:, 0]\r\n # invert scaling for actual\r\n test_y = test_y.reshape((len(test_y), 1))\r\n inv_y = np.concatenate((test_y, test_X[:, 1:]), axis=1)\r\n inv_y = scaler.inverse_transform(inv_y)\r\n inv_y = inv_y[:, 0]\r\n # calculate RMSE\r\n # quitamos el ultimo por el descuadre (en la fila de validacion el factor es el actual mientras\r\n # que en el de prediccion es el siguiente\r\n rmse = np.sqrt(mean_squared_error(inv_y[:-1], inv_yhat[1:]))\r\n print('Test RMSE: %.3f' % rmse)\r\n\r\n # reconstruyendo\r\n yhat_exp = np.exp(inv_yhat)\r\n\r\n # dibujamos\r\n plt.plot(np.exp(inv_y[:-1]))\r\n plt.plot(yhat_exp[1:], color='green') #subimos por el descuadre al dibujar\r\n plt.show()\r\n\r\n np.savetxt('txt/predicciones_factores_validacion.txt', yhat_exp, newline='\\n')\r\n\r\n return model\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef predecir_siguiente(dataset):\r\n\r\n # los transformo para el modelo\r\n values = dataset\r\n # ensure all data is float\r\n values = values.astype('float32')\r\n # normalize features\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n scaled = scaler.fit_transform(values)\r\n # frame as supervised learning\r\n reframed = series_to_supervised(scaled, n_hours, 1)\r\n # drop columns we don't want to predict\r\n if n_hours == 1:\r\n reframed.drop(reframed.columns[[5, 6, 7]], axis=1, inplace=True)\r\n\r\n # split into train and test sets\r\n test = reframed.values\r\n if n_hours == 1:\r\n test_X = test[:, :-1]\r\n # reshape input to be 3D [samples, timesteps, features]\r\n test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))\r\n else:\r\n test_X = test[:, :n_obs]\r\n test_X = test_X.reshape((test_X.shape[0], n_hours, n_features))\r\n\r\n # hago la prediccion\r\n yhat = model.predict(test_X)\r\n if n_hours == 1:\r\n test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))\r\n else:\r\n test_X = test_X.reshape((test_X.shape[0], n_obs))\r\n # invert scaling for forecast\r\n if n_hours == 1:\r\n inv_yhat = np.concatenate((yhat, test_X[:, 1:]), axis=1)\r\n else:\r\n inv_yhat = np.concatenate((yhat, test_X[:, -3:]), axis=1)\r\n inv_yhat = scaler.inverse_transform(inv_yhat)\r\n inv_yhat = inv_yhat[:, 0]\r\n\r\n # reconstruyendo\r\n yhat_exp = np.exp(inv_yhat)\r\n\r\n return yhat_exp\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef get_filas(fichero_factores, num):\r\n\r\n df_mes = pd.read_excel(fichero_factores, sheet_name='pasado')\r\n\r\n factores = df_mes['ENE18'].values\r\n factores = factores[np.logical_not(np.isnan(factores))]\r\n temperaturas_mes = df_mes['TEMP_ENE18'].values\r\n temperaturas_mes = temperaturas_mes[np.logical_not(np.isnan(temperaturas_mes))]\r\n festivos_mes = df_mes['FESTIVO_ENE18'].values\r\n festivos_mes = festivos_mes[np.logical_not(np.isnan(festivos_mes))]\r\n demandas_mes = df_mes['DEMANDA_ENE18'].values\r\n demandas_mes = demandas_mes[np.logical_not(np.isnan(demandas_mes))]\r\n\r\n dataset_mes = np.vstack((factores, temperaturas_mes, festivos_mes, demandas_mes))\r\n dataset_mes = dataset_mes.transpose()\r\n\r\n return dataset_mes[:num, :]\r\n\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\ndef predict_enero(mes, year, hoja):\r\n\r\n print('*********************************************************')\r\n print('*********************************************************')\r\n print('*********************************************************')\r\n # *******************************************************************\r\n # ******************************************************************\r\n # ******************************************************************\r\n #leo los que quiero predecir (en este caso may18)\r\n df_mes = pd.read_excel(fichero_factores, sheet_name=hoja)\r\n temperaturas_mes = df_mes['TEMP_' + mes + str(year)].values\r\n temperaturas_mes = temperaturas_mes[np.logical_not(np.isnan(temperaturas_mes))]\r\n festivos_mes = df_mes['LAB_' + mes + str(year)].values\r\n festivos_mes = festivos_mes[np.logical_not(np.isnan(festivos_mes))]\r\n demandas_mes = df_mes['DEMANDA_' + mes + str(year)].values\r\n demandas_mes = demandas_mes[np.logical_not(np.isnan(demandas_mes))]\r\n\r\n # inicializo con el mes pasado de 2018\r\n df_pasado = pd.read_excel(fichero_factores, sheet_name='pasado')\r\n factores_iniciales = df_pasado[mes + '18'].values\r\n factores_iniciales = factores_iniciales[np.logical_not(np.isnan(factores_iniciales))]\r\n # lo vuelvo estacionario\r\n factores_iniciales = to_stationary(factores_iniciales)\r\n factores_iniciales = factores_iniciales.flatten()\r\n\r\n dataset_mes = np.vstack((factores_iniciales, temperaturas_mes, festivos_mes, demandas_mes))\r\n dataset_mes = dataset_mes.transpose()\r\n\r\n # preparo el array y predigo la siguiente hora\r\n fac_pred_sig = predecir_siguiente(dataset_mes)\r\n\r\n # y guardo\r\n np.savetxt('txt/predicciones_factores.txt', fac_pred_sig, newline='\\n')\r\n\r\n\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\n#-----------------------------------------------------------------------------\r\nmodel = pre_proccesing_and_train(fichero_factores)\r\npredict_enero('ENE', 19, 'esperado')","repo_name":"acardoco/keras_timeSeriesForecasting_REN_Portugal","sub_path":"predecir_factores.py","file_name":"predecir_factores.py","file_ext":"py","file_size_in_byte":12940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71813579255","text":"## BIBLIOTECAS\nfrom st_pages import add_page_title\nfrom pandas import json_normalize\nfrom bs4 import BeautifulSoup\nfrom io import BytesIO\n\nimport urllib.request, json\nimport plotly.express as px\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport requests\nimport urllib\nimport re\n\nadd_page_title(layout=\"wide\")\n# CACHEAR OS DADOS AO ABRIR A PAQUINA\n# @st.cache_data\n\n# DADOS\n## WEB SCRAPING CSV LEITOS HOSPITALARES COVID\nurl = 'https://opendatasus.saude.gov.br/dataset/registro-de-ocupacao-hospitalar-covid-19'\n\nreq = requests.get(url)\nsoup = BeautifulSoup(req.text, features=\"html.parser\")\n\nli = [i.split(\" \")[0].replace('\"',\"\") for i in str(soup.find_all(href=re.compile('LeitoOcupacao'))).replace('[ 0:\n estados = estados \nelse:\n estados = df_leitos_hosp_covid['estado'].unique()\n\nif len(municipio) > 0:\n municipio = municipio \nelse:\n municipio = df_leitos_hosp_covid['municipio'].unique()\n\n# APLICANDO OS FILTROS \n## CRIA A QUERY PARA OS FILTROS\nquery = '''\n estado in @estados and \\\n municipio in @municipio\n'''\n\n## APLICA OS FILTROS DA QUERY\ndf_leitos_hosp_covid = df_leitos_hosp_covid.query(query)\n\ndf_leitos_hosp_covid['ocupacao_cli_total'] = (df_leitos_hosp_covid['ocupacao_suspeito_cli'] + \n df_leitos_hosp_covid['ocupacao_confirmado_cli'] + \n df_leitos_hosp_covid['ocupacao_covid_cli'] + \n df_leitos_hosp_covid['ocupacao_hospitalar_cli'])\n\ndf_leitos_hosp_covid['ocupacao_uti_total'] = (df_leitos_hosp_covid['ocupacao_suspeito_uti'] + \n df_leitos_hosp_covid['ocupacao_confirmado_uti'] + \n df_leitos_hosp_covid['ocupacao_covid_uti'] + \n df_leitos_hosp_covid['ocupacao_hospitalar_uti'])\n\nl = [\n 0,\n 'GOIAS'\n]\nleitos_percentual = df_leitos_hosp_covid[~df_leitos_hosp_covid['estado'].isin(l)].groupby('estado')[['ocupacao_cli_total','ocupacao_uti_total','ocupacao_covid_uti','ocupacao_covid_cli']].sum().reset_index()\n\nleitos_percentual['percentual_uti'] = leitos_percentual['ocupacao_covid_uti'] / leitos_percentual['ocupacao_uti_total']\nleitos_percentual['percentual_cli'] = leitos_percentual['ocupacao_covid_cli'] / leitos_percentual['ocupacao_cli_total']\n\nap1 = leitos_percentual[['estado','percentual_uti']]\nap1 = ap1.rename(columns={'percentual_uti':'vl_percente'})\nap1['status'] = 'Percentual Ocupacao UTI por COVID-19'\n\nap2 = leitos_percentual[['estado','percentual_cli']]\nap2 = ap2.rename(columns={'percentual_cli':'vl_percente'})\nap2['status'] = 'Percentual Ocupacao Clinica por COVID-19'\n\nleitos_percentual_geral_estados = pd.concat([ap1, ap2])\n\nl = [\n 0,\n 'GOIAS'\n]\nleitos_percentual = df_leitos_hosp_covid[~df_leitos_hosp_covid['municipio'].isin(l)].groupby('municipio')[['ocupacao_cli_total','ocupacao_uti_total','ocupacao_covid_uti','ocupacao_covid_cli']].sum().reset_index()\n\nleitos_percentual['percentual_uti'] = leitos_percentual['ocupacao_covid_uti'] / leitos_percentual['ocupacao_uti_total']\nleitos_percentual['percentual_cli'] = leitos_percentual['ocupacao_covid_cli'] / leitos_percentual['ocupacao_cli_total']\n\nap1 = leitos_percentual[['municipio','percentual_uti']]\nap1 = ap1.rename(columns={'percentual_uti':'vl_percente'})\nap1['status'] = 'Percentual Ocupacao UTI por COVID-19'\n\nap2 = leitos_percentual[['municipio','percentual_cli']]\nap2 = ap2.rename(columns={'percentual_cli':'vl_percente'})\nap2['status'] = 'Percentual Ocupacao Clinica por COVID-19'\n\nleitos_percentual_geral_municipios = pd.concat([ap1, ap2])\n\n## LEITOS HOSPITALARES COVID\nfig_leitos_estados = px.histogram(leitos_percentual_geral_estados, \n x=\"estado\", \n y=\"vl_percente\",\n color='status', \n barmode='group',\n height=400,\n title='Leitos por Estados')\n\nfig_leitos_estados.update_layout(yaxis_title='')\nfig_leitos_estados.update_layout(xaxis_title='')\n\nfig_leitos_municipios = px.histogram(leitos_percentual_geral_municipios, \n y=\"municipio\", \n x=\"vl_percente\",\n color='status', \n barmode='group', \n height=1000,\n title='Leitos por Municipios')\n\nfig_leitos_municipios.update_layout(yaxis_title='')\nfig_leitos_municipios.update_layout(xaxis_title='')\n\n# VISUALIZACAO STREAMLIT\nst.plotly_chart(fig_leitos_estados, use_container_width=True)\nst.plotly_chart(fig_leitos_municipios, use_container_width=True)\n ","repo_name":"matatathiasdev/Analise-COVID-19","sub_path":"pages/leitos.py","file_name":"leitos.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75101607094","text":"import os\n\nfrom breezy import osutils, tests, workingtree\n\n\nclass TestJoin(tests.TestCaseWithTransport):\n\n def make_trees(self):\n base_tree = self.make_branch_and_tree('tree',\n format='development-subtree')\n base_tree.commit('empty commit')\n self.build_tree(['tree/subtree/', 'tree/subtree/file1'])\n sub_tree = self.make_branch_and_tree('tree/subtree')\n sub_tree.add('file1', ids=b'file1-id')\n sub_tree.commit('added file1')\n return base_tree, sub_tree\n\n def check_success(self, path):\n base_tree = workingtree.WorkingTree.open(path)\n self.assertEqual(b'file1-id', base_tree.path2id('subtree/file1'))\n\n def test_join(self):\n base_tree, sub_tree = self.make_trees()\n self.run_bzr('join tree/subtree')\n self.check_success('tree')\n\n def test_join_dot(self):\n base_tree, sub_tree = self.make_trees()\n self.run_bzr('join .', working_dir='tree/subtree')\n self.check_success('tree')\n\n def test_join_error(self):\n base_tree, sub_tree = self.make_trees()\n os.mkdir('tree/subtree2')\n osutils.rename('tree/subtree', 'tree/subtree2/subtree')\n self.run_bzr_error(\n ('Cannot join .*subtree. Parent directory is not versioned',),\n 'join tree/subtree2/subtree')\n # disabled because this gives an ugly error at present -- mbp 20070306\n # self.run_bzr_error(\n ## ('Cannot join .*subtree. Parent directory is not versioned',),\n # 'join', '--reference', 'tree/subtree2/subtree')\n self.run_bzr_error(('Not a branch:.*subtree2',),\n 'join tree/subtree2')\n\n def test_join_reference(self):\n \"\"\"Join can add a reference if --reference is supplied.\"\"\"\n base_tree, sub_tree = self.make_trees()\n subtree_root_id = sub_tree.path2id('')\n self.run_bzr('join . --reference', working_dir='tree/subtree')\n sub_tree.lock_read()\n self.addCleanup(sub_tree.unlock)\n if sub_tree.supports_setting_file_ids():\n self.assertEqual(b'file1-id', sub_tree.path2id('file1'))\n self.assertEqual('file1', sub_tree.id2path(b'file1-id'))\n self.assertEqual(subtree_root_id, sub_tree.path2id(''))\n self.assertEqual('', sub_tree.id2path(subtree_root_id))\n self.assertEqual(\n sub_tree.path2id('file1'), base_tree.path2id('subtree/file1'))\n\n base_tree.lock_read()\n self.addCleanup(base_tree.unlock)\n self.assertEqual(['subtree'], list(base_tree.iter_references()))\n if base_tree.supports_setting_file_ids():\n self.assertEqual(b'file1-id', sub_tree.path2id('file1'))\n self.assertEqual('file1', sub_tree.id2path(b'file1-id'))\n self.assertEqual(subtree_root_id, base_tree.path2id('subtree'))\n self.assertEqual('subtree', base_tree.id2path(subtree_root_id))\n\n def test_references_check_repository_support(self):\n \"\"\"Users are stopped from adding a reference that can't be committed.\"\"\"\n # in 0.15 the default format has a dirstate workingtree, that can\n # support tree references, but the default repository format\n # cannot.\n self.make_branch_and_tree('tree', format='dirstate')\n self.make_branch_and_tree('tree/subtree')\n out, err = self.run_bzr('join --reference tree/subtree',\n retcode=3)\n self.assertContainsRe(err, r\"Can't join trees\")\n self.assertContainsRe(err, r\"use brz upgrade\")\n","repo_name":"breezy-team/breezy","sub_path":"breezy/tests/blackbox/test_join.py","file_name":"test_join.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"22"} +{"seq_id":"17049134324","text":"from django.shortcuts import render\nfrom . import functions\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponse\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\n\ndef home(request, template_name=\"chatApp/home.html\"):\n #respuesta=get_response(request)\n context = {'title': 'Chatbot Version 1.0'}\n return render_to_response(template_name, context)\n\n@csrf_exempt\ndef get_response(request):\n response = {'status': None}\n if request.method == 'POST':\n data = json.loads(request.body.decode('utf-8'))\n message = data['message']\n print(message)\n chat_response = functions.chat_bot(message)\n response['message'] = {'text': chat_response, 'user': False, 'chat_bot': True}\n response['status'] = 'ok'\n \n else:\n response['error'] = 'no post data found'\n print(\"no\")\n print(response)\n return HttpResponse(\n\t\tjson.dumps(response),\n\t\t\tcontent_type=\"application/json\"\n\t\t)\n\n","repo_name":"adriruizo/Tesis_grado","sub_path":"chatApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12203352186","text":"from django.db import models\nfrom django.db.models.signals import pre_save, post_save\nfrom addresses.models import Address\nfrom billing.models import BillingProfile\nfrom carts.models import Cart\nfrom ecommerce.utils import unique_order_code_generator\nfrom decimal import Decimal\n\n\nORDER_STATUS_CHOICES = (\n ('created', 'Created'),\n ('paid', 'Paid'),\n ('shipped', 'Shipped'),\n ('refunded', 'Refunded')\n)\n\n\nclass OrderManager(models.Manager):\n def new_or_get(self, billing_profile, cart_obj):\n qs = self.get_queryset()\\\n .filter(billing_profile=billing_profile, cart=cart_obj, active=True, status='created')\\\n .exclude(status='paid')\n\n if qs.exists():\n created = False\n obj = qs.first()\n else:\n obj = self.model.objects.create(billing_profile=billing_profile, cart=cart_obj)\n created = True\n\n return obj, created\n\n\nclass Order(models.Model):\n billing_profile = models.ForeignKey(BillingProfile, on_delete=models.SET_NULL, null=True, blank=True)\n order_code = models.CharField(max_length=120, blank=True)\n shipping_address = models.ForeignKey(Address, related_name='shipping_address', null=True, blank=True, on_delete=models.SET_NULL)\n billing_address = models.ForeignKey(Address, related_name='billing_address', null=True, blank=True, on_delete=models.SET_NULL)\n cart = models.ForeignKey(Cart, on_delete=models.SET_NULL, null=True, blank=False)\n status = models.CharField(max_length=120, default='created', choices=ORDER_STATUS_CHOICES)\n shipping_total = models.DecimalField(max_digits=30, decimal_places=4, default=10)\n order_total = models.DecimalField(max_digits=30, decimal_places=4, default=0)\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.order_code\n\n objects = OrderManager()\n\n def check_done(self):\n if self.billing_profile and self.shipping_address and self.billing_address and self.order_total > 0:\n return True\n return False\n\n def update_total(self):\n cart_total = self.cart.total\n shipping_total = self.shipping_total\n new_total = Decimal(cart_total) + Decimal(shipping_total)\n self.order_total = new_total\n self.save()\n return new_total\n\n def mark_paid(self):\n if self.check_done():\n self.status = 'paid'\n self.save()\n return self.status\n\n\ndef pre_save_create_order_code(sender, instance, *args, **kwargs):\n if not instance.order_code:\n instance.order_code = unique_order_code_generator(instance)\n qs = Order.objects.filter(cart=instance.cart).exclude(billing_profile=instance.billing_profile)\n if qs.exists():\n qs.update(active=False)\n\n\npre_save.connect(pre_save_create_order_code, sender=Order)\n\n\ndef post_save_cart_total(sender, instance, created, *args, **kwargs):\n if not created:\n cart_obj = instance\n cart_id = cart_obj.id\n qs = Order.objects.filter(cart__id=cart_id)\n if qs.count() == 1:\n order_obj = qs.first()\n order_obj.update_total()\n\n\npost_save.connect(post_save_cart_total, sender=Cart)\n\n\ndef post_save_order(sender, instance, created, *args, **kwargs):\n if created:\n instance.update_total()\n\n\npost_save.connect(post_save_order, sender=Order)","repo_name":"umutbektas/django-ecommerce","sub_path":"orders/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"22"} +{"seq_id":"23753362148","text":"from exceptions import FatalError\nfrom constants import QuestionType\n\nfrom hashlib import sha256\n\n\nclass Curriculum:\n def replicable_hash(self):\n rep_hash = sha256()\n for bytes_obj in self._properties_as_bytes_objects():\n rep_hash.update(bytes_obj)\n return rep_hash.hexdigest()\n\n @staticmethod\n def _as_bytes_obj(val):\n if isinstance(val, QuestionType):\n val = val.name\n if isinstance(val, list):\n val = \",\".join(val)\n return bytes(val, \"utf-8\")\n\n def _properties_as_bytes_objects(self):\n return [self._as_bytes_obj(self.__dict__[attr]) for attr in vars(self)]\n\n def __init__(\n self,\n data_filename,\n question_type,\n question_column_names,\n row_keys,\n key_column_name\n ):\n self.data_filename = data_filename\n self.question_type = question_type\n self.question_column_names = question_column_names\n self.row_keys = row_keys\n self.key_column_name = key_column_name\n\n @classmethod\n def _require_non_empty_list(cls, attributes, key):\n value = attributes[key]\n if not isinstance(value, list):\n raise FatalError(\n f\"List of {key} not found in curriculum.\")\n if len(value) == 0:\n raise FatalError(f\"There must be at least one element in the list of {key}\"\n \" found in curriculum.\")\n return value\n\n @classmethod\n def from_dict(cls, attributes):\n try:\n data_filename = (attributes[\"data\"])\n type_text = attributes[\"type\"]\n question_column_names = cls._require_non_empty_list(\n attributes,\n \"column names\"\n )\n row_keys = cls._require_non_empty_list(\n attributes,\n \"row keys\"\n )\n key_column_name = attributes[\"key column name\"]\n except KeyError as e:\n raise FatalError(\n f\"Curriculum is missing expected '{e.args[0]}' attribute.\")\n try:\n question_type = QuestionType[type_text]\n except KeyError as e:\n raise FatalError(\n f\"'{e.args[0]}' is not a recognised question type.\"\n )\n return cls(\n data_filename,\n question_type,\n question_column_names,\n row_keys,\n key_column_name)\n","repo_name":"KevinCHiggins/futur","sub_path":"curriculum.py","file_name":"curriculum.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30876108235","text":"from selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.support import expected_conditions as EC\nimport re\nfrom typing import Literal, List, Generator,Any\nfrom dotenv import load_dotenv\nimport os\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport pandas as pd\nfrom inserted import insert_dim_pedido\nfrom datetime import datetime\nfrom dateutil import parser\n\nload_dotenv()\n\n\noptions = webdriver.ChromeOptions() \noptions.add_experimental_option('useAutomationExtension', False)\ndriver = webdriver.Chrome(options=options,\n executable_path=r\"C:\\Users\\Mybox Marcenaria\\Documents\\ETL_rev3\\extracao_promob\\chromedriver\\chromedriver.exe\")\n\n\ndef scroll_page() -> None:\n lenOfPage = driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n match=False\n while(match==False):\n lastCount = lenOfPage\n time.sleep(1)\n lenOfPage = driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n if lastCount==lenOfPage:\n match=True\n\n\ndef user_login() -> None:\n driver.implicitly_wait(7)\n driver.get(\"https://consultasweb.promob.com/Authentication/Index?ReturnUrl\")\n \n time.sleep(4)\n\n empresa = driver.find_element(By.ID, \"company\")\n ActionChains(driver)\\\n .send_keys_to_element(empresa, \"HR\")\\\n .perform()\n\n usuario = driver.find_element(By.ID, \"username\")\n ActionChains(driver)\\\n .send_keys_to_element(usuario, \"MYBOXFRANQUIA\")\\\n .perform()\n\n password = driver.find_element(By.ID, \"password-clear\")\n ActionChains(driver)\\\n .send_keys_to_element(password, \"mybox\")\\\n .perform()\n \n time.sleep(7)\n try:\n element= driver.find_element(By.XPATH,'#div-login > div:nth-child(5) > input').click()\n except:\n print(\"error\")\n\n avancar = driver.find_element(By.CSS_SELECTOR, '#div-login > div:nth-child(5) > input')\n actions = ActionChains(driver)\n actions.click(avancar)\n actions.perform()\n\n time.sleep(20)\n \n \n\ndef get_urls(*args, **kwargs):\n lista_dicts = []\n driver.implicitly_wait(7)\n driver.get(\"https://consultasweb.promob.com/order\")\n\n try:\n data_de = driver.find_element(By.ID,'datepickerinit')\n data_de.clear()\n data_de.send_keys('01/01/2020')\n except:\n print(\"erro\")\n\n try:\n data_ate = driver.find_element(By.ID,'datepickerfin')\n data_ate.clear()\n data_ate.send_keys('12/03/2023')\n except:\n print(\"erro\")\n\n\n try:\n clicar = driver.find_element(\n By.CSS_SELECTOR,'#OrderGrid > div.k-header.k-grid-toolbar.k-grid-top > a.toolbar-refresh.k-icon.k-button.k-button-icontext')\n actions = ActionChains(driver)\n actions.click(clicar)\n actions.perform()\n except:\n pass\n\n\n try:\n liberado = driver.find_element(\n By.CSS_SELECTOR,'#status > div > label:nth-child(3) > input[type=checkbox]')\n actions = ActionChains(driver)\n actions.click(clicar)\n actions.perform()\n except:\n pass\n\n time.sleep(1)\n try:\n tliberado = driver.find_element(\n By.CSS_SELECTOR,'#status > div > label:nth-child(5) > input[type=checkbox]')\n actions = ActionChains(driver)\n actions.click(clicar)\n actions.perform()\n except:\n pass\n\n\n try:\n total = driver.find_element(\n By.CSS_SELECTOR,'#cbAll')\n actions = ActionChains(driver)\n actions.click(clicar)\n actions.perform()\n except:\n pass\n\n\n time.sleep(7)\n\n\n\ndef get_orders():\n lista_urls = []\n get_urls()\n driver.implicitly_wait(7)\n\n time.sleep(3)\n\n scroll_page()\n \n urls_pedidos = driver.find_elements(By.XPATH,'//*[@id=\"grid\"]/tbody/tr/td[1]/div/a')\n for urls in urls_pedidos:\n urls_pedidos = {}\n urls_pedidos['urls'] = urls.get_attribute(\"href\")\n print(urls_pedidos)\n lista_urls.append(urls_pedidos)\n data = pd.DataFrame(lista_urls)\n data.to_excel(\"urls_orders.xlsx\")\n\n\n#user_login()\n\n#get_orders()\n\ndef extract_item():\n driver.implicitly_wait(7)\n data = pd.read_excel(r\"C:\\Users\\Mybox Marcenaria\\Documents\\ETL_rev3\\extracao_promob\\urls_orders.xlsx\")\n new_dict = data.to_dict(\"records\")\n for item in new_dict:\n \n driver.get(item['urls'])\n \n try:\n informacoes = driver.find_elements(By.XPATH,'//*[@id=\"grid\"]/tbody/tr[1]/td/div')\n \n #for informacao in informacoes:\n # print(informacao.text)\n except:\n pass\n \n try:\n cliente_fantasia = driver.find_elements(By.XPATH,'//*[@id=\"grid\"]/tbody/tr[1]/td[10]')[0].text\n print(cliente_fantasia)\n except:\n print(\"error\")\n\n\n try:\n transp_fantasia = driver.find_elements(By.XPATH,'//*[@id=\"grid\"]/tbody/tr[1]/td[11]')[0].text\n print(transp_fantasia)\n except:\n print(\"error\")\n\n try:\n valor_total = driver.find_elements(By.XPATH,'//*[@id=\"grid\"]/tbody/tr[1]/td[12]/div')[0].text\n print(valor_total)\n except:\n print(\"error\")\n\n\nuser_login()\n#extract_item()\n \n","repo_name":"willvieirawill/extracao_promob","sub_path":"teste_pesquisa.py","file_name":"teste_pesquisa.py","file_ext":"py","file_size_in_byte":5642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"747005338","text":"\"\"\"\n-------------------------------------------------------------------------------- \n Author: Richard Terry.\n Date: February 12, 2008.\n Modified by: Mirko Palla\n Date: March 5, 2008.\n\n For: G.007 polony sequencer design [fluidics software] at the Church Lab - \n Genetics Department, Harvard Medical School.\n \n Purpose: This program contains the complete code for class Syringe_pump, \n containing Cavro XCalibur syringe pump communication subroutines in Python.\n\n This software may be used, modified, and distributed freely, but this\n header may not be modified and must appear at the top of this file. \n------------------------------------------------------------------------------- \n\"\"\"\n\nclass Syringe_pump:\n\n\tglobal serport\n\n\tdef __init__(self, config, serial_port, logger=None):\n\t\t\"Initialize Cavro XCalibur syringe pump object with default parameters.\"\n\n\t\t#--------------------------------- Serial configuration ---------------------------\n\n\t\tself._baud_rate = int(config.get(\"communication\",\"syringe_pump_baud\"))\n\t\tself._read_length = int(config.get(\"communication\",\"read_length\"))\n\t\tself._sleep_time = float(config.get(\"communication\",\"sleep_time\"))\n\n\t\tif logger is not None:\n\t\t\tself.logging = logger\n\n\t\tself.serport = serial_port\t\t\t\t\n\t\tself.state = 'syringe pump initialized'\n\n\t\tself.logging.info(\"---\\t-\\t--> Syringe pump object constructed\")\n\n#--------------------------------------------------------------------------------------#\n#\t\t\t\t\t\t\t\t\t\t\t\tCavro XCalibur syringe pump FUNCTIONS\t\t\t\t\t\t\t\t\t\t\t\t\t #\n#--------------------------------------------------------------------------------------#\n#\n# Performs low-level functional commands (e.g. set pump flow rate, draw volume, etc). \n# Each command implemented here must know the command set of the hardware being \n# controlled, but does not need to know how to communicate with the device (how to poll \n# it, etc). Each functional command will block until execution is complete.\n#\n\n#--------------------------------------------------------------------------------------#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tBASIC SETTINGS\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n#--------------------------------------------------------------------------------------#\n\n\tdef initialize_syringe(self):\t\n\t\t\"Initializes syringe pump with default operation settings.\"\n\t\t\t\t\t \n\t\tself.serport.set_baud(self._baud_rate)\n\n\t\t# Initialize syringe dead volume\n\t\tself.serport.write_serial('/1k5R\\r')\n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\t# Initialize move to zero position, full dispense, full force\n\t\tself.serport.write_serial('/1Z0R\\r')\n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\t# Initialize speed, range is 0-40, the maximum speed is 0 (1.25 strokes/second)\n\t\tself.serport.write_serial('/1S20R\\r')\n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\tself.logging.info(\"---\\t-\\t--> Initialized syringe pump object\")\n\n\tdef set_valve_position(self, valve_position):\n\t\t\"Sets to given syringe pump valve position, an integer\"\n\t\t\t\t\t \n\t\tself.serport.set_baud(self._baud_rate)\n\n\t\tself.serport.write_serial('/1I' + str(valve_position) + 'R\\r')\n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\tself.logging.info(\"---\\t-\\t--> Set syringe pump valve position to %i\" % valve_position)\n\n\tdef set_speed(self, speed):\n\t\t\"\"\"Sets syringe pump move speed (an integer) in range of 0-40, where the \n\t\tmaximum speed is 0 equivalent to 1.25 strokes/second = 1250 ul/s.\"\"\"\n\n\t\tself.serport.set_baud(self._baud_rate)\n\n\t\tself.serport.write_serial('/1S' + str(speed) + 'R\\r')\n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\tself.logging.info(\"---\\t-\\t--> Set syringe pump speed to %i\" % speed)\n\n\tdef set_absolute_volume(self, absolute_volume):\n\t\t\"\"\"Sets syringe pump absolute volume (an integer) in ragne of 0-1000, where 0 is\n\t\tthe syringe initial position and the maximum filling volume is the stroke of \n\t\tthe syringe (1000 ul).\"\"\"\n\n\t\tself.serport.set_baud(self._baud_rate)\n\n\t\t# Increments = (pump resolution * volume ul) / (syringe size ml * ul/ml)\n\t\tabsolute_steps = (3000 * absolute_volume) / (1 * 1000)\n\n\t\tself.serport.write_serial('/1A' + str(absolute_steps) + 'R\\r')\t# 'P' command for relative pick-up, 'A' for absolute position \n\t\tself.serport.read_serial(3)\n\n\t\tfind_string = chr(96)\n\t\tresponse_string_size = 4\n\t\tself.serport.parse_read_string('/1QR\\r', find_string, response_string_size)\n\n\t\tself.logging.info(\"---\\t-\\t--> Set syringe pump absolute volume to %i\" % absolute_volume)\n\n","repo_name":"pirimidi/Polonator","sub_path":"syringe_pump.py","file_name":"syringe_pump.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32016070966","text":"\"\"\"empty message\n\nRevision ID: e906601262af\nRevises: \nCreate Date: 2019-03-02 18:11:31.697480\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_jsonfield\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e906601262af'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('competition',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('code', sa.String(length=20), nullable=False),\n sa.Column('image_url', sa.String(), nullable=True),\n sa.Column('description', sa.String(), nullable=True),\n sa.Column('training_data_url', sa.String(), nullable=False),\n sa.Column('validation_data_url', sa.String(), nullable=True),\n sa.Column('validation_script_url', sa.String(), nullable=True),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('evaluation',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('competition_id', sa.Integer(), nullable=False),\n sa.Column('team_name', sa.String(length=128), nullable=False),\n sa.Column('task_id', sa.String(length=128), nullable=False),\n sa.Column('docker_image_name', sa.String(), nullable=True),\n sa.Column('docker_image_tag', sa.String(), nullable=True),\n sa.Column('docker_image_hash', sa.String(), nullable=True),\n sa.Column('docker_image_size', sa.Integer(), nullable=True),\n sa.Column('test_scores', sqlalchemy_jsonfield.jsonfield.JSONField(), nullable=True),\n sa.Column('final_score', sa.DECIMAL(), nullable=True),\n sa.Column('duration', sa.DECIMAL(), nullable=True),\n sa.ForeignKeyConstraint(['competition_id'], ['competition.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('evaluation')\n op.drop_table('competition')\n # ### end Alembic commands ###\n","repo_name":"MingStar/internal-kaggle","sub_path":"migrations/versions/2019-03-02_18:11:31_e906601262af_.py","file_name":"2019-03-02_18:11:31_e906601262af_.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"32448114647","text":"# -- coding: utf-8 --\n'''\nCreated on 2019年7月23日\n\n@author: lyc\n'''\nfrom Common.ChexingPage import Chengxing \nimport allure \n@allure.feature('系统设置 -角色管理')\nclass Sys_role_manage(Chengxing): \n def __init__(self,Browser_type): \n super(Sys_role_manage, self).__init__(Browser_type)\n \n #角色选择框\n self.role_select = ('id','role') \n #状态选择框\n self.status_select = ('id','status') \n #分页选择框\n self.page_split = ('css selector',\"div[title = '5 条/页']\")\n \n #新增角色名称输入框\n self.role_name = ('css selector',\"input[id ^='roleName']\") \n \n #新增角色说明输入框\n self.role_description = ('css selector',\"textarea[id ^='description']\")\n \n #状态选择框\n self.status_chebox = ('tag name','label') \n \n #权限列表 \n self.authority_list_checkbox = ('css selector',\"span[class = 'ant-tree-checkbox']\") \n self.authority_list_span = ('css selector',\"span[class = 'ant-tree-title']\")\n #异常提示\n self.form_explain = ('class name','ant-form-explain') \n \n \n \n def input_role_name(self,text): \n '''输入新增角色名称'''\n self.send_keys(locator = self.role_name, text = text) \n \n def select_role_status(self,status): \n '''选择新增角色状态''' \n elements = self.find_elements(locator = self.status_chebox, timeout = 10) \n for element in elements: \n if status == element.text:\n self.click_By_element(element) \n break\n \n \n def select_role_authority(self,name): \n '''选择角色权限''' \n _ = {}\n _list_checkbox = self.find_elements(locator = self.authority_list_checkbox, timeout = 10) \n _list_span = self.find_elements(locator = self.authority_list_span, timeout = 10) \n if len(_list_checkbox) == len(_list_span): \n for i in range(len(_list_checkbox)): \n _[_list_span[i].text] = _list_checkbox[i]\n for _k,_v in _.items(): \n if _k == name: \n self.click_By_element(_v)\n \n \n def input_role_description(self,text): \n '''输入新增角色描述'''\n self.send_keys(locator = self.role_description, text = text) \n \n \n def select_page_split(self,index): \n '''调整分页''' \n self.select_by_index(locator = self.page_split,index = index) \n \n def add_click(self): \n '''点击新增按钮''' \n self.click_btn(btn_name = '新增')\n def determin_click(self): \n '''点击确定按钮''' \n self.click_btn(btn_name = '确定')\n \n def get_Role_table_value(self): \n '''返回所有用户列表值'''\n return self.get_table_value()\n \n\n \n \n\nif __name__ == '__main__':\n obj = Sys_role_manage('Chrome') \n obj.login('superadmin', '123456')\n obj.click_menu_li(name = '角色管理') \n obj.click_btn(btn_name = '新增')\n obj.is_text_in_element(locator = ('css selector',\"input[id ^='rcDialogTitle']\"), text = '增加角色') \n \n# obj.select_page_split(index = 1) \n# print(obj.check_table_by_value(expect_value = 'hahaha', col_name = '角色'))\n \n\n \n\n","repo_name":"lmx0621/Web_UI_Auto_Test","sub_path":"Page/sys_role_manege.py","file_name":"sys_role_manege.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"13579706909","text":"from bs4 import BeautifulSoup\nfrom datetime import date\nfrom lxml import html\n\nimport requests\nimport re\nimport json\n\nclass CovidScraper:\n def __init__(self):\n self.api_url = 'http://127.0.0.1:5000/covidgr'\n self.api_sum_url = 'http://127.0.0.1:5000/summary/covidgr'\n self.api_test_url = 'http://127.0.0.1:5000/covidgr/tests'\n self.scrape_url = 'https://www.worldometers.info/coronavirus/country/greece/'\n self.scrape_tests_url = 'https://github.com/owid/covid-19-data/blob/master/public/data/testing/covid-testing-latest-data-source-details.csv'\n self.today = ''\n self.covid_data = []\n self.summary_data= []\n\n def scrape_data(self):\n data = []\n self.today = str(date.today())\n\n soup = self.scrape_page_content()\n soup_test_page = self.scrape_page_content_contains_tests()\n\n if soup:\n self.get_daily_data(soup)\n self.get_summary_data(soup)\n\n if self.summary_data and self.covid_data:\n post_daily_and_sum_covid_data = self.call_api_put_data(\n self.today, self.covid_data, self.summary_data)\n data.append(post_daily_and_sum_covid_data)\n \n if soup_test_page:\n tests_data = self.get_tests_per_day(soup_test_page)\n\n if tests_data[0]:\n post_daily_tests_covid_data = self.call_api_post_tested_covid_data(\n tests_data[0], tests_data[1])\n data.append(post_daily_tests_covid_data)\n\n return data\n\n def scrape_page_content(self):\n page = requests.get(self.scrape_url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n return soup\n \n def scrape_page_content_contains_tests(self):\n page = requests.get(self.scrape_tests_url)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n return soup\n \n def get_daily_data(self, soup):\n covid_data = []\n\n daily_covidgr_html_content = soup.find('li', class_='news_li')\n get_daily_covidgr_text = daily_covidgr_html_content.text\n\n for elem in get_daily_covidgr_text.split():\n regex = '\\d*(.|)\\d+'\n match = re.findall(regex, elem)\n if match:\n covid_data.append(elem)\n \n self.covid_data = covid_data\n \n def get_summary_data(self, soup):\n summary_data = []\n\n all_cases_covidgr_html_content = soup.find_all(\n 'div', class_='maincounter-number')\n \n for item in range(len(all_cases_covidgr_html_content)):\n regex = r'(\\n)|\\s'\n all_cases_data = re.sub(\n regex, '', all_cases_covidgr_html_content[item].text)\n summary_data.append(all_cases_data)\n \n self.summary_data = summary_data\n \n def get_tests_per_day(self, tree):\n\n html_content = tree.find('tr', id='LC34').find_all('td')\n country_code = html_content[1]\n date_test = html_content[3].text\n\n if country_code.text == 'GRC':\n today_tests = html_content[10].text\n total_tests = html_content[8].text\n \n return [date_test, today_tests]\n \n def call_api_post_tested_covid_data(self, today, tests):\n headers = {\n 'Content-type': 'application/json',\n }\n\n data = json.dumps({\"date\": today, \"daily_test\": tests})\n\n response_tests = requests.post(\n self.api_test_url, headers=headers, data=data)\n\n return response_tests.json()\n\n def call_api_put_data(self, today, covid_data, summary_data):\n headers = {\n 'Content-type': 'application/json',\n }\n\n data = json.dumps(\n {\"date\": today, \"cases\": covid_data[0], \"deaths\": covid_data[1]})\n\n sum_data = json.dumps(\n {\"sum_cases\": summary_data[0], \"sum_deaths\": summary_data[1], \"sum_recovered\": summary_data[2]})\n\n response = requests.post(self.api_url, headers=headers, data=data)\n\n response_sum = requests.put(\n self.api_sum_url, headers=headers, data=sum_data)\n\n return [response.json(), response_sum.json()]\n\nif __name__ == '__main__':\n cs = CovidScraper()\n results = cs.scrape_data()\n print(results)\n","repo_name":"ZachGeo/covidGR_API","sub_path":"scrapers/covid_scraper.py","file_name":"covid_scraper.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33708764402","text":"#!/usr/bin/env python3\n#executes a number guess with 2 attempts\n\nhigh=50\nlow=5\n\nprint (\"guess a number from 5 to 50 \")\nguess=int(input())\n\nif guess < 50:\n\tprint (\"pick a higher number \")\n\tguess=int(input())\n\tif guess == 50:\n\t\tprint (\"Correct!!!\")\n\telse:\n\t\tprint (\"Better luck next time!\")\nelif guess > 50:\n\tprint (\"guess a lower number!\")\n\tguess=int(input())\n\n\tif guess == 50:\n\t\tprint (\"Amazing! You're correct.\")\n\telse:\n\t\tprint (\"Not correct.\")\nelse:\n\t\tprint (\"Congrats! you got that right!\")\n\n","repo_name":"StellaGift/script-bashing","sub_path":"python/ifthen2.py","file_name":"ifthen2.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11037745597","text":"def letToNum(sack):\n this_sack = []\n for letter in sack:\n if ord(letter) > 90:\n letter = ord(letter)-96\n this_sack.append(letter)\n else:\n letter = ord(letter)-38\n this_sack.append(letter)\n return(set(this_sack))\n this_sack = []\n\nwith open(\"day3.txt\", \"r\") as f:\n sacks = f.readlines() \n group = []\n total = 0\n while len(sacks)>0:\n while len(group) < 3:\n group.append(letToNum(sacks[0].strip()))\n del sacks[0]\n a = (group[0] & group[1] & group[2])\n for i in a:\n total += i\n group = []\nprint(total)","repo_name":"petejbell/AdventOfCode","sub_path":"Day3/Day3_2.py","file_name":"Day3_2.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"11536681411","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0003_article_author'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='article',\n name='frontpage',\n field=models.BooleanField(default=False),\n preserve_default=True,\n ),\n ]\n","repo_name":"evedal/Studenten-Django-alpha","sub_path":"blog/migrations/0004_article_frontpage.py","file_name":"0004_article_frontpage.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38357321359","text":"from docx import Document\nimport gui\nimport os\n\n\n# Function for replacing the words\ndef find_replace(paragraph_keyword, draft_keyword, paragraph):\n if paragraph_keyword in paragraph.text:\n paragraph.text = paragraph.text.replace(paragraph_keyword, draft_keyword)\n\n\n# Function which invokes replacing the words. Gets executed when the button 'Apstiprinat' is pressed\ndef replace_words():\n # Get string entered from the user\n def get_entry_from():\n global entry_from\n entry_from = gui.e1.get()\n\n # Get string enered from the user\n def get_entry_to():\n global entry_to\n entry_to = gui.e2.get()\n\n get_entry_from()\n get_entry_to()\n\n # Get all .docx files in directory\n for file in os.listdir():\n if file.endswith(\".docx\"):\n document = Document(file)\n for paragraph in document.paragraphs:\n find_replace(entry_from, entry_to, paragraph)\n document.save(file)\n\n\n","repo_name":"meldzhaLV/simple_search_v2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"46435323948","text":"from uuid import uuid4\nimport pygame\nimport pygame_gui\n\nfrom pygame_gui.core.ui_element import ObjectID\nfrom client.ihm.common.component import Component\nfrom pygame_gui.elements.ui_window import UIWindow\nfrom config import config\nfrom common.data_structures.profiles import Player, Profile\n\n\nclass WinnerPopupComponent(Component):\n def __init__(self, pygame_manager: pygame_gui.UIManager) -> None:\n super().__init__(pygame_manager)\n self.pygame_manager = pygame_manager\n\n # Monitor Size\n\n window_width = config.get(\"monitor\")[\"width\"]\n window_height = config.get(\"monitor\")[\"height\"]\n\n self.set_width(500)\n self.set_height(320)\n\n self.set_pos_x((window_width * 0.25 + 50))\n self.set_pos_y((window_height * 0.25))\n\n self.title = \"Game Finished\"\n\n self.hide_button = None\n\n self.winner = Player(\"\", uuid4())\n self.class_id = \"\"\n\n def render(self) -> None:\n\n self.gui_element = UIWindow(\n pygame.Rect(\n (self.get_pos_x(), self.get_pos_y()),\n (self.get_width(), self.get_height()),\n ),\n manager=self.manager,\n window_display_title=self.title,\n object_id=ObjectID(\"@move_not_possible_pop_up_window\"),\n )\n pygame_gui.elements.UILabel(\n relative_rect=pygame.Rect((25, 10), (420, 70)),\n manager=self.pygame_manager,\n container=self.gui_element,\n text=\"Le joueur\",\n object_id=ObjectID(\"@message_popup_game_finished\"),\n )\n pygame_gui.elements.UILabel(\n relative_rect=pygame.Rect((25, 75), (420, 70)),\n manager=self.pygame_manager,\n container=self.gui_element,\n text=self.winner.nickname,\n object_id=ObjectID(self.class_id),\n )\n pygame_gui.elements.UILabel(\n relative_rect=pygame.Rect((9, 140), (450, 70)),\n manager=self.pygame_manager,\n container=self.gui_element,\n text=\"a gagné la partie!\",\n object_id=ObjectID(\"@message_popup_game_finished\"),\n )\n self.hide_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((135, 215), (200, 60)),\n text=\"Quitter\",\n manager=self.pygame_manager,\n starting_height=1,\n container=self.gui_element,\n object_id=ObjectID(class_id=\"@ihm_main_pop_up_button\"),\n )\n\n def set_winner(self, winner: Player) -> None:\n self.winner = winner\n if winner.nickname == self.controller.local_game.red_player.nickname:\n self.class_id = \"@red_player_won_the_game\"\n else:\n self.class_id = \"@white_player_won_the_game\"\n\n def handle_event(self, event: pygame.event.Event) -> None:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == self.hide_button:\n self.controller.hide_winner_window()\n self.controller.get_my_interface_to_ihm_main().ihm_game_stoped()\n","repo_name":"maylisdet/nomad-game","sub_path":"src/client/ihm/game/components/winner_popup_container.py","file_name":"winner_popup_container.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70063317201","text":"from itertools import combinations\n\n\ndef _count_split_inv(left, right):\n \"\"\"Helper function to count_inv. Based on merge subroutine of mergesort and counts all instances\n of the right array having elements larger than left array\"\"\"\n\n # i is a counter for the left list, j is a counter for the right list, and\n # split_count keeps track of how many split pairs there are between the left\n # and right lists\n i, j, split_count = 0, 0, 0\n output = []\n # iterate through all elements of both lists\n for k in range(len(left) + len(right)):\n\n # if we've passed the end of the left list add all the remaining elements\n # of the right list to the output and increment j accordingly\n if i >= len(left):\n output.append(right[j])\n j += 1\n continue\n # if we've passed the end of the right list add all the remaining elements\n # of the left list to the output and increment i accordingly\n if j >= len(right):\n output.append(left[i])\n i += 1\n continue\n\n # append smaller element to output list\n if left[i] <= right[j]:\n output.append(left[i])\n i += 1\n else:\n output.append(right[j])\n j += 1\n # if smaller element is in the right list, we have as many inversions as there are\n # elements remaining in the left list\n split_count += len(left) - i\n return output, split_count\n\n\ndef _count_inv(array):\n \"\"\"Helper function for count_inv. Based on mergesort: recursively counts inversions in\n left half of array, right half of array, and inversions between the two\"\"\"\n n = len(array)\n if n <= 1:\n return array, 0\n else:\n mid = n // 2\n left_sorted, left_inv = _count_inv(array[:mid])\n right_sorted, right_inv = _count_inv(array[mid:])\n merge_sorted, split_inv = _count_split_inv(left_sorted, right_sorted)\n return merge_sorted, left_inv + right_inv + split_inv\n\n\ndef count_inv(array):\n return _count_inv(array)[1]\n\n\ndef _distance(point_pair):\n if point_pair is None:\n return float('inf')\n p1 = point_pair[0]\n p2 = point_pair[1]\n return (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2\n\n\ndef dist(point1, point2):\n return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2\n\n\ndef _closest_split_pair(x_sorted, y_sorted, delta):\n mid = len(x_sorted) // 2\n x_bar = x_sorted[mid][0] # largest x coord in left half\n # we only look at a subset of the points, those within delta of the midline, where delta\n # is the closest distance that has already been found in the left or right halves\n y_restricted = [point for point in y_sorted if x_bar - delta < point[0] < x_bar + delta]\n\n # determine the closest pair within this subset, or return None if there is no pair\n # that has a distance less than delta\n closest = None\n closest_distance = delta\n for i in range(len(y_restricted) - 1):\n for j in range(i + 1, min(i + 7, len(y_restricted))):\n if dist(y_restricted[i], y_restricted[j]) < closest_distance:\n closest_distance = dist(y_restricted[i], y_restricted[j])\n closest = (y_restricted[i], y_restricted[j])\n return closest\n\n\ndef _execute_closest_pair(x_sorted, y_sorted):\n \"\"\"Recursive helper function for closest pair\"\"\"\n n = len(x_sorted)\n\n # base case of 3: find the closest pair by exhaustive search\n if n <= 3:\n return min(combinations(x_sorted, 2), key=_distance)\n\n mid = n // 2\n lx = x_sorted[:mid]\n ly = y_sorted[:mid]\n rx = x_sorted[mid:]\n ry = y_sorted[mid:]\n\n # find the closest pair in the left half of the points, and the closest pair in the right half\n # each of these is a list of two points (tuples), ex. [(1,2),(3,4)]\n best_left = _execute_closest_pair(lx, ly)\n best_right = _execute_closest_pair(rx, ry)\n\n # record the distance between the closest pair found within the left half or the right half\n smallest_so_far = _distance(min(best_left, best_right, key=_distance))\n best_split = _closest_split_pair(x_sorted, y_sorted, smallest_so_far)\n\n return min(best_left, best_right, best_split, key=_distance)\n\n\ndef closest_pair(points):\n \"\"\"Computes the closest pair of points in an array of points (2D tuples)- runs in O(n log n)\"\"\"\n\n x_sorted = sorted(points, key=lambda x: x[0])\n y_sorted = sorted(points, key=lambda y: y[1])\n return _execute_closest_pair(x_sorted, y_sorted)\n\n\ndef unimodal_max(array):\n \"\"\"Problem 3.3. Returns max element of uni-modal array. Takes O(log n) runtime\"\"\"\n mid = len(array) // 2\n check1 = mid - 1\n check2 = mid\n check3 = mid + 1\n if len(array) <= 2:\n return max(array)\n\n if array[check1] < array[check2] > array[check3]:\n return array[check2]\n elif array[check1] < array[check2] < array[check3]:\n return unimodal_max(array[check2:])\n elif array[check1] > array[check2] > array[check3]:\n return unimodal_max(array[:check2])\n\n\ndef equal_index(array, offset=0):\n \"\"\"Algorithms Illuminated problem 3.4. Returns True/ False if sorted integer array has an element\n that is equal to its search_index. O(log n) runtime\"\"\"\n if len(array) == 0:\n return False\n\n index_to_check = len(array) // 2\n\n if array[index_to_check] == index_to_check + offset:\n return True\n elif array[index_to_check] < index_to_check + offset:\n offset += index_to_check + 1\n return equal_index(array[index_to_check + 1:], offset)\n else:\n return equal_index(array[:index_to_check], offset)\n\n\ndef inefficient_closest(points):\n \"\"\"Compute closest pair via exhaustive search- O(n^2) runtime. Purpose of this function is\n a comparison to check correctness of the efficient implementation of closest pair\"\"\"\n closest = None\n closest_distance = float('inf')\n for i in range(0, len(points) - 1):\n for j in range(i + 1, len(points)):\n if dist(points[i], points[j]) < closest_distance:\n closest_distance = dist(points[i], points[j])\n closest = (points[i], points[j])\n return closest\n","repo_name":"Anton-Tarazi/Algorithms-Illuminated","sub_path":"part1/chapter3.py","file_name":"chapter3.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10053585050","text":"# 3 Kiritilgan raqamni kvadratini ekranga chiqarish dasturini tuzing. Dastur to'xtovsiz ishlashi kerak.\n# Har safar yangi raqam kiritilganda uni kvadranini ekranga chiqarsin\n\n\nn = int(input('Raqamni kvadratini xisoblash uchun istalgan raqam kiriting: '))\n# print(pow(n,2))\nprint(n**2)\nwhile n != 0:\n x = int(input('Navbatdagi raqamni kiriting: '))\n print(x**2)\n # print(pow(x,2))\n # if n == 0 or x == 0:\n # print(\"Siz nol kiritdingiz!\")\n # break","repo_name":"rustamovilyos/python_lessons","sub_path":"py_lessons_for_github/dars_7/Vazifa-3.py","file_name":"Vazifa-3.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"uz","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13780311053","text":"from tests.libs.util_cinq import aws_get_client, collect_resources, setup_test_aws\n\n\ndef test_collect(cinq_test_service):\n \"\"\"\n\n :return:\n \"\"\"\n\n # Prep\n setup_info = setup_test_aws(cinq_test_service)\n account = setup_info['account']\n\n cinq_test_service.start_mocking_services('ec2')\n\n # Add resources\n client = aws_get_client('ec2')\n resource = client.run_instances(ImageId='i-10000', MinCount=1, MaxCount=1)\n\n # Start collector\n collect_resources(account=account, resource_types=['ec2'])\n\n # verify\n assert cinq_test_service.has_resource('non-exist-id') is False\n assert cinq_test_service.has_resource(resource['Instances'][0]['InstanceId']) is True\n\n cinq_test_service.stop_mocking_services('ec2')\n","repo_name":"RiotGames/cloud-inquisitor","sub_path":"backend/tests/test_cinq_collector_aws.py","file_name":"test_cinq_collector_aws.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"3"} +{"seq_id":"72080918163","text":"from vk_api import VkApi\nfrom vk.Chat_bot_VK.src.my_methods import process_message, process_button\nfrom vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll\nimport json\n\n\nif __name__ == '__main__':\n try:\n with open('../../config.json', 'r') as file:\n settings = json.load(file)['api']\n except Exception:\n print('НЕ УДАЛОСЬ ПОЛУЧИТЬ НАСТРОЙКИ ДЛЯ ПОДКЛЮЧЕНИЯ')\n exit(1)\n\n print(settings)\n # Подключаем токен и long_poll\n try:\n session = VkApi(token=settings['token'], api_version=settings['version'])\n print(\"Старт сессии\")\n\n api = session.get_api()\n long_poll = VkBotLongPoll(session, group_id=settings['group_id'])\n print(\"Старт long poll подключения\")\n\n # Слушаем long poll(Сообщения)\n for event in long_poll.listen():\n\n if event.type == VkBotEventType.MESSAGE_NEW:\n\n if event.message['text'] != '':\n process_message(api, event.message)\n\n elif event.type == VkBotEventType.MESSAGE_EVENT:\n process_button(api, event.object)\n except Exception:\n print(f'Возникла ошибка при работе бота. Исключение: {Exception}')\n\n exit(0)\n","repo_name":"AnastasiaMuzichek/Chat_bot_VK","sub_path":"src/vk_chat_bot.py","file_name":"vk_chat_bot.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13575066812","text":"from django.urls import path\nfrom . import views\nfrom .forms import ContactForm\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nurlpatterns = [\n path('about/',views.about,name='ESL-about'),\n path('',views.home,name='ESL-home'),\n path('home2/',views.home2,name='ESL-home2'),\n path('Contact_Us/',views.Contact_Us,name='ESL-Contact_Us'),\n path('practice/',views.prac,name='ESL-practice'),\n path('learning/',views.learning,name='ESL-learning'),\n path('form/',views.contact,name='ESL-form'),\n path('snippet/',views.snippet_detail,name='ESL-form'),\n path('login/',views.login,name='ESL-login'),\n path('signin/', views.loginpage, name='loginpage'),\n path('profile/', views.profile, name='profile'),\n path('logout/', views.logot, name='logot'),\n path('signup/', views.signup, name='signup'),\n path('exam/', views.exam, name='ESL-exam'),\n path('Vgenre/', views.exam_genre, name='ESL-exam-genre'),\n path('Vexam/', views.Ques, name='ESL-exam-genre-exam'),\n path('audio/', views.audio, name='ESL-audio'),\n path('Agenre/', views.audio_genre, name='ESL-audio-genre'),\n path('Aexam/', views.QuesA, name='ESL-audio-genre-exam'),\n path('Editprof/', views.editprofile, name='ESL-editprofile'),\n]\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"sherlockholmes211/English-learning-website","sub_path":"django_project/ESL/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74674633362","text":"class Solution:\n def add(self, root):\n if not root:\n return []\n\n return [ root.val ] + self.add(root.child) + self.add(root.next)\n\n\n def flatten(self, head: 'Optional[Node]') -> 'Optional[Node]':\n if not head:\n return None\n\n all = self.add(head)\n size = len(all)\n\n result = [ Node(all[i], None, None, None) for i in range(size) ]\n\n for i in range(size):\n if i > 0:\n result[i].prev = result[i-1]\n if i < size - 1:\n result[i].next = result[i+1]\n\n return result[0]\n","repo_name":"stbrumme/leetcode","sub_path":"0430.py","file_name":"0430.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8198983542","text":"from dis import disco\nimport discord\nfrom discord.ext import commands\nfrom bot_commands.reddit.redditfetcher import RedditClient\nfrom config import get_reddit_user,reddit\nimport json\nreddit_fetcher = RedditClient()\n\n\nclass RedditUser(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t@commands.command(help='link reddit account')\n\tasync def user(self, ctx, reddit_username :str = None, user :discord.Member = None):\n\t\tif not reddit_username and not user:\n\t\t\tuser = ctx.author\n\t\tif not reddit_username:\n\t\t\ttry:\n\t\t\t\tjson_data = json.loads(get_reddit_user(f'id={user.id}').text)\n\t\t\t\tif not len(json_data):\n\t\t\t\t\treturn await ctx.send('reddit is not linked')\n\t\t\t\tuser_data = json_data[0]\n\t\t\texcept:\n\t\t\t\treturn await ctx.send('failed to get user data')\n\t\telse:\n\t\t\treddit_name = reddit_fetcher.fetch_redditor(reddit_username)\n\t\t\tif not reddit_name:\n\t\t\t\treturn await ctx.send(f'failed to find user {reddit_username}')\n\t\t\twith open(reddit) as json_file:\n\t\t\t\tuser_details = json.load(json_file)\n\t\t\t\tjson_file.close()\n\n\t\t\tdatas = {\n\t\t\t\t'id' : str(ctx.author.id),\n\t\t\t\t'data': reddit_name.url,\n\t\t\t\t'name': reddit_name.name,\n\t\t\t\t'icon': reddit_name.icon_img\n\t\t\t}\n\t\t\tuser_details['user-data'].append(datas)\n\n\t\t\twith open(reddit, 'w') as json_file:\n\t\t\t\tjson.dump(user_details, json_file)\n\t\t\t\tjson_file.close()\n\n\t\t\t\n\t\t\ttry:\n\t\t\t\tjson_data = json.loads(get_reddit_user(f'id={user.id}').text)\n\t\t\t\tif not len(json_data):\n\t\t\t\t\treturn await ctx.send('reddit is not linked')\n\t\t\t\tuser_data = json_data[0]\n\t\t\texcept:\n\t\t\t\treturn await ctx.send('failed to get user data')\n\n\t\t\tawait ctx.send(str(user_data['data']), str(user_data['name']))\n\n\t\t\n","repo_name":"mohamedarish/unKnown-bot","sub_path":"bot_commands/reddit/reddit_user.py","file_name":"reddit_user.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70575927442","text":"from pwn import *\r\n\r\n\r\ndef guestbook(file_name='/mnt/hgfs/CyberSecurity/PWN/jarvisoj/guestbook'):\r\n print('guestbook start')\r\n target = remote('pwn.jarvisoj.com', 9876)\r\n target_elf = ELF(file_name)\r\n\r\n good_game_ptr = p64(target_elf.symbols['good_game'])\r\n\r\n payload = b'A' * 136\r\n # good_game()\r\n payload += good_game_ptr\r\n\r\n target.sendline(payload)\r\n\r\n print(target.recvuntil('I have received your message, Thank you!\\n').decode('utf-8'))\r\n print(target.recvline().decode('utf-8'))\r\n\r\n print('guestbook end')\r\n\r\n\r\nif __name__ == '__main__':\r\n guestbook()","repo_name":"fangzhixi/binary-pwn","sub_path":"jarvisoj/guestbook.py","file_name":"guestbook.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14231846459","text":"\"\"\"\nProvides the base view for other Generic views. It is unlikely that you will use this. Refer to this class, however, on overriding functionality of\ngeneric views for your application.\n\"\"\"\n\nimport time\n\nimport flask\nfrom flask.views import MethodView\n\nfrom . import _from_camel\n\n\nclass GenericBase(MethodView):\n model = None\n template = None\n base_template = \"base.html\"\n\n variable_current_object = 'current_object'\n variable_rows = 'rows'\n variable_next_cursor = 'next_cursor'\n variable_last_cursor = 'last_cursor'\n\n variable_form = 'form'\n\n name_singular = None\n name_plural = None\n\n retrieve_view = None\n edit_view = None\n new_view = None\n list_view = None\n delete_view = None\n\n form_exclude = ['class'] # Exclude these when editing/viewing fields.\n form_include = None # IF specified, only show these fields\n\n list_fields = None # Include these when listing entities.\n wtforms_field_args = None # Field args to pass to wtform_appengine model_form\n\n page_size = 25\n render_as = 'table'\n\n not_found_template = '404.html'\n permission_denied_template = '403.html'\n sleep_on_not_found = .25 # To slow down brute-force URL guessing schemes, sleep this many seconds each time a 404 is generated.\n\n extra_context = {}\n\n def __init__(self):\n super(GenericBase, self).__init__()\n\n #\n # Coerce list_Fields if necessary\n if self.list_fields and isinstance(self.list_fields[0], basestring):\n new_list_fields = []\n for v in self.list_fields:\n new_list_fields.append((v, v.replace('_', ' ').title()))\n self.list_fields = new_list_fields\n\n if not self.name_singular:\n self.name_singular = _from_camel(self.model._class_name()).replace('_', ' ')\n\n if not self.name_plural:\n if self.name_singular.endswith('s'):\n self.name_plural = '%ses' % self.name_singular\n else:\n self.name_plural = '%ss' % self.name_singular\n\n def get_retrieve_url(self, object):\n if self.retrieve_view:\n return flask.url_for(self.retrieve_view, urlsafe=object.key.urlsafe())\n else:\n return None\n\n def get_edit_url(self, object):\n if self.edit_view:\n return flask.url_for(self.edit_view, urlsafe=object.key.urlsafe())\n else:\n return None\n\n def add_extra_fields(self, obj):\n obj._retrieve_url = self.get_retrieve_url(obj)\n obj._edit_url = self.get_edit_url(obj)\n\n for field, prop in obj._properties.items():\n if getattr(prop, '_auto_now_add', False):\n obj._created = getattr(obj, field)\n if getattr(prop, '_auto_now', False):\n obj._modified = getattr(obj, field)\n return obj\n\n def user_has_access(self, object):\n \"\"\"\n Override to determine whether user has access to a particular object.\n \"\"\"\n return True\n\n def show_403(self):\n return flask.render_template(self.permission_denied_template), 403\n\n def show_404(self):\n if self.sleep_on_not_found:\n time.sleep(self.sleep_on_not_found)\n return flask.render_template(self.not_found_template), 404\n\n def get_context(self):\n context = self.extra_context\n\n context.update(dict(\n model=self.model,\n name_singular=self.name_singular,\n name_plural=self.name_plural,\n retrieve_view=self.retrieve_view,\n new_view=self.new_view,\n list_view=self.list_view,\n edit_view=self.edit_view,\n delete_view=self.delete_view,\n page_size=self.page_size,\n base_template=self.base_template,\n list_fields=self.list_fields\n ))\n\n return context\n\n def render(self, **extra_context):\n context = self.get_context()\n context.update(extra_context)\n return flask.render_template(self.template, **context)\n","repo_name":"kkinder/GAEStarterKit","sub_path":"GenericViews/GenericBase.py","file_name":"GenericBase.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"3"} +{"seq_id":"33402015876","text":"\"\"\"Enumerate all the cycles of an undirected graph.\"\"\"\n\n# . From Hanser T, Jauffret P, Kaufmann G. J Chem Inf Comp Sci 36, 1146-1152, 1996.\n# . With hints from John May's thesis 2014.\n\n# . PathGraph and PathEdge are classes constructed to work with the algorithm. They are incomplete for general use.\n\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom .BiconnectedComponents import BiconnectedComponents\nfrom .Edge import Edge\nfrom .Graph import Graph\nfrom .GraphStatus import GraphError\n\n#===================================================================================================================================\n# . Parameters.\n#===================================================================================================================================\n_DefaultMaximumCycleSize = None # . Maximum cycle size.\n_DefaultMaximumDegree = 1000 # . Maximum reduced node degree.\n\n#===================================================================================================================================\n# . Class.\n#===================================================================================================================================\nclass PathEdge ( Edge ):\n \"\"\"A path edge.\"\"\"\n\n _attributable = dict ( Edge._attributable )\n _attributable.update ( { \"nodes\" : list , # . The intermediate nodes along the path.\n \"_path\" : None } )\n\n def __len__ ( self ):\n return ( len ( self.nodes ) + 2 )\n\n def IsDisjoint ( self, other ):\n \"\"\"Are the paths disjoint apart from endpoints.\"\"\"\n return ( len ( set ( self.nodes + other.nodes ) ) == ( len ( self.nodes ) + len ( other.nodes ) ) )\n\n @property\n def path ( self ):\n \"\"\"Return the path.\"\"\"\n if self._path is None:\n self._path = [ self.node1 ] + self.nodes + [ self.node2 ]\n return self._path\n\n#===================================================================================================================================\n# . Class.\n#===================================================================================================================================\nclass PathGraph ( Graph ):\n \"\"\"A path graph.\"\"\"\n\n _attributable = dict ( Graph._attributable )\n _attributable.update ( { \"degree\" : 0 ,\n \"maximumCycleSize\" : _DefaultMaximumCycleSize ,\n \"maximumDegree\" : _DefaultMaximumDegree ,\n \"ordering\" : None } )\n\n def AddEdge ( self, edge ):\n \"\"\"Add an edge to the graph.\"\"\"\n self.edges.append ( edge )\n self.adjacentNodes[edge.node1].add ( edge.node2 ) # . order n1 < n2 so only n1 does indexing.\n self.adjacentEdges[edge.node1].add ( edge )\n\n @classmethod\n def FromSubGraph ( selfClass, graph, nodes, maximumCycleSize = _DefaultMaximumCycleSize ,\n maximumDegree = _DefaultMaximumDegree ):\n \"\"\"Constructor from a graph and a subset of nodes.\"\"\"\n self = selfClass ( )\n if maximumCycleSize is None: self.maximumCycleSize = max ( len ( nodes ) + 1, 3 )\n else: self.maximumCycleSize = maximumCycleSize\n self.maximumDegree = maximumDegree\n if len ( nodes ) > 0:\n # . Add nodes by degree (low to high).\n degrees = defaultdict ( int )\n edges = [ edge for edge in graph.edges if ( edge.node1 in nodes ) and ( edge.node2 in nodes ) ]\n for edge in edges:\n degrees[edge.node1] += 1\n degrees[edge.node2] += 1 \n work = sorted ( [ ( degrees[node], node ) for node in nodes ] )\n ordering = { node : order for ( order, ( _, node ) ) in enumerate ( work ) }\n self.nodes = [ node for ( _, node ) in work ]\n for edge in edges:\n n1 = edge.node1\n n2 = edge.node2\n if ordering[n1] < ordering[n2]: self.AddEdge ( PathEdge.WithNodes ( n1, n2 ) )\n else: self.AddEdge ( PathEdge.WithNodes ( n2, n1 ) )\n self.ordering = ordering\n return self\n\n def Reduce ( self ):\n \"\"\"Reduce the graph by removing all the nodes.\"\"\"\n # . Loop over nodes - low to high priority.\n cycles = []\n while len ( self.nodes ) > 0:\n node = self.nodes.pop ( 0 )\n edges = self.adjacentEdges.pop ( node, [] )\n degree = len ( edges )\n self.degree = max ( self.degree, degree ) # . Information only.\n if degree <= self.maximumDegree:\n # . Find edge order - use order here as path edges cannot be compared directly (as they may have the same node2).\n lEdges = list ( edges )\n edgeOrder = sorted ( [ ( self.ordering[edge.node2], order ) for ( order, edge ) in enumerate ( lEdges ) ] )\n edges = [ lEdges[order] for ( _, order ) in edgeOrder ]\n # . Loop over pairs of edges emanating from the node with e1 < e2.\n for i in range ( degree - 1 ):\n edge1 = edges[i]\n limit = self.maximumCycleSize + 1 - len ( edge1 )\n n1 = edge1.node2\n nodes1 = edge1.nodes[::-1] + [ node ] # . Reversed.\n for j in range ( i+1, degree ):\n edge2 = edges[j]\n # . Accept the new path if the intermediate nodes in the edge paths are unique, and the new path is not too long.\n if edge1.IsDisjoint ( edge2 ) and ( len ( edge2 ) <= limit ):\n n2 = edge2.node2\n if n1 is n2: cycles.append ( [ n1 ] + nodes1 + edge2.nodes ) # . Cycle not closed.\n else: self.AddEdge ( PathEdge.WithNodes ( n1, n2, nodes = nodes1 + edge2.nodes ) )\n # . Finish up.\n isOK = ( self.degree <= self.maximumDegree ) # . A flag for incomplete searching.\n self.Clear ( )\n return ( cycles, isOK )\n\n#===================================================================================================================================\n# . Function.\n#===================================================================================================================================\n# . Could use checks here for simple cases (e.g. no branching).\ndef HanserAllCycles ( graph, biconnectedComponents = None ,\n maximumCycleSize = _DefaultMaximumCycleSize ,\n maximumDegree = _DefaultMaximumDegree ):\n \"\"\"Calculate the relevant cycles of an undirected graph.\"\"\"\n cycleSets = []\n isOK = True\n if biconnectedComponents is None:\n biconnectedComponents = BiconnectedComponents ( graph )\n for component in biconnectedComponents:\n pathGraph = PathGraph.FromSubGraph ( graph, component, maximumCycleSize = maximumCycleSize, maximumDegree = maximumDegree )\n ( cycles, localOK ) = pathGraph.Reduce ( )\n #print ( \"\\nHAC> Maximum Reduced Node Degree = {:d}.\\n\".format ( pathGraph.degree ) )\n if len ( cycles ) > 0: cycleSets.append ( cycles )\n isOK = isOK and localOK\n return ( cycleSets, isOK )\n\n#===================================================================================================================================\n# . Testing.\n#===================================================================================================================================\nif __name__ == \"__main__\":\n pass\n","repo_name":"pdynamo/pDynamo3","sub_path":"pScientific/Graph/Hanser.py","file_name":"Hanser.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"20102706818","text":"import numpy as np\n\n\nclass Utilities:\n\n def __init__(self, simlaw):\n self.sl = simlaw\n # utilities here\n\n @staticmethod\n def isNear(a1, a2, threshold=np.nan):\n # handles structs and arrays\n if isinstance(a1, list) or isinstance(a1, np.ndarray):\n a1x = a1[0]\n a1y = a1[1]\n a1z = a1[2]\n else:\n a1x = a1.position[0]\n a1y = a1.position[1]\n a1z = a1.position[2]\n if isinstance(a2, list) or isinstance(a2, np.ndarray):\n a2x = a2[0]\n a2y = a2[1]\n a2z = a2[2]\n else:\n a2x = a2.position[0]\n a2y = a2.position[1]\n a2z = a2.position[2]\n # Returns nan if too far, returns actual distance if close enough\n # A NaN Threshold will tell isNear to ignore the threshold.\n verdict = np.nan\n if not np.isnan(threshold):\n if abs(a1x - a2x) > threshold:\n return verdict\n elif abs(a1y - a2y) > threshold:\n return verdict\n elif abs(a1z - a2z) > threshold:\n return verdict\n elif a1x == a2x and a1y == a2y:\n return verdict\n\n dist = np.linalg.norm([a1x - a2x,\n a1y - a2y,\n a1z - a2z])\n if np.isnan(threshold) or dist <= threshold:\n verdict = dist\n\n return verdict\n\n @staticmethod\n def posToPixels(simlaw, position):\n pixels = int(position * simlaw.pixelsPerMeter)\n return pixels\n\n @staticmethod\n def findItem(swarm, agentNum, sl, itemType):\n # itemType can be Agent, Target, or Hazard.\n currentAgent = swarm.agents[agentNum]\n env = swarm.environment\n numLocalAgents = 0\n localAgentIndices = -1 * np.ones(sl.numAgents)\n numLocalTargets = 0\n localTargetIndices = -1 * np.ones(env.numTargets)\n numLocalHazards = 0\n localHazardIndices = -1 * np.ones(env.numTargets)\n if itemType == \"Agents\":\n for j in range(sl.numAgents):\n if j == agentNum:\n continue\n\n otherAgent = swarm.agents[j]\n dist = Utilities.isNear(currentAgent, otherAgent, sl.visualRange)\n if dist is np.nan:\n continue\n\n diff = otherAgent.position - currentAgent.position\n dotProduct = (\n diff[0] * np.cos(currentAgent.heading) -\n diff[1] * np.sin(currentAgent.heading)\n ) / dist\n dotProduct = max(-1, min(1, dotProduct))\n angle = np.arccos(dotProduct)\n if angle > sl.FOV / 2:\n continue\n # print(f\"angle between {agentNum}, {j}: {angle:.4f}. Heading of {agentNum}: {currentAgent.heading:.4f}\")\n # print(f\"{agentNum} Acquired: {j}\")\n localAgentIndices[numLocalAgents] = j\n numLocalAgents = numLocalAgents + 1\n\n localAgents = [swarm.agents[0] for _ in range(numLocalAgents)]\n for k in range(numLocalAgents):\n localAgentIndex = localAgentIndices[k]\n localAgents[k] = swarm.agents[int(localAgentIndex)]\n return localAgents\n\n elif itemType == \"Targets\":\n for j in range(env.numTargets):\n otherTarget = env.targets[j]\n dist = Utilities.isNear(currentAgent, otherTarget, sl.visualRange + otherTarget.radius)\n if dist is np.nan:\n continue\n # print(f\"distance between Agent {agentNum} and target {j} is {dist}\")\n\n diff = otherTarget.position - currentAgent.position\n dotProduct = (\n diff[0] * np.cos(currentAgent.heading) -\n diff[1] * np.sin(currentAgent.heading)\n ) / dist\n dotProduct = max(-1, min(1, dotProduct))\n angle = np.arccos(dotProduct)\n if angle > sl.FOV / 2:\n continue\n # print(f\"angle between Agent {agentNum}, Target {j}: \"\n # f\"{angle:.2f}. Heading of Agent {agentNum}: {currentAgent.heading:.2f}\")\n # print(f\"Agent {agentNum} Acquired Target {j}\")\n localTargetIndices[numLocalTargets] = j\n numLocalTargets = numLocalTargets + 1\n # print(f\"Agent {agentNum} sees {numLocalTargets} targets\")\n\n localTargets = [env.targets[0] for _ in range(numLocalTargets)]\n for k in range(numLocalTargets):\n localTargetIndex = localTargetIndices[k]\n localTargets[k] = env.targets[int(localTargetIndex)]\n return localTargets\n\n elif itemType == \"Hazards\":\n for j in range(env.numHazards):\n otherHazard = env.hazards[j]\n dist = Utilities.isNear(currentAgent, otherHazard, sl.visualRange + otherHazard.radius)\n if dist is np.nan:\n continue\n\n diff = otherHazard.position - currentAgent.position\n angle = np.arccos(\n (\n diff[0] * np.cos(currentAgent.heading) -\n diff[1] * np.sin(currentAgent.heading)\n # second component is negative because pygame flips y-axis\n ) / dist\n )\n if angle > sl.FOV / 2:\n continue\n localHazardIndices[numLocalHazards] = j\n numLocalHazards = numLocalHazards + 1\n\n localHazards = [env.hazards[0] for _ in range(numLocalHazards)]\n for k in range(numLocalHazards):\n localHazardIndex = localHazardIndices[k]\n localHazards[k] = env.hazards[int(localHazardIndex)]\n return localHazards","repo_name":"MaxlGao/Agent-Based-BT-Model","sub_path":"Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18446858564","text":"class ItemToPurchase:\n def __init__(self, item_name='none', item_price=0, item_quantity=0, item_description='none'):\n self.item_name = item_name\n self.item_price = item_price\n self.item_quantity = item_quantity\n self.item_description = item_description\n \n def print_item_cost(self):\n self.item_total = (self.item_price * self.item_quantity)\n print('%s %i @ $%i = $%i' % (self.item_name, self.item_quantity, self.item_price, self.item_total))\n \n def print_item_description(self):\n print('%s: %s.' % (self.item_name, self.item_description))\n\nclass shoppingCart:\n def __init__(self, customer_name='none', current_date='January 1, 2016', cart_items=[]):\n self.customer_name = customer_name\n self.current_date = current_date\n self.cart_items = cart_items\n \n def add_item(self, ItemToPurchase):\n self.cart_items.append(self.item_name)\n \n def remove_item(self, name):\n joint = ' '.join(self.cart_items)\n index = joint.find(name)\n if index == -1:\n print('Item not found in cart. Nothing removed.')\n else:\n index = self.cart_items.index(name)\n self.cart_items.pop(index)\n \n def modify_item(self, ItemToPurchase):\n joint = ' '.join(self.cart_items)\n index = joint.find(name)\n if index == -1:\n print('Item not found in cart. Nothing modified.')\n else:\n index = self.cart_items.index(name)\n \n\nif __name__ == \"__main__\":\n\n print('Item 1')\n name = input('Enter the item name:\\n')\n price = int(input('Enter the item price:\\n'))\n quantity = int(input('Enter the item quantity:\\n'))\n item1 = ItemToPurchase(name, price, quantity)\n print() \n print('Item 2')\n name = input('Enter the item name:\\n')\n price = int(input('Enter the item price:\\n'))\n quantity = int(input('Enter the item quantity:\\n'))\n item2 = ItemToPurchase(name, price, quantity)\n print()\n print('TOTAL COST')\n item1.print_item_cost()\n item2.print_item_cost()\n print()\n print('Total: $%s' % (item1.item_total + item2.item_total))\n","repo_name":"OGSnoop/Chapter-7","sub_path":"cartofdeath.py","file_name":"cartofdeath.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72421887443","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import Group\nfrom .models import *\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\n#All views are here\n\n#Views For Auth Section\ndef register(request):\n\t#The new form is used\n\tif request.method == 'POST':\n\t\tform = SignUpForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tuser_group = form.cleaned_data['user_type']\n\t\t\tgroup = Group.objects.get(name=user_group)\n\t\t\tuser.groups.add(group)\n\t\t\treturn redirect('login')\n\t\telse:\n\t\t\treturn render(request, 'registration/register.html', {'form': form})\n\tform = SignUpForm()\n\treturn render(request, 'registration/register.html', {'form': form})\n\n#Views For Landlord Section\n@login_required\ndef calculate_property(request):\n\tif request.method == 'POST':\n\t\tform = PropertyRent(request.POST)\n\t\tif form.is_valid():\n\t\t\t#To save but not commit and hence we can add more values\n\t\t\tform = form.save(commit=False)\n\t\t\tform.rent = '5000000'\n\t\t\t#The whole user object has to be passed\n\t\t\tform.landlord = request.user\n\t\t\t#Finally commiting after doing changes\n\t\t\tform.save()\n\t\t\t#To for a reverse url/dynamic url\n\t\t\treturn redirect('mainapp:property_page',property_id=form.id,user_id=request.user.id)\n\t\telse:\n\t\t\treturn render(request,'landlord/property.html',{'form':form})\n\tform = PropertyRent()\n\treturn render(request,'landlord/property.html',{'form':form})\n\n@login_required\ndef landlord_properties(request):\n\tproperties_list = Properties.objects.filter(pk=request.user.id)\n\treturn render(request,'landlord/lproperties.html',{'properties':properties_list},)\n\n@login_required\ndef custom_property_page(request, user_id, property_id):\n\tproperty_details = Properties.objects.get(pk=property_id)\n\treturn render(request,'landlord/property_details.html',{'property':property_details})\n\n@login_required\ndef landlord_enquiries(request):\n\tenquiries = Enquiries.objects.filter(enquirer=request.user)\n\treturn render(request,'landlord/lenquiries.html',{'enquiries':enquiries})\n\n#Views for Tenant Section\n@login_required\ndef tenant_enquiries(request):\n\tenquiries = Enquiries.objects.filter(enquirer=request.user)\n\treturn render(request,'landlord/lenquiries.html',{'enquiries':enquiries})\n\n@login_required\ndef tenant_property_page(request,user_id, property_id):\n\tproperty_details = Properties.objects.get(pk=property_id)\n\treturn render(request,'tenant/property_details.html',{'property':property_details})\n\n@login_required\ndef send_enquiry(request, user_id, property_id):\n\tp = Properties.objects.get(pk=property_id)\n\te = Enquiries(enquirer=request.user, property=p)\n\te.save()\n\treturn HttpResponse(\"Enquiry Sent\")\n\n#Views for home page\ndef index(request):\n\tif request.method == 'POST':\n\t\tform = SearchForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tlease_type = form.cleaned_data['lease_type']\n\t\t\tfurnished = form.cleaned_data['furnished']\n\t\t\trooms = form.cleaned_data['rooms']\n\t\t\tprint(rooms)\n\t\t\tproperties = Properties.objects.filter(rooms=rooms)\n\t\t\treturn render(request,'home/results.html',{'properties':properties})\n\t\telse:\n\t\t\treturn render(request,'home/search.html',{'form':form})\n\tform = SearchForm()\n\treturn render(request,'home/search.html',{'form':form})\n\ndef search_result(request):\n\treturn render(request, 'home/results.html')\n\ndef search_property_page(request, property_id):\n\tproperty_details = Properties.objects.get(pk=property_id)\n\treturn render(request,'home/property_details.html',{'property':property_details})\n\ndef contact(request):\n\treturn render(request, 'home/contact.html')\n\ndef about(request):\n\treturn render(request, 'home/about.html')","repo_name":"Man-Jain/NoBroker-Django","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29247394104","text":"\n# Owen Kroeger\n# My Own Work\n\n# Recursive\n# 3a(n+1) - 4a(n) = 0\n# 3a(n+1) = 4a(n)\n# a(n+1) = 4/3a(n)\n\n# Explicit\n# a(n) = (5)*(4/3)^n\n\ndef recRelation():\n \n # n in explicit formula\n count = 0\n\n # recursive var\n an = 5.00\n\n # explicit var\n am = 0\n\n # temporary variables for rounding\n tempr = 0\n tempe = 0\n\n print(\"Recursive: \\t Explicit: \")\n print(\"-----------------------------\")\n for i in range(20):\n \n # run explicit formula\n am = 5*((4/3)**count)\n \n tempr = round(an, 2)\n tempe = round(am, 2)\n\n print(f'{tempr}\\t\\t {tempe}')\n\n # iterate both formulas \n an = 4/3*an\n count += 1\n\nrecRelation()","repo_name":"oskroeger/LabQuestion11","sub_path":"LabQuestion11/LabQuestion11.py","file_name":"LabQuestion11.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34542336299","text":"import os\nimport argparse\n\nfrom eval.gqa_ood.gqa_eval import GQAEval\nfrom eval.gqa_ood.plot_tail import plot_tail\n\nfrom utils import write_txt\n\n# python evaluation.py --ood_test\n# --predictions [prediction path (on ood_testdev_all or gqa_testdev)]\n# python evaluation.py --eval_tail_size\n# --predictions [prediction path (on ood_val_all or gqa_val)]\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--eval_tail_size',\n default=True,\n # action='store_true'\n )\n parser.add_argument(\n '--save_dir',\n default='snap/gqa_ood/0323_GGM_5')\n parser.add_argument(\n '--ood_test', default=True, type=bool)\n parser.add_argument(\n '--predictions', type=str,\n default='snap/gqa_ood/0323_GGM_5/val_all_predict.json')\n args = parser.parse_args()\n\n if args.eval_tail_size:\n result_eval_file = args.predictions\n # Retrieve scores\n alpha_list = [9.0, 7.0, 5.0, 3.6, 2.8, 2.2, 1.8, 1.4, 1.0, 0.8, 0.4, 0.3,\n 0.2, 0.1, 0.0, -0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7]\n acc_list = []\n for alpha in alpha_list:\n ques_file_path = \\\n f'data/gqa_ood/alpha_tail/val_bal_tail_{alpha:.1f}.json'\n gqa_eval = GQAEval(result_eval_file,\n ques_file_path,\n choices_path=None,\n EVAL_CONSISTENCY=False)\n acc = gqa_eval.get_acc_result()['accuracy']\n acc_list.append(acc)\n \n print(\"Alpha:\", alpha_list)\n print(\"Accuracy:\", acc_list)\n # Plot: save to \"tail_plot_[model_name].pdf\"\n plot_tail(alpha=list(map(lambda x: x + 1, alpha_list)), accuracy=acc_list,\n model_name='default') # We plot 1+alpha vs. accuracy\n if args.ood_test:\n result_eval_file = args.predictions\n file_list = {'Tail': 'ood_testdev_tail.json',\n 'Head': 'ood_testdev_head.json',\n 'All': 'ood_testdev_all.json'}\n result = {}\n for setup, ques_file_path in file_list.items():\n gqa_eval = GQAEval(result_eval_file,\n 'data/gqa_ood/org/' + ques_file_path,\n choices_path=None,\n EVAL_CONSISTENCY=False)\n result[setup] = gqa_eval.get_acc_result()['accuracy']\n \n result_string, detail_result_string = gqa_eval.get_str_result()\n print('\\n___%s___' % setup)\n for result_string_ in result_string:\n print(result_string_)\n \n print('\\nRESULTS:\\n')\n delta = (result['Head'] - result['Tail']) / result['Tail'] * 100.\n msg = f\"Accuracy (all, tail, head, delta):\" \\\n f\" {result['All']:.2f}, {result['Tail']:.2f}, \" \\\n f\"{result['Head']:.2f}, {delta:.2f}\\n\"\n print(msg)\n write_txt(os.path.join(args.save_dir, f'result.txt'), msg)\n","repo_name":"jingjing12110/X-GGM","sub_path":"eval/gqa_ood/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"2780559495","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\r\n dummy = ListNode()\r\n cur = dummy\r\n carry = 0\r\n\r\n # l1 or l2 is each digit\r\n # since it is reversed, we start to sum the 1's place. that makes it easier\r\n\r\n while l1 or l2 or carry:\r\n v1 = l1.val if l1 else 0\r\n v2 = l2.val if l2 else 0\r\n\r\n val = v1 + v2 + carry\r\n\r\n # because we are adding digits, if it is 15 carry will be 1\r\n\r\n carry = val // 10\r\n val = val % 10\r\n cur.next = ListNode(val)\r\n\r\n # update pointers\r\n\r\n cur = cur.next\r\n l1 = l1.next if l1 else None\r\n l2 = l2.next if l2 else None\r\n return dummy.next\r\n\r\n","repo_name":"Mohitz4418/Leet-Code","sub_path":"Add Two Numbers.py","file_name":"Add Two Numbers.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28760713920","text":"from flask import Flask, render_template, request, jsonify\nfrom .api.RecEngine import rec_engine\n\n# Static_folder: static resource, like css\n# template_folder: template resource, like index.html\n# static_url_path: Other resource\napp = Flask(\n __name__,\n static_folder=\"./static\",\n static_url_path=\"/\",\n template_folder=\"./static\")\n\n@app.route('/')\ndef index():\n '''\n When browser visit the page, render index.html files in the dist folder.\n '''\n return render_template(\"index.html\")\n\n# Get message from user and return \n@app.route('/api/send_msg', methods=['POST'])\ndef sned_msg():\n\n # An example to show how to use the Class.\n recommend_list = rec_engine.get_list_by_genre('pop', [], 10)\n \n return_data = { # data return to FE\n \"response\": f\"This is my response\",\n \"recommend_list\": recommend_list\n }\n return jsonify(return_data)\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"twinkletwinklelittlestar70/flask_vue_template","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26403779123","text":"import pygame as pg # imports pyagme library with the shorthand name of pg\nimport os # imports os, a python native libarary which deals with files and paths\nimport numpy as np# imports numpy, a python library with math, arrays, and a lot of stuff. For this, mostly arrays\n\n\nmain_dir = os.path.split(os.path.abspath(__file__))[0] # establishes the defualt absolute path to the directory where this is run/located on a computer\ndata_dir = os.path.join(main_dir, \"data\") # absolute path to the data folder\n\n# a dictionary that correlates the box on the chess board to the \n# coordinate where a piece should be placed to be centered in that box\npiece_spaces = {\n 0 : 12.5,\n 1 : 87.5,\n 2 : 162.5, \n 3 : 237.5,\n 4 : 312.5,\n 5 : 387.5,\n 6 : 462.5,\n 7 : 537.5\n}\n\n# a dictionary that connects boxes on the board to x/y coordinate where movement dot would be placed\ndot_spaces = {\n 0 : 22.5,\n 1 : 97.5,\n 2 : 172.5, \n 3 : 247.5,\n 4 : 322.5,\n 5 : 397.5,\n 6 : 472.5,\n 7 : 547.5\n}\n\nnaming_nums = {\n (-1, -1) : 11,\n (-1, 0) : 12,\n (-1, 1) : 13,\n (0, -1) : 14,\n (0, 0) : 15,\n (0, 1) : 16,\n (1, -1) : 17,\n (1, 0) : 18,\n (1, 1) : 19,\n}\n\n\n\n# ranges that connect a position from the mouse to a box on the board\ncursor_range = [(0,73), (73, 148), (148, 223), (223, 298), (298, 373), (373,448), (448, 523), (523, 598)]\n\n# Boards: an ndarray (numpy array with (n) dimensions) with two dimensions representing\n# the board with a 3 digit number representing a particular piece, key below\n\n# Digit 1 : Which instance of that piece it is\n# Digit 2 : 1 = Pawn, 2 = Rook, 3 = Knight, 4 = Bishop, 5 = King, 6 = Queen, 9 = Dot\n# Digit 3 : 1 = White, 2 = Black\n# Makes the board - white at the top, black at the bottom of the matrix\nboards = np.array([\n [120, 130, 140, 150, 168, 141, 131, 121],\n [110, 111, 112, 113, 114, 115, 116, 117]\n])\nboards = np.append(boards, np.zeros((4, 8)), 0)\nboards = np.append(boards, np.array([[210, 211, 212, 213, 214, 215, 216, 217],[220, 230, 240, 250, 268, 241, 231, 221]]), 0)\nboards = boards.astype(int)\n\nturn_dict = {\n '1' : '2',\n '2' : '1'\n}\n\ndtos = np.array(np.zeros((8,8)))\ndtos.astype(int)\n \n\n\n\n# Default code which loads an image as a pygame image with the default path\ndef load_image(name, colorkey=None, scale=1):\n fullname = os.path.join(data_dir, name)\n image = pg.image.load(fullname)\n\n size = image.get_size()\n size = (size[0] * scale, size[1] * scale)\n image = pg.transform.scale(image, size)\n\n image = image.convert()\n if colorkey is not None:\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, pg.RLEACCEL)\n return image, image.get_rect()\n\n# Default code which loads a sound as a pygame image with the default path\ndef load_sound(name):\n class NoneSound:\n def play(self):\n pass\n\n if not pg.mixer or not pg.mixer.get_init():\n return NoneSound()\n\n fullname = os.path.join(data_dir, name)\n sound = pg.mixer.Sound(fullname)\n\n return sound\n\n# Function which takes the boards matrix (matrix of pieces represented by integers)\n# turns it into a matrix of object instances \ndef make_board(orig):\n bong = []\n # Goes through the entire array piece by piece\n\n # First by the horizontal/ rank\n if orig[0][0] == 0:\n bong = [0]\n bong = bong * 63\n bong.append(Dot(0))\n return np.reshape(bong, (8,8))\n\n for i, line in enumerate(orig):\n\n \n # Then within rank, it goes by \"file\"\n for j, piece in enumerate(line):\n # If it's length is 1, it has to be a 0 and is a pawn\n if len(str(piece)) == 1:\n bong.append(0)\n # If it has a 1 for the second digit, it is a pawn\n elif str(piece)[1] == str(1):\n bong.append(Fishie(piece))\n # If it has a 2 for the second digit, it is a groundhog\n elif str(piece)[1] == str(2):\n bong.append(Groundhog(piece))\n # If it has a 3 for the second digit, it is a Birdie/knight\n elif str(piece)[1] == str(3):\n #bong.append(Knight)\n bong.append(Rat(piece))\n # If it has a 4 for the second digit, it is a bishop \n elif str(piece)[1] == str(4):\n #bong.append(Bishop)\n bong.append(Snake(piece))\n # If it has a 5 for the second digit, it is a King\n elif str(piece)[1] == str(5):\n #bong.append(King)\n bong.append(King(piece))\n # If it has a 6 for the second digit, it is a Queen\n elif str(piece)[1] == str(6):\n #bong.append(Queen)\n bong.append(Queen(piece))\n else:\n bong.append(0)\n \n return np.reshape(bong, (8,8))\n\ndef blit_board(board, screen):\n # Goes through the matrix and blits (prints) the image of that piece to the screen\n # Does this through using the image variable within each instance of a piece\n for column, file in enumerate(board):\n for spot, square in enumerate(file):\n if type(square) is int or type(square) is float: continue\n\n elif int(square.name) >= 1000:\n screen.blit(square.image, (dot_spaces[spot],dot_spaces[column]))\n else:\n screen.blit(square.image, (piece_spaces[spot],piece_spaces[column]))\n\ndef check_range(num):\n # Goes through the cursor ranges and it finds which one the input number is in\n # Returns index/which number range it is in/which square it is in\n for ind,rng in enumerate(cursor_range):\n if num in range(rng[0],rng[1]):\n return ind\n\ndef nice(x : int, y : int, typee, ofset_x=0, ofset_y=0, inp=None):\n '''\n Parameters:\n ----------\n x : int\n X coordinate of the piece.\n y : int\n Y coordinate of the piece.\n typee : int\n Type of conversion/information request. \n\n 1: Return color of piece (not racist)\n 2: Change dtos - Need inp then Change dotes\n 4: Change boards - Need inp\n 5: Change Piece\n ofset_x : int\n How far the piece that you want the information of is from the piece\n Optional, default is 0 - checking the piece specified in x,y\n ofset_y : int\n How far the piece that you want the information of is from the piece\n Optional, default is 0 - checking the piece specified in x,y\n\n Output:\n ------\n Type 1:\n Str : color of piece (1 or 2)\n Type 2: \n Just changes variables\n \n '''\n if (x + ofset_x not in range(0,8)) or (y + ofset_y not in range(0,8)):\n return None\n \n\n if typee==1:\n return str(boards[x+ofset_x, y + ofset_y]).lstrip('[')[0]\n elif typee==2:\n dtos[x+ofset_x, y+ofset_y] = inp\n if int(dtos[x+ofset_x, y+ofset_y]) != 0:\n dotes[x+ofset_x, y+ofset_y] = Dot(dtos[x+ofset_x, y+ofset_y])\n return\n else:\n dotes[x+ofset_x, y+ofset_y] = 0\n return\n\n elif typee==3:\n return int(str(boards[x+ofset_x, y + ofset_y]).lstrip('[')[1])\n\n \n \n #elif typee==3:\n # if dtos[x+ofset_x, y+ofset_y] != 0:\n # dotes[x+ofset_x, y+ofset_y] = Dot(dtos[x+ofset_x, y+ofset_y])\n # else:\n # dotes[x+ofset_x, y+ofset_y] = 0\n # return\n\n\n\n\n\n#def check_square(x,y,opt):\n\n\n\n# Example class\n\n# Fishie class\n\nclass Dot(pg.sprite.Sprite):\n def __init__(self, name):\n pg.sprite.Sprite.__init__(self)\n self.image = pg.transform.scale(pg.image.load('data/dote.png'), (30,30))\n self.image.set_alpha(175)\n self.rect = (30,30) \n self.name = name\n\n\ndotes = make_board(dtos)\npieces = []\n\nclass Fishie(pg.sprite.Sprite):\n # ttvtommyinit\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self) # call Sprite initializer\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/Goldfish_white.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/Goldfish_black.png'), (50,50))\n self.rect = (50,50)\n\n # Creates the dots of possible moves for a pawn\n def create_moves(self):\n # determines which color it is (for top or bottom)\n if str(self.name)[0] == '1': #White\n if nice(self.box[0], self.box[1], 1, 1, 0) == '0':\n nice(self.box[0], self.box[1], 2, 1, 0, int(str(self.name) + '00'))\n if self.box[0] == 1:\n nice(self.box[0], self.box[1], 2, 2, 0, int(str(self.name) + '01'))\n \n if nice(self.box[0], self.box[1], 1, 1, -1) == '2':\n nice(self.box[0], self.box[1], 2, 1, -1, int(str(self.name) + '02'))\n if nice(self.box[0], self.box[1], 1, 1, 1) == '2':\n nice(self.box[0], self.box[1], 2, 1, 1, int(str(self.name) + '03'))\n\n\n elif str(self.name)[0] == '2': # Black\n if nice(self.box[0], self.box[1], 1, -1, 0) == '0':\n nice(self.box[0], self.box[1], 2, -1, 0, int(str(self.name) + '00'))\n\n if self.box[0] == 6:\n nice(self.box[0], self.box[1], 2, -2, 0, int(str(self.name) + '01'))\n \n if nice(self.box[0], self.box[1], 1, -1, -1) == '1':\n nice(self.box[0], self.box[1], 2, -1, -1, int(str(self.name) + '02'))\n if nice(self.box[0], self.box[1], 1, -1, 1) == '1':\n nice(self.box[0], self.box[1], 2, -1, 1, int(str(self.name) + '03'))\n\n \n\n\n \n \n \n # Removes the moves created\n def close_moves(self):\n # determines which color it is (for top or bottom)\n if str(self.name)[0] == '1': #White\n # Takes the box that is one below the box of the piece and places a number\n # Which is 4 digits, and has the identifier appended to the end of the piece name\n if self.box[0] == 7:\n boards[self.box[self.box[0], self.box[1]]] = int('16' + str(self.name)[-1])\n pieces[self.box[self.box[0], self.box[1]]] = Queen(str(boards[self.box[self.box[0], self.box[1]]]))\n dtos[self.box[0] + 1,self.box[1]] = 0\n dotes[self.box[0] + 1, self.box[1]] = 0\n # If it is on the 2nd or 7th rank then it can move two spaces so it adds that box\n if (self.box[0] == 1):\n dtos[self.box[0] + 2,self.box[1]] = 0\n dotes[self.box[0] + 2, self.box[1]] = 0\n \n nice(self.box[0], self.box[1], 2, 1, -1, 0)\n #nice(self.box[0], self.box[1], 3, 1, -1)\n\n nice(self.box[0], self.box[1], 2, 1, 1, 0)\n #nice(self.box[0], self.box[1], 3, 1, 1)\n \n\n elif str(self.name)[0] == '2': # Black\n # Takes the box that is one above the box of the piece and places a number\n # Which is 4 digits, and has the identifier appended to the end of the piece name\n if self.box[0] == 0:\n boards[self.box[self.box[0], self.box[1]]] = int('26' + str(self.name)[-1])\n pieces[self.box[self.box[0], self.box[1]]] = Queen(str(boards[self.box[self.box[0], self.box[1]]]))\n \n dtos[self.box[0] - 1,self.box[1]] = 0\n dotes[self.box[0] - 1, self.box[1]] = 0\n # If it is on the 2 or 7th rank then it can move two spaces so it adds that box\n if (self.box[0] == 6):\n dtos[self.box[0] - 2,self.box[1]] = 0\n dotes[self.box[0] - 2, self.box[1]] = 0\n \n nice(self.box[0], self.box[1], 2, -1, -1, 0)\n #nice(self.box[0], self.box[1], 3, -1, -1)\n nice(self.box[0], self.box[1], 2, -1, 1, 0)\n #nice(self.box[0], self.box[1], 3, -1, 1)\n\n\nclass Groundhog(pg.sprite.Sprite):\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self)\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/white_walrus.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/black_walrus.png'), (50,50))\n self.rect = (50,50)\n\n def create_moves(self):\n clr = nice(self.box[0], self.box[1], 1)\n if (nice(self.box[0], self.box[1], 1, 1, 0) != clr) and (nice(self.box[0], self.box[1], 1, 2, 0) != clr):\n nice(self.box[0], self.box[1], 2, 2, 0, int(str(self.name) + '00'))\n \n if (nice(self.box[0], self.box[1], 1, -1, 0) != clr) and (nice(self.box[0], self.box[1], 1, -2, 0) != clr):\n nice(self.box[0], self.box[1], 2, -2, 0, int(str(self.name) + '01'))\n\n if (nice(self.box[0], self.box[1], 1, 0, 1) != clr) and (nice(self.box[0], self.box[1], 1, 0, 2) != clr):\n nice(self.box[0], self.box[1], 2, 0, 2, int(str(self.name) + '02'))\n\n if (nice(self.box[0], self.box[1], 1, 0, -1) != clr) and (nice(self.box[0], self.box[1], 1, 0, -2) != clr):\n nice(self.box[0], self.box[1], 2, 0, -2, int(str(self.name) + '03'))\n\n \n \n\n for i in range(-1, 2):\n for j in range(-1,2):\n if nice(self.box[0], self.box[1], 1, i, j) == '0':\n nice(self.box[0], self.box[1], 2, i, j, int(str(self.name) + str(naming_nums[(i,j)])))\n\n\n\n # Removes the moves created\n def close_moves(self):\n nice(self.box[0], self.box[1], 2, 2, 0, 0)\n nice(self.box[0], self.box[1], 2, -2, 0, 0)\n nice(self.box[0], self.box[1], 2, 0, 2,0)\n nice(self.box[0], self.box[1], 2, 0, -2, 0)\n\n for i in range(-1, 2):\n for j in range(-1,2):\n nice(self.box[0], self.box[1], 2, i, j, 0) \n\n \nclass Queen(pg.sprite.Sprite):\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self)\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/white_queen.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/black_queen.png'), (50,50))\n self.rect = (50,50)\n\n def create_moves(self):\n clr = nice(self.box[0], self.box[1], 1)\n\n stop = None\n \n\n for i in range(-1, 2):\n for j in range(-1,2):\n for k in range(1,8):\n if nice(self.box[0], self.box[1], 1, i * k, j * k) != '0':\n if nice(self.box[0], self.box[1], 1, i * k, j * k) != clr:\n stop = k + 1\n else:\n stop = k\n break\n for l in range(1, stop):\n nice(self.box[0], self.box[1], 2, i * l, j * l, int(str(self.name) + str(naming_nums[(i,j)] - 10) + str(l)))\n\n\n\n\n # Removes the moves created\n def close_moves(self):\n \n for i in range(-1, 2):\n for j in range(-1,2):\n for k in range(1,9):\n nice(self.box[0], self.box[1], 2, i*k, j*k, 0)\n \n\nclass King(pg.sprite.Sprite):\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self)\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/white_king.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/black_king.png'), (50,50))\n self.rect = (50,50)\n\n def create_moves(self):\n clr = nice(self.box[0], self.box[1], 1)\n \n\n for i in range(-1, 2):\n for j in range(-1,2):\n if nice(self.box[0], self.box[1], 1, i, j) != clr:\n nice(self.box[0], self.box[1], 2, i, j, int(str(self.name) + str(naming_nums[(i,j)])))\n\n\n\n\n\n\n # Removes the moves created\n def close_moves(self):\n \n for i in range(-1, 2):\n for j in range(-1,2):\n nice(self.box[0], self.box[1], 2, i, j, 0)\n \n\nclass Rat(pg.sprite.Sprite):\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self)\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/white_rat.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/black_rat.png'), (50,50))\n self.rect = (50,50)\n\n def create_moves(self):\n clr = nice(self.box[0], self.box[1], 1)\n \n for i in range(-1, 2):\n for j in range(-1,2):\n if (nice(self.box[0], self.box[1], 1, i, j) != '0') and (nice(self.box[0], self.box[1], 1, i * 2, j* 2) != clr):\n nice(self.box[0], self.box[1], 2, i*2, j * 2, int(str(self.name) + str(naming_nums[(i,j)] - 10) + '0'))\n \n \n\n for i in range(-1, 2):\n for j in range(-1,2):\n if nice(self.box[0], self.box[1], 1, i, j) == '0':\n nice(self.box[0], self.box[1], 2, i, j, int(str(self.name) + str(naming_nums[(i,j)])))\n\n\n\n # Removes the moves created\n def close_moves(self):\n for i in range(-1, 2):\n for j in range(-1,2): \n nice(self.box[0], self.box[1], 2, i*2, j * 2, 0)\n \n \n\n for i in range(-1, 2):\n for j in range(-1,2):\n nice(self.box[0], self.box[1], 2, i, j, 0) \n\nclass Snake(pg.sprite.Sprite):\n def __init__(self, name):\n self.name = name\n self.box = np.where(boards == int(self.name))\n pg.sprite.Sprite.__init__(self)\n if nice(self.box[0],self.box[1],1) == '1':\n self.image = pg.transform.scale(pg.image.load('data/white_snake.png'), (50,50))\n else: \n self.image = pg.transform.scale(pg.image.load('data/black_snake.png'), (50,50))\n self.rect = (50,50)\n\n def create_moves(self):\n clr = nice(self.box[0], self.box[1], 1)\n\n stop = None\n \n\n for i in range(-1, 2, 2):\n for j in range(-1,2, 2):\n for k in range(1,9):\n if nice(self.box[0], self.box[1], 1, i * k, j * k) != '0':\n if nice(self.box[0], self.box[1], 1, i * k, j * k) != clr:\n stop = k + 1\n else:\n stop = k\n break\n for l in range(1, stop):\n nice(self.box[0], self.box[1], 2, i * l, j * l, int(str(self.name) + str(naming_nums[(i,j)] - 10) + str(l)))\n\n\n\n\n\n # Removes the moves created\n def close_moves(self):\n \n for i in range(-1, 2, 2):\n for j in range(-1,2, 2):\n for k in range(1,8):\n nice(self.box[0], self.box[1], 2, i*k, j*k, 0)\n\n\npieces = make_board(boards) # Turns numbers into object instances\n\n\ndef main():\n \"\"\"this function is called when the program starts.\n it initializes everything it needs, then runs in\n a loop until the function returns.\"\"\"\n \n \n # Initialize Everything\n pg.init()\n screen = pg.display.set_mode((600, 600), pg.SCALED)\n pg.display.set_caption(\"CHESS 3 FTW\")\n pg.mouse.set_visible(True)\n\n # Create The Background\n board = pg.transform.scale(pg.image.load('data/BlueBoard.png'), screen.get_size()) # Loads in board as pygame image\n \n \n # Put Text On The Background, Centered\n\n # Display The Background\n screen.blit(board, (0, 0)) # displays board to screen\n pg.display.flip()\n\n clock = pg.time.Clock()\n prev_box = None \n cur_box = None\n dotes[7,7] = 0\n turn = '1'\n\n # Main Loop\n going = True\n while going:\n clock.tick(60)\n\n # Handle Input Events\n for event in pg.event.get(): # Quits the game\n if event.type == pg.QUIT:\n going = False\n elif event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:\n going = False\n if event.type == pg.KEYDOWN and event.key == pg.K_e:\n rt = True\n turn = turn_dict[turn]\n \n if (list(np.where(boards == 150)[0]) == []) or (list(np.where(boards == 250)[0]) == []):\n going = False\n\n \n if pg.mouse.get_pressed(3)[0] == True: # If the main mouse button pressed\n mouse_pos = pg.mouse.get_pos() # Gets mouse coordinate\n cur_box = check_range(mouse_pos[1]), check_range(mouse_pos[0]) # Inputs the box which the mouse is in\n # Moves piece\n \n if (type(pieces[cur_box[0]][cur_box[1]]) != int) or (type(dotes[cur_box[0]][cur_box[1]]) != int): # If it is a 0, do nothing\n \n if (type(dotes[cur_box[0]][cur_box[1]]) != int) and (dotes[cur_box[0]][cur_box[1]].name >= 10000) and (prev_box != cur_box):\n piece_move = str(int(dotes[cur_box[0]][cur_box[1]].name))[:-2]\n ind1, ind2 = np.where(boards == int(piece_move))\n ind1 = int(ind1); ind2 = int(ind2)\n if str(pieces[ind1][ind2].name)[1] == '2': # Walrus\n pieces[ind1][ind2].close_moves()\n if abs(ind1-cur_box[0]) == 2:\n spt = int(np.average([ind1,cur_box[0]]))\n boards[spt][int(ind2)] = 0\n pieces[spt][int(ind2)] = 0\n elif abs(ind2-cur_box[1]) == 2:\n spt = int(np.average([ind2,cur_box[1]]))\n boards[int(ind1)][spt] = 0\n pieces[int(ind1)][spt] = 0\n\n boards[cur_box[0]][cur_box[1]] = int(piece_move)\n pieces[cur_box[0]][cur_box[1]] = pieces[int(ind1),int(ind2)]\n boards[int(ind1)][int(ind2)] = 0\n pieces[int(ind1)][int(ind2)] = 0\n pieces[cur_box[0],cur_box[1]].box = int(cur_box[0]),int(cur_box[1])\n turn = turn_dict[turn]\n elif str(pieces[ind1][ind2].name)[1] == '3': # Rat\n n = len(list(set(i for j in boards for i in j)))\n pieces[ind1][ind2].close_moves()\n boards[cur_box[0]][cur_box[1]] = int(piece_move)\n pieces[cur_box[0]][cur_box[1]] = pieces[int(ind1),int(ind2)]\n boards[int(ind1)][int(ind2)] = 0\n pieces[int(ind1)][int(ind2)] = 0\n pieces[cur_box[0],cur_box[1]].box = int(cur_box[0]),int(cur_box[1])\n if rt == True:\n turn = turn_dict[turn]\n elif (abs(ind1-cur_box[0]) == 2) and (len(list(set(i for j in boards for i in j))) == n):\n turn = turn\n else: \n turn = turn_dict[turn]\n \n else:\n pieces[ind1][ind2].close_moves()\n boards[cur_box[0]][cur_box[1]] = int(piece_move)\n pieces[cur_box[0]][cur_box[1]] = pieces[int(ind1),int(ind2)]\n boards[int(ind1)][int(ind2)] = 0\n pieces[int(ind1)][int(ind2)] = 0\n pieces[cur_box[0],cur_box[1]].box = int(cur_box[0]),int(cur_box[1])\n turn = turn_dict[turn]\n # Creating moves\n elif str(pieces[cur_box[0]][cur_box[1]].name)[1] == '1' or '2': # If is a pawn, make the moves - later will be all pieces\n #open dots\n if (nice(cur_box[0], cur_box[1], 1) == turn):\n if (prev_box != None) and (prev_box != cur_box) and (type(pieces[prev_box[0]][prev_box[1]]) != int): \n pieces[prev_box[0]][prev_box[1]].close_moves() # closes movement\n\n pieces[cur_box[0]][cur_box[1]].create_moves() # Uses method to make moves\n\n \n \n #if (prev_box != None) and (prev_box != cur_box) and (type(pieces[prev_box[0]][prev_box[1]]) != int): # if clicked (nested if) and the previous box has been set and the previous box and current box are different\n # pieces[prev_box[0]][prev_box[1]].close_moves() # closes movement\n\n \n \n \n\n \n\n # Draw Everything\n screen.blit(board, (0, 0))\n blit_board(pieces, screen) # Custom function which prints the entire board to the screen\n blit_board(dotes, screen)\n pg.display.flip()\n\n prev_box = cur_box\n rt = False\n\n \n pg.quit()\n\n\n# Game Over\n\n\n# this calls the 'main' function when this script is executed\nif __name__ == \"__main__\":\n main()","repo_name":"MustafaKhan0/Chess_3","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":25233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18494724512","text":"import sys\nfrom collections import defaultdict\nimport os\nimport time\n\ndef label_output(filename):\n name = filename\n new_name = name[:name.rfind(\".\")] + \".gv\"\n f = open(name, \"r\")\n\n deps = []\n for line in f:\n deps.append(line)\n\n # cfg line dependencies between consecutive cells\n cfg_deps_btwn_cells = defaultdict(list)\n cfg_deps_btwn_cells_count = int(deps[-1])\n\n for i in range(len(deps) - cfg_deps_btwn_cells_count - 1, len(deps) - 1):\n line = deps[i]\n l = line.split(\"->\")\n l = list(map(str.strip, l))\n cfg_deps_btwn_cells[int(l[0])].append(int(l[1]))\n\n # cfg line dependencies \n cfg_deps = defaultdict(set)\n cfg_count = int(deps[-1 - cfg_deps_btwn_cells_count - 1])\n cfg_start = len(deps) - cfg_deps_btwn_cells_count - 1 - cfg_count - 1\n\n for i in range(cfg_start, cfg_start + cfg_count):\n line = deps[i]\n l = line.split(\"->\")\n l = list(map(str.strip, l))\n cfg_deps[int(l[0])].add(int(l[1]))\n\n cell_count = int(deps[0])\n deps_count = int(deps[1 + cell_count])\n sources_list = (deps[-3 - cfg_count - 1 - cfg_deps_btwn_cells_count - 1]).strip()\n sources = []\n if (len(sources_list) > 0):\n sources = list(map(int, sources_list.split(',')))\n sinks_list = (deps[-2 - cfg_count - 1 - cfg_deps_btwn_cells_count - 1]).strip()\n sinks = []\n if (len(sinks_list) > 0):\n sinks = list(map(int, sinks_list.split(',')))\n all_lines_list = (deps[-1 - cfg_count - 1 - cfg_deps_btwn_cells_count - 1].strip())\n all_lines = []\n if (len(all_lines_list) > 0):\n all_lines = list(map(int, all_lines_list.split(',')))\n\n colors_start = cell_count + 1 + deps_count + 1\n colors_end = len(deps) - 3 - cfg_count - 1 - cfg_deps_btwn_cells_count - 1\n initial_seeds = set()\n\n # a mapping from cell exe count to colors(set)\n colors = defaultdict(set)\n for i in range(colors_start, colors_end):\n line = deps[i]\n l = line.split(\"->\")\n l = list(map(str.strip, l))\n colors[int(l[0])].add(l[1])\n initial_seeds.add(int(l[0]))\n\n cell_to_lines = defaultdict(list)\n line_to_cell = defaultdict(int)\n for i in range(1, 1 + cell_count):\n line = deps[i]\n l = line.split(\"->\")\n l = list(map(str.strip, l))\n if (len(l[1]) == 0):\n cell_to_lines[int(l[0])] = []\n continue\n lines = list(map(int, l[1].split(',')))\n cell_to_lines[int(l[0])] = lines\n for line in lines:\n line_to_cell[line] = int(l[0])\n\n dep_graph = defaultdict(set)\n parent_graph = defaultdict(set)\n cell_dep = defaultdict(set)\n cell_parent = defaultdict(set)\n\n for i in range(1 + cell_count + 1, 1 + cell_count + 1 + deps_count):\n line = deps[i]\n l = line.split(\"->\")\n l = list(map(str.strip, l))\n dep_graph[int(l[0])].add(int(l[1]))\n parent_graph[int(l[1])].add(int(l[0]))\n cell_dep[line_to_cell[int(l[0])]].add(line_to_cell[int(l[1])])\n cell_parent[line_to_cell[int(l[1])]].add(line_to_cell[int(l[0])])\n\n for line in all_lines:\n if (not (line in colors)):\n colors[line] = {'lightgrey'}\n\n prop_colors = defaultdict(set)\n for k in colors:\n prop_colors[k] = colors[k]\n\n cell_colors = defaultdict(list)\n\n for k in sorted(prop_colors):\n if(len(prop_colors[k]) == 0):\n cell_colors[line_to_cell[k]].append(\"lightgrey\")\n else:\n cell_colors[line_to_cell[k]].extend(list(prop_colors[k]))\n\n cell_color_map = dict()\n color_to_label_map = {\"yellow\":\"training+evaluation\", \"purple\":\"training\", \"orange\":\"evaluation\", \"red\":\"collection\", \"green\":\"wrangling\", \"lightblue\":\"exploration\", \"lightgrey\":\"n/a\"}\n output = \"\"\n for cell in cell_colors:\n color_set = set(cell_colors[cell])\n if \"purple\" in color_set and \"orange\" in color_set:\n cell_color_map[cell] = \"yellow\"\n elif \"purple\" in color_set:\n cell_color_map[cell] = \"purple\"\n elif \"orange\" in color_set:\n cell_color_map[cell] = \"orange\"\n elif \"red\" in color_set:\n cell_color_map[cell] = \"red\"\n elif \"green\" in color_set:\n cell_color_map[cell] = \"green\"\n elif \"lightblue\" in color_set:\n cell_color_map[cell] = \"lightblue\"\n else:\n cell_color_map[cell] = \"lightgrey\"\n output += str(cell) + \" : \" + color_to_label_map[cell_color_map[cell]] + \"\\n\"\n\n f_name = name[:name.rfind(\".\")] + \"_no_prop_or_inf_output.txt\"\n f = open(f_name, \"w\")\n f.write(output)\n f.close()\n return\n\ndirectory_in_str = sys.argv[1]\ndirectory = os.fsencode(directory_in_str)\nnum_files_processed = 0\nnum_notebooks_in_folder = 0\nnum_files_with_labels = 0\n\nstart_time = time.time()\nfor file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\"_new_labels_no_type_inf.txt\"):\n num_files_with_labels += 1\n try:\n label_output(str(directory)[1:][1:-1] + filename)\n num_files_processed += 1\n except Exception as e:\n print(\"Exception from analyzing \\\"{}\\\": {}\".format(filename, e))\n continue\n if filename.endswith(\".ipynb\"):\n num_notebooks_in_folder += 1\n\nend_time = time.time()\nprint(\"num_files_processed = \" + str(num_files_processed) + \"\\n\")\nprint(\"num_files_with_labels = \" + str(num_files_with_labels) + \"\\n\")\nprint(\"num_notebooks_in_folder = \" + str(num_notebooks_in_folder))\nprint(\"total time used = {}\".format(end_time - start_time))","repo_name":"cindyyuanjiang/Jupyter-Notebook-Project","sub_path":"no_propagation_label_output.py","file_name":"no_propagation_label_output.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"5874454851","text":"import streamlit as st\nimport time\nimport pandas as pd\nimport numpy as np\nimport requests\nimport json\nimport nltk\n# nltk.download('punkt')\nimport time\nimport requests\nimport math\nimport re\nimport gg_search_norank\nimport gdown\n\nif \"URL\" not in st.session_state:\n st.session_state.URL = \"\"\nif \"response\" not in st.session_state:\n st.session_state.response = \"\"\n\n\ndef download_url(URL):\n if \"\" not in str(st.session_state.response) or URL == \"\": \n gdown.download(id=\"1-HFzqcNs7oDJaR9moNyzqom88hg4m3AT\", output=\"C:/Users/ASUS/Desktop/Mooc_Project/demo/Web_App_nghia/url.json\") \n with open(\"C:/Users/ASUS/Desktop/Mooc_Project/demo/Web_App_nghia/url.json\", \"r\") as bf:\n URL_json = json.load(bf)\n URL = URL_json[\"url\"]\n else: \n URL = URL\n return URL\n\n\n\n\n\n\n\nst.title('Question Answering Demo')\nmenu = ['Close domain', 'Open domain', 'Open domain ranking']\nmodel = ['Model 74%', 'Model 78%']\nchoice = st.sidebar.selectbox('Choose demo type', menu, 0)\nchoice_model = st.sidebar.selectbox('Choose model', model, 0)\nif choice:\n\t\tst.write(\"Your choose option is\" + \" \" + choice)\nif choice == \"Close domain\":\n questions = st.text_input('Question')\n contexts = st.text_area('Context', height=50)\n button = st.button('Submit')\n if button:\n if not questions:\n st.warning(\"Please fill out so required question field !\")\n elif not contexts:\n st.warning(\"Please fill out so required context field !\")\n else:\n model = ''\n if choice_model=='Model 74%':\n model = 'hieu-close'\n else:\n model = 'binh-close'\n myobj = {'question': str(questions),'context':str(contexts), 'model': model}\n \n try:\n st.session_state.URL = download_url(st.session_state.URL)\n url_1 = st.session_state.URL +'/closedomain'\n my_bar = st.progress(0)\n response = requests.post(url_1, json=myobj)\n print(response)\n st.session_state.response = response\n if response.ok:\n # print(response.ok)\n rs = response.json()\n # print(rs['answer'])\n for percent_complete in range(100):\n time.sleep(0)\n my_bar.progress(percent_complete + 1)\n if rs['score'] > 0.5:\n st.success(str(\"Answer: \"+rs['answer']))\n st.success(str(\"Score: \")+str(round(rs['score'],3)))\n st.success(str(\"Time predict: \")+str(round(rs['total_time'], 2))+str(\"s\"))\n else:\n st.warning(\"Not Answer\")\n \n except AssertionError as error:\n st.subheader('Error Connect to Server.')\n print(error)\nelif choice == 'Open domain':\n # st.subheader(\"ĐANG BẢO TRÌ Ạ\")\n container = st.container()\n questions_ = st.text_input('Question')\n button = st.button('Submit')\n if button:\n if not questions_:\n st.warning(\"Please fill out so required question field !\")\n else:\n container.subheader(\"Google search context with 5 link web:\")\n token_query = gg_search_norank.tokenize(questions_)[0]\n # print(\"a\",token_query)\n keywords = gg_search_norank.keywords_extraction(token_query)\n # start = time.time()\n li, urls = gg_search_norank.reurl_li(questions_, keywords)\n # print(time.time()-start)\n b = gg_search_norank.reb(li)\n\n result = {}\n max = 0\n for i,item_b in enumerate(b):\n contexts_ = item_b.replace('_',' ')\n if contexts_:\n container.write(\"URL {}: \".format(i + 1)+ urls[i])\n container.text_area(\"Context {}:\".format(i + 1),contexts_)\n else:\n container.write(\"URL {}: \".format(i + 1)+ urls[i])\n container.warning(\"Please fill out so required context {} field !\".format(i+1))\n model = ''\n if choice_model=='Model 74%':\n model = 'hieu-open'\n else:\n model = 'binh-open'\n\n myobj = {'question': questions_, 'model': model} \n # print(myobj)\n try:\n st.session_state.URL = download_url(st.session_state.URL)\n url_2 = st.session_state.URL + '/opendomain'\n response = requests.post(url_2, json=myobj)\n print(response)\n st.session_state.response = response\n if response.ok:\n rs = response.json()\n st.success(str(\"Final Answer: \"+rs['answer']) + str(\" ----- Score: \")+str(round(rs['score'],3)) + str(\" ----- Time predict: \")+str(round(rs['total_time'], 2))+str(\"s\"))\n except AssertionError as error:\n st.subheader('Error Connect to Server.')\n print(error)\n \n\nelif choice == 'Open domain ranking':\n container = st.container()\n container.write(\"link và ranking nằm ở đây\")\n questions_ = st.text_input('Question')\n button = st.button('Submit')\n if button:\n model = ''\n if choice_model=='Model 74%':\n model = 'hieu-open'\n else:\n model = 'binh-open'\n myobj = {'question': str(questions_)} \n try:\n st.session_state.URL = download_url(st.session_state.URL)\n url_3 = st.session_state.URL +'/opendomainranking'\n response = requests.post(url_3, json=myobj)\n print(response)\n st.session_state.response = response\n if response.ok:\n rs = response.json()\n print(rs)\n st.success(str(\"Final Answer: \"+rs['answer']) + str(\" ----- Score: \")+str(round(rs['score'],3)) + str(\" ----- Time predict: \")+str(round(rs['total_time'], 2))+str(\"s\"))\n except AssertionError as error:\n st.subheader('Error Connect to Server.')\n print(error)\n","repo_name":"nghiaanh108/Question_Answering","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8141108104","text":"import ipywidgets as ipw\n\nFUNCTIONAL_LINK_MAP = {\n \"PBE\": \"https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.77.3865\",\n \"PBEsol\": \"https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.100.136406\",\n}\n\nPSEUDO_LINK_MAP = {\n \"SSSP\": \"https://www.materialscloud.org/discover/sssp/table/efficiency\",\n \"PseudoDojo\": \"http://www.pseudo-dojo.org/\",\n}\n\nFUNCTIONAL_REPORT_MAP = {\n \"LDA\": \"local density approximation (LDA)\",\n \"PBE\": \"generalized gradient approximation of Perdew-Burke-Ernzerhof (PBE)\",\n \"PBEsol\": \"the revised generalized gradient approximation of Perdew-Burke-Ernzerhof (PBE) for solids\",\n}\n\n# Periodicity\nPERIODICITY_MAPPING = {\n (True, True, True): \"xyz\",\n (True, True, False): \"xy\",\n (True, False, False): \"x\",\n}\n\n\ndef generate_report_parameters(qeapp_wc):\n \"\"\"Generate the report parameters from the ui parameters and workchain's input.\n\n Parameters extracted from ui parameters, directly from the widgets,\n such as the ``pseudo_family`` and ``relax_type``.\n\n Parameters extracted from workchain's inputs, such as the ``energy_cutoff_wfc``\n and ``energy_cutoff_rho``.\n\n Return a dictionary of the parameters.\n \"\"\"\n from aiida.orm.utils.serialize import deserialize_unsafe\n\n ui_parameters = qeapp_wc.base.extras.get(\"ui_parameters\", {})\n if isinstance(ui_parameters, str):\n ui_parameters = deserialize_unsafe(ui_parameters)\n # Construct the report parameters needed for the report\n # drop support for old ui parameters\n if \"workchain\" not in ui_parameters:\n return {}\n report = {\n \"relaxed\": ui_parameters[\"workchain\"][\"relax_type\"],\n \"relax_method\": ui_parameters[\"workchain\"][\"relax_type\"],\n \"electronic_type\": ui_parameters[\"workchain\"][\"electronic_type\"],\n \"material_magnetic\": ui_parameters[\"workchain\"][\"spin_type\"],\n \"protocol\": ui_parameters[\"workchain\"][\"protocol\"],\n \"initial_magnetic_moments\": ui_parameters[\"advanced\"][\n \"initial_magnetic_moments\"\n ],\n \"properties\": ui_parameters[\"workchain\"][\"properties\"],\n }\n #\n report.update(\n {\n \"bands_computed\": \"bands\" in ui_parameters[\"workchain\"][\"properties\"],\n \"pdos_computed\": \"pdos\" in ui_parameters[\"workchain\"][\"properties\"],\n }\n )\n # update pseudo family information to report\n pseudo_family = ui_parameters[\"advanced\"].get(\"pseudo_family\")\n pseudo_family_info = pseudo_family.split(\"/\")\n pseudo_library = pseudo_family_info[0]\n functional = pseudo_family_info[2]\n if pseudo_library == \"SSSP\":\n pseudo_protocol = pseudo_family_info[3]\n elif pseudo_library == \"PseudoDojo\":\n pseudo_protocol = pseudo_family_info[4]\n report.update(\n {\n \"pseudo_family\": pseudo_family,\n \"pseudo_library\": pseudo_library,\n \"pseudo_version\": pseudo_family_info[1],\n \"functional\": functional,\n \"pseudo_protocol\": pseudo_protocol,\n \"pseudo_link\": PSEUDO_LINK_MAP[pseudo_library],\n \"functional_link\": FUNCTIONAL_LINK_MAP[functional],\n }\n )\n # Extract the pw calculation parameters from the workchain's inputs\n # energy_cutoff is same for all pw calculations when pseudopotentials are fixed\n # as well as the smearing settings (semaring and degauss) and scf kpoints distance\n # read from the first pw calculation of relax workflow.\n # It is safe then to extract these parameters from the first pw calculation, since the\n # builder is anyway set with subworkchain inputs even it is not run which controlled by\n # the properties inputs.\n pw_parameters = qeapp_wc.inputs.relax.base.pw.parameters.get_dict()\n energy_cutoff_wfc = pw_parameters[\"SYSTEM\"][\"ecutwfc\"]\n energy_cutoff_rho = pw_parameters[\"SYSTEM\"][\"ecutrho\"]\n occupation = pw_parameters[\"SYSTEM\"][\"occupations\"]\n scf_kpoints_distance = qeapp_wc.inputs.relax.base.kpoints_distance.value\n report.update(\n {\n \"energy_cutoff_wfc\": energy_cutoff_wfc,\n \"energy_cutoff_rho\": energy_cutoff_rho,\n \"occupation_type\": occupation,\n \"scf_kpoints_distance\": scf_kpoints_distance,\n }\n )\n if occupation == \"smearing\":\n report[\"degauss\"] = pw_parameters[\"SYSTEM\"][\"degauss\"]\n report[\"smearing\"] = pw_parameters[\"SYSTEM\"][\"smearing\"]\n report[\"tot_charge\"] = pw_parameters[\"SYSTEM\"].get(\"tot_charge\", 0.0)\n report[\"periodicity\"] = PERIODICITY_MAPPING.get(\n qeapp_wc.inputs.structure.pbc, \"xyz\"\n )\n # hard code bands and pdos\n if \"bands\" in qeapp_wc.inputs:\n report[\n \"bands_kpoints_distance\"\n ] = qeapp_wc.inputs.bands.bands_kpoints_distance.value\n if \"pdos\" in qeapp_wc.inputs:\n report[\n \"nscf_kpoints_distance\"\n ] = qeapp_wc.inputs.pdos.nscf.kpoints_distance.value\n return report\n\n\ndef _generate_report_html(report):\n \"\"\"Read from the bulider parameters and generate a html for reporting\n the inputs for the `QeAppWorkChain`.\n \"\"\"\n from importlib import resources\n\n from jinja2 import Environment\n\n from aiidalab_qe.app import static\n\n def _fmt_yes_no(truthy):\n return \"Yes\" if truthy else \"No\"\n\n env = Environment()\n env.filters.update(\n {\n \"fmt_yes_no\": _fmt_yes_no,\n }\n )\n template = resources.read_text(static, \"workflow_summary.jinja\")\n style = resources.read_text(static, \"style.css\")\n report = {key: value for key, value in report.items() if value is not None}\n\n return env.from_string(template).render(style=style, **report)\n\n\ndef generate_report_text(report_dict):\n \"\"\"Generate a text for reporting the inputs for the `QeAppWorkChain`\n\n :param report_dict: dictionary generated by the `generate_report_dict` function.\n \"\"\"\n\n report_string = (\n \"All calculations are performed within the density-functional \"\n \"theory formalism as implemented in the Quantum ESPRESSO code. \"\n \"The pseudopotential for each element is extracted from the \"\n f'{report_dict[\"Pseudopotential library\"][0]} '\n \"library. The wave functions \"\n \"of the valence electrons are expanded in a plane wave basis set, using an \"\n \"energy cutoff equal to \"\n f'{round(report_dict[\"Plane wave energy cutoff (wave functions)\"][0])} Ry '\n \"for the wave functions and \"\n f'{round(report_dict[\"Plane wave energy cutoff (charge density)\"][0])} Ry '\n \"for the charge density and potential. \"\n \"The exchange-correlation energy is \"\n \"calculated using the \"\n f'{FUNCTIONAL_REPORT_MAP[report_dict[\"Functional\"][0]]}. '\n \"A Monkhorst-Pack mesh is used for sampling the Brillouin zone, where the \"\n \"distance between the k-points is set to \"\n )\n kpoints_distances = []\n kpoints_calculations = []\n\n for calc in (\"SCF\", \"NSCF\", \"Bands\"):\n if f\"K-point mesh distance ({calc})\" in report_dict:\n kpoints_distances.append(\n str(report_dict[f\"K-point mesh distance ({calc})\"][0])\n )\n kpoints_calculations.append(calc)\n\n report_string += \", \".join(kpoints_distances)\n report_string += \" for the \"\n report_string += \", \".join(kpoints_calculations)\n report_string += \" calculation\"\n if len(kpoints_distances) > 1:\n report_string += \"s, respectively\"\n report_string += \".\"\n\n return report_string\n\n\nclass SummaryView(ipw.VBox):\n def __init__(self, wc_node, **kwargs):\n self.report = generate_report_parameters(wc_node)\n self.report_html = _generate_report_html(self.report)\n\n self.summary_view = ipw.HTML(self.report_html)\n super().__init__(\n children=[self.summary_view],\n **kwargs,\n )\n","repo_name":"aiidalab/aiidalab-qe","sub_path":"src/aiidalab_qe/app/result/summary_viewer.py","file_name":"summary_viewer.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"36455746749","text":"# -*- mode: python; coding: utf-8 -*-\n\nfrom __future__ import absolute_import, unicode_literals, print_function\n\nimport functools\nimport operator\nimport uuid\nimport logging\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.core import validators\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_extensions import admin as admin_extensions\n\n\nclass ForeignKey(models.ForeignKey):\n '''Customised ForeignKey subclass with our defaults'''\n\n def __init__(self, to, verbose_name, **kwargs):\n kwargs.setdefault('on_delete', models.PROTECT)\n kwargs.setdefault('db_index', True)\n kwargs.setdefault('limit_choices_to', {'active': True})\n\n super().__init__(to=to, verbose_name=verbose_name, **kwargs)\n\n\ndef _default_state():\n # avoid import cycle by using a local import\n from .. import models\n\n try:\n return models.State.objects.get(code=0)\n except models.State.DoesNotExist:\n return None\n\n\nclass AbstractModel(models.Model):\n \"\"\"\n Abstract BaseModel class specifying a unique object.\n \"\"\"\n\n class Meta(object):\n abstract = True\n\n state = models.ForeignKey('addrreg.State', models.PROTECT,\n verbose_name=_('Condition'), db_index=True,\n related_name='+',\n default=_default_state)\n active = models.BooleanField(_('Active'), default=True)\n note = models.CharField(_('Notes'), blank=True, null=True, max_length=255)\n\n @classmethod\n def type_name(cls):\n return cls.__name__.lower()\n\n @classmethod\n def alias_names(cls):\n return []\n\n @classmethod\n def type_names(cls):\n return [cls.type_name()] + cls.alias_names()\n\n\ndef _random_sumiffiik():\n return '{{{}}}'.format(uuid.uuid4())\n\n\nclass SumiffiikIDField(models.CharField):\n '''Field for storing a Sumiffiik, which is a UUID wrapped in {}. We\n could use a UUID field, but MS SQL doesn't support those directly,\n so they offer little value.\n\n '''\n\n def __init__(self, verbose_name=_('Sumiffiik ID'),\n max_length=38,\n default=_random_sumiffiik,\n db_index=True,\n null=False, blank=False,\n **kwargs):\n\n for k, v in list(locals().items()):\n if k not in ('self', 'kwargs') and k[0] != '_':\n kwargs.setdefault(k, v)\n\n super().__init__(**kwargs)\n\n def get_db_prep_value(self, value, *args, **kwargs):\n if value == '[n/a]':\n return None\n else:\n value = '{{{}}}'.format(uuid.UUID(value.strip('{}')))\n\n return super().get_db_prep_value(value, *args, **kwargs)\n\n\nclass SumiffiikDomainField(models.CharField):\n\n def __init__(self, verbose_name=_('Sumiffiik Domain'),\n max_length=64,\n default='https://data.gl/najugaq/road/v1',\n validators=[validators.URLValidator()],\n **kwargs):\n for k, v in list(locals().items()):\n if k not in ('self', 'kwargs') and k[0] != '_':\n kwargs.setdefault(k, v)\n\n super().__init__(**kwargs)\n\n def formfield(self, **kwargs):\n # Passing max_length to forms.CharField means that the value's length\n # will be validated twice. This is considered acceptable since we want\n # the value in the form field (to pass into widget for example).\n defaults = {\n 'widget': forms.URLField,\n }\n defaults.update(kwargs)\n return super().formfield(**defaults)\n\n\nclass FormBase(forms.ModelForm):\n\n class Meta:\n widgets = {\n 'note': forms.Textarea(attrs={'cols': 80, 'rows': 4}),\n 'last_changed': forms.Textarea(attrs={'cols': 80, 'rows': 4}),\n }\n\n def clean_sumiffiik(self):\n sumiffiik = str(self.cleaned_data['sumiffiik'])\n try:\n return '{{{}}}'.format(\n uuid.UUID(sumiffiik.strip('{}')),\n )\n except ValueError:\n raise forms.ValidationError(\n _('Enter a valid Sumiffiik, such as {%s}'),\n params=str(uuid.uuid4()),\n )\n\n\nclass AdminBase(admin_extensions.ForeignKeyAutocompleteAdmin):\n form = FormBase\n\n view_on_site = False\n\n _fieldsets = (\n (_('State'), {\n 'fields': ('state', 'active', 'note'),\n 'classes': ('wide',),\n }),\n )\n\n list_filter = (\n 'active',\n 'state',\n )\n\n radio_fields = {\n \"state\": admin.HORIZONTAL,\n }\n\n superuser_only = False\n\n def get_readonly_fields(self, request, obj=None):\n fields = super().get_readonly_fields(request, obj)\n user = request.user\n\n if (\n not (user.is_superuser or user.rights.count() > 1) and\n hasattr(self.model, 'municipality')\n ):\n fields += ('municipality',)\n\n return fields\n\n def get_related_filter(self, remote_model, request):\n user = request.user\n filters = []\n\n if getattr(remote_model, 'active', None):\n filters.append(models.Q(active=True))\n\n if not user.is_superuser:\n if remote_model._meta.label == 'addrreg.Municipality':\n filters.append(models.Q(rights__users=user))\n\n if hasattr(remote_model, 'municipality'):\n filters.append(models.Q(municipality__rights__users=user))\n\n return functools.reduce(operator.and_, filters)\n\n def get_field_queryset(self, db, db_field, request):\n remote_model = db_field.remote_field.model\n queryset = (\n super().get_field_queryset(db, db_field, request) or\n remote_model.objects\n )\n\n return queryset.filter(self.get_related_filter(remote_model, request))\n\n def get_queryset(self, request):\n user = request.user\n qs = super().get_queryset(request)\n\n if not user.is_superuser and hasattr(self.model, 'municipality'):\n qs = qs.filter(municipality__rights__users=user)\n\n return qs\n\n def get_search_results(self, request, queryset, search_term):\n user = request.user\n\n if not user.is_superuser and hasattr(self.model, 'municipality'):\n queryset = queryset.filter(municipality__rights__users=user)\n\n return super().get_search_results(request, queryset, search_term)\n\n def __has_municipality(self, request, obj=None):\n if request.user.is_superuser:\n return True\n elif not hasattr(request.user, 'rights'):\n return False\n # can do this in general?\n\n if not obj:\n return (request.user.rights.all() and\n hasattr(self.model, 'municipality'))\n\n elif hasattr(obj, 'municipality'):\n return request.user.rights.filter(\n municipality=obj.municipality\n ).exists()\n else:\n return False\n\n def save_model(self, request, obj, form, change):\n if (hasattr(type(obj), 'municipality') and\n not hasattr(obj, 'municipality')):\n obj.municipality = request.user.rights.only().get().municipality\n\n obj._registration_user = request.user\n\n super().save_model(request, obj, form, change)\n\n diff = []\n for key in form.changed_data:\n if key not in ['registrations']:\n value = getattr(obj, key)\n diff.append(\"%s: %s\" % (key, value))\n logging.getLogger('django.server').info(\n \"%s (%s id=%s) was updated by %s\\nChanges:\\n%s\" %\n (\n str(obj), obj.__class__.__name__, obj.id,\n request.user,\n '\\n'.join(diff)\n )\n )\n\n def has_delete_permission(self, request, obj=None):\n return request.user.is_superuser\n\n def has_change_permission(self, request, obj=None):\n return self.__has_municipality(request, obj)\n\n def has_add_permission(self, request):\n return self.__has_municipality(request)\n\n def has_module_permission(self, request):\n if self.superuser_only:\n return request.user.is_superuser\n return self.__has_municipality(request)\n","repo_name":"magenta-aps/gladdrreg","sub_path":"addrreg/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23386710090","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport pandas as pd\nimport backtrader as bt\nimport bitfinex\nimport datetime\nimport time\nfrom PandasData import PandasData\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef fetch_data(start, stop, symbol, interval, tick_limit, step):\n # Create api instance\n api_v2 = bitfinex.bitfinex_v2.api_v2()\n data = []\n start = start - step\n while start < stop:\n start = start + step\n end = start + step\n res = api_v2.candles(symbol=symbol, interval=interval,\n limit=tick_limit, start=start,\n end=end)\n data.extend(res)\n time.sleep(1.2)\n return data\n\n\ndef get_data(pair, t_start, t_stop, bin_size):\n time_step = 60000000\n\n limit = 1000\n df = {}\n path = f'./data/{pair}_{t_start}-{t_stop}_{bin_size}.csv'\n if (os.path.exists(path)) and (os.path.isfile(path)):\n df = pd.read_csv(path, index_col=0)\n else:\n data = fetch_data(start=t_start, stop=t_stop, symbol=pair, interval=bin_size, tick_limit=limit, step=time_step)\n names = ['time', 'open', 'close', 'high', 'low', 'volume']\n df = pd.DataFrame(data, columns=names)\n df.drop_duplicates(inplace=True)\n df['time'] = pd.to_datetime(df['time'], unit='ms')\n df.set_index('time', inplace=True)\n df.sort_index(inplace=True)\n print(df.head())\n df.to_csv(f'./data/{pair}_{t_start}-{t_stop}_{bin_size}.csv')\n return df\n\n\ndef main():\n t_start = datetime.datetime(2020, 1, 1, 0, 0)\n t_start = time.mktime(t_start.timetuple()) * 1000\n\n t_stop = datetime.datetime(2020, 1, 31, 0, 0)\n t_stop = time.mktime(t_stop.timetuple()) * 1000\n df = get_data(pair='btcusd', t_start=t_start, t_stop=t_stop, bin_size='1d')\n print(df.head())\n\n # cerebro = bt.Cerebro()\n # data = PandasData(dataname=df, timeframe=1)\n # cerebro.adddata(data)\n # print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\n # cerebro.run()\n # print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())\n\n #df.plot(subplots=True)\n #plt.show()\n\n # df.astype('float').dtypes\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xhusar2/CryptoBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15189693588","text":"import random\nfrom typing import List\n\nSEEDS = [\n \"A {} and a {} and a {}\",\n \"What does the future hold for {}?\",\n \"I saw a {} with a {} holding a {}\",\n \"A {} can tell you the future using a {}\",\n \"Ask what a {} is for. Can a {} help you?\",\n \"The man wanted a {} for predicting the future\",\n]\n\n\ndef seed_from(labels: List[str]) -> str:\n clean = (lab.replace(\"_\", \" \") for lab in labels[::-1])\n return random.choice(SEEDS).format(*clean)\n","repo_name":"melnyczuk/wool-gather","sub_path":"src/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42677267517","text":"'''\nCreated on Jun. 17, 2022\n\n@author: AsifMahmud\n'''\ndef diameterOfBinaryTree(self, root: Optional[TreeNode]) -> int:\n def maxDepth(root):\n if not root:\n return 0\n leftDepth = maxDepth(root.left)\n rightDepth = maxDepth(root.right)\n totalDiameter = leftDepth + rightDepth\n self.maxDiameter = max(self.maxDiameter, totalDiameter)\n \n return max(leftDepth, rightDepth) + 1\n \n self.maxDiameter = 0\n maxDepth(root)\n return self.maxDiameter","repo_name":"asiffmahmudd/leetcode-problems","sub_path":"leetcode_problems/problems/543_Diameter_of_Binary_Tree.py","file_name":"543_Diameter_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44571993392","text":"########################\n### Build a linked list \n########################\n\n# create an element \n\nclass Element(object): #\n\tdef __init__(self, value):\n\t\tself.value = value\t\t# an element stores a value \n\t\tself.next = None\t \t# element.next is a pointer; the element has property (variable) to refer to the next element \n\n\nclass LinkedList(object):\n\n\t# Members: \n\t# Element self.head // head 的类型是Element (class)\n\n\tdef __init__(self, head=None):\n\t\tself.head = head\n\n\t# create a linked list\n\n\tdef append(self, new_element): # 把Element(车厢) append 到 队尾\n\t\tcurrent = self.head # 火车车头;the first element in the list. Note: if no head is defined in a linked list, it will default to None. \n\t\tif self.head: \t\t# True: self.head exists; False: self.head is null\n\t\t\twhile current.next: # True: current.next exists; False: current.next is null\n\t\t\t\tcurrent = current.next\n\t\t\tcurrent.next = new_element # .next is a hook to link the next elment \n\t\telse:\n\t\t\tself.head = new_element #If there is no head already (null), you should assign new_element to it to become the head and do nothing else.\n\n\t\t# 如果没有head, 要存两个new_elements,那么第一个new_element进else, 第二个new_element进if\n\n\tdef get_element_by_position(self, position):\n\t\tif position < 1:\n\t\t\treturn None\n\n\t\tcounter = 1 # counter 是个计数器,每走过一节车厢就增加1\n\t\tcurrent = self.head\n\t\twhile current and counter < position:\n\t\t\tcurrent = current.next\n\t\t\tcounter += 1\n\n\t\treturn current\n\n\tdef insert_to_position(self, new_element, position):\n\t\tcounter = 1\n\t\tcurrent = self.head\n\t\tif position > 1:\n\t\t\twhile current and counter < position:\n\t\t\t\tif counter == position - 1:\n\t\t\t\t\tnew_element.next = current.next\n\t\t\t\t\tcurrent.next = new_element\n\t\t\t\t\t# the previous current_next is replaced with new_element and no longer exists \n\t\t\t\t\t# both current.next and new_element.next are pointers. \n\t\t\t\t\t# A pointer is an object that stores the memory address of another value located in computer memory. \n\t\t\t\t\t# A pointer references a location in memory, and obtaining the value stored at that location is known as dereferencing the pointer.\n\t\t\t\tcurrent = current.next\n\t\t\t\tcounter += 1\n\t\telif position == 1: # 插第一个时\n\t\t\tnew_element.next = self.head # new_element 的 next 的 pointer 指向 self.head \n\t\t\tself.head = new_element # both self.head and new_element have type of Element (Class)\n\n\tdef delete(self, value):\n\t\tif not self.head:\n\t\t\treturn\n\n\t\tif self.head.value == value: # 如果想要删除的值是head\n\t\t\tself.head = self.head.next\n\n\t\tcurrent = self.head\n\t\twhile current.next and current.next.value != value: # this is to check if the next value equals to the given value to be deleted; if not, move on to check the next \n\t\t\tcurrent = current.next\n\n\t\tif current.next:\n\t\t\tcurrent.next = current.next.next;\n\n\n\n# Test cases\n# Set up some Elements\ne1 = Element(1)\ne2 = Element(2)\ne3 = Element(3)\ne4 = Element(4)\n\n# Start setting up a LinkedList\nll = LinkedList(e1)\nll.append(e2)\nll.append(e3)\n\n# Test get_element_by_position\n# Should print 3\nprint(ll.head.next.next.value)\n# Should also print 3\nprint(ll.get_element_by_position(3).value)\n\n# Test insert_to_position\nll.insert_to_position(e4,3)\n# Should print(4 now\nprint(ll.get_element_by_position(3).value)\n\n# Test delete\nll.delete(1)\n# Should print(2 now\nprint(ll.get_element_by_position(1).value)\n# Should print(4 now\nprint(ll.get_element_by_position(2).value)\n# Should print(3 now\nprint(ll.get_element_by_position(3).value)","repo_name":"annsway/Python","sub_path":"Algorithm & Data Structures/UL2_linked_list.py","file_name":"UL2_linked_list.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"938697756","text":"import argparse\nimport os, sys\nfrom itertools import islice\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom joblib import Parallel, delayed\nfrom more_itertools import chunked\nfrom torch.utils.data import IterableDataset, DataLoader\nfrom torch_geometric.data import Batch\nfrom tqdm import tqdm\nimport pytorch_lightning as pl\nfrom pricePrediction.nets.netsGraph import PricePredictorModule\nfrom pricePrediction.config import NUM_WORKERS_PER_GPU, BATCH_SIZE, DEFAULT_MODEL, USE_FEATURES_NET, \\\n BUFFER_N_BATCHES_FOR_PRED\n\n\nclass GraphPricePredictor():\n name = \"price_GNN\"\n\n def __init__(self, model_path=DEFAULT_MODEL, n_gpus = 1, n_cpus= NUM_WORKERS_PER_GPU, batch_size:int=BATCH_SIZE,\n **kwargs):\n self.model_path = model_path\n self.n_gpus = n_gpus\n self.n_cpus = n_cpus\n self.batch_size = batch_size\n self.trainer = pl.Trainer(gpus=self.n_gpus, logger=False)\n self.model = PricePredictorModule.load_from_checkpoint(self.model_path, batch_size=self.batch_size)\n\n if USE_FEATURES_NET:\n from pricePrediction.preprocessData.smilesToDescriptors import smiles_to_graph\n\n else:\n from pricePrediction.preprocessData.smilesToGraph import smiles_to_graph\n\n self.smiles_to_graph = smiles_to_graph\n\n def prepare_smi(self, idx_smi):\n idx, smi = idx_smi\n graph = self.smiles_to_graph(smi)\n if graph is None:\n return None\n graph.input_idx = idx\n return graph\n\n def yieldPredictions(self, smiles_generator, buffer_n_batches=BUFFER_N_BATCHES_FOR_PRED):\n buffer_size = buffer_n_batches * self.batch_size\n preds_iter = map(lambda x: self.predictListOfSmiles(x), tqdm(chunked(smiles_generator, buffer_size)))\n for preds_batch in preds_iter:\n for pred in preds_batch:\n yield pred\n\n def predictListOfSmiles(self, smiles_list):\n smiles_list = list(smiles_list)\n graphs_list = list(filter(None.__ne__, map(self.prepare_smi, enumerate(smiles_list) )))\n graphs_fn = lambda : graphs_list\n dataset = MyIterableDataset(graphs_fn, self.n_cpus)\n dataloader = DataLoader(dataset=dataset, batch_size=self.batch_size, collate_fn=Batch.from_data_list,\n num_workers=self.n_cpus)\n\n preds = self.trainer.predict(self.model, dataloader)\n n_smiles = len(smiles_list)\n all_preds = np.nan * np.ones(n_smiles)\n for i, batch in enumerate(dataloader):\n batch_preds = preds[i].to(\"cpu\").numpy()\n idxs = batch.input_idx.to(\"cpu\").numpy().astype(np.int64).tolist()\n all_preds[idxs] = batch_preds\n return all_preds\n\n\nclass MyIterableDataset(IterableDataset):\n def __init__(self, generator_fun, num_workers):\n super().__init__()\n self.generator_fun = generator_fun\n self.num_workers = num_workers\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n uid = torch.utils.data.get_worker_info().id\n return islice( self.generator_fun(), uid, None, self.num_workers)\n else:\n return iter(self.generator_fun())\n\n\ndef main():\n parser = argparse.ArgumentParser(prog=\"CoPriNet\")\n parser.add_argument(\"input_csv_file\", type=str, help=\"The input csv filename containing a smiles column\")\n parser.add_argument(\"-o\", \"--output_file\", type=str, required=False, default=None, help=\"The output filename that will be \"\n \"identical to the input_csv_file but with one additional column for the f1\")\n parser.add_argument(\"--smiles_colname\", type=str, required=False, default=\"SMILES\", help=\"The colname for SMILES \"\n \"in the input file. Default: %(default)s\")\n parser.add_argument(\"--model_path\", type=str, required=False, default=DEFAULT_MODEL,\n help=\"The CoPriNet model checkpoing path. Default: %(default)s\")\n\n parser.add_argument(\"--n_cpus\", type=int, required=False, default=NUM_WORKERS_PER_GPU,\n help=\"The number of cpu workers. Default: %(default)s\")\n\n parser.add_argument(\"--batch_size\", type=int, required=False, default=BATCH_SIZE,\n help=\"Batch size. Default: %(default)s\")\n\n parser.add_argument(\"--convert_to_g\", action=\"store_true\", help=\"Convert the f1 from $/mmol to $/g\")\n\n coprinet_colname = \"CoPriNet\"\n\n args = parser.parse_args()\n\n df = pd.read_csv(args.input_csv_file)\n nans = df[args.smiles_colname].isna()\n df = df[~nans]\n smiles_list = df[args.smiles_colname]\n predictor = GraphPricePredictor( **vars(args))\n preds = predictor.yieldPredictions(smiles_list)\n if args.convert_to_g:\n from rdkit import Chem\n def convert_pred(smi, pred):\n mol = Chem.MolFromSmiles(smi)\n if mol is None:\n return np.nan\n mw = Chem.Descriptors.ExactMolWt(mol)\n price = np.log( np.exp(pred)*1000/mw)\n return price\n\n preds = Parallel(n_jobs=args.n_cpus)(delayed(convert_pred)(smi, pred) for smi, pred in zip(smiles_list, preds))\n if args.output_file is None:\n for smi, pred in zip(smiles_list, preds):\n print(\"%s\\t%.4f\" % (smi, pred))\n else:\n df[coprinet_colname] = list(preds)\n df.to_csv(args.output_file, index=False)\n\nif __name__ == '__main__':\n main()","repo_name":"rsanchezgarc/CoPriNet","sub_path":"pricePrediction/predict/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"21175718390","text":"from database.models import *\nfrom database.imports import printD\n\n###----------------------------\n### Populate Constraint tables\n###----------------------------\n\n#global variables that would be magic otherwise\n#most of these need chcp 65001 on windows and that requires py33\n_OMEGA='\\u03A9' #use instead of 2126 which is for backward compatability \n_degree='\\u00B0'\n_mu='\\u03BC'\n_male_symbol='\\u2642'\n_female_symbol='\\u2640'\n_unknown_symbol='\\u26AA'\n\n#all names, prefixes, symbols, and Es from wikipedia\n#http://en.wikipedia.org/wiki/SI_base_unit\n#http://en.wikipedia.org/wiki/SI_derived_units\n#http://en.wikipedia.org/wiki/Units_accepted_for_use_with_SI\n\n#TODO make auto unit conversions for DA?\n#TODO need some way to implement sets of units? bugger\n\n\ndef popSIUnit(session): #FIXME TODO switch over to quanitities for this?\n _SI_UNITS=(\n #name, symbol\n ('meter','m'),\n\n ('gram','g'), #and now I see why they have kg as the base...\n\n ('liter','L'),\n\n ('mole','mol'),\n\n ('molarity','M'),\n #('molar','M'),\n ('molality','_m'), #FIXME\n #('molal','_m'), #FIXME\n\n ('kelvin','K'),\n\n ('degree Celcius',_degree+'C'), #degrees = U+00B0\n ('degree Celcius','~oC'), #Tom also accepts using the digraph for the degree symbol...\n\n ('candela','ca'),\n\n ('lumen','lm'),\n\n ('lux','lx'),\n\n ('second','s'),\n\n ('hertz','Hz'),\n\n ('minute','min'),\n\n ('hour','h'),\n\n ('day','d'),\n\n ('radian','rad'),\n\n ('steradian','sr'),\n\n ('newton','N'),\n\n ('pascal','Pa'),\n\n ('joule','J'),\n\n ('watt','W'),\n\n ('ampere','A'),\n #('amp','A'),\n\n ('coulomb','C'),\n\n ('volt','V'),\n\n ('farad','F'),\n\n ('ohm',_OMEGA),\n\n ('ohm','R'), #R also accepted per the note on wikipedia and brit standard\n\n ('siemens','S'),\n\n ('weber','Wb'),\n\n ('tesla','T'),\n\n ('henry','H'),\n\n\n ('becquerel','Bq'),\n\n ('gray','Gy'),\n\n ('sievert','Sv'),\n\n ('katal','kat'),\n \n ('decibel','dB'),\n )\n session.add_all([SI_UNIT(name=name,symbol=symbol) for name,symbol in _SI_UNITS])\n\ndef popNonSIUnit(session):\n _NON_SI_UNITS=(\n #name, symbol\n ('osmole','Osm'), #total moles of solute contributing to osmotic pressure\n\n ('degree',_degree),\n ('degree','~o'), #also accepted\n ('number','num'), #explicitly 'of something'\n ('boolean','bool'),\n )\n session.add_all([SI_UNIT(name=name,symbol=symbol) for name,symbol in _NON_SI_UNITS])\n\ndef popSIPrefix(session):\n _SI_PREFIXES=(\n #prefix, symbol, E\n ('yotta','Y',24),\n ('zetta','Z',21),\n ('exa','E',18),\n ('peta','P',15),\n ('tera','T',12),\n ('giga','G',9),\n ('mega','M',6),\n ('kilo','k',3),\n ('hecto','h',2),\n ('deca','da',1),\n ('','',0),\n ('deci','d',-1),\n ('centi','c',-2),\n ('milli','m',-3),\n ('micro',_mu,-6),\n ('micro','u',-6,), #also unoffically used\n ('nano','n',-9),\n ('pico','p',-12),\n ('femto','f',-15),\n ('atto','a',-18),\n ('zepto','z',-21),\n ('yocto','y',-24)\n )\n session.add_all([SI_PREFIX(prefix=prefix,symbol=symbol,E=E) for prefix,symbol,E in _SI_PREFIXES])\n\ndef popSex(session):\n _SEXES=(\n ('male',_male_symbol,'m',),\n ('female',_female_symbol,'f'),\n ('unknown',_unknown_symbol,'u')\n )\n session.add_all([SEX(name=name,abbrev=abbrev,symbol=symbol) for name,symbol,abbrev in _SEXES])\n\ndef popHardwareType(session):\n _HWTYPES=(\n ('surgical tool','forceps, scalpels, spatuals, scissors, you name it'),\n ('rig','ALL THE THINGS'),\n ('amplifier','MAKE SIGNAL BIG'),\n ('bnc','Connector between amps and digitizers etc. Could be used to make really specific HW trees but since atm there is no use for those it is sort of pointless.'),\n ('headstage','the thing that actually holds the pipette holder and electrode'),\n ('computer','beep boop!'),\n ('manipulator','the thing a headstage sits on so it can be moved around with high percision and accuracy'),\n ('motion controller/driver','A box for controlling actuators and/or motors, usually for moving an objective around.'),\n ('led','Electrically controllable photon source, probably has a specific wavelenght or distribution of wavelengths it produces.'),\n ('filter','Super expensive piece of glass for bandpassing or high/low passing photons.'),\n ('microscope','Light! Focus! Objectives! Filters! Oh my!'),\n ('objective','That super expensive thing for focusing light.'),\n ('camera','Pictures thing!'),\n ('digitizer','DAC, probably hooked to your computer, metadata should have how many bits it is'),\n ('signal generator','things like a master8 that can generate arbitrary waveforms without a computer'),\n ('pipette','the unpulled glass cappilary tube'), #FIXME is this a reagent?@??@?\n ('pipette puller','Make that cappilary pointy!'),\n ('chamber','Box for keeping dead brain slices alive.'),\n ('actuator','something (usually motoroized) for moving something else very accurately, seems related to a manipulator'),\n ('keyboard','quite useful for typing in data manually >_<'),\n )\n session.add_all([HardwareType(id=t,description=d) for t,d in _HWTYPES])\n\ndef popHardware(session):\n root=Hardware(type_id='rig',name='Tom\\'s Rig')\n session.add(root)\n session.commit()\n\n session.add(Hardware(type_id='microscope',name='BX51WI'))\n chamber=Hardware(type_id='chamber',name='interface chamber',Properties={'model':'jim\\'s'})\n session.add(chamber)\n\n patchPipette=Hardware(type_id='pipette',name='patch pipette',Properties={'model':'BF150-110-10','manufacturer':'Sutter Instrument'})\n iuepPipette=Hardware(type_id='pipette',name='iuep pipette',Properties={'model':'3-000-203-G/X','manufacturer':'Drummond Scientific'}) #FIXME is this not a 'type'\n session.add_all([patchPipette,iuepPipette])\n\n rigcam=Hardware(parent_id=root,type_id='camera',name='rigcam') #TODO\n\n esp300=Hardware(parent_id=root,type_id='motion controller/driver',name='ESP300')\n session.add(esp300)\n digidata=Hardware(parent_id=root,type_id='digitizer',name='Digidata 1322A',Properties={'unique_id':'105309'})\n session.add(digidata)\n session.add(Hardware(parent_id=root,type_id='digitizer',name='nidaq',Properties={'model':'NI PCIe-6259','unique_id':'0x138FADB'}))\n session.commit()\n \n #wierd, since these can also be controlled directly, but I guess that ok?\n session.add(Hardware(parent_id=esp300,type_id='actuator',name='espX',Properties={'unique_id':'B12 9463'})) #FIXME naming\n session.add(Hardware(parent_id=esp300,type_id='actuator',name='espY',Properties={'unique_id':'B08 2284'}))\n session.add(Hardware(parent_id=digidata,type_id='amplifier',name='mc1',Properties={'model':'Multiclamp 700B','unique_id':'00106956'}))\n session.add(Hardware(parent_id=digidata,type_id='amplifier',name='mc2',Properties={'model':'Multiclamp 700B','unique_id':'00106382'}))\n session.commit()\n\n amp1=session.query(Hardware).filter_by(name='mc1')[0]\n session.add(Hardware(parent_id=amp1,type_id='headstage',name='hs 0 (left)',Properties={'unique_id':'115054'})) #FIXME needs to go via bnc, there has GOT to be a better way?\n session.add(Hardware(parent_id=amp1,type_id='headstage',name='hs 1 (right)',Properties={'unique_id':'95017'})) #so the bnc doesn't add anything because it doesn't propagate or constrain pysical reality\n session.commit()\n #basically, make sure reality matches what the computer thinks it is, could make a self test for that asking user to hit 0 and then hit 1?\n #good old corrispondence problems\n\n nidaq=session.query(Hardware).filter_by(name='nidaq')[0]\n session.add(Hardware(parent_id=nidaq,type_id='led',name='470',Properties={'model':'M470L2','unique_id':'M00277763'}))\n session.commit()\n \n session.add(Hardware(name='keyboard',type_id='keyboard'))\n\ndef popReagentType(session):\n acsf=ReagentType(name='acsf')#,iupac=None)\n\ndef popDataIO(session):\n session.add(DataIO(name='urio',docstring='mareti'))\n\ndef popStep(session): #FIXME we really should never have to do this directly!\n session.add(Step(name='no steps',docstring='fixme',dataio_id=1))\n\ndef popPeople(session):\n session.add(Person(FirstName='Tom',LastName='Gillespie'))\n session.flush()\n\ndef popProject(session):\n proj=Project(lab='Scanziani',blurb='Horizontal projections on to SOM cells')\n session.add(proj)\n tom=session.query(Person).filter(Person.FirstName=='Tom',Person.LastName=='Gillespie').one()\n proj.people.append(tom) #FIXME this should autoprop from experiments?\n\n\n\ndef popExperimentType(session): #FIXME\n session.add(ExperimentType('acute slice prep','slice',1))\n session.add(ExperimentType('in vitro patch','patch',1))\n\ndef popDataFileSources(session):\n session.add(DataFileSource(name='clampex9_scope',extension='abf',docstring='a clampex!'))\n session.add(DataFileSource(name='clampex 9.2',extension='abf',docstring='a clampex!'))\n session.commit() #LOL OOPS\n\ndef popMetaDataSources(session):\n espX=None\n espY=None\n stage_z=None\n tomsEyeballs=None\n number_from_protocol=None\n super_accurate_scale=None\n mouse_scale=None\n multiclampcommmader_shit_tons_of_fields_shit=None\n clampex_same_problem_as_above_fuck=None\n pass\n\ndef popRepos(session):\n jax='http://jaxmice.jax.org/strain'\n hrr='file://HILL_RIG/D:/tom_data/rigcam'\n hrc='file://HILL_RIG/D:/tom_data/clampex'\n anc='file://andromeda/C:/tom_data/clampex'\n atc='file://athena/home/tom/mlab_data/clampex'\n session.add(Repository(jax,name='jax strain db'))\n session.add(Repository(hrr,name='rig rigcam'))\n\n r1=Repository(hrc,name='rig clampex')\n r2=Repository(anc,name='andromeda clampex')\n r3=Repository(atc,name='athena clampex')\n session.add(r1)\n session.add(r2)\n session.add(r3)\n r1.mirrors_from_here.extend((r2,r3))\n\n session.commit()\n\ndef popFiles(session):\n rep=session.query(Repository).filter_by(name='jax strain db')[0]\n session.add(File('003718.html',rep))\n pass\n\ndef popCiteType(session):\n session.add(CiteableType('publication'))\n session.add(CiteableType('website'))\n session.add(CiteableType('methods'))\n session.add(CiteableType('blueprint'))\n session.commit()\n \ndef popCiteables(session):\n f=session.query(File).filter_by(filename='003718.html')[0]\n session.add(Citeable(type='website',Files=[f])) #FIXME\n session.commit()\n\ndef popSubjectType(session):\n session.add(SubjectType('litter'))\n session.add(SubjectType('mouse',has_sex=True))\n session.add(SubjectType('cell'))\n session.add(SubjectType('slice'))\n session.commit()\ndef popStrains(session):\n #session.add(Website('http://jaxmice.jax.org/strain/003718.html'))\n session.add(Strain(jax_id='003718',abbrev='dkgin'))\n session.add(Strain(jax_id='009103',abbrev='wfs1')) #wfs1-creERT2 Tg2\n session.commit()\n\ndef popDataSourceAssociations(session):\n #TODO make this as simple as possible\n #so that hopefully the hardware tree is only needed for debugging/consistency checks\n\n #fuck, datasource is going to change depending on the mode the amp is in... how to propagate forward\n pass\n\ndef populateConstraints(session): #FIXME this has become testing because of how things have been reworked\n \"\"\"Populate the tables used to constrain datatypes\"\"\"\n popPeople(session)\n popProject(session)\n popSIUnit(session)\n popNonSIUnit(session)\n popSIPrefix(session)\n popSex(session)\n popHardwareType(session)\n popDataIO(session)\n session.flush()\n popStep(session)\n session.flush()\n popExperimentType(session)\n popSubjectType(session)\n return session.commit()\n\ndef populateTables(session):\n \"\"\"A run once to load current data (not existing elsewhere into the database (ie may use google docs as a web interface for entering/viewing certain types of data eg mice)\"\"\"\n popHardware(session)\n popRepos(session)\n popFiles(session)\n popCiteType(session)\n popCiteables(session)\n popStrains(session)\n popDataFileSources(session)\n\nif __name__=='__main__':\n import re\n printT=lambda tup:print(re.sub('\\), ','),\\r\\n',str(tup)))\n printT(_SI_UNITS)\n print('')\n printT(_NON_SI_UNITS)\n print('')\n printT(_SI_PREFIXES)\n print('')\n printT(_SEXES)\n\n","repo_name":"tgbugs/mlab","sub_path":"database/setupDB.py","file_name":"setupDB.py","file_ext":"py","file_size_in_byte":12892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25566360606","text":"from rest_framework import viewsets, status, permissions\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework.decorators import action\n\nfrom django.contrib.auth.models import User\n\nfrom LittleLemonDRF.models import Menu, Booking\n\nfrom LittleLemonDRF.serializers import (\n MenuSerializer,\n BookingSerializer,\n UserSerializer,\n)\n\n\nclass MenuViewSet(viewsets.ModelViewSet):\n queryset = Menu.objects.all()\n serializer_class = MenuSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass BookingViewSet(viewsets.ModelViewSet):\n queryset = Booking.objects.all()\n serializer_class = BookingSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n def get_queryset(self):\n # Filter bookings for the log in user only\n return self.queryset.filter(user=self.request.user)\n\n def perform_create(self, serializer):\n # Automatically set the current user as the user for the booking\n serializer.save(user=self.request.user)\n\n\nclass SignUpView(viewsets.GenericViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n permission_classes = [permissions.AllowAny]\n\n @action(detail=False, methods=[\"post\"])\n def register(self, request):\n # Custom logic for registration can be added here\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n user = serializer.save()\n refresh = RefreshToken.for_user(user)\n res_data = {\n \"refresh\": str(refresh),\n \"access\": str(refresh.access_token),\n }\n return Response(res_data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @action(\n detail=False, methods=[\"post\"], permission_classes=[permissions.IsAuthenticated]\n )\n def logout(self, request):\n # Blacklist or deactivate the token, so it cannot be used anymore\n try:\n refresh_token = request.data[\"refresh\"]\n token = RefreshToken(refresh_token)\n token.blacklist()\n return Response(status=status.HTTP_205_RESET_CONTENT)\n except Exception as e:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"IvaninITworld/shoppingmall","sub_path":"LittleLemonDRF/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12426130246","text":"from django.test import TestCase, override_settings\nfrom rest_framework.test import APIClient\nfrom unittest.mock import patch\n\nimport os\nimport os\nfrom dotenv import load_dotenv\n\n# Load environment variables from .env file\nload_dotenv()\n\nclass ChatHandlerViewTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n\n def test_chat_handler_view(self):\n # Construct a mock request object with data containing a prompt.\n data = {'prompt': 'Hello, how are you?'}\n response = self.client.post('/v1/chat/', data)\n\n\n # Assert that the response status code is 200.\n self.assertEqual(response.status_code, 200)\n\n # Assert that the response body contains a 'response' key with a non-empty value.\n self.assertIn('response', response.data)\n self.assertNotEqual(response.data['response'], '')\n\n def test_invalid_chat_handler_view(self):\n # Construct a mock request object with invalid data.\n data = {'invalid_field': 'Hello, how are you?'}\n response = self.client.post('/v1/chat/', data)\n\n # Assert that the response status code is 400.\n self.assertEqual(response.status_code, 400)\n\n def test_chat_prompt_view(self):\n response = self.client.get('/v1/prompts/')\n\n # Assert that the response status code is 200.\n self.assertEqual(response.status_code, 200)\n\n # Assert that the response body is a list of prompts.\n self.assertIsInstance(response.data, list)\n\n\n@override_settings(LOGGING_CONFIG=None)\nclass ChatHandlerViewErrorTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n\n @patch('services.chatbot.helpers.ChatHelper.get_response', side_effect=Exception('test error'))\n def test_chat_handler_view_error(self, mock_get_response):\n # Construct a mock request object with data containing a prompt.\n data = {'prompt': 'Hello, how are you?'}\n response = self.client.post('/v1/chat/', data)\n\n # Assert that the response status code is 500.\n self.assertEqual(response.status_code, 500)\n\n # Assert that the response body contains an 'error' key with a non-empty value.\n self.assertIn('error', response.data)\n self.assertNotEqual(response.data['error'], '')\n","repo_name":"notty-geek/BookGpt","sub_path":"services/chatbot/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20124409819","text":"from django.urls import path\nfrom . import views\n#blog 애플리케이션에서 사용할 모든 views를 가져왔어요.\n\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.post_list, name='post_list'),\n path('post//', views.post_detail, name='post_detail'),\n path('post//edit/', views.post_edit, name='post_edit'),\n path('post/new/', views.post_new, name='post_new'),\n path('drafts/', views.post_draft_list, name='post_draft_list'),\n path('post//publish/', views.post_publish, name='post_publish'),\n path('post//remove/', views.post_remove, name='post_remove'),\n path('post//comment/', views.add_comment_to_post, name='add_comment_to_post'),\n path('comment//approve/', views.comment_approve, name='comment_approve'),\n path('comment//remove/', views.comment_remove, name='comment_remove'),\n]\n# 이제 post_list라는 view가 루트 URL에 할당되었습니다. ''\n# 루트 디렉토리에 View의 post_list함수가 (이 안에 가르키는 html실행) 할당됨.\n# 이 패턴은 장고에게 누군가 웹사이트에 'http://127.0.0.1:8000/' 주소로 들어왔을 때\n# views.post_list를 보여주라고 말해줍니다.\n# 마지막 부분인 name='post_list'는 URL에 이름을 붙인 것으로 뷰를 식별합니다.\n# 뷰의 이름과 같을 수도 완전히 다를 수도 있습니다.\n\n# 브라우저에 http://127.0.0.1:8000/post/5/라고 입력하면,\n# 장고는 post_detail 뷰를 찾아 매개변수 pk가 5인 값을 찾아 뷰로 전달합니다.","repo_name":"mongdolappa/my-first-blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35972493701","text":"import cv2\n\n\nMODEL_PATH = 'best.pt'\nPYTESSARACT_PATH = 'your_pytessaract_exe_path'\nTESSDATA_CONFIG = 'your_path_for_tessdata_dir_in_your_project_folder'\nFONT = cv2.FONT_HERSHEY_SIMPLEX\nBLUE_COLOR = (255, 0, 0)\nBLACK_COLOR = (0, 0, 0)\nWHITE_COLOR = (255, 255, 255)\nCLASSES = ['stop', 'speedlimit', 'crosswalk', 'trafficlight']\nTHICKNESS = 2\nCUSTOM_CONFIG = '--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789'\n","repo_name":"irfanbykara/Yolov8-Tesseract-Pipeline-for-Speed-Limit-Recognition","sub_path":"consts_sample.py","file_name":"consts_sample.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72455935121","text":"# https://blog.naver.com/handuelly/221681992524\nimport sys\n\n# DFS\ndef dfs(graph, root) :\n visited = []\n stack = [root] \n while stack :\n vertex = stack.pop() \n if vertex not in visited :\n visited.append(vertex)\n if vertex not in graph : return [vertex]\n else :\n for node in sorted(graph[vertex], reverse=True) :\n if node not in visited :\n stack.append(node) \n return visited\n\n# BFS\ndef bfs(graph, root) :\n visited = []\n queue = [root]\n visited.append(root)\n while queue :\n vertex = queue.pop(0)\n if vertex not in graph : return [vertex]\n else :\n for node in sorted(graph[vertex]) :\n if node not in visited :\n queue.append(node) \n visited.append(node) \n return visited\n'''\n# input \nvertices, edges, root = map(int, input().split())\ngraph = {}\n\n# create grage\n\nfor i in range(edges) :\n vertexA, vertexB = map(int, input().split())\n if vertexA not in graph : graph[vertexA] = [vertexB]\n else : graph[vertexA] += [vertexB]\n \n if vertexB not in graph : graph[vertexB] = [vertexA]\n else : graph[vertexB] += [vertexA]\n'''\n\n# output\n'''\nprint()\nprint(' '.join(map(str,dfs(graph, root))))\nprint(' '.join(map(str,bfs(graph, root))))\n'''\n'''\nresult_dfs = dfs(graph, root)\nfor i in range(len(result_dfs)) :\n print(\"{} \".format(result_dfs[i]), end='')\n\nprint()\nresult_bfs = bfs(graph, root)\nfor i in range(len(result_bfs)) :\n print(\"{} \".format(result_bfs[i]), end='')\n'''\n\naaa = ['a', 'b', 'c']\nbbb = aaa.copy()\nprint(aaa, bbb)\naaa[0] = 'd'\nprint(aaa, bbb)\n","repo_name":"HandeulLy/CodingTest","sub_path":"BOJ/1260.py","file_name":"1260.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44198696551","text":"from flask import Flask, jsonify\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nimport requests\nimport re\n\n\napp = Flask(__name__, static_url_path=\"\")\n\n\n@app.route('/google/', methods=['GET'])\ndef get_query_google(query):\n search_term = str(query)\n url = 'https://www.google.com/complete/search?client=hp&hl=en&sugexp=msedr&gs_rn=62&gs_ri=hp&cp=1&gs_id=9c&q='\\\n + search_term + ' vs&xhr=t'\n r = requests.get(url)\n data = r.json()\n values = list(map(lambda x: re.search(r'(?<=)(.*?)(?=)', x[0]).group(0), data[1]))\n return jsonify({search_term: values})\n\n\n@app.route('/wikidata/', methods=['GET'])\ndef get_query_wikidata(query):\n search_term = query\n # get the object id from wikidata api\n api_endpoint = \"https://www.wikidata.org/w/api.php\"\n params = {\n 'action': 'wbsearchentities',\n 'format': 'json',\n 'language': 'en',\n 'search': search_term\n }\n r = requests.get(api_endpoint, params=params)\n object_id = r.json()['search'][0]['id']\n\n # query wikidata for subclass of\n query_string = 'SELECT ?item ?itemLabel WHERE { \\\n ?item wdt:P279 wd:' + object_id + '. \\\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\". }}'\n\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\")\n sparql.setQuery(query_string)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n bindings = results['results']['bindings']\n all_values = [n['itemLabel']['value'] for n in bindings]\n values = [n for n in all_values if not n.startswith('Q')]\n return jsonify({search_term: values})\n\n\n@app.route('/')\ndef index():\n return \"it's working!\"\n\n\nif __name__ == \"__main__\":\n app.run()\n\n","repo_name":"ronnyen/related-terms","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3796951782","text":"import re\nfrom discord.ext import commands\nfrom discord.ext.commands import command\nfrom datetime import datetime\nfrom logger import logger\nfrom checks import is_not_applicant, is_applicant, has_role\nfrom config import APPLICATIONS, PREFIX, ADMIN_ROLE, NOT_APPLIED_ROLE, SUPPORT\nfrom exiles_api import session, Users, TextBlocks, Applications as AppsTable\nfrom exceptions import NotNumberError, NumberNotInRangeError\nfrom functions import (\n parse, get_guild, get_channels, get_member, get_roles, whitelist_player, split_message\n)\n\n\nclass Applications(commands.Cog, name=\"Application commands\"):\n def __init__(self, bot):\n self.bot = bot\n self.guild = get_guild(bot)\n\n @staticmethod\n async def get_question_msg(guild, questions, author, id=1, msg=\"\"):\n txt = questions[id - 1].question\n num = len(questions)\n return f\"{msg}\\n__**Question {id} of {num}:**__\\n> {parse(guild, author, txt)}\"\n\n @staticmethod\n async def get_overview_msgs(questions, author, guild, msg=\"\"):\n give_overview = False\n for q in questions:\n if q.answer != \"\":\n give_overview = True\n break\n if not give_overview:\n return [\"No questions answered yet!\" + msg]\n chunk = \"\"\n overview = []\n for id in range(len(questions)):\n answer = questions[id].answer + \"\\n\"\n question = f\"__**Question {id + 1}:**__\\n> {parse(guild, author, questions[id].question)}\\n\"\n if answer != \"\":\n if len(chunk) + len(question) >= 1800:\n overview.append(chunk)\n chunk = \"\"\n chunk += question\n if len(chunk) + len(answer) >= 1800:\n overview.append(chunk)\n chunk = \"\"\n chunk += answer\n if msg and len(chunk) + len(msg) >= 1800:\n overview.append(chunk)\n overview.append(msg)\n elif msg:\n overview.append(chunk + msg)\n else:\n overview.append(chunk)\n return overview\n\n @staticmethod\n async def get_funcom_id_in_text(text, upper_case=True):\n # get all strings consisting only of the letters a-f and digits that's at\n # least 14 and at most 16 characters long\n result = re.search(r\"([a-fA-F0-9]{14,16})\", text)\n if not result:\n return None\n funcom_id = result.group(1)\n start = text.find(funcom_id)\n end = start + len(funcom_id) - 1\n # if given funcom_id isn't either at the beginning and/or end of the text or delimited by a blank\n if (start > 0 and text[start - 1] != \" \") or (end < len(text) - 1 and text[end + 1] != \" \"):\n return None\n if funcom_id and upper_case:\n return funcom_id.upper()\n elif funcom_id and not upper_case:\n return funcom_id\n else:\n return None\n\n @staticmethod\n async def get_last_applicant(ctx, bot, applicant):\n channels = get_channels(bot=bot)\n async for message in channels[APPLICATIONS].history(limit=100):\n if message.author == bot.user:\n pos_end = message.content.find(\" has filled out the application.\")\n if pos_end < 0:\n pos_end = message.content.find(\"'s application overview.\")\n if pos_end < 0:\n continue\n pos_start = message.content.rfind(\"\\n\", 0, pos_end) + 1\n applicant = message.content[pos_start:pos_end]\n if applicant:\n return await get_member(ctx, applicant)\n return None\n\n @staticmethod\n async def add_new_user(member, funcom_id):\n user = session.query(Users).filter_by(disc_id=member.id).first()\n if user:\n user.disc_user = str(member)\n user.funcom_id = funcom_id\n else:\n new_user = Users(disc_user=str(member), disc_id=member.id, funcom_id=funcom_id)\n session.add(new_user)\n session.commit()\n\n @command(name=\"apply\", help=\"Starts the application process\")\n @is_not_applicant()\n async def apply(self, ctx):\n guild = get_guild(self.bot)\n channels = get_channels(guild)\n if ctx.author.dm_channel is None:\n await ctx.author.create_dm()\n new_app = AppsTable(ctx.author.id)\n session.add(new_app)\n session.commit()\n msg = parse(guild, ctx.author, TextBlocks.get(\"APPLIED\"))\n question = await self.get_question_msg(guild, new_app.questions, ctx.author, 1, msg)\n await ctx.author.dm_channel.send(question)\n await channels[APPLICATIONS].send(f\"{ctx.author} has started an application.\")\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {ctx.author} has started an application.\")\n\n @command(\n name=\"question\",\n help=\"Used to switch to a given question. \" \"If no number is given, repeats the current question\",\n )\n @is_applicant()\n @commands.dm_only()\n async def question(self, ctx, Number=None):\n guild = get_guild(self.bot)\n if ctx.author.dm_channel is None:\n await ctx.author.create_dm()\n app = session.query(AppsTable).filter_by(disc_id=ctx.author.id).one()\n if not app.can_edit_questions():\n await ctx.author.dm_channel.send(parse(guild, ctx.author, TextBlocks.get(\"APP_CLOSED\")))\n return\n if Number is None:\n if app.status != \"open\":\n await ctx.author.dm_channel.send(parse(guild, ctx.author, TextBlocks.get(\"FINISHED\")))\n return\n question = await self.get_question_msg(guild, app.questions, ctx.author, app.current_question)\n await ctx.author.dm_channel.send(question)\n return\n num_questions = len(app.questions)\n if not Number.isnumeric():\n raise NotNumberError(f\"Argument must be a number between 1 and {num_questions}.\")\n if not Number.isnumeric() or int(Number) < 1 or int(Number) > num_questions:\n raise NumberNotInRangeError(f\"Number must be between 1 and {num_questions}.\")\n question = await self.get_question_msg(guild, app.questions, ctx.author, int(Number))\n await ctx.author.dm_channel.send(question)\n app.current_question = int(Number)\n session.commit()\n\n @command(name=\"overview\", help=\"Display all questions that have already been answered\")\n @is_applicant()\n async def overview(self, ctx):\n app = session.query(AppsTable).filter_by(disc_id=ctx.author.id).one()\n overview = await self.get_overview_msgs(app.questions, ctx.author, self.guild)\n for part in overview:\n await ctx.send(part)\n\n @command(name=\"submit\", help=\"Submit your application and send it to the admins\")\n @is_applicant()\n async def submit(self, ctx):\n guild = get_guild(self.bot)\n roles = get_roles(guild)\n channels = get_channels(guild)\n if ctx.author.dm_channel is None:\n await ctx.author.create_dm()\n app = session.query(AppsTable).filter_by(disc_id=ctx.author.id).one()\n if app.first_unanswered > 0:\n await ctx.author.dm_channel.send(\"Please answer all questions first.\")\n return\n if not app.can_edit_questions():\n await ctx.author.dm_channel.send(parse(guild, ctx.author, TextBlocks.get(\"APP_CLOSED\")))\n return\n app.status = \"submitted\"\n app.open_date = datetime.utcnow()\n session.commit()\n await ctx.author.dm_channel.send(parse(guild, ctx.author, TextBlocks.get(\"COMMITED\")))\n submission_date = datetime.utcnow().strftime(\"%d-%b-%Y %H:%M UTC\")\n logger.info(\n f\"Author: {ctx.author} / Command: {ctx.message.content}. {ctx.author} has submitted their application.\"\n )\n msg = (\n f\"{roles[ADMIN_ROLE].mention}\\n\"\n f\"{ctx.author.mention} has filled out the application. ({submission_date})\\n\"\n f\"You can now either:\\n\"\n f\"`{PREFIX}accept `, `{PREFIX}reject ` or \"\n f\"`{PREFIX}review ` (asking the Applicant to review their answers) it.\\n\"\n f\"If is omitted a default message will be sent.\\n\"\n f\"If is also omitted, it will try to target the last application. \"\n )\n overview = await self.get_overview_msgs(app.questions, ctx.author, self.guild, msg)\n for part in overview:\n await channels[APPLICATIONS].send(part)\n\n @command(name=\"cancel\", help=\"Cancel your application\")\n @is_applicant()\n async def cancel(self, ctx):\n anc = f\"Author: {ctx.author} / Command: {ctx.message.content}.\"\n channels = get_channels(bot=self.bot)\n app = session.query(AppsTable).filter_by(disc_id=ctx.author.id).one()\n # can't cancel an application that's already approved or rejected\n if app.status in (\"rejected\", \"approved\"):\n await ctx.send(\" Can't cancel an application that's already approved or rejected.\")\n logger.info(f\"{anc} Can't cancel an application that's already approved or rejected.\")\n return\n\n session.delete(app)\n session.commit()\n await channels[APPLICATIONS].send(f\"{ctx.author} has canceled their application.\")\n await ctx.author.dm_channel.send(\"Your application has been canceled.\")\n logger.info(f\"{anc} {ctx.author} has canceled their application.\")\n\n @command(\n name=\"accept\",\n help=\"Accept the application. If message is ommitted a default message will be sent. \"\n \"If message and Applicant are omitted target the last submitted application.\",\n )\n @has_role(ADMIN_ROLE)\n async def accept(self, ctx, Applicant=None, *Message):\n applicant = Applicant\n message = Message\n guild = get_guild(self.bot)\n roles = get_roles(guild)\n channels = get_channels(guild)\n\n # convert applicant string to member.\n if applicant:\n member = await get_member(ctx, applicant)\n if not member:\n msg = (\n f\"Couldn't get id for {applicant}. Are you sure they are still on this discord server? \"\n \"Users who leave the server while they still have an open application are \"\n f\"automatically removed. Use {PREFIX}showapp to check if the app is still there.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # If no applicant was given, try to determine them from the channel history\n else:\n member = await self.get_last_applicant(ctx, self.bot, applicant)\n if not member:\n msg = (\n \"Couldn't find a submitted application within the last 100 messages. \"\n f\"Please specify the Applicant via `{PREFIX}accept `.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # confirm that there is a closed application for that Applicant\n app = session.query(AppsTable).filter_by(disc_id=member.id).first()\n if not app:\n msg = (\n f\"Couldn't find a submitted application for {member}. \"\n \"Please verify that the name is written correctly and try again.\"\n )\n await ctx.send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n elif app.can_edit_questions():\n msg = \"Can't accept application while it's still being worked on.\"\n await ctx.send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # remove Not Applied role\n if roles[NOT_APPLIED_ROLE] in member.roles:\n await member.remove_roles(roles[NOT_APPLIED_ROLE])\n\n # remove application from list of open applications\n app.status = \"approved\"\n session.commit()\n\n if message:\n await member.send(\"Your application was accepted:\\n\" + \" \".join(message))\n else:\n message = parse(guild, ctx.author, TextBlocks.get(\"ACCEPTED\"))\n await member.send(\"Your application was accepted:\\n\" + message)\n\n await ctx.send(f\"{member}'s application has been accepted.\")\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {member}'s application has been accepted.\")\n\n # Whitelist Applicant\n text = app.questions[app.funcom_id_row - 1].answer\n funcom_id = await self.get_funcom_id_in_text(text)\n info = parse(guild, ctx.author, f\"They have been informed to request whitelisting in {channels[SUPPORT]}.\")\n if funcom_id:\n funcom_id = funcom_id.upper()\n result, _ = await whitelist_player(funcom_id)\n if result == f\"Player {funcom_id} added to whitelist.\":\n await self.add_new_user(member, funcom_id)\n await member.send(parse(guild, ctx.author, TextBlocks.get(\"WHITELISTING_SUCCEEDED\")))\n await channels[APPLICATIONS].send(result)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {result}\")\n elif result.find(\"FailedError\") >= 0:\n result = result[12:]\n await channels[APPLICATIONS].send(f\"Whitelisting {member} failed (error message: {result}). {info}\")\n await member.send(\n \"Whitelisting failed. \" + (parse(guild, member, TextBlocks.get(\"WHITELISTING_FAILED\")))\n )\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. FailedError (error: {result})\")\n else:\n await member.send(\n \"Whitelisting failed. \" + (parse(guild, member, TextBlocks.get(\"WHITELISTING_FAILED\")))\n )\n await channels[APPLICATIONS].send(f\"Whitelisting {member} failed (error message: {result}). {info}\")\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. FailedError (error: {result})\")\n\n else:\n await member.send(\n \"Whitelisting failed, you have given no valid FuncomId your answer. \"\n + (parse(guild, member, TextBlocks.get(\"WHITELISTING_FAILED\")))\n )\n await channels[APPLICATIONS].send(\n f\"Whitelisting {member} failed. No valid FuncomID found in answer:\\n\"\n f\"> {app.questions[app.funcom_id_row - 1].answer}\\n{info}\"\n )\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. NoSteamIDinAnswer\")\n\n @command(\n name=\"reject\",\n help=\"Reject the application. If message is omitted a default message will be sent. \"\n \"If message and Applicant are omitted target the last submitted application.\",\n )\n @has_role(ADMIN_ROLE)\n async def reject(self, ctx, Applicant=None, *Message):\n applicant = Applicant\n message = Message\n guild = get_guild(self.bot)\n channels = get_channels(guild)\n\n # convert applicant string to member.\n if applicant:\n member = await get_member(ctx, applicant)\n if not member:\n msg = (\n f\"Couldn't get id for {applicant}. Are you sure they are still on this discord server? \"\n \"Users who leave the server while they still have an open application are \"\n f\"automatically removed. Use {PREFIX}showapp to check if the app is still there.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # If no applicant was given, try to determine them from the channel history\n else:\n member = await self.get_last_applicant(ctx, self.bot, applicant)\n if not member:\n msg = (\n \"Couldn't find a submitted application within the last 100 messages. \"\n f\"Please specify the Applicant via `{PREFIX}reject `.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # confirm that there is a closed application for that Applicant\n app = session.query(AppsTable).filter_by(disc_id=member.id).first()\n if not app:\n msg = (\n f\"Couldn't find a submitted application for {member}. \"\n \"Please verify that the name is written correctly and try again.\"\n )\n await ctx.send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n elif app.can_edit_questions():\n msg = (\n \"Can't reject application while it's still being worked on. \"\n f\"Try {PREFIX}cancelapp instead.\"\n )\n await ctx.send(msg)\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {msg}\")\n return\n\n # remove application from list of open applications\n app.status = \"rejected\"\n session.commit()\n\n if not message:\n await member.send(parse(guild, ctx.author, \"Your application was rejected:\\n\" + TextBlocks.get(\"REJECTED\")))\n else:\n await member.send(\"Your application was rejected:\\n> \" + \" \".join(message))\n\n await ctx.send(f\"{member}'s application has been rejected.\")\n logger.info(f\"Author: {ctx.author} / Command: {ctx.message.content}. {member}'s application has been rejected.\")\n\n @command(\n name=\"review\",\n help=\"Ask the applicant to review their application. \"\n \"If message is omitted a default message will be sent. \"\n \"If message and Applicant are omitted target the last submitted application.\",\n )\n @has_role(ADMIN_ROLE)\n async def review(self, ctx, Applicant=None, *Message):\n anc = f\"Author: {ctx.author} / Command: {ctx.message.content}.\"\n applicant = Applicant\n message = Message\n channels = get_channels(bot=self.bot)\n\n # convert applicant string to member.\n if applicant:\n member = await get_member(ctx, applicant)\n if not member:\n msg = (\n f\"Couldn't get id for {applicant}. Are you sure they are still on this discord server? \"\n \"Users who leave the server while they still have an open application are \"\n f\"automatically removed. Use {PREFIX}showapp to check if the app is still there.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"{anc} {msg}\")\n return\n\n # If no applicant was given, try to determine them from the channel history\n else:\n member = await self.get_last_applicant(ctx, self.bot, applicant)\n if not member:\n msg = (\n \"Couldn't find a submitted application within the last 100 messages. \"\n f\"Please specify the Applicant via `{PREFIX}review `.\"\n )\n await channels[APPLICATIONS].send(msg)\n logger.info(f\"{anc} {msg}\")\n return\n\n # confirm that there is a closed application for that Applicant\n app = session.query(AppsTable).filter_by(disc_id=member.id).first()\n if not app:\n msg = (\n f\"Couldn't find a submitted application for {member}. \"\n f\"Please verify that the name is written correctly and try again.\"\n )\n await ctx.send(msg)\n logger.info(f\"{anc} {msg}\")\n return\n elif app.can_edit_questions():\n msg = \"Can't return application for review while it's still being worked on.\"\n await ctx.send(msg)\n logger.info(f\"{anc} {msg}\")\n return\n\n # remove application from list of open applications\n app.status = \"review\"\n session.commit()\n\n explanation = (\n f\"\\nYou can change the answer to any question by going to that question with \"\n f\"`{PREFIX}question ` and then writing your new answer.\\n\"\n f\"You can always review your current answers by entering `{PREFIX}overview`.\"\n )\n if not message:\n msg = \"Your application was returned to you for review:\\n\" + TextBlocks.get(\"REVIEWED\") + explanation\n else:\n msg = \"Your application was returned to you for review:\\n> \" + \" \".join(message) + explanation\n\n await ctx.send(f\"{member}'s application has been returned.\")\n overview = await self.get_overview_msgs(app.questions, member, self.guild, msg)\n for part in overview:\n if member.dm_channel is None:\n await member.create_dm()\n\n await member.dm_channel.send(part)\n logger.info(f\"{anc} {member}'s application has been returned for review.\")\n\n @command(\n name=\"showapp\",\n aliases=[\"showapps\"],\n help=\"Displays the given Applicants application if it has been submitted. \"\n \"If applicant is omitted, shows all applications.\",\n )\n @has_role(ADMIN_ROLE)\n async def showapp(self, ctx, *, Applicant=None):\n anc = f\"Author: {ctx.author} / Command: {ctx.message.content}.\"\n applicant = Applicant\n if applicant:\n member = await get_member(ctx, applicant)\n if not member:\n await ctx.send(\n f\"Couldn't get id for {applicant}. \"\n f\"Are you sure they are still on this discord server? \"\n f\"Users who leave the server while they still have an open application are automatically removed. \"\n f\"Use {PREFIX}showapp without a name to get a list of all active applications.\"\n )\n\n app = session.query(AppsTable).filter_by(disc_id=member.id).first()\n if not app:\n await ctx.send(f\"No application for {member} found.\")\n logger.info(f\"{anc} No application for {member} found.\")\n elif app.can_edit_questions():\n await ctx.send(\"Can't access application while it's still being worked on.\")\n logger.info(f\"{anc} Can't access application while it's still being worked on.\")\n else:\n submission_date = app.open_date.strftime(\"%d-%b-%Y %H:%M UTC\")\n msg = f\"{member}'s application overview. ({submission_date})\"\n overview = await self.get_overview_msgs(app.questions, member, self.guild, msg)\n for part in overview:\n await ctx.send(part)\n logger.info(f\"{anc} Sending {member}'s application overview.\")\n\n return\n\n else:\n display = [\"open\", \"submitted\", \"review\", \"finished\"]\n apps = session.query(AppsTable).filter(AppsTable.status.in_(display)).all()\n msg = \"\" if len(apps) > 0 else \"No open applications right now.\"\n for app in apps:\n member = await get_member(ctx, app.disc_id)\n open_date = app.open_date.strftime(\"%d-%b-%Y %H:%M UTC\")\n if app.can_edit_questions():\n msg += (\n f\"Applicant **{member}** is **still working** on their application. \"\n f\"(Application started on {open_date})\\n\"\n )\n else:\n msg += (\n f\"Applicant **{member}** is **waiting for admin approval**. \"\n f\"(Application submitted on {open_date})\\n\"\n )\n\n if len(apps) > 0:\n msg += f\"You can view a specific application by entering `{PREFIX}showapp `.\"\n\n for part in await split_message(msg):\n await ctx.channel.send(part)\n logger.info(f\"{anc} {msg}\")\n return\n\n @command(name=\"cancelapp\", help=\"Cancels the given application.\")\n @has_role(ADMIN_ROLE)\n async def cancelapp(self, ctx, Applicant, *Message):\n anc = f\"Author: {ctx.author} / Command: {ctx.message.content}.\"\n applicant = Applicant\n message = Message\n member = await get_member(ctx, applicant)\n channels = get_channels(bot=self.bot)\n if not member:\n await channels[APPLICATIONS].send(\n f\"Couldn't get id for {applicant}. Are you sure they are still on this discord server? \"\n f\"Users who leave the server while they still have an open application are automatically removed. \"\n f\"Use {PREFIX}showapp to check if the app is still there.\"\n )\n logger.info(f\"{anc} Couldn't get id for {applicant}.\")\n return\n\n # confirm that there is a closed application for that Applicant\n app = session.query(AppsTable).filter_by(disc_id=member.id).first()\n if not app:\n await ctx.send(\n f\"Couldn't find an application for {member}. \"\n f\"Please verify that the name is written correctly and try again.\"\n )\n logger.info(f\"{anc} Couldn't find an application for {member}.\")\n return\n\n if app.status in (\"approved\", \"rejected\"):\n await ctx.send(\"Can't cancel an application that was already accepted or rejected.\")\n logger.info(f\"{anc} Can't cancel an application that was already accepted or rejected.\")\n return\n\n session.delete(app)\n session.commit()\n await ctx.send(f\"Application for {member} has been cancelled.\")\n if message:\n await member.send(f\"Your application was cancelled by an administrator.\\n> {' '.join(message)}\")\n else:\n await member.send(\"Your application was cancelled by an administrator.\")\n\n logger.info(f\"{anc}. {member}'s application has been cancelled.\")\n\n\ndef setup(bot):\n bot.add_cog(Applications(bot))\n","repo_name":"Midnighit/TERPBot","sub_path":"cogs/applications.py","file_name":"applications.py","file_ext":"py","file_size_in_byte":26678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20126795771","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport time\nimport bisect\n\n# Complete the activityNotifications function below.\ndef activityNotifications(expenditure, d):\n def get_median(arr):\n if d % 2 == 1:\n median = arr[int(d / 2)]\n else:\n median = (arr[int(d / 2) - 1] + arr[int(d / 2)]) / 2.0\n return median\n\n count = 0\n l = expenditure[0:d].copy()\n l.sort()\n for i in range(0, len(expenditure) - d, 1):\n med = get_median(l)\n newTerm = expenditure[i+d]\n if newTerm >= 2*med:\n count += 1\n l.remove(expenditure[i])\n l.insert(bisect.bisect(l, newTerm), newTerm)\n return count\n\n\nif __name__ == '__main__':\n\n nd = input().split()\n\n n = int(nd[0])\n\n d = int(nd[1])\n\n expenditure = list(map(int, input().rstrip().split()))\n startTime = time.time()\n print(activityNotifications(expenditure, d))\n print(time.time() - startTime)\n\n","repo_name":"mikeyling18/HackerLeet","sub_path":"FraudulentActivityNotification.py","file_name":"FraudulentActivityNotification.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71827597842","text":"import json\n\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom client_space.models import Client, ClientUser\n\ntest_user = {\"username\": \"testuser\", \"email\": \"testuser@example.com\", \"password\": \"testpassword\"}\n\n\nclass ClientTests(TestCase):\n def setUp(self):\n \"\"\"Set up databse\"\"\"\n new_user = User.objects.create(username=test_user[\"username\"], email=test_user[\"email\"])\n new_user.set_password(test_user[\"password\"])\n new_user.save()\n\n cl1, _ = Client.objects.get_or_create(name=\"Client1\", )\n cl2, _ = Client.objects.get_or_create(name=\"Client2\", )\n\n clu, _ = ClientUser.objects.get_or_create(user=new_user)\n clu.client.add(cl1)\n clu.save()\n\n def get_token(self):\n \"\"\"Authorization request\"\"\"\n res = self.client.post('/api/token/',\n data=json.dumps({\n 'email': test_user[\"email\"],\n 'password': test_user[\"password\"],\n }),\n content_type='application/json',\n )\n result = json.loads(res.content)\n self.assertTrue(\"access\" in result)\n return result[\"access\"]\n\n def test_get_clients_ok(self):\n \"\"\"Created clients are accessible\"\"\"\n cl1 = Client.objects.get(name=\"Client1\")\n cl2 = Client.objects.get(name=\"Client2\")\n self.assertEqual(cl1.name, \"Client1\")\n self.assertEqual(cl2.name, \"Client2\")\n\n token = self.get_token()\n res = self.client.get(reverse('client_space:client'),\n content_type='application/json',\n HTTP_AUTHORIZATION=f'Bearer {token}'\n )\n self.assertEquals(res.status_code, 200)\n data = res.json()\n self.assertEquals(data['count'], 1)\n self.assertEquals(data['data'][0]['name'], \"Client1\")\n\n def test_get_clients_unauthorized(self):\n \"\"\"Check unauthorized access to client\"\"\"\n res = self.client.get(reverse('client_space:client', ),\n content_type='application/json',\n HTTP_AUTHORIZATION=f'Bearer WRONG TOKEN'\n )\n self.assertEquals(res.status_code, 401)\n\n def test_get_one_client_ok(self):\n \"\"\"Created one client is accessible\"\"\"\n\n token = self.get_token()\n client_id = Client.objects.get(name='Client1').pk\n res = self.client.get(reverse('client_space:client', kwargs={'client_id': client_id}),\n content_type='application/json',\n HTTP_AUTHORIZATION=f'Bearer {token}'\n )\n self.assertEquals(res.status_code, 200)\n data = res.json()\n self.assertEquals(len(data), 1)\n self.assertEquals(data['data']['name'], \"Client1\")\n\n def test_get_one_client_forbidden(self):\n \"\"\"Check user is not allowed to get non-linked client\"\"\"\n token = self.get_token()\n client_id = Client.objects.get(name='Client2').pk\n res = self.client.get(reverse('client_space:client', kwargs={'client_id': client_id}),\n content_type='application/json',\n HTTP_AUTHORIZATION=f'Bearer {token}'\n )\n self.assertEquals(res.status_code, 404)\n\n def test_get_one_client_unauthorized(self):\n \"\"\"Check unauthorized access to one client\"\"\"\n client_id = Client.objects.get(name='Client1').pk\n res = self.client.get(reverse('client_space:client', kwargs={'client_id': client_id}),\n content_type='application/json',\n HTTP_AUTHORIZATION=f'Bearer WRONG TOKEN'\n )\n self.assertEquals(res.status_code, 401)\n","repo_name":"iGeophysix/xside_server","sub_path":"client_space/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17489928724","text":"import jieba\n# f=open('3-6实践材料.txt','rb')\n# s=f.read()\n# import jieba.analyse\n# s='这些结果表明,舒尼替尼和RFA整合疗法是一种优于每种疗法的有效治疗策略,可显着抑制肿瘤生长并延长被治疗小鼠的寿命'\n# jieba.add_word('整合疗法')\n# jieba.add_word('治疗策略')\n# r=jieba.lcut_for_search(s)\n# print(r)\ns='勤洗手,戴口罩有助于预防新冠病毒肺炎'\njieba.add_word('新冠病毒')\nprint('added')\nr=jieba.lcut(s)\nprint(r)\nprint('deleted')\njieba.del_word('新冠病毒')\nr=jieba.lcut(s)\nprint(r)\n","repo_name":"CSUBioinformatics1801/Python_Bioinformatics_ZYZ","sub_path":"Exp7/jieba_test.py","file_name":"jieba_test.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19165884803","text":"# 귤고르기\n# https://school.programmers.co.kr/learn/courses/30/lessons/138476\n\nfrom collections import Counter\n\ndef solution(k, tangerine):\n c = 0\n for i,j in enumerate(sorted(Counter(tangerine).values(),reverse = True),1):\n c += j\n if c >= k :\n return i","repo_name":"JayG-5/coding_test","sub_path":"programmers/138476.py","file_name":"138476.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73349747921","text":"\n\nedges = [['A','B',5],['B','C',4],['C','D',8],['D','C',8],['D','E',6],['A','D',5],['C','E',2],['E','B',3],['A','E',7]]\n\n\nclass GraphDistance:\n\n def calculate_distance(self, logger):\n logger.info(\"Started calculating distance\")\n route = ['A', 'D', 'C']\n logger.info(f\"Calculating distance for route : {str(route)}\")\n distance = 0\n route_step = 0\n from_node = route[route_step]\n to_node = route[route_step + 1]\n flag = True\n try:\n while flag:\n logger.info(\"Iteration through while loop\")\n for edge in edges:\n logger.info(f'Current node {str(edge)}')\n logger.debug(f'Current iteration : {str(edge)} and Node : {from_node}')\n if edge[0] is from_node:\n if edge[1] is to_node: # If next node matched we will increment our distance and node covered\n distance += edge[2]\n route_step += 1\n from_node = route[route_step]\n if route_step + 1 < len(route):\n to_node = route[route_step + 1]\n if from_node is route[len(route) - 1]:\n logger.warning(\"Reached End Point\")\n flag = False\n break\n\n logger.warning(\"Exited while loop\")\n logger.info(\"--------------------- Distance --------------------------------\")\n logger.critical(\"Traversing through Route : \" + str(route))\n logger.critical(\"Total Distance covered : \" + str(distance))\n logger.critical(\"Total Nodes covered : \" + str(route_step + 1))\n\n except:\n logger.error(\"An exception occurred during calculation\")\n\n # Below methods are used for unit testing\n def calculate_age(self, dob_year, current_year):\n return current_year - dob_year\n\n def calculate_stmt(self, winning_amount):\n if winning_amount > 100:\n return True\n else:\n return False\n","repo_name":"ayush9200/Python-Unit-testing-logging-graph","sub_path":"GraphDistance.py","file_name":"GraphDistance.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34437442599","text":"# test_data_fetcher.py\nimport pytest\nimport os\nfrom unittest.mock import patch\nfrom ..src.data_fetcher import fetch\n\n\n@pytest.fixture(autouse=True)\ndef setup_env_vars(monkeypatch):\n monkeypatch.setenv('API_URL', 'https://data.vatsim.net/v3/vatsim-data.json')\n\n\n@patch('data_collection.src.data_fetcher.APIClient')\ndef test_fetch_success(mock_api_client):\n # Arrange\n mock_data = {\"key\": \"value\"}\n mock_api_client.return_value.get_data.return_value = mock_data\n\n # Act\n result = fetch()\n\n # Assert\n mock_api_client.assert_called_once()\n assert result == mock_data\n\n\n@patch('data_collection.src.data_fetcher.APIClient')\ndef test_fetch_failure(mock_api_client):\n # Arrange\n mock_api_client.return_value.get_data.side_effect = Exception(\"Unable to fetch data\")\n\n # Act & Assert\n with pytest.raises(Exception) as e:\n fetch()\n assert \"Unable to fetch data\" in str(e.value)\n\n\n@patch('data_collection.src.data_fetcher.APIClient')\ndef test_fetch_execution_count(mock_api_client):\n # Arrange\n mock_data = {\"key\": \"value\"}\n mock_api_client.return_value.get_data.return_value = mock_data\n\n # Act\n fetch()\n fetch()\n\n # Assert\n assert mock_api_client.return_value.get_data.call_count == 2\n","repo_name":"nyartcc/application-vattix","sub_path":"lambda_function/data_collection/tests/test_data_fetcher.py","file_name":"test_data_fetcher.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17808050905","text":"# Using filter() and list() functions and .lower() method filter all the vowels in a given string\n\ndef vow(i):\n if(i in \"aeiou\"):\n return True\n\nl = \"Het Kirtan Jinay\"\n\nlist_ = list(l)\n\nlower_str = map(lambda i: i.lower() , list_ )\n\nlower_str = list(lower_str)\n\nans = filter(vow,lower_str)\n\nprint(list(ans))\n\n\n","repo_name":"hetparekh21/PUP-1","sub_path":"filters assignment/assignment_2_Q_3.py","file_name":"assignment_2_Q_3.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19023572974","text":"\n# coding: utf-8\n\n# In[49]:\n\n\nfrom __future__ import print_function\nimport sys\n\nif __name__ == '__main__':\n\n formatted_train_out= sys. argv [1]\n formatted_validation_out= sys. argv [2]\n formatted_test_out= sys. argv [3]\n dict_input=sys. argv [4]\n train_out = sys. argv [5]\n test_out = sys. argv [6]\n metrics_out = sys. argv [7]\n num_epoch = int(sys. argv [8])\n\n\n\n\n# formatted_train_out= 'handout/smalloutput/model1_formatted_train.tsv'\n# formatted_validation_out= 'handout/smalloutput/model1_formatted_valid.tsv'\n# formatted_test_out= 'handout/smalloutput/model1_formatted_test.tsv'\n# dict_input='handout/dict.txt'\n# train_out = 'handout/smalloutput/train_.labels'\n# test_out = 'handout/smalloutput/test_.labels'\n# metrics_out = 'handout/smalloutput/metrics_.txt'\n# num_epoch = int(30)\n\n\n\n import numpy as np\n import math\n\n f_formatted_train_out= open(formatted_train_out,\"r\")\n f_formatted_validation_out= open(formatted_validation_out,\"r\")\n f_formatted_test_out= open(formatted_test_out,\"r\")\n f_dict_input = open(dict_input,\"r\")\n f_train_out = open(train_out,\"w\")\n f_test_out= open(test_out,\"w\")\n f_metrics_out= open(metrics_out,\"w\")\n\n\n dict_dict = {}\n\n for line in f_dict_input:\n line = line.split(\" \")\n dict_dict[line[0]] = (line[1].split(\"\\n\"))[0]\n\n\n len(dict_dict)\n theta = np.zeros((len(dict_dict))+1)\n # print(len(theta))\n\n vect_x_all = []\n label_all=[]\n for line in f_formatted_train_out:\n vect_x = {}\n line = line.split('\\t')\n label_all.append(int(line[0]))\n for i in range(1,len(line)):\n ii = line[i].split(':')\n vect_x[int(ii[0])] = 1\n # print(len(dict_dict))\n vect_x[(len(dict_dict))] = 1\n vect_x_all.append(vect_x)\n\n\n # print(line[1])\n\n\n def sparse_dot(X,Y):\n product = 0.0\n for i, x in X.items():\n product+=x*Y[i]\n # print('dot:',x, Y[i])\n return product\n\n\n sparse_dot(vect_x_all[0],list(theta))\n\n def sgd_update_one(theta_input,x,y,learning_rate):\n exp_term = math.exp(sparse_dot(x,theta_input))\n for n in range(len(theta_input)):\n if n in x:\n theta_input[n] = theta_input[n] + learning_rate*x[n]*(y-exp_term/(1+exp_term))\n return theta_input\n\n\n theta = np.zeros(len(dict_dict)+1)\n for l in range(0,num_epoch):\n for k in range(len(vect_x_all)):\n theta = sgd_update_one(theta,vect_x_all[k],label_all[k],0.1)\n\n\n\n result=[]\n\n for k in range(len(vect_x_all)):\n exp_term = math.exp(sparse_dot(vect_x_all[k],theta))\n prob = (exp_term/(1+exp_term))\n # print(prob)\n if prob > 0.5:\n result.append(1)\n else:\n result.append(0)\n\n output = ''\n for r in result:\n output += str(r)+'\\n'\n\n\n f_train_out.writelines(output)\n\n\n\n error = 0.000\n for k in range(len(result)):\n if result[k] != label_all[k]:\n error += 1\n error = (error+0.0000000)/len(result)\n # print(error)\n\n output_error = ''\n output_error += 'error(train): ' + str(error) + '\\n'\n\n\n\n\n vect_x_all = []\n label_all=[]\n for line in f_formatted_test_out:\n vect_x = {}\n line = line.split('\\t')\n label_all.append(int(line[0]))\n for i in range(1,len(line)):\n ii = line[i].split(':')\n vect_x[int(ii[0])] = 1\n # print(len(dict_dict))\n vect_x[(len(dict_dict))] = 1\n vect_x_all.append(vect_x)\n\n result=[]\n\n for k in range(len(vect_x_all)):\n exp_term = math.exp(sparse_dot(vect_x_all[k],theta))\n prob = (exp_term/(1+exp_term))\n # print(prob)\n if prob > 0.5:\n result.append(1)\n else:\n result.append(0)\n\n output = ''\n for r in result:\n output += str(r)+'\\n'\n\n\n f_test_out.writelines(output)\n\n\n\n error = 0.000\n for k in range(len(result)):\n if result[k] != label_all[k]:\n error += 1\n error = (error+0.0000000)/len(result)\n # print(error)\n\n\n output_error += 'error(test): ' + str(error)\n f_metrics_out.writelines(output_error) \n\n\n\n\n\n","repo_name":"winasvin/win-asvin","sub_path":"10601-Introduction to Machine Learning /Logistic Regression/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19683142142","text":"from itertools import chain\nfi = open('p7_input.txt')\ncontents = dict()\ncount = bag_count = 0\n\n\ndef read_contents():\n for rule in fi.readlines():\n bag = rule.split('bags')[0].strip()\n bag_content = rule.split('bags contain ')[1].strip()[:-1]\n bag_content = [(x.split(' ', 1)[0], x.split(' ', 1)[1].\n rsplit(' ', 1)[0]) for x in bag_content.split(', ')]\n if bag_content[0][0] == 'no':\n contents[bag] = [('0', 'no other bags')]\n else:\n contents[bag] = bag_content\n\n\ndef contain_shiny_gold(color):\n colors = [x[1] for x in contents[color]]\n if 'no other bags' in colors:\n return False\n if 'shiny gold' in colors:\n return True\n else:\n for color in colors:\n if contain_shiny_gold(color):\n return True\n else:\n continue\n return False\n\n\ndef shiny_gold_contain(color):\n global bag_count\n colors = [[x[1]]*int(x[0]) for x in contents[color]]\n colors = list(chain.from_iterable(colors))\n for color in colors:\n if not color == 'no other bags':\n bag_count += 1\n shiny_gold_contain(color)\n else:\n continue\n\n\nread_contents()\nfor color in contents.keys():\n if contain_shiny_gold(color):\n count += 1\nprint('The answer to part 1 is: {}'.format(count))\nshiny_gold_contain('shiny gold')\nprint('The answer to part 2 is: {}'.format(bag_count))\n","repo_name":"mleijon/AoC2020","sub_path":"day7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"9523278454","text":"from bs4 import BeautifulSoup\nimport requests\n\nresponse = requests.get('https://www.qiushibaike.com/')\nresponse.encoding = response.apparent_encoding\nsoup = BeautifulSoup(response.text, 'html.parser')\n# for i in soup.select('.content'):\n# next=i.text\n# print(next)\n\nprint('------------------------')\nfor x in soup.find_all('div', class_=\"content\"): #\n if x.text is not None:\n print(x.text)\n f=open('test.txt','a+')\n f.write(x.text)\n f.close()\n# print(soup.)\n\n# for t in soup今天在院里打扫雪,用铁楸铲雪往垃圾车上装的时候,突然想起来一件小时候发生的一件事童年趣事……
小时候,我和弟弟在家门口玩,弟弟拉了便便,由于老妈在忙,就让我拿着铁楸把弟弟拉的便便铲了,可能是年龄小,也可能是没拿过铁楸,当我拿着铁楸学着大人的样子,弓着腰,对着那坨翔,一用力,铁楸一扬,那坨便便飞出去一米远的距离,我走过去,接着瞄准,弓腰,用力一扬,那坨便便又飞出去一米远,然后就出现了这样的画面,一个小女孩在前面执着的拿着铁楸追着那坨便便,一个小男孩撅着屁股,一只手拉着裤子,一只手扬着纸,在后面追着.select('content'):\n# print(t.t…ext)\n# print(soup.te
xt)\n# print(soup.prettify())\n# print(soup.a['class'])\n# for i in soup.find_all('div', class_='content'):\n# print(i.string)\n# print(type(i.string))\n# print('------------------------------')\n# print(soup.span.contents)\n# for i in soup.find_all('span'):\n# print(i.get_text)\n\n# print(soup.name)\n# print(soup.a['href'])\n# for i in soup.find_all('a'):\n# print(type(i))\n# print(soup.prettify())\nfor m in soup.find_all('div', class_='content'):\n # if m.string is not None:\n print(m.text)\n","repo_name":"hostpost114/-t","sub_path":"qiushibaike.py","file_name":"qiushibaike.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72403475921","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Author: kerwin.cn@gmail.com\r\n# Created Time:2017-09-16 15:45:33\r\n# Last Change: 2017-09-17 19:11:15\r\n# File Name: tonghuashun_AGUSDO_config.py\r\n\r\n\r\nconfig = {\r\n \"base\": {\r\n \"start_date\": \"2010-06-01\",\r\n \"end_date\": \"2016-12-01\",\r\n \"accounts\": {\r\n \"stock\": 100000\r\n }\r\n },\r\n \"extra\": {\r\n \"log_level\": \"verbose\",\r\n },\r\n \"mod\": {\r\n \"sys_analyser\": {\r\n \"enabled\": True,\r\n \"plot\": True\r\n },\r\n }\r\n}\r\n","repo_name":"kerwinxu/Kerwin_C_Compiler","sub_path":"python/RQAlpha/tonghuashun_AGUSDO_config.py","file_name":"tonghuashun_AGUSDO_config.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29073394218","text":"import pygame\nfrom settings import *\nfrom gamestate import GameState\nfrom gameresource import GameResource\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode((800, 800))\n pygame.display.set_caption(\"三连棋\")\n game_state = GameState()\n\n while game_state.is_playing:\n game_state = check_events(game_state)\n\n screen.fill(WHITE_BGCOLOR)\n draw_window(screen, game_state)\n pygame.display.update()\n\ndef check_events(game_state):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_state.stop_game()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n click_pos = pygame.mouse.get_pos()\n if game_state.stage == CHOOSE_SIDE:\n if select_defensive_side(click_pos):\n game_state.set_player_side(DEFENSIVE_SIDE)\n if select_offensive_side(click_pos):\n game_state.set_player_side(OFFENSIVE_SIDE)\n elif game_state.stage == PLAYING:\n if valid_drop(click_pos, game_state):\n drop_cell = find_cell(click_pos)\n game_state.player_make_move(drop_cell[0], drop_cell[1])\n # game_state.print_board()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r and game_state.stage == GAME_END:\n game_state = GameState()\n\n return game_state\n\n\ndef valid_drop(click_pos, game_state):\n '''鼠标点击在可以落子的格子上吗?'''\n if click_pos[0] <= MARGIN_LEFT or click_pos[0] >= MARGIN_LEFT + BOARD_SIZE \\\n or click_pos[1] <= MARGIN_TOP or click_pos[1] >= MARGIN_TOP + BOARD_SIZE:\n return False\n\n click_x_board = click_pos[0] - MARGIN_LEFT\n click_y_board = click_pos[1] - MARGIN_TOP\n cell_width = BOARD_SIZE // 3\n c = click_x_board // cell_width\n r = click_y_board // cell_width\n #点击位置落在单元格边界线附近吗?\n border = 5\n if (c * cell_width + border <= click_x_board <= (c + 1) * cell_width - border ) \\\n and (r * cell_width + border <= click_y_board <= (r + 1) * cell_width - border):\n if game_state.board[r][c] == GameState.BLANK_CELL:\n return True\n else:\n return False\n else: #点在单元格边界线上的话,视为无效\n return False\n\ndef find_cell(click_pos):\n '''点击的是哪一个格子?'''\n click_x_board = click_pos[0] - MARGIN_LEFT\n click_y_board = click_pos[1] - MARGIN_TOP\n cell_width = BOARD_SIZE // 3\n row = click_y_board // cell_width\n column = click_x_board // cell_width\n assert(0 <= row <= 3)\n assert(0 <= column <= 3)\n return (row, column)\n\n\ndef select_defensive_side(click_pos):\n return DEFENSIVE_SIDE_X <= click_pos[0] <= DEFENSIVE_SIDE_X + BUTTON_WIDTH \\\n and DEFENSIVE_SIDE_Y <= click_pos[1] <= DEFENSIVE_SIDE_Y + BUTTON_HEIGHT\n\ndef select_offensive_side(click_pos):\n return OFFENSIVE_SIDE_X <= click_pos[0] <= OFFENSIVE_SIDE_X + BUTTON_WIDTH \\\n and OFFENSIVE_SIDE_Y <= click_pos[1] <= OFFENSIVE_SIDE_Y + BUTTON_HEIGHT\n\ndef draw_window(screen, game_state):\n draw_title(screen)\n\n if game_state.stage == CHOOSE_SIDE:\n draw_select_side(screen)\n else:\n draw_vs_img(screen, game_state.player_side)\n draw_board(screen, game_state)\n if game_state.stage == GAME_END:\n draw_winner_img(screen, game_state.winner)\n draw_newgame_img(screen)\n\ndef draw_title(screen):\n title_postion = (MARGIN_LEFT, MARGIN_TOP - 300)\n screen.blit(GameResource.load_game_title_img(), title_postion)\n\n\ndef draw_board(screen, game_state):\n cell_width = BOARD_SIZE // 3\n for r in range(1, 3):\n left_top = ( MARGIN_LEFT, MARGIN_TOP + r * cell_width )\n w_h = (BOARD_SIZE, 5)\n line = pygame.Rect(left_top, w_h)\n pygame.draw.rect(screen, LINE_COLOR, line)\n\n for c in range(1, 3):\n left_top = ( MARGIN_LEFT + c * cell_width, MARGIN_TOP)\n w_h = (5, BOARD_SIZE)\n line = pygame.Rect(left_top, w_h)\n pygame.draw.rect(screen, LINE_COLOR, line)\n\n for r in range(3):\n for c in range(3):\n if game_state.board[r][c] != GameState.BLANK_CELL:\n draw_piece(screen, (r, c),\n game_state.board[r][c])\n\ndef draw_piece(screen, cell, piece_type):\n cell_width = BOARD_SIZE // 3\n r, c = cell\n left = MARGIN_LEFT + 25 + c * cell_width\n top = MARGIN_TOP + 25 + r * cell_width\n if piece_type == DEFENSIVE_SIDE:\n screen.blit(GameResource.load_x_piece_img(), (left, top))\n else:\n screen.blit(GameResource.load_o_piece_img(), (left, top))\n\n\ndef draw_select_side(screen):\n select_tip_font = pygame.font.SysFont('simhei', 24)\n select_tip_surface = select_tip_font.render('点击鼠标选择:', False, BLACK)\n select_tip_position = (MARGIN_LEFT - 200, DEFENSIVE_SIDE_Y + 15)\n screen.blit(select_tip_surface, select_tip_position)\n\n draw_select_button(screen, DEFENSIVE_SIDE_X, DEFENSIVE_SIDE_Y, '选后手(X)')\n draw_select_button(screen, OFFENSIVE_SIDE_X, OFFENSIVE_SIDE_Y, '选先手(O)')\n\n demo_position = (MARGIN_LEFT - 100, DEFENSIVE_SIDE_Y + 100)\n screen.blit(GameResource.load_howto_sanlianqi_img(), demo_position)\n\ndef draw_select_button(screen, x, y, btn_label):\n side_rect = (x, y, 130, 50)\n pygame.draw.rect(screen, SELECT_AREA_BGCOLOR, side_rect)\n\n side_font = pygame.font.SysFont('simhei', 28)\n side_surface = side_font.render(btn_label, False, BLACK)\n side_position = (x + 5, y + 10)\n screen.blit(side_surface, side_position)\n\ndef draw_vs_img(screen, player_side):\n computer_side = OFFENSIVE_SIDE if player_side == DEFENSIVE_SIDE else DEFENSIVE_SIDE\n side_position = (DEFENSIVE_SIDE_X - 120, DEFENSIVE_SIDE_Y - 40)\n if player_side == DEFENSIVE_SIDE:\n screen.blit(GameResource.load_you_x_vs_computer_o_img(), side_position)\n else:\n screen.blit(GameResource.load_you_o_vs_computer_x_img(), side_position)\n\n\ndef draw_winner_img(screen, winner):\n img_postion = (MARGIN_LEFT + 50, MARGIN_TOP + 50)\n if winner == \"player\":\n screen.blit(GameResource.load_you_won_img(), img_postion)\n elif winner == \"duce\":\n screen.blit(GameResource.load_duce_img(), img_postion)\n else:\n screen.blit(GameResource.load_computer_won_img(), img_postion)\n\ndef draw_newgame_img(screen):\n img_position = (MARGIN_LEFT - 50, MARGIN_TOP + BOARD_SIZE + 50)\n screen.blit(GameResource.load_newgame_tip_img(), img_position)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"yeahatgithub/LightComputerGames","sub_path":"san-lian-qi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33283599379","text":"__author__ = 'billsu'\nimport MapReduce\n\n# Part 1\nmr = MapReduce.MapReduce()\n\n# Part 2\ndef mapper(record):\n # key: document identifier\n # value: document contents\n key = record[1]\n value = record\n mr.emit_intermediate(key, value)\n\n# Part 3\ndef reducer(key, list_of_values):\n # key: word\n # value: list of occurrence counts\n order = []\n item = []\n for list in list_of_values:\n if (list[0]== \"line_item\"):\n item.append(list)\n elif (list[0] == \"order\"):\n order.append(list)\n\n for list_o in order:\n for list_i in item:\n overall = list_o + list_i\n mr.emit(overall)\n\n\n# Part 4\ninputdata = open(\"data/records.json\")\nmr.execute(inputdata, mapper, reducer)","repo_name":"su20yu1919/MapReduce-Programming","sub_path":"join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11354019817","text":"from flask import Flask, Response, request\nfrom urllib import parse\nimport requests\nfrom xml.dom import minidom as DOM\nimport re\napp = Flask(__name__)\n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef rss(path):\n class Item():\n '''能够根据指定属性判断item重复'''\n\n def __init__(self, node, diff=\"guid\"):\n self._node = node\n self.diff = diff\n diff = self._node.getElementsByTagName(self.diff)\n if diff:\n diff = diff[0]\n for node in diff.childNodes:\n if node.nodeName == \"#text\":\n diff = node\n break\n if diff:\n self.id = diff.data\n\n def __eq__(self, other):\n return self.id == other.id\n\n def get_rss_xml(url: str):\n ''':param url: rss url\\n\n :return: xml string'''\n res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'})\n return res.text\n \n def get_dom(xml: str):\n tree = DOM.parseString(xml)\n return tree\n\n def get_items_from_xml(tree):\n ''':param xml: xml string\\n\n :return: items list'''\n items = []\n root = tree.documentElement\n channel = root.getElementsByTagName(\"channel\")\n if channel:\n channel = channel[0]\n items = channel.getElementsByTagName(\"item\")\n return items if items else []\n\n def concat_param(param: str):\n return \"?{}=\".format(param)\n\n params = [\"rss\"]\n params = [concat_param(param) for param in params]\n query = \"?\" + parse.urlsplit(request.full_path).query\n results = re.findall(\"\\?.*?=\", query)\n n = 0\n for i in range(0, len(results)):\n if results[i - n] not in params:\n del results[i - n]\n n += 1\n first = None\n i = 0\n query_dict = {}\n while not i + 1 >= len(results):\n # 依次查找\n query = re.sub(\"\\\\\" + results[i], \"\", query, 1)\n tail = results[i + 1]\n pattern = \"^.*?\\{}\".format(tail)\n value = re.search(pattern, query).group(0)\n value = re.sub(\"\\\\\" + tail, \"\", value)\n if not tail[1:-1] in query_dict:\n query_dict[tail[1:-1]] = value\n elif isinstance(list, query_dict[tail[1:-1]]):\n query_dict[tail[1:-1]] = query_dict[tail[1:-1]].append(value)\n else:\n query_dict[tail[1:-1]] = [query_dict[tail[1:-1]], value]\n query = query[len(value):]\n i += 1\n query_dict[results[-1]] = re.sub(\"\\\\\" + results[-1], \"\", query, 1)\n # 获取模板rss与items\n if \"type\" in query:\n # TODO: 为实现多种可能,应该使用抽象类实现转化的过程\n pass\n if \"rss\" in query:\n # TODO: 允许带有参数的链接(不使用库而直接使用'?rss='分割request.full_path,建立list存储?{}=格式从而分割预定的参数)\n urls = query[\"rss\"]\n first = get_dom(get_rss_xml(urls[0]))\n items = [item for item in get_items_from_xml(first)]\n items_first = items.copy()\n items = [Item(item) for item in items]\n for url in urls[1:]:\n xml = get_rss_xml(url)\n tree = get_dom(xml)\n tree_item = get_items_from_xml(tree)\n for item in tree_item:\n item = Item(item)\n if item not in items:\n items.append(item)\n if items:\n items = [item._node for item in items]\n root = first.documentElement\n channel = root.getElementsByTagName(\"channel\")\n if channel:\n channel = channel[0]\n for item in items_first:\n channel.removeChild(item)\n for item in items:\n channel.appendChild(item)\n res = first.toprettyxml(encoding=\"utf-8\") if first else None\n return Response(res, mimetype='application/xml')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"amedays/vercel_flask","sub_path":"api/rss.py","file_name":"rss.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40183842030","text":"#!/usr/bin/env python2\n#-*- coding:utf-8 -*-\nimport rospy\nimport sys\n\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Float64MultiArray\n\npub_person = rospy.Publisher('camera/person', Bool, queue_size=10)\npub_pcx = rospy.Publisher('camera/person_cx', Float64, queue_size=10)\npub_psize = rospy.Publisher('camera/person_size', Float64, queue_size=10)\n\npub_sign = rospy.Publisher('camera/sign', Bool, queue_size=10)\npub_scx = rospy.Publisher('camera/sign_cx', Float64, queue_size=10)\npub_ssize = rospy.Publisher('camera/sign_size', Float64, queue_size=10)\n\ndef max_bbox():\n\tput = rospy.Publisher('max_bbox', Array)\n\ndef callback(_msgs_):\n\tprint(\"START\")\n\tmsg_person = Bool()\n\tmsg_pcx = Float64()\n\tmsg_psize = Float64()\n\n\tmsg_sign = Bool()\n\tmsg_scx = Float64()\n\tmsg_ssize = Float64()\n\n\tmax_psize = 0\n\tmax_ssize = 0\n\n\t#seperate each bbox in a frame\n\tfor k in range(0, len(_msgs_.data), 3): \n\t\tclass_id = _msgs_.data[k]\n\t\tbbox_cx = _msgs_.data[k+1]\n\t\tbbox_size = _msgs_.data[k+2]\n\t\tif class_id == 1:\n\t\t\tmsg_person = True\n\n\t\t\tif bbox_size >= max_psize :\n\t\t\t\tmax_psize = bbox_size\n\t\t\t\tmsg_pcx = bbox_cx\n\n\t\t\tpub_person.publish(msg_person)\n\t\t\tpub_pcx.publish(msg_pcx)\n\t\t\tpub_psize.publish(msg_psize)\n\n\t\telif class_id == 13:\n\t\t\tmsg_sign = True\n\t\t\t\n\t\t\tif bbox_size >= max_ssize :\n\t\t\t\tmax_ssize = bbox_size\n\t\t\t\tmsg_scx = bbox_cx\n\n\t\t\tpub_sign.publish(msg_sign)\n\t\t\tpub_scx.publish(msg_scx)\n\t\t\tpub_ssize.publish(msg_ssize)\n\t\telse:\n\t\t\tmsg_person = False\n\t\t\tmsg_sign = False\n\tprint(\"END\")\n\ndef listener():\n\trospy.init_node('max_node')\n\trospy.Subscriber(\"/camera/topic\", Float64MultiArray, callback)\n\trospy.spin()\n\nif __name__=='__main__':\n listener()\n","repo_name":"GGamangCoder/CrashLab_Project","sub_path":"Part_AI/max_bbox.py","file_name":"max_bbox.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"42100128098","text":"import re\nimport random\nimport matplotlib.pyplot as plt\nimport warnings\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n plt.rcParams['toolbar'] = 'toolmanager'\nfrom PyQt6 import QtCore, QtGui\nfrom PyQt6.QtWidgets import QApplication, QMainWindow, QMessageBox\nfrom binaryRelation import BinaryRelation\nfrom hasseDiagram import HasseDiagram\nfrom ui_mainwindow import Ui_MainWindow\n\n\n# Закрытие окна создания диаграммы Хассе\nclass MainWindow(QMainWindow, Ui_MainWindow):\n window_closed = QtCore.pyqtSignal()\n number_of_diagrams = 0\n\n def __init__(self, parent=None):\n super().__init__()\n self.setupUi(self)\n\n self.error_msg = None\n self.help_msg = None\n\n self.btn_run.clicked.connect(self.button_click)\n self.btn_back.clicked.connect(lambda: self.return_to_mainmenu(parent))\n self.btn_help.clicked.connect(self.open_help)\n self.btn_gen.clicked.connect(self.input_random_order)\n\n def closeEvent(self, event):\n self.window_closed.emit()\n event.accept()\n\n @staticmethod\n def create_diagram(bin_rel) -> HasseDiagram:\n print(\"Диаграмма хассе на множестве \", bin_rel.A)\n return HasseDiagram(bin_rel)\n\n def resize_event(self):\n self.resize(410, 410)\n self.setMinimumSize(QtCore.QSize(410, 410))\n self.setMaximumSize(QtCore.QSize(410, 410))\n\n def output(self):\n try:\n binary = self.create_binary_relation()\n\n if binary.is_reflexive():\n reflex_text = \"Рефлексивно\"\n elif binary.is_irreflexive():\n reflex_text = \"Иррефлексивно\"\n else:\n reflex_text = \"Нерефлексивно\"\n self.lbl_reflex.setText(f\" • {reflex_text}!\")\n\n symm_text = \"Симметрично\" if binary.is_symmetrical() else \"Несимметрично\"\n self.lbl_symm.setText(f\" • {symm_text}!\")\n\n trans_text = \"Транзитивно\" if binary.is_transitive() else \"Нетранзитивно\"\n self.lbl_trans.setText(f\" • {trans_text}!\")\n\n antisymm_text = \"Антисимметрично\" if binary.is_antisymm() else \"Не антисимметрично\"\n self.lbl_antisym.setText(f\" • {antisymm_text}!\")\n\n if binary.is_order():\n hd = self.create_diagram(binary)\n self.lbl_bin_class.setStyleSheet(\"color: #008000;\\n\"\n \"font-weight: bold;\\n\"\n \"font-size: 12;\\n\"\n \"font-family: Arial;\")\n\n # Удаляем ненужные кнопки на панели инструментов\n unwanted_buttons = ['pan', 'help', 'subplots']\n fig = plt.figure()\n for button in unwanted_buttons:\n fig.canvas.manager.toolmanager.remove_tool(button)\n\n # Задаем название диаграмме\n self.number_of_diagrams += 1\n name_of_diagram = \"Диаграмма \" + str(self.number_of_diagrams)\n fig.canvas.manager.set_window_title(name_of_diagram)\n\n plt.show()\n hd.draw()\n\n # Сохраняем параметры диаграммы в соответствующем файле\n with open(name_of_diagram + \".txt\", \"w\") as file:\n params = \"A: \" + str(hd.get_bin_rel().A) + \"\\nR: \" + str(hd.get_bin_rel().R) + \"\\n\"\n file.write(params)\n\n else:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(\"Не является отношением порядка!\")\n msg.setWindowTitle(\"Не является отношением порядка!\")\n msg.exec()\n\n self.lbl_bin_class.setStyleSheet(\"color: rgb(184, 0, 0);\\n\"\n \"font-weight: bold;\\n\"\n \"font-size: 12;\\n\"\n \"font-family: Arial;\")\n\n bin_class = binary.class_of_relation()\n if bin_class == \"unknown\":\n self.lbl_bin_class.setText(\"Не входит ни в один класс бинарных отношений\")\n if bin_class == \"tolerance\":\n self.lbl_bin_class.setText(\"Толерантность\")\n if bin_class == \"equivalence\":\n self.lbl_bin_class.setText(\"Эквивалентность\")\n if bin_class == \"partial order\":\n self.lbl_bin_class.setText(\"Частичный порядок\")\n if bin_class == \"preorder\":\n self.lbl_bin_class.setText(\"Предпорядок\")\n if bin_class == \"strict order\":\n self.lbl_bin_class.setText(\"Строгий порядок\")\n if bin_class == \"strict preorder\":\n self.lbl_bin_class.setText(\"Строгий предпорядок\")\n\n self.resize_event()\n\n except IOError as e:\n self.error_handle(e)\n\n def button_click(self):\n self.output()\n\n def create_binary_relation(self) -> BinaryRelation:\n return BinaryRelation(*self.input())\n\n def input(self) -> list: # Ввод данных\n rubbish_text = re.sub(r'\\([^()]*\\)', '', self.edt_setR.toPlainText())\n alnum_outta_brackets = re.search(r'\\w|\\d', rubbish_text)\n\n A = list(map(str, re.split(r' *, *', ',' + self.edt_setA.toPlainText() + ','))) # Ввод числового множества\n A = list(x for x in A if x != '')\n R = []\n R_strings = re.findall(r'\\( *([^\\)]*) *\\) *, *', self.edt_setR.toPlainText() + \",\") # Ввод бинарного отношения перечислением пар\n\n for elem in R_strings:\n R_str = tuple(map(str, re.split(r' *, *', elem))) # Преобразуем в пару строковых значений\n R.append(R_str)\n\n if len(self.edt_setA.toPlainText()) == 0 or len(self.edt_setR.toPlainText()) == 0:\n raise IOError(\"Поля ввода не могут быть пустыми.\")\n\n if re.findall(r'\\) *\\(', self.edt_setR.toPlainText()):\n raise IOError(\"Некорректный ввод бинарного отношения. Пары должны вводитьс�� через запятую.\")\n\n if alnum_outta_brackets or not R:\n raise IOError(\"Некорректный ввод бинарного отношения.\")\n\n # Уникальные элементы в множестве пар, задающих бинарное отношение\n list_unique = []\n for i in R:\n i = list(i)\n list_unique.extend(i)\n list_unique = list(set(list_unique))\n\n if not set(list_unique).issubset(set(A)):\n raise IOError(\n \"Бинарное отношение R не является подмножеством декартова \"\n \"произведения множества A на себя. Пожалуйста, задайте R ⊆ A^2.\")\n\n for x in R:\n if len(x) != 2:\n raise IOError(\n \"Неверное количество элементов в паре, задающей бинарное отношение.\")\n\n if len(set(A)) > 40:\n raise IOError(\"Слишком большое количество элементов множества.\")\n\n if len(set(R)) > 300:\n raise IOError(\"Слишком большое количество пар, задающих бинарное отношение.\")\n\n return [A, R]\n\n def create_random_binary_relation(self) -> BinaryRelation:\n num_A = random.randrange(3, 7)\n A = list()\n R = list()\n for i in range(1, num_A + 1):\n A.append(i)\n\n num_R = random.randrange(7, 12)\n for i in range(num_R):\n first = random.choice(A)\n second = random.choice(A)\n R.append((first, second))\n\n return BinaryRelation(A, R)\n\n def input_random_order(self):\n bin_rel = self.create_random_binary_relation()\n bin_rel.make_order()\n self.edt_setA.setText(str(bin_rel.A)[1:-1])\n self.edt_setR.setText(str(bin_rel.R)[1:-1])\n\n def input_random_binary_relation(self):\n bin_rel = self.create_random_binary_relation()\n self.edt_setA.setText(str(bin_rel.A)[1:-1])\n self.edt_setR.setText(str(bin_rel.R)[1:-1])\n\n def error_handle(self, err_text):\n self.error_msg = QMessageBox()\n self.error_msg.setIcon(QMessageBox.Icon.Critical)\n self.error_msg.setText(\"Ошибка\")\n self.error_msg.setInformativeText(str(err_text))\n self.error_msg.setWindowTitle(\"Ошибка\")\n self.error_msg.exec()\n\n def return_to_mainmenu(self, menu):\n self.close()\n menu.setVisible(True)\n\n def open_help(self):\n if self.help_msg is None:\n self.help_msg = QMessageBox()\n self.help_msg.setIcon(QMessageBox.Icon.Information)\n self.help_msg.setText(\"Справка по вводу данных\")\n self.help_msg.setInformativeText(\n \"Поле ввода множества A должно заполняться элементами множества через запятую. Например: 1, 2, 3.\\n\"\n \"Поле ввода бинарного отношения R должно заполняться парами элементов, \"\n \"записанных в круглых скобках, через запятую. \"\n \"Сами пары должны быть разделены запятыми. Например: (1,2), (1,3), (2,3).\")\n self.help_msg.setWindowTitle(\"Справка по вводу данных\")\n self.help_msg.exec()\n","repo_name":"RozeQz/HasseDiagram","sub_path":"mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":10298,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41532686134","text":"import re\nimport getopt\nimport sys\nimport datetime\n\ncol_seq, att_seq, att_ass = '\\t', ';', '='\natt_transcript_name = \"Parent\"\natt_transcript_seq = \":\"\natt_rank_key = 'rank'\natt_exon_key = 'exon_id'\natt_exon_alias = 'exon_id,Name'\nfreature_gene_name = \"gene,ncRNA_gene\"\nfreature_exon_name = \"exon\"\nreplace_exon_key = \"exon\"\nreplace_intron_key = \"intron\"\noutput_order = \"Parent,Name,constitutive,ensembl_end_phase,ensembl_phase,exon_id,rank\"\n\ndef usage():\n print(\"\"\"\n USAGE: python generate_intron_annotation_from_gff3.py [options]\n For example:\n python generate_intron_annotation_from_gff3.py -i gene_annotation.gff3 -o intron_annotation.gff3 -g gene,ncRNA_gene -e exon -p Parent &\n\n options:\n -i: the gene annotation with gff3 format.\n -o: the intron annotation with gff3 format (default: intron_annotation.gff3).\n -g: the gene name of gff3 in 3rd column (default: gene).\n -e: the exon name of gff3 in 3rd column (default: exon).\n -p: the parent's attribute name in 9rd column of gff3 (default: Parent).\n -h: print this page.\n \"\"\")\n sys.exit()\n\n\ndef obtainParameter():\n opts, args = getopt.getopt(sys.argv[1:], \"hi:o:g:e:p:\")\n output_file = \"intron_annotation.gff3\"\n\n if opts:\n for op, value in opts:\n if op == \"-i\":\n input_file = value\n elif op == \"-o\":\n output_file = value\n elif op == \"-g\":\n freature_gene_name = value\n elif op == \"-e\":\n freature_exon_name = value\n elif op == \"-p\":\n att_transcript_name = value\n elif op == \"-h\":\n usage()\n else:\n usage()\n return(input_file, output_file)\n\n############\n\n\ndef covert_str(exon):\n new_atts = ''\n for att in output_order:\n if att in exon[-1]:\n new_atts = new_atts + att + att_ass + exon[-1][att] + att_seq\n output_str = col_seq.join(exon[:-1]) + col_seq + new_atts + '\\n'\n return(output_str)\n\n\ndef write_gene(whole_gene, o):\n for transcript in whole_gene:\n if transcript:\n o.writelines(\"###\\n\")\n for exon in transcript:\n out = covert_str(exon)\n out = out.replace(replace_exon_key, replace_intron_key)\n o.writelines(out)\n else:\n pass\n\ndef remove_duplicate_minus_strand_intron_name(whole_gene):\n intron_loci = {}\n gene_len = len(whole_gene)\n for m in range(gene_len):\n tran_len = len(whole_gene[gene_len-m-1])\n for n in range(tran_len):\n exon = whole_gene[gene_len-m-1][tran_len-n-1]\n intron_loci_str = '_'.join([exon[0], exon[3], exon[4], exon[6]])\n if intron_loci_str not in intron_loci:\n intron_loci[intron_loci_str] = exon[-1][att_exon_key]\n else:\n for name in att_exon_alias:\n whole_gene[gene_len-m-1][tran_len-n-1][-1][name] = intron_loci[intron_loci_str]\n return(whole_gene)\n\ndef remove_duplicate_plus_strand_intron_name(whole_gene):\n intron_loci = {}\n for m in range(len(whole_gene)):\n for n in range(len(whole_gene[m])):\n exon = whole_gene[m][n]\n intron_loci_str = '_'.join([exon[0], exon[3], exon[4], exon[6]])\n if intron_loci_str not in intron_loci:\n intron_loci[intron_loci_str] = exon[-1][att_exon_key]\n else:\n for name in att_exon_alias:\n whole_gene[m][n][-1][name] = intron_loci[intron_loci_str]\n return(whole_gene)\n\ndef remove_duplicate_intron_name(whole_gene):\n intron_loci = {}\n if len(whole_gene[0])>0:\n print(whole_gene)\n strand_symbol = whole_gene[0][0][6]\n if strand_symbol == '+':\n whole_gene = remove_duplicate_plus_strand_intron_name(whole_gene)\n elif strand_symbol == '-':\n whole_gene = remove_duplicate_minus_strand_intron_name(whole_gene)\n else:\n pass\n else:\n pass\n return(whole_gene)\n\n\ndef generate_intron_name(whole_gene):\n for m in range(len(whole_gene)):\n for n in range(len(whole_gene[m])):\n exon = whole_gene[m][n]\n intron_name = exon[-1][att_transcript_name].split(att_transcript_seq)[1] + replace_intron_key + exon[-1][att_rank_key]\n whole_gene[m][n][-1][att_exon_key] = intron_name\n for name in att_exon_alias:\n whole_gene[m][n][-1][name] = whole_gene[m][n][-1][att_exon_key]\n return(whole_gene)\n\n\ndef generate_minus_strand_intron(transcript):\n tran_len = len(transcript)\n for n in range(1, tran_len):\n transcript[-n][4] = str(int(transcript[-n][3]) - 1)\n transcript[-n][3] = str(int(transcript[-n-1][4]) + 1)\n transcript = transcript[1:]\n return(transcript)\n\ndef generate_plus_strand_intron(transcript):\n for n in range(len(transcript) - 1):\n transcript[n][3] = str(int(transcript[n][4]) + 1)\n transcript[n][4] = str(int(transcript[n+1][3]) - 1)\n transcript = transcript[:-1]\n return(transcript)\n\ndef get_transcript_strand(transcript):\n strand_symbol = ''\n for exon in transcript:\n if not strand_symbol:\n strand_symbol = exon[6]\n elif strand_symbol != exon[6]:\n print('there are different strand in:', transcript)\n exit()\n else:\n pass\n return(strand_symbol)\n\n\ndef generate_intron_per_transcript(transcript):\n strand_symbol = get_transcript_strand(transcript)\n if strand_symbol == '+':\n transcript = generate_plus_strand_intron(transcript)\n elif strand_symbol == '-':\n transcript = generate_minus_strand_intron(transcript)\n else:\n pass\n return(transcript)\n\n\ndef have_gene(whole_gene):\n if whole_gene:\n return(True)\n\ndef write_gene_into_file(whole_gene, o):\n if have_gene(whole_gene):\n for n in range(len(whole_gene)):\n whole_gene[n] = generate_intron_per_transcript(whole_gene[n])\n whole_gene = generate_intron_name(whole_gene)\n whole_gene = remove_duplicate_intron_name(whole_gene)\n write_gene(whole_gene, o)\n else:\n pass\n\n\ndef add_transcript_exon(whole_gene, line):\n whole_gene[-1].append(line)\n return(whole_gene)\n\ndef add_transcript(whole_gene, line):\n whole_gene.append([line])\n return(whole_gene)\n\ndef different_transcript(whole_gene, line):\n gene_last_transcript_name = whole_gene[-1][-1][-1][att_transcript_name]\n new_transcript_name = line[-1][att_transcript_name]\n if gene_last_transcript_name != new_transcript_name:\n return(True)\n else:\n return(False)\n\n\ndef is_new_transcript(whole_gene, line):\n if not whole_gene:\n return(True)\n elif different_transcript(whole_gene, line):\n return(True)\n else:\n return(False)\n\n\ndef split_line_last_column(line):\n line[-1] = line[-1].split(att_seq)\n per_att_dict = {}\n for n in range(len(line[-1])):\n tmp_atts = line[-1][n].split(att_ass)\n per_att_dict[tmp_atts[0]] = tmp_atts[1]\n line[-1] = per_att_dict\n return(line)\n\n\ndef add_exon(whole_gene, line):\n line = split_line_last_column(line)\n if is_new_transcript(whole_gene, line):\n whole_gene = add_transcript(whole_gene, line)\n else:\n whole_gene = add_transcript_exon(whole_gene, line)\n return(whole_gene)\n\n\ndef is_new_exon(line, freature_exon_name):\n if line[2] in freature_exon_name:\n return(True)\n else:\n return(False)\n\n\ndef is_new_gene(line, freature_gene_name):\n new_gene_symbol = False\n if line[2] in freature_gene_name:\n new_gene_symbol = True\n return(new_gene_symbol)\n\n\ndef write_into_file(line, o):\n residue = line.strip().replace('#', '').replace(' ', '')\n if residue:\n o.writelines(line)\n\n\ndef not_annotation_line(line):\n if line[0] != '@' and line[0] != '#':\n return(True)\n else:\n return(False)\n\n\ndef get_introns_per_gene_from_gff3(input_file, output_file):\n whole_gene = []\n with open(input_file, 'r') as f:\n with open(output_file, 'w') as o:\n for line in f:\n if not_annotation_line(line):\n line = line.strip().split(col_seq)\n if is_new_gene(line, freature_gene_name):\n write_gene_into_file(whole_gene, o)\n whole_gene = []\n elif is_new_exon(line, freature_exon_name):\n whole_gene = add_exon(whole_gene, line)\n else:\n pass\n else:\n write_into_file(line, o)\n write_gene_into_file(whole_gene, o)\n\n\nif __name__ == '__main__':\n\n input_file, output_file = obtainParameter()\n\n startTime = datetime.datetime.now()\n print('Start Time:', startTime)\n\n freature_gene_name = freature_gene_name.split(',')\n freature_exon_name = freature_exon_name.split(',')\n output_order = output_order.split(',')\n att_exon_alias = att_exon_alias.split(',')\n get_introns_per_gene_from_gff3(input_file, output_file)\n\n endTime = datetime.datetime.now()\n time = (endTime - startTime).seconds\n print('End Time:', endTime)\n print(\"This programme run: %s s\" % (time))\n","repo_name":"ZhangXiaoTuo/LariatTools","sub_path":"database/generate_intron_annotation_from_gff3.py","file_name":"generate_intron_annotation_from_gff3.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73430952080","text":"# Практическое задание\n# 1. В этой игре человек загадывает число, а компьютер пытается его угадать.\n# В начале игры человек загадывает число от 1 до 100 в уме или записывает\n# его на листок бумаги. Компьютер начинает его отгадывать предлагая игроку варианты чисел.\n# Если компьютер угадал число, игрок выбирает “победа”.\n# Если компьютер назвал число меньше загаданного, игрок должен выбрать “загаданное число больше”.\n# Если компьютер назвал число больше, игрок должен выбрать “загаданное число меньше”.\n# Игра продолжается до тех пор пока компьютер не отгадает число.\n# Пример игры:\n# Допустим, пользователь загадал число 42\n# `15\n#\n# 35\n#\n# 96\n# <\n# 37\n#\n# 74\n# <\n# 52\n# <\n# 42\n# =`\nfrom random import randint\n\nprint(\"Загадайте число от 1 до 100 и запишите его\")\ninput(\"Press Enter to continue...\")\nlow = 1\ntop = 100\n\nc = \"\"\nwhile c != '=':\n if low == top:\n print(\"Я так не играю!\")\n break;\n n = randint(low, top)\n print(f\"n = {n}\")\n c = input(\"Если загаданное число больше n введите >, если меньше, введите <, если равно, введите равно: \")\n if c == '<':\n top = n - 1\n elif c == '>':\n low = n + 1\n elif (c != '=') and (c != '<') and (c != '>'):\n print(\"Введен неизвестный символ!, введите < > или = \")\nelse:\n print(\"Ура, я выиграл!\")\n\n","repo_name":"Volkivanv/IntroInPython","sub_path":"lesson6task1.py","file_name":"lesson6task1.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21526809039","text":"def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):\n \"\"\"Converts an integer to a base36 string.\"\"\"\n if not isinstance(number, (int)):\n raise TypeError('number must be an integer')\n\n base36 = ''\n sign = ''\n\n if number < 0:\n sign = '-'\n number = -number\n\n if 0 <= number < len(alphabet):\n return sign + alphabet[number]\n\n while number != 0:\n number, i = divmod(number, len(alphabet))\n base36 = alphabet[i] + base36\n\n return sign + base36\n\n\ndef base36decode(number):\n return int(number, 36)\n\n\ndef main():\n print(base36decode('zzzzzz'))\n print(base36encode(2176782335))\n print(base36encode(2176782336))\n\n\nif __name__ == '__main__':\n main()\n\n# https://stackoverflow.com/questions/1181919/python-base-36-encoding\n","repo_name":"binderclip/code-snippets-python","sub_path":"utils/015-base36/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"10949164224","text":"import datetime\nimport logging\nimport os\nimport pathlib\nfrom collections import deque\n\nfrom Crypto.PublicKey import RSA\n\nMAX_DATA_SIZE = 64\n\nn = 0xa337e14c4fa536382c3c290af8314a53d3be2025e1ad1343c2e017e9366f4e732007edc50eb280700ea877e2feace49a298c4f1734b93d4734bcb54b705848e458caaf24e6ce013d0db638a5c6c8c05675e452d868259c19710bbb7cdbe75f97ef4526e38a11a82ae4f33c2f1a37f672ed2ae6c12d8a06b722d3745abde383b1\ne = 0x10001\npubkey = RSA.construct((n, e), )\n\n\nclass DB:\n def __init__(self, dir_name: str, limit=None):\n os.makedirs(dir_name, exist_ok=True)\n self._dir_name = dir_name\n self._data = dict()\n self._queue = deque()\n self._limit = limit\n self._load()\n\n def _load(self):\n try:\n for file_name in os.listdir(self._dir_name):\n full_name = os.path.join(self._dir_name, file_name)\n\n if os.path.isfile(full_name) and os.path.getsize(full_name) <= MAX_DATA_SIZE:\n with open(full_name) as f:\n data = f.read()\n self._data[file_name] = data\n\n logging.info(\"Loaded %d items from %r\", len(self._data), self._dir_name)\n\n def get_mtime(x):\n return pathlib.Path(os.path.join(self._dir_name, x)).stat().st_mtime\n for key in sorted(self._data.keys(), key=get_mtime):\n self._queue.append(key)\n logging.debug(\"Item: %s\", key)\n\n except Exception:\n logging.exception(\"Error loading data from %r\", self._dir_name)\n\n def _filename(self, key: str):\n return os.path.join(self._dir_name, key)\n\n def put(self, key: str, value: str):\n with open(self._filename(key), \"w\") as f:\n f.write(value)\n if key not in self._data and self._limit is not None:\n self._queue.append(key)\n self._data[key] = value\n\n if self._limit is not None:\n while self.count() > self._limit:\n key_to_remove = self._queue.popleft()\n logging.debug(\"Removing oldest key from DB: %r\", key_to_remove)\n self.remove(key_to_remove)\n\n def get(self, key: str, default=None):\n return self._data.get(key, default)\n\n def keys(self):\n return self._data.keys()\n\n def remove(self, key: str):\n try:\n os.unlink(self._filename(key))\n except FileNotFoundError:\n pass\n del self._data[key]\n\n def count(self):\n return len(self._data)\n\n\ndef check_signature(flag_id: str, signature: str):\n try:\n time_str, signature = signature.split(\":\", maxsplit=1)\n time = datetime.datetime.strptime(time_str, \"%Y%m%d%H%M%S\")\n if (datetime.datetime.now() - time).total_seconds() > 10:\n logging.debug(\"Too old signature\")\n return False\n signed_data = (flag_id + \":\" + time_str).encode()\n return pubkey.verify(signed_data, (int(signature, 16),))\n except Exception as e:\n logging.debug(\"Bad signature: %s\", e)\n return False\n","repo_name":"HITB-CyberWeek/hitbsecconf-ctf-2021","sub_path":"services/fw/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"} +{"seq_id":"14442083493","text":"import os\r\nimport subprocess\r\nimport stat\r\nimport shutil\r\nimport unittest\r\nfrom create_repository import *\r\n\r\ndirn = [\"myfold\", \"myfold_repo\"]\r\n\r\n\r\nclass CreateRepositoryFromDir_test(unittest.TestCase):\r\n def tearDown(self):\r\n for dirname0 in dirn:\r\n for dirpath, dirnames, filenames \\\r\n in os.walk(dirname0, topdown=False):\r\n for filename in filenames:\r\n filename = os.path.join(dirpath, filename)\r\n os.chmod(filename, stat.S_IWRITE | stat.S_IREAD)\r\n os.remove(filename)\r\n os.rmdir(dirpath)\r\n\r\n def test_create(self):\r\n def addFiles():\r\n os.mkdir(dirn[0])\r\n f = open(os.path.join(dirn[0], \".gitignore\"), \"w\")\r\n f.write(\"test\")\r\n f.close()\r\n os.mkdir(os.path.join(dirn[0], \"sub\"))\r\n f = open(os.path.join(dirn[0], \"sub\", \"abacaba\"), \"w\")\r\n f.write(\"release\")\r\n f.close()\r\n\r\n addFiles()\r\n create_repository_from_dir(dirn[0], dirn[1],\r\n User(\"Username\", \"mail\"))\r\n os.chdir(dirn[1])\r\n command_line = \"git ls-tree -r --name-only --full-tree master\"\r\n try:\r\n out = subprocess.check_output(command_line)\r\n finally:\r\n os.chdir(\"..\")\r\n self.assertListEqual(sorted(out.decode(\"ASCII\").split(\"\\n\")[:-1]),\r\n sorted([\".gitignore\", \"sub/abacaba\"]))\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"parallel-p/please","sub_path":"server/create_repository_test.py","file_name":"create_repository_test.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"36358342013","text":"'''\nThis script shows how to predict stock prices using a basic RNN\n'''\nimport os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom Utils import read_file, to_output_form\n\nfrom stock import Models\n\ntf.set_random_seed(777) # reproducibility\n\nseq_length = 7\ndata_dim = 4\nhidden_dim = 10\noutput_dim = 1\nlearning_rate = 0.01\niterations = 500\n\nnormalize_data = Models.normalize_data_meanstd\nunormalize_data = Models.unormalize_data_meanstd\n\ndef predict_func(filename):\n data_filename = os.path.join(\"data\", filename + '.csv')\n output_filename = os.path.join(\"out\", filename + '.txt')\n\n length, list_open, list_high, list_low, list_close= read_file(data_filename)\n\n length, input_data, label_data = normalize_data(length, list_open, list_high, list_low, list_close)\n\n data_input, data_close = [], []\n for j in range(length - seq_length):\n _x = input_data[j:j + seq_length]\n _y = label_data[j + seq_length]\n data_input.append(_x)\n data_close.append(_y)\n\n\n train_size = int(length * 0.7) - 1\n\n #start index 1 for use prev_data for costing\n train_input, test_input = np.array(data_input[1:train_size]), np.array(data_input[train_size:])\n train_close, test_close = np.array(data_close[1:train_size]), np.array(data_close[train_size:])\n train_close_prev = np.array(data_close[0:train_size-1])\n\n X = tf.placeholder(tf.float32, [None, seq_length, data_dim])\n Y = tf.placeholder(tf.float32, [None, 1])\n Y_prev = tf.placeholder(tf.float32, [None, 1])\n\n # build a LSTM network\n cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, forget_bias = 1.0, state_is_tuple=True)\n #cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = 0.5)\n\n outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)\n cost = (Y - Y_pred) * (\n tf.cast((Y_pred - Y_prev) * (Y - Y_prev) < 0, tf.float32) * 2 + tf.cast((Y_pred - Y_prev) * (Y - Y_prev) >= 0,\n tf.float32))\n # cost/loss\n loss = tf.reduce_mean(tf.square(cost)) # sum of the squares\n # optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate)\n training = optimizer.minimize(loss)\n\n # RMSE\n targets = tf.placeholder(tf.float32, [None, 1])\n predictions = tf.placeholder(tf.float32, [None,1])\n rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))\n\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Training step\n for k in range(iterations):\n _, step_loss = sess.run([training, loss], feed_dict={X: train_input, Y: train_close, Y_prev: train_close_prev})\n\n # Test step\n test_predict = sess.run(Y_pred, feed_dict={X: test_input})\n rmse = sess.run(rmse, feed_dict={targets: test_close, predictions: test_predict})\n\n print(test_predict[-1])\n\n test_close = unormalize_data(test_close, list_close)\n test_predict = unormalize_data(test_predict, list_close)\n\n with open(output_filename, 'w') as f:\n f.write(to_output_form(test_close, test_predict))\n\ndef main():\n predict_func(sys.argv[1])\n\nif __name__ == \"__main__\":\n main()","repo_name":"jieeeun/2017Capstone_09","sub_path":"stock/PredictModel.py","file_name":"PredictModel.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36212802167","text":"# Como leer un archivo de excel\nimport openpyxl\n\nfilesheet = \"./Jugadores.xlsx\"\n\n# Leer el archivo\nwb = openpyxl.load_workbook(filesheet)\n\n# Fijar la hoja\nhojaPlayers = wb.get_sheet_by_name('Players')\n\ndef nuevoUsuario():\n #información dentro de las filas\n valido = True\n repetir = True\n\n #Repetir agregar\n while(repetir == True):\n #Validar que no exista el mismo ID\n while (valido == True):\n print(\"\\n ============AGREGAR USUARIO============ \\n\")\n \n idUsuario = input(\"ID del usuario \\n\")\n\n for cell in hojaPlayers[\"A\"]:\n if (cell.value == idUsuario):\n valido = True\n break\n elif(cell.value != idUsuario):\n valido = False\n\n if (valido == False):\n print(\"ID valido\")\n print (\"\")\n nombreUsuario = input(\"Nombre del usuario \\n\")\n datos = [(idUsuario, nombreUsuario, 0)]\n #Agregar usuario en linea\n for row in datos:\n hojaPlayers.append(row)\n wb.save(filesheet)\n print(\"******Jugador agregado******\")\n\n else:\n \n print(\"\\nEl ID ingresado ya existe\\nDigite uno nuevo\")\n \n agregar = input(\"\\n¿Desea agregar más usuarios? \\n Digite SI o NO \\n\")\n\n if(agregar == \"SI\" or (agregar == \"Si\" or agregar == \"si\")):\n repetir = True\n valido = True\n else:\n repetir = False\n print()\n print (\"\\n 1) Menu principal \\n 2) Volver\")\n opcion = int(input(\"Ingrese la opcion a la que desea ingresar: \"))\n if opcion == 1:\n import main as mn\n mn.menuPrincipal()\n else:\n menuJugadores()\n\ndef eliminarJugador():\n\n print(\"\\n******Lista de Jugadores******\")\n print(\"\\nID\", \" Nombre\")\n for i in range(2, hojaPlayers.max_row +1):\n print()\n for j in range(1, hojaPlayers.max_column -3):\n celda = hojaPlayers.cell(row=i, column =j)\n print(celda.value, \" \", end = \" \")\n print(\"\")\n idUsuario = input(\"\\nID del usuario que desea eliminar \\n\")\n contador = 0\n for cell in hojaPlayers[\"A\"]:\n contador += 1\n if (cell.value == idUsuario):\n hojaPlayers.delete_rows(contador) \n wb.save(filesheet) \n print(\"******Jugador eliminado******\")\n break\n print()\n print (\"\\n1) Menu principal\",\n \"\\n2) Volver\")\n opcion = int(input(\"Ingrese la opcion a la que desea ingresar: \"))\n if opcion == 1:\n import main as mn\n mn.menuPrincipal()\n else:\n menuJugadores()\n \ndef mostrarJugadores():\n print(\"\\nJugadores Disponibles\")\n print(\"\\nID\", \" Nombre\")\n for i in range(2, hojaPlayers.max_row +1):\n print()\n for j in range(1, hojaPlayers.max_column -3):\n celda = hojaPlayers.cell(row=i, column =j)\n print(celda.value, \" \", end = \" \")\n \n print()\n print (\"\\n1) Menu principal\",\n \"\\n2) Volver\")\n opcion = int(input(\"Ingrese la opcion a la que desea ingresar: \"))\n if opcion == 1:\n import main as mn\n mn.menuPrincipal()\n else:\n menuJugadores()\n\ndef menuJugadores():\n print (\"\\n1) Agregar jugador\",\n \"\\n2) Eliminar jugador\",\n \"\\n3) Mostrar jugadores activos\",\n \"\\n4) Volver\\n\")\n opcion = int(input(\"Ingrese la opcion a la que desea ingresar: \"))\n if opcion == 1:\n #codigo agregar jugadores \n nuevoUsuario()\n if opcion == 2:\n #codigo agregar jugadores \n eliminarJugador()\n if opcion == 3:\n #Código ahorcado\n mostrarJugadores()\n if opcion == 4:\n #codigo agregar jugadores\n import main as mn \n mn.volverMenuP()\n","repo_name":"Joseth-04/PB01","sub_path":"Usuarios.py","file_name":"Usuarios.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27316608941","text":"from behave import given, when, then\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\n\n\n\n@when('clico em adicionar')\ndef clicar_adicionar(context):\n\n botao_adicionar = context.navegador.find_element(By.CSS_SELECTOR, \"button[type='button'][aria-haspopup='dialog'][aria-expanded='false'][aria-controls='radix-6']\")\n botao_adicionar.click()\n\n@when('preencho o campo Nome do Produto')\ndef step_preencher_nome_produto(context):\n nome_do_produto = context.navegador.find_element(By.NAME,\"name\")\n nome_do_produto.click()\n nome_do_produto.send_keys(\" Calça Jeans\")\n\n@when('descricao do produto')\ndef step_preencher_descricao(context):\n descricao_produto = context.navegador.find_element(By.NAME,\"description\")\n descricao_produto.click()\n descricao_produto.send_keys(\"Calça Jeans da Moda\")\n\n@when('seleciono a categoria Roupa')\ndef step_selecionar_categoria_roupa(context):\n categoria_roupa = context.navegador.find_element(By.XPATH, \"//label[span[text()='Roupas']]\")\n categoria_roupa.click()\n\n\n\n@when('preencho o campo preco')\ndef step_preencher_preco(context):\n preco = context.navegador.find_element(By.NAME,\"price\")\n preco.click()\n preco.send_keys(\"100,01\")\n\n\n\n@when('seleciono uma imagem')\ndef step_selecionar_imagem(context):\n upload_imagem = context.navegador.find_element(By.NAME,\"image\")\n caminho_imagem = \"E:\\Projeto_Final_IJJ_V.F\\imagem\\calcajeans.jpeg\"\n upload_imagem.send_keys(caminho_imagem)\n\n@when('preencho o valor do frete')\ndef step_preencher_valor_frete(context):\n frete = context.navegador.find_element(By.NAME, \"shipment\")\n frete.click()\n frete.send_keys(\"15,50\")\n\n@when('clico em enviar novo produto')\ndef step_clicar_enviar_produto(context):\n enviar_cadastrado = context.navegador.find_element(By.CSS_SELECTOR, 'button[type=\"submit\"]')\n enviar_cadastrado.submit()\n\n\n@then('o alerta produto enviado com sucesso aparece')\n\ndef step_alerta_produto (context):\n sleep(1)\n caminho_salvar_imagem =\"E:\\Projeto_Final_IJJ_V.F\\evidencia\"\n context.navegador.save_screenshot(caminho_salvar_imagem)\n \n \n \n \n\n\n\n\n","repo_name":"ugomes/projeot_final_M6_IJJ","sub_path":"features/steps/cadastro_produto_steps.py","file_name":"cadastro_produto_steps.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"25083397836","text":"import numpy as np\nfrom scipy.io import wavfile\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nimport csv\nimport matplotlib.pyplot as plt\n\n\ns_rate = 44100\nT = 1/s_rate\nt = 5\nN = s_rate * t \n\nfreq = 440\nomega = 2*np.pi*freq\n\nt_seq = np.arange(N)*T\ny_sin = np.sin(omega*t_seq)\nx_sin = np.cos(omega*t_seq)\n\nwavfile.write(\"output/sound_wave_x.wav\", 44100, np.int16(x_sin * 32767))\nwavfile.write(\"output/sound_wave_y.wav\", 44100, np.int16(y_sin * 32767))\n\nleft_channel = AudioSegment.from_wav(\"output/sound_wave_x.wav\")\nright_channel = AudioSegment.from_wav(\"output/sound_wave_y.wav\")\nstereo_sound = AudioSegment.from_mono_audiosegments(left_channel, right_channel)\nstereo_sound.export(\"output/circle.wav\", format=\"wav\")\n","repo_name":"leo-cf-tian/Physics-Capstone","sub_path":"dev/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"2930575500","text":"\"\"\"\nTables which represent a connection of plants and factor_types are defined here.\n\nCurrent list is: plants_climate_zones, plants_features, plants_humidity_types, plants_light_types,\n plants_limitation_factors, plants_soil_acidity_types, plants_soil_fertility_types, plants_soil_types\n\"\"\"\nfrom sqlalchemy import Boolean, Column, Enum, ForeignKey, Table\n\nfrom plants_api.db import metadata\nfrom plants_api.db.entities.enums import CohabitationType\n\n\nCohabitationTypeEnum = Enum(CohabitationType, name=\"cohabitation_type\")\n\nplants_climate_zones = Table(\n \"plants_climate_zones\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\n \"climate_zone_id\",\n ForeignKey(\"climate_zones.id\"),\n primary_key=True,\n nullable=False,\n ),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for climate zones.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `climate_zone_id` - climate zone identifier (climate_zones.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\n\nplants_features = Table(\n \"plants_features\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\"feature_id\", ForeignKey(\"features.id\"), primary_key=True, nullable=False),\n Column(\"is_stable\", Boolean, nullable=False),\n)\n\"\"\"\nPlants features.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `feature_id` - feature identifier (features.id), int\n- `is_stable` - indicates whether feature is always present, boolean\n\"\"\"\n\nplants_humidity_types = Table(\n \"plants_humidity_types\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\n \"humidity_type_id\",\n ForeignKey(\"humidity_types.id\"),\n primary_key=True,\n nullable=False,\n ),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for humidity types.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `humidity_type_id` - humidity type identifier (humidity_types.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\nplants_light_types = Table(\n \"plants_light_types\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\"light_type_id\", ForeignKey(\"light_types.id\"), primary_key=True, nullable=False),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for light types.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `light_type_id` - light type identifier (light_types.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\nplants_limitation_factors = Table(\n \"plants_limitation_factors\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\n \"limitation_factor_id\",\n ForeignKey(\"limitation_factors.id\"),\n primary_key=True,\n nullable=False,\n ),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for limitation factors.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `limitation_factor_id` - limitation factor identifier (limitation_factors.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\nplants_soil_acidity_types = Table(\n \"plants_soil_acidity_types\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\n \"soil_acidity_type_id\",\n ForeignKey(\"soil_acidity_types.id\"),\n primary_key=True,\n nullable=False,\n ),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for soil acidity types.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `soil_acidity_type_id` - soil acidity type identifier (soil_acidity_types.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\nplants_soil_fertility_types = Table(\n \"plants_soil_fertility_types\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\n \"soil_fertility_type_id\",\n ForeignKey(\"soil_fertility_types.id\"),\n primary_key=True,\n nullable=False,\n ),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for soil acidity types.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `soil_fertility_type_id` - soil fertility type identifier (soil_fertility_types.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n\nplants_soil_types = Table(\n \"plants_soil_types\",\n metadata,\n Column(\"plant_id\", ForeignKey(\"plants.id\"), primary_key=True, nullable=False),\n Column(\"soil_type_id\", ForeignKey(\"soil_types.id\"), primary_key=True, nullable=False),\n Column(\"type\", CohabitationTypeEnum, nullable=False),\n)\n\"\"\"\nPlants suitable for soil types.\n\nColumns:\n- `plant_id` - plant identifier (plants.id), int\n- `soil_type_id` - soil type identifier (soil_types.id), int\n- `type` - tolerance type, CohabitationType enumeration\n\"\"\"\n","repo_name":"egov-itmo/derevo","sub_path":"backend/plants_api/db/entities/plants_factors.py","file_name":"plants_factors.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"25967528374","text":"#import the module inbox (that handles incoming sms things)\n\nimport inbox\n\n#import the module e32\n\nimport e32\n\n#import the module appuifw\n\nimport appuifw\n\n#import the module messaging\n\nimport messaging\n\n \n\n#set application title\n\n\n\ndef timon():\n\n# Define exit function\n\n\tappuifw.app.title=u\"SMS Timer\"\n\n \n\n\tdef exit_key_handler():\n\n \t\tapp_lock.signal()\n\n\tdef run():\n \n\n# define the list of items (items must written in unicode! -> put a u in front)\n\n\t\tL = [u'Set Timer',u'Exit']\n\n \n\n# create the selection list\n\n\t\tindex = appuifw.selection_list(choices=L , search_field=1)\n\n \n\n#Trigger action upon index\n\n \n\n\t\tif index == 0:\n\n\t\t\tdata = appuifw.query(u\"Enter SMS text\",\"text\")\n\n\t\t\tnumber=appuifw.query(u\"Enter recepient number\",\"text\")\n\n\t\t\tappuifw.note(u\"Enter time to wait before sending !\", \"info\")\n\n\t\t\tt=appuifw.query(u\"Send after time (in mins) :\",\"number\")\n\n\t\t\tt=t*60\n\n\t\t\twhile t>0:\n\n\t\t\t\te32.ao_sleep(1)\n\n\t\t\t\tt=t-1\n\n\t\t\tmessaging.sms_send(number, data)\n\n\t\t\tappuifw.note(u\"Message sent!\", \"info\")\n\n\t\t\trun() # Again call the main function\n\n\t\tif index == 1:\n\n\t\t\texit_key_handler()\n\n \n\n\t#calls the main function\n\n\trun()\n\n\tapp_lock = e32.Ao_lock()\n\n \n\n\tappuifw.app.exit_key_handler = exit_key_handler\n\n\tapp_lock.wait()\n","repo_name":"linux-devil/Python-apps-for-Symbian","sub_path":"sms_timer.py","file_name":"sms_timer.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25156127447","text":"import argparse\nimport os, sys\nimport numpy as np\nfrom collections import defaultdict\nimport cv2\nimport json\nfrom cloudvolume import CloudVolume\nfrom taskqueue import LocalTaskQueue\nimport igneous.task_creation as tc\nimport shutil\nfrom tqdm import tqdm\n\nHOME = os.path.expanduser(\"~\")\nPATH = os.path.join(HOME, 'programming/pipeline_utility/src')\nsys.path.append(PATH)\nfrom pipeline.lib.sqlcontroller import SqlController\nfrom pipeline.lib.file_location import FileLocationManager\nPOLYGON_ID = 54\n\n\ndef create_segmentation(animal, debug=False):\n fileLocationManager = FileLocationManager(animal)\n sqlController = SqlController(animal)\n from pipeline.utilities.utilities_process import SCALING_FACTOR\n # vars\n sections = sqlController.get_sections(animal, 1)\n if len(sections) < 10:\n sections = os.listdir( os.path.join(fileLocationManager.prep, 'CH1/thumbnail_aligned'))\n \n num_sections = len(sections)\n if num_sections < 10:\n print('no sections')\n sys.exit()\n \n width = sqlController.scan_run.width\n height = sqlController.scan_run.height\n scale_xy = sqlController.scan_run.resolution\n z_scale = sqlController.scan_run.zresolution\n scales = np.array([int(scale_xy*32*1000), int(scale_xy*32*1000), int(z_scale*1000)])\n if debug:\n print('scales', scales)\n\n width = int(width * SCALING_FACTOR)\n height = int(height * SCALING_FACTOR)\n aligned_shape = np.array((width, height))\n if debug:\n print('aligned shape', aligned_shape)\n \n volume = np.zeros((aligned_shape[1], aligned_shape[0], num_sections), dtype=np.uint8)\n \n # get all distinct structures in DB\n abbreviations = sqlController.get_distinct_labels(animal)\n if debug:\n print(f'Working with {len(abbreviations)} structures')\n structure_dict = sqlController.get_structures_dict()\n segment_properties = {}\n # We loop through every structure from the CSV data in the DB for a brain\n for abbreviation in abbreviations:\n try:\n structure_info = structure_dict[abbreviation]\n color = structure_info[1]\n desc = structure_info[0]\n FK_structure_id = structure_info[2]\n abbrev = abbreviation.replace('_L','').replace('_R','')\n k = f'{abbrev}: {desc}'\n segment_properties[k] = color\n except KeyError:\n print('key error for', abbreviation)\n continue\n rows = sqlController.get_annotations_by_structure(animal, 1, abbreviation, POLYGON_ID)\n polygons = defaultdict(list)\n \n for row in rows:\n xy = (row.x/scale_xy, row.y/scale_xy)\n z = int(np.round(row.z/z_scale))\n polygons[z].append(xy)\n \n #### loop through all the sections and write to a template, then add that template to the volume\n # structure_volume = np.zeros((aligned_shape[1], aligned_shape[0], num_sections), dtype=np.uint8)\n \n minx, maxx, miny, maxy, minz, maxz = sqlController.get_structure_min_max(animal, abbreviation, POLYGON_ID)\n minx = int(round((minx/scale_xy)*SCALING_FACTOR))\n maxx = int(round((maxx/scale_xy)*SCALING_FACTOR))\n miny = int(round((miny/scale_xy)*SCALING_FACTOR))\n maxy = int(round((maxy/scale_xy)*SCALING_FACTOR))\n top_left = (minx, miny)\n bottom_right = (maxx, maxy)\n \n for section, points in tqdm(polygons.items()):\n template = np.zeros((aligned_shape[1], aligned_shape[0]), dtype=np.uint8)\n points = np.array(points)\n points = np.round(points*SCALING_FACTOR)\n points = points.astype(np.int32)\n cv2.fillPoly(template, pts = [points], color = color)\n cv2.rectangle(template, top_left, bottom_right, color, 1)\n \n volume[:, :, section - 1] += template\n # structure_volume[:, :, section - 1] += template\n \n offset = (0, 0, 0)\n layer_type = 'segmentation'\n chunks = [64, 64, 64]\n num_channels = 1\n OUTPUT_DIR = os.path.join(fileLocationManager.neuroglancer_data, 'structures')\n \n if os.path.exists(OUTPUT_DIR):\n shutil.rmtree(OUTPUT_DIR)\n \n os.makedirs(OUTPUT_DIR, exist_ok=True)\n # swap axes for neuroglancer viewing\n volume = np.swapaxes(volume, 0, 1)\n \n #### initialize the Cloudvolume\n cloudpath = f'file://{OUTPUT_DIR}'\n info = CloudVolume.create_new_info(\n num_channels = num_channels,\n layer_type = layer_type,\n data_type = str(volume.dtype), # Channel images might be 'uint8'\n encoding = 'raw', # raw, jpeg, compressed_segmentation, fpzip, kempressed\n resolution = scales, # Voxel scaling, units are in nanometers\n voxel_offset = offset, # x,y,z offset in voxels from the origin\n chunk_size = chunks, # units are voxels\n volume_size = volume.shape, # e.g. a cubic millimeter dataset\n )\n vol = CloudVolume(cloudpath, mip=0, info=info, compress=True)\n vol.commit_info()\n vol[:, :, :] = volume[:, :, :]\n #### create json for neuroglancer info files\n cv = CloudVolume(cloudpath, 0)\n cv.info['segment_properties'] = 'names'\n cv.commit_info()\n \n segment_properties_path = os.path.join(cloudpath.replace('file://', ''), 'names')\n os.makedirs(segment_properties_path, exist_ok=True)\n \n info = {\n \"@type\": \"neuroglancer_segment_properties\",\n \"inline\": {\n \"ids\": [str(v) for k, v in segment_properties.items()],\n \"properties\": [{\n \"id\": \"label\",\n \"type\": \"label\",\n \"values\": [str(k) for k, v in segment_properties.items()]\n }]\n }\n }\n with open(os.path.join(segment_properties_path, 'info'), 'w') as file:\n json.dump(info, file, indent=2)\n\n #### 1st create mesh\n mse = 40 # default value \n tq = LocalTaskQueue(parallel=1)\n mesh_dir = f'mesh_mip_0_err_{mse}'\n cv.info['mesh'] = mesh_dir\n cv.commit_info()\n tasks = tc.create_meshing_tasks(cv.layer_cloudpath, mip=0, mesh_dir=mesh_dir, max_simplification_error=mse)\n tq.insert(tasks)\n tq.execute()\n \n ##### 2nd mesh task, create manifest\n tasks = tc.create_mesh_manifest_tasks(cv.layer_cloudpath, mesh_dir=mesh_dir)\n tq.insert(tasks)\n tq.execute()\n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Work on Animal')\n parser.add_argument('--animal', help='Enter the animal', required=True)\n parser.add_argument('--debug', help='Enter true or false', required=False, default='false')\n \n\n args = parser.parse_args()\n animal = args.animal\n debug = bool({'true': True, 'false': False}[str(args.debug).lower()])\n create_segmentation(animal, debug)\n","repo_name":"ActiveBrainAtlas2/preprocessing-pipeline","sub_path":"in_development/Litao/create_segmentations_from_DB.py","file_name":"create_segmentations_from_DB.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12616030633","text":"\"\"\"\nTests for openedx.core.djangolib.markup\n\"\"\"\n\n\nimport unittest\n\nimport ddt\nfrom bs4 import BeautifulSoup\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import ngettext\nfrom mako.template import Template\n\nfrom openedx.core.djangolib.markup import HTML, Text, strip_all_tags_but_br\n\n\n@ddt.ddt\nclass FormatHtmlTest(unittest.TestCase):\n \"\"\"Test that we can format plain strings and HTML into them properly.\"\"\"\n\n @ddt.data(\n (\"hello\", \"hello\"),\n (\"\", \"<hello>\"),\n (\"It's cool\", \"It's cool\"),\n ('\"cool,\" she said.', '"cool," she said.'),\n (\"Stop & Shop\", \"Stop & Shop\"),\n (\"
нтмℓ-єѕ¢αρє∂\", \"<a>нтмℓ-єѕ¢αρє∂</a>\"),\n )\n def test_simple(self, before_after):\n (before, after) = before_after\n assert str(Text(_(before))) == after # pylint: disable=translation-of-non-string\n assert str(Text(before)) == after\n\n def test_formatting(self):\n # The whole point of this function is to make sure this works:\n out = Text(_(\"Point & click {start}here{end}!\")).format(\n start=HTML(\"\"),\n end=HTML(\"\"),\n )\n assert str(out) == \"Point & click here!\"\n\n def test_nested_formatting(self):\n # Sometimes, you have plain text, with html inserted, and the html has\n # plain text inserted. It gets twisty...\n out = Text(_(\"Send {start}email{end}\")).format(\n start=HTML(\"\").format(email=\"A&B\"),\n end=HTML(\"\"),\n )\n assert str(out) == \"Send email\"\n\n def test_mako(self):\n # The default_filters used here have to match the ones in edxmako.\n template = Template(\n \"\"\"\n <%!\n from django.utils.translation import gettext as _\n\n from openedx.core.djangolib.markup import HTML, Text\n %>\n ${Text(_(u\"A & {BC}\")).format(BC=HTML(\"B & C\"))}\n \"\"\",\n default_filters=['decode.utf8', 'h'],\n )\n out = template.render()\n assert out.strip() == 'A & B & C'\n\n def test_ungettext(self):\n for i in [1, 2]:\n out = Text(ngettext(\"1 & {}\", \"2 & {}\", i)).format(HTML(\"<>\"))\n assert out == f'{i} & <>'\n\n def test_strip_all_tags_but_br_filter(self):\n \"\"\" Verify filter removes every tags except br \"\"\"\n template = Template(\n \"\"\"\n <%page expression_filter=\"h\"/>\n <%!\n from openedx.core.djangolib.markup import strip_all_tags_but_br\n %>\n ${\" course
title \n \n \n \n \n \n a link\n another link\n

a paragraph

\n
secret EVIL!
\n of EVIL!\n \n
\n Password: \n
\n annoying EVIL!\n spam spam SPAM!\n \n \n \n '''\n %>\n ${html_content | n, clean_dangerous_html}\n \"\"\"\n )\n rendered_template = template.render()\n html_soup = BeautifulSoup(rendered_template, 'html.parser')\n\n assert html_soup.find('a')\n assert html_soup.find('div')\n assert html_soup.find('div', attrs={'style': 'display: none'})\n assert html_soup.find('p')\n assert html_soup.find('img')\n\n assert not html_soup.find('a', attrs={'onclick': 'evil_function()'})\n assert not html_soup.find('html')\n assert not html_soup.find('head')\n assert not html_soup.find('script')\n assert not html_soup.find('style')\n assert not html_soup.find('link')\n assert not html_soup.find('iframe')\n assert not html_soup.find('form')\n assert not html_soup.find('blink')\n assert not html_soup.find('object')\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangolib/tests/test_markup.py","file_name":"test_markup.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"26267562904","text":"'''\nNeste arquivo são disponibilizadas as funções criadas pelos professores para o jogo. \nEssas funções utilizam o módulo curses para desenhar strings na tela e ler as teclas apertadas pelo jogador.\nVocê não precisa entender como elas funcionam, mas pode dar uma olhada se quiser.\n'''\nimport curses\n\nfrom . import constantes_do_motor as consts\n\n\ndef chama_funcao_jogo(jogo):\n def jogo_wrapper(tela):\n # Verifica tamanho disponível no terminal (em caracteres)\n altura_tela, largura_tela = tela.getmaxyx()\n \n # Cria janela na posição (0, 0)\n janela = curses.newwin(altura_tela, largura_tela, 0, 0)\n \n # Aceita teclas especiais\n janela.keypad(True)\n # Esconde o cursor\n curses.curs_set(0)\n\n # Chama função jogo\n jogo(janela, altura_tela, largura_tela)\n curses.wrapper(jogo_wrapper)\n\n\ndef pega_tecla_apertada(janela):\n # Pega a tecla apertada\n tecla = janela.getch()\n\n # Mapeia teclas do curses para valores das constantes\n # do motor gráfico. Isso é necessário porque o curses\n # usa números para representar as teclas especiais\n # (setas, enter, escape, etc.) e o motor gráfico\n # usa strings.\n mapeamento_teclas = {\n curses.KEY_UP: consts.SETA_CIMA,\n curses.KEY_DOWN: consts.SETA_BAIXO,\n curses.KEY_LEFT: consts.SETA_ESQUERDA,\n curses.KEY_RIGHT: consts.SETA_DIREITA,\n curses.KEY_ENTER: consts.ENTER,\n ord(' '): consts.ESPACO,\n 27: consts.ESCAPE,\n }\n if tecla in mapeamento_teclas:\n return mapeamento_teclas[tecla]\n # Mapeia letras de números para strings\n if ord('a') <= tecla <= ord('z') or ord('A') <= tecla <= ord('Z'):\n return chr(tecla)\n # Neste caso, a tecla não está entre as que sabemos mapear\n # então retornamos o valor da tecla diretamente\n return tecla\n\n\ndef preenche_fundo(janela, cor_rgb):\n \"\"\"\n Limpa a janela e preenche o fundo com a cor especificada.\n Todas as posições serão preenchidas com a string ' '.\n\n A cor é uma lista com 3 elementos, cada um representando a intensidade de vermelho, verde e azul no\n intervalo de 0 a 255.\n \"\"\"\n janela.clear()\n id_par = __pega_par_de_cores(cor_rgb, cor_rgb)\n janela.bkgd(' ', curses.color_pair(id_par))\n\n\ndef desenha_string(janela, x, y, string, cor_fundo, cor_texto):\n \"\"\"\n Desenha uma string na janela, na posição (x, y), com as cores\n especificadas.\n\n A posição (0, 0) é a esquerda superior da janela.\n cor_fundo e cor_texto são listas com 3 elementos, cada um\n representando a intensidade de vermelho, verde e azul no \n intervalo de 0 a 255.\n \"\"\"\n id_par = __pega_par_de_cores(cor_fundo, cor_texto)\n janela.addstr(y, x, string, curses.color_pair(id_par))\n\n\ndef mostra_janela(janela):\n janela.refresh()\n\n\n# As funções abaixo são usadas internamente pelo motor gráfico\n# e não precisam ser chamadas pelo jogo.\n# Dicionário que mapeia cores do jogo para cores do curses\n__CORES_E_IDS = {}\n__PARES_E_IDS = {}\n\n\ndef __pega_cor(rgb):\n \"\"\"\n No curses, cores são identificadas por números, que devem\n ser inicializados antes de serem usadas. Para isso, criamos\n um dicionário que mapeia cores do jogo para cores do curses.\n \"\"\"\n\n # O módulo curses usa cores de 0 a 1000, então precisamos\n # converter as cores do jogo (que são de 0 a 255) para\n # as cores do curses.\n vermelho, verde, azul = rgb\n vermelho = int(vermelho * 1000 / 255)\n verde = int(verde * 1000 / 255)\n azul = int(azul * 1000 / 255)\n\n cor = (vermelho, verde, azul)\n if cor in __CORES_E_IDS:\n id_cor = __CORES_E_IDS[cor]\n else:\n id_cor = len(__CORES_E_IDS) + 1\n __CORES_E_IDS[cor] = id_cor\n curses.init_color(id_cor, vermelho, verde, azul)\n return id_cor\n\n\ndef __pega_par_de_cores(cor_de_fundo, cor_de_frente):\n \"\"\"\n No curses, para usar cores, devemos criar um par de cores\n (uma cor de fundo e uma cor de frente) e usar esse par\n para desenhar na tela. Para isso, criamos um dicionário\n que mapeia pares de cores do jogo para pares de cores do curses.\n \"\"\"\n\n par = (__pega_cor(cor_de_fundo), __pega_cor(cor_de_frente))\n if par in __PARES_E_IDS:\n id_par = __PARES_E_IDS[par]\n else:\n id_par = len(__PARES_E_IDS) + 1\n __PARES_E_IDS[par] = id_par\n curses.init_pair(id_par, par[1], par[0])\n return id_par\n","repo_name":"Gubscruz/pygame","sub_path":"motor_grafico/motor_curses.py","file_name":"motor_curses.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21932138326","text":"\"\"\"sup_web URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^area/', views.area_map, name='area_map'),\n url(r'^lamphandle/', views.lamphandle, name='lamphandle'),\n url(r'^area_get/', views.area_get, name='area_get'),\n url(r'^parking_get/', views.parking_get, name='parking_get'),\n url(r'^lamp_online_get/', views.lamp_online_get, name='lamp_online_get'),\n url(r'^lamp_register_get/', views.lamp_register_get, name='lamp_register_get'),\n url(r'^parkings/$', views.parking_list), \n url(r'^parkings/(?P[0-9]+)$', views.parking_detail),\n]\n","repo_name":"mrow4a/UNI","sub_path":"Smart-Urban-Parking/supweb/parking/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34698237942","text":"#Vectors calculator by: CABUNGCAL, PAGULAYAN AND DELA CRUZ S12-A\r\nfrom tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Vectors Calculator\")\r\nimport math\r\n#row\r\nr = 0\r\n#sums\r\nxsum = 0\r\nysum = 0\r\nforget = 0\r\nrow_list = []\r\n\r\n#defines\r\ndef compute():\r\n\tglobal r\r\n\tglobal xsum\r\n\tglobal ysum\r\n\tglobal res_lab\r\n\tglobal ysum_lab\r\n\tglobal xsum_lab\r\n\tglobal summation_lab\r\n\tglobal resultant_lab\r\n\tglobal direction_lab\r\n\tglobal dirnum_lab\r\n\tglobal forget\r\n\tforce.focus_set()\r\n\tforce_val = float(force.get())\r\n\tdirection_val = direction.get().upper()\r\n\tdegree_val = float(degree.get())\r\n\tforce.delete(0, \"end\")\r\n\tdirection.delete(0, \"end\")\r\n\tdegree.delete(0, \"end\")\r\n\t#x components and y components\r\n\tif direction_val == \"W\":\r\n\t\txcomp = -(abs(force_val))\r\n\t\tycomp = 0\r\n\telif direction_val == \"S\":\r\n\t\txcomp = 0\r\n\t\tycomp = -(abs(force_val))\r\n\telif direction_val == \"N\":\r\n\t\txcomp = 0\r\n\t\tycomp = abs(force_val)\r\n\telif direction_val == \"E\":\r\n\t\txcomp = abs(force_val)\r\n\t\tycomp = 0\r\n\telif direction_val == \"W OF N\" or direction_val == \"E OF N\" or direction_val == \"E OF S\" or direction_val == \"W OF S\":\r\n\t\txcomp = force_val * math.sin(math.radians(degree_val))\r\n\t\tycomp = force_val * math.cos(math.radians(degree_val))\r\n\telse:\r\n\t\txcomp = force_val * math.cos(math.radians(degree_val))\r\n\t\tycomp = force_val * math.sin(math.radians(degree_val))\r\n\t#directionvariety\r\n\tif direction_val == \"N OF W\" or direction_val == \"W OF N\":\r\n\t\txcomp = -(abs(xcomp))\r\n\t\tycomp = abs(ycomp)\r\n\telif direction_val == \"N OF E\" or direction_val == \"E OF N\":\r\n\t\txcomp = abs(xcomp)\r\n\t\tycomp = abs(ycomp)\r\n\telif direction_val == \"S OF E\" or direction_val == \"E OF S\":\r\n\t\txcomp = abs(xcomp)\r\n\t\tycomp = -(abs(ycomp))\r\n\telif direction_val == \"S OF W\" or direction_val == \"W OF S\":\r\n\t\txcomp = -(abs(xcomp))\r\n\t\tycomp = -(abs(ycomp))\r\n\t#xsums and ysums\r\n\tif xsum == 0:\r\n\t\txsum = float(xcomp)\r\n\telse:\r\n\t\txsum = xsum + xcomp\r\n\tif ysum == 0:\r\n\t\tysum = float(ycomp)\r\n\telse:\r\n\t\tysum = ysum + ycomp\r\n\t#resultant\r\n\tres = math.sqrt((xsum) ** 2 + (ysum) ** 2)\r\n\t#direction value\r\n\tif xsum == 0 or ysum == 0:\r\n\t\tdirnum = 0\r\n\telse:\r\n\t\tdirnum = abs(round(math.degrees(math.atan(ysum/xsum)),2))\r\n\t#vector direction\r\n\tif xsum > 0 and ysum > 0:\r\n\t\tdirection_vec = \" N OF E\"\r\n\telif xsum < 0 and ysum > 0:\r\n\t\tdirection_vec = \" N OF W\"\r\n\telif xsum < 0 and ysum < 0:\r\n\t\tdirection_vec = \" S OF W\"\r\n\telif xsum > 0 and ysum < 0:\r\n\t\tdirection_vec = \" S OF E\"\r\n\telif xsum == 0 and ysum < 0:\r\n\t\tdirection_vec = \" S\"\r\n\telif xsum == 0 and ysum > 0:\r\n\t\tdirection_vec = \" N\"\r\n\telif xsum > 0 and ysum == 0:\r\n\t\tdirection_vec = \" E\"\r\n\telif xsum < 0 and ysum == 0:\r\n\t\tdirection_vec = \" W\"\r\n\telif xsum == 0 and ysum == 0:\r\n\t\tdirection_vec = \" \"\r\n\t#printing outputs \r\n\tif r == 0:\r\n\t\tr = 1\r\n\t#forget\r\n\tif forget == 1:\r\n\t\tres_lab.grid_forget()\r\n\t\txsum_lab.grid_forget()\r\n\t\tysum_lab.grid_forget()\r\n\t\tsummation_lab.grid_forget()\r\n\t\tresultant_lab.grid_forget()\r\n\t\tdirection_lab.grid_forget()\r\n\t\tdirnum_lab.grid_forget()\r\n\t#outputslabs\r\n\tforce_lab = Label(root, text=float(force_val)).grid(row=r, column=4)\r\n\tdirection1_lab = Label(root, text=direction_val).grid(row=r, column=5)\r\n\tdegree_lab = Label(root, text=round(float(degree_val),2)).grid(row=r, column=6)\r\n\txcomp_lab = Label(root, text=round(xcomp,2)).grid(row=r, column=7)\r\n\tycomp_lab = Label(root, text=round(ycomp,2)).grid(row=r, column=8)\r\n\t#outputslabs2\r\n\tsummation_lab = Label(root, text=\"Summation:\")\r\n\tsummation_lab.grid(row=r + 1, column=6)\r\n\tspace7 = Label(root, text= \" \").grid(row=r + 2, column=6)\r\n\tresultant_lab = Label(root, text=\"Resultant:\")\r\n\tresultant_lab.grid(row=r + 3, column=6)\r\n\tdirection_lab = Label(root, text=\"Direction:\")\r\n\tdirection_lab.grid(row=r + 4, column=6)\r\n\t#vector final sums\r\n\txsum_lab = Label(root, text=round(xsum,2))\r\n\txsum_lab.grid(row=r + 1, column=7)\r\n\tysum_lab = Label(root, text=round(ysum,2))\r\n\tysum_lab.grid(row=r + 1, column=8)\r\n\tres_lab = Label(root, text=round(res,2))\r\n\tres_lab.grid(row=r + 3, column=7)\r\n\tdirnum_lab = Label(root, text=str(dirnum) + direction_vec)\r\n\tdirnum_lab.grid(row=r + 4, column=7)\r\n\trow_list.append(r)\r\n\tr += 1\r\n\tforget = 1\r\n\r\ndef enter4(event):\r\n\tglobal xsum\r\n\tglobal ysum\r\n\tglobal r\r\n\tglobal forget\r\n\tglobal row_list\r\n\tforget = 0\r\n\txsum = 0\r\n\tysum = 0\r\n\tans = answered.get().upper()\r\n\tif ans == \"Y\":\r\n\t\tres_lab.grid_forget()\r\n\t\txsum_lab.grid_forget()\r\n\t\tysum_lab.grid_forget()\r\n\t\tsummation_lab.grid_forget()\r\n\t\tresultant_lab.grid_forget()\r\n\t\tdirection_lab.grid_forget()\r\n\t\tdirnum_lab.grid_forget()\r\n\t\task1.grid_forget()\r\n\t\tanswer.grid_forget()\r\n\t\tanswered.grid_forget()\r\n\t\tfor x in row_list: \r\n\t\t\tforce_lab = Label(root, text=\" \").grid(row=x, column=4)\r\n\t\t\tdirection1_lab = Label(root, text=\" \").grid(row=x, column=5)\r\n\t\t\tdegree_lab = Label(root, text=\" \").grid(row=x, column=6)\r\n\t\t\txcomp_lab = Label(root, text=\" \").grid(row=x, column=7)\r\n\t\t\tycomp_lab = Label(root, text=\" \").grid(row=x, column=8)\r\n\t\tforce.config(state=NORMAL)\r\n\t\tdirection.config(state=NORMAL)\r\n\t\tdegree.config(state=NORMAL)\r\n\t\tr = 0\r\n\t\trow_list = []\r\n\t\tforce.focus_set()\r\n\telif ans == \"N\":\r\n\t\troot.destroy()\r\n\telse:\r\n\t\tanswered.delete(0, \"end\")\r\n\t\tanswered.insert(0, \"INVALID INPUT!\")\r\n\r\ndef ask():\r\n\tglobal answered\r\n\tglobal ask1\r\n\tglobal answer\r\n\task1 = Label(root, text=\"Would you like to put new entry?\")\r\n\task1.grid(row=7, column=1)\r\n\tanswer = Label(root, text=\"Type Y for Yes. N to Exit\")\r\n\tanswer.grid(row=8, column=1)\r\n\tanswered = Entry(root)\r\n\tanswered.grid(row=9, column=1)\r\n\tanswered.focus_set()\r\n\tanswered.bind(\"\", enter4)\r\n\t\r\ndef num_check1():\r\n\ttry:\r\n\t\tfloat(force.get())\r\n\texcept ValueError:\r\n\t\tforce.delete(0, \"end\")\r\n\t\tforce.insert(0, \"Invalid Input!\")\r\n\t\tforce.focus_set()\r\n\telse:\r\n\t f = float(force.get())\r\n\t if f > 999999:\r\n\t \tforce.delete(0,\"end\")\r\n\t \tforce.insert(0, \"VALUE IS TOO HIGH!\")\r\n\t elif f == 0:\r\n\t \tif r == 0:\r\n\t \t\tforce.delete(0,\"end\")\r\n\t \t\tforce.insert(0, \"NO 0 ON 1st INPUT\")\r\n\t \telse:\r\n\t \task()\r\n\t \tforce.delete(0,\"end\")\r\n\t \tforce.config(state=DISABLED)\r\n\t \tdirection.config(state=DISABLED)\r\n\t \tdegree.config(state=DISABLED)\r\n\t else:\r\n\t direction.focus_set()\r\n\r\ndef dir_check():\r\n\td = direction.get().upper()\r\n\tif d == \"N\" or d == \"S\" or d == \"E\" or d == \"W\" or d == \"N OF E\" or d == \"E OF N\" or d == \"N OF W\" or d == \"W OF N\" or d == \"S OF E\" or d == \"E OF S\" or d == \"S OF W\" or d == \"W OF S\":\r\n\t degree.focus_set()\r\n\telse:\r\n\t direction.delete(0, \"end\")\r\n\t direction.insert(0, \"Not a Direction!\")\r\n\t direction.focus_set()\r\n\r\ndef num_check2():\r\n\ttry:\r\n\t\tfloat(degree.get())\r\n\texcept ValueError:\r\n\t\tdegree.delete(0, \"end\")\r\n\t\tdegree.insert(0, \"Invalid Input!\")\r\n\t\tdegree.focus_set()\r\n\telse:\r\n\t\tdeg = float(degree.get())\r\n\t\tif deg > 999999:\r\n\t\t\tdegree.delete(0,\"end\")\r\n\t\t\tdegree.insert(0, \"VALUE IS TOO HIGH!\")\r\n\t\telse:\r\n\t\t\tcompute()\r\n\r\ndef enter1(event):\r\n\tnum_check1()\r\ndef enter2(event):\r\n\tdir_check()\r\ndef enter3(event):\r\n\tnum_check2()\r\n\r\n#SPACES\r\nspace1 = Label(root, text=\" \").grid(row=0, column=0)\r\nspace2 = Label(root, text=\" \").grid(row=1, column=3)\r\nspace4 = Label(root, text=\" \").grid(row=4, column=0)\r\nspace5 = Label(root, text=\" \").grid(row=5, column=0)\r\nspace6 = Label(root, text=\" \").grid(row=6, column=0)\r\n#LABELS for INPUT\r\nlabel1 = Label(root, text=\"Force:\").grid(row=1, column=0)\r\nlabel2 = Label(root, text=\"Direction:\").grid(row=2, column=0)\r\nlabel3 = Label(root, text=\"Degree:\").grid(row=3, column=0)\r\n#LABELS for OUTPUT\r\nlabel5 = Label(root, text=\" Force \").grid(row=0, column=4)\r\nlabel6 = Label(root, text=\" Direction \").grid(row=0, column=5)\r\nlabel7 = Label(root, text=\" Degree \").grid(row=0, column=6)\r\nlabel8 = Label(root, text=\" X Component \").grid(row=0, column=7)\r\nlabel9 = Label(root, text=\" Y Component \").grid(row=0, column=8)\r\n#Input fields\r\nforce = Entry(root)\r\nforce.grid(row=1, column=1)\r\nforce.bind(\"\", enter1)\r\ndirection = Entry(root)\r\ndirection.grid(row=2, column=1)\r\ndirection.bind(\"\", enter2)\r\ndegree = Entry(root)\r\ndegree.grid(row=3, column=1)\r\ndegree.bind(\"\", enter3)\r\n\r\nroot.mainloop()","repo_name":"EmGZ/Vector-Calculator","sub_path":"vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"5442594259","text":"from pathlib import Path\nimport os\nimport yaml\nfrom rich.style import Style\nfrom rich.theme import Theme\n\nDEFAULT_BQQ_HOME = f\"{Path.home()}/.bqq\"\n\nBQQ_HOME = os.getenv(\"BQQ_HOME\", DEFAULT_BQQ_HOME)\nBQQ_RESULTS = f\"{BQQ_HOME}/results\"\nBQQ_SCHEMAS = f\"{BQQ_HOME}/schemas\"\nBQQ_INFOS = f\"{BQQ_HOME}/infos.json\"\nBQQ_CONFIG = f\"{BQQ_HOME}/config.yaml\"\n\nBQQ_DISABLE_COLORS = os.getenv(\"BQQ_DISABLE_COLORS\", \"False\").lower() in (\"true\", \"1\", \"t\")\nBQQ_SKIN = os.getenv(\"BQQ_SKIN\")\n\ndefault_skin = {\n \"request\": \"gold3\",\n \"info\": \"green\",\n \"error\": \"red\",\n \"border\": \"grey27\",\n \"darker\": \"grey46\",\n \"alternate\": \"grey50\",\n \"link\": \"light_sky_blue1\",\n \"keyword\": \"dodger_blue1\",\n}\n\nskin = default_skin\n\nif BQQ_SKIN and os.path.isfile(BQQ_SKIN):\n bqq_skin = yaml.safe_load(open(BQQ_SKIN, \"r\"))\n skin = {**skin, **bqq_skin}\n\nrequest_style = Style(color=skin[\"request\"])\ninfo_style = Style(color=skin[\"info\"])\nerror_style = Style(color=skin[\"error\"])\nborder_style = Style(color=skin[\"border\"])\ndarker_style = Style(color=skin[\"darker\"])\nalternate_style = Style(color=skin[\"alternate\"])\nlink_style = Style(color=skin[\"link\"])\nkeyword_style = Style(color=skin[\"keyword\"])\n\ntheme = Theme(\n {\n \"progress.elapsed\": darker_style,\n \"prompt.default\": darker_style,\n \"prompt.choices\": \"default\",\n \"rule.line\": border_style,\n \"repr.path\": darker_style,\n \"repr.filename\": darker_style,\n \"status.spinner\": \"none\",\n \"progress.spinner\": \"none\",\n \"repr.number\": \"none\",\n \"tree.line\": border_style,\n }\n)\n\nFZF_SEPARATOR = \" ~ \"\n\nBQ_KEYWORDS = [\n \"ALL\",\n \"AND\",\n \"ANY\",\n \"ARRAY\",\n \"AS\",\n \"ASC\",\n \"ASSERT_ROWS_MODIFIED\",\n \"AT\",\n \"BETWEEN\",\n \"BY\",\n \"CASE\",\n \"CAST\",\n \"COLLATE\",\n \"CONTAINS\",\n \"CREATE\",\n \"CROSS\",\n \"CUBE\",\n \"CURRENT\",\n \"DEFAULT\",\n \"DEFINE\",\n \"DESC\",\n \"DISTINCT\",\n \"ELSE\",\n \"END\",\n \"ENUM\",\n \"ESCAPE\",\n \"EXCEPT\",\n \"EXCLUDE\",\n \"EXISTS\",\n \"EXTRACT\",\n \"FALSE\",\n \"FETCH\",\n \"FOLLOWING\",\n \"FOR\",\n \"FROM\",\n \"FULL\",\n \"GROUP\",\n \"GROUPING\",\n \"GROUPS\",\n \"HASH\",\n \"HAVING\",\n \"IF\",\n \"IGNORE\",\n \"IN\",\n \"INNER\",\n \"INTERSECT\",\n \"INTERVAL\",\n \"INTO\",\n \"IS\",\n \"JOIN\",\n \"LATERAL\",\n \"LEFT\",\n \"LIKE\",\n \"LIMIT\",\n \"LOOKUP\",\n \"MERGE\",\n \"NATURAL\",\n \"NEW\",\n \"NO\",\n \"NOT\",\n \"NULL\",\n \"NULLS\",\n \"OF\",\n \"ON\",\n \"OR\",\n \"ORDER\",\n \"OUTER\",\n \"OVER\",\n \"PARTITION\",\n \"PRECEDING\",\n \"PROTO\",\n \"RANGE\",\n \"RECURSIVE\",\n \"RESPECT\",\n \"RIGHT\",\n \"ROLLUP\",\n \"ROWS\",\n \"SELECT\",\n \"SET\",\n \"SOME\",\n \"STRUCT\",\n \"TABLESAMPLE\",\n \"THEN\",\n \"TO\",\n \"TREAT\",\n \"TRUE\",\n \"UNBOUNDED\",\n \"UNION\",\n \"UNNEST\",\n \"USING\",\n \"WHEN\",\n \"WHERE\",\n \"WINDOW\",\n \"WITH\",\n \"WITHIN\",\n]\n","repo_name":"martintupy/bqq","sub_path":"bqq/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32964027292","text":"import sys\n\nhuman_list1 = []\nhuman_list2 = []\npartner_dict1 = {}\npartner_dict2 = {}\n\ncases = int(sys.stdin.readline())\n\nhuman_list1 = sys.stdin.readline().strip().split()\nhuman_list2 = sys.stdin.readline().strip().split()\nflag = True\n\nfor i in range(cases):\n partner_dict1[human_list1[i]] = human_list2[i]\n partner_dict2[human_list2[i]] = human_list1[i]\n\n if human_list1[i] == human_list2[i]:\n flag = False\n\nif partner_dict1 == partner_dict2 and flag:\n print(\"good\")\nelse:\n print(\"bad\")\n","repo_name":"dydwnsekd/coding_test","sub_path":"baekjoon/python/10105.py","file_name":"10105.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39772874685","text":"from flask import Flask, render_template, request, session\nimport pandas as pd\nimport os\nfrom werkzeug.utils import secure_filename\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport csv\n\n#*** Flask configuration\n \n# Define folder to save uploaded files to process further\nUPLOAD_FOLDER = os.path.join('staticFiles', 'uploads')\nSHOW = False\n# Define allowed files (for this example I want only csv file)\nALLOWED_EXTENSIONS = {'csv'}\n \napp = Flask(__name__, template_folder='templates', static_folder='staticFiles')\n# Configure upload file path flask\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n \n# Define secret key to enable session\napp.secret_key = 'This is your secret key to utilize session in Flask'\n \n \n@app.route('/')\ndef index():\n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'dummy.png')\n return render_template('fft.html', user_image = full_filename)\n # render_template('fft.html')\n \n@app.route('/', methods=(\"POST\", \"GET\"))\ndef uploadFile():\n if request.method == 'POST':\n # upload file flask\n uploaded_df = request.files['uploaded-file']\n \n # Extracting uploaded data file name\n data_filename = secure_filename(uploaded_df.filename)\n print(data_filename)\n print(uploaded_df)\n # flask upload file to database (defined uploaded folder in static path)\n uploaded_df.save(os.path.join(app.config['UPLOAD_FOLDER'], data_filename))\n \n # Storing uploaded file path in flask session\n session['uploaded_data_file_path'] = os.path.join(app.config['UPLOAD_FOLDER'], data_filename)\n\n with open(os.path.join('./staticFiles/uploads/sheets.csv'), newline='') as csvfile:\n list_data = list(csv.reader(csvfile))\n\n list_to_arr = np.array(list_data)\n\n squareimpulse = list_to_arr.flatten()\n # squareimpulse = np.array([0,0,0,0,0,1,1,1,1,1,0,0,0,0,0])\n\n img = (squareimpulse)\n f = np.fft.fft(img)\n fshift = np.fft.fftshift(f)\n magnitude_spectrum = (np.abs(fshift))\n # plt.plt.switch_backend('Agg') \n plt.switch_backend('agg')\n plt.subplot(121)\n plt.plot(img)\n plt.title('Input Image')\n plt.xticks([]), plt.yticks([])\n\n plt.subplot(122)\n plt.plot(magnitude_spectrum)\n plt.title('Magnitude Spectrum')\n plt.xticks([]), plt.yticks([])\n plt.xlabel('Frequency')\n plt.ylabel('Magnitude')\n file_name = os.path.join(app.config['UPLOAD_FOLDER'] + '/my_plot.png')\n plt.savefig(file_name)\n # plt.show()\n # // end\n\n # Retrieving uploaded file path from session\n # data_file_path = session.get('uploaded_data_file_path', None)\n \n # read csv file in python flask (reading uploaded csv file from uploaded server location)\n # uploaded_df = pd.read_csv(data_file_path)\n \n # pandas dataframe to html table flask\n # uploaded_df_html = uploaded_df.to_html()\n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], 'my_plot.png')\n print(full_filename)\n return render_template('fft.html', user_image = full_filename)\n # return render_template('fft.html')\n \n# @app.route('/', methods=(\"PUT\"))\n# def showData():\n \n \n@app.route('/back')\ndef back():\n return render_template('fft.html')\n\nif __name__=='__main__':\n app.run(host='127.0.0.1', port=5008, debug = True)\n","repo_name":"Mitchelle-Creado-15-06-1997/fft","sub_path":"fft.py","file_name":"fft.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40378127287","text":"# coding=utf-8\n\nimport os, time, argparse, network, util\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport dlib\nimport numpy as np\n#from torch.nn.parallel import DistributedDataParallel\nfrom torchvision.models import vgg16\n\n# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n# torch.distributed.init_process_group(backend=\"nccl\")\n# torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:23456', rank=0, world_size=1)\n# local_rank = torch.distributed.get_rank()\n# torch.cuda.set_device(local_rank)\ndevice = torch.device(\"cuda:0\")#, local_rank)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', required=False, default='img', help='')\nparser.add_argument('--train_subfolder', required=False, default='train', help='')\nparser.add_argument('--test_subfolder', required=False, default='test', help='')\nparser.add_argument('--batch_size', type=int, default=1, help='batch size')\nparser.add_argument('--nb', type=int, default=9, help='the number of resnet block layer for generator')\nparser.add_argument('--train_epoch', type=int, default=200, help='train epochs num')\nparser.add_argument('--decay_epoch', type=int, default=30, help='learning rate decay start epoch num')\nparser.add_argument('--lrD', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--lrG', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--lambdaA', type=float, default=10, help='lambdaA for cycle loss')\nparser.add_argument('--lambdaB', type=float, default=10, help='lambdaB for cycle loss')\nparser.add_argument('--lambda_eye', type=float, default=8, help='lambdaA for cycle loss')\nparser.add_argument('--lambda_lip', type=float, default=5, help='lambdaB for cycle loss')\nparser.add_argument('--lambda_face', type=float, default=3, help='lambdaB for cycle loss')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')\nparser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')\nparser.add_argument('--save_root', required=False, default='results', help='results save path')\n# parser.add_argument('--local_rank')\n\nopt = parser.parse_args()\nprint(\"type=\",type(opt))\nprint('------------ Options -------------')\nfor k, v in sorted(vars(opt).items()):\n print('%s: %s' % (str(k), str(v)))\nprint('-------------- End ----------------')\n\n# results save path\nroot = '/data/home/yang-bj/collab_beauty/Plan1_Multi-4-MobileNet_v1/'+ opt.dataset + '_' + opt.save_root + '/' #apple2orange2_results/\nmodel = opt.dataset + '_'\nif not os.path.isdir(root):\n os.mkdir(root)\n\n# data_loader\ntransform = transforms.Compose([\n transforms.Resize(256),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),\n])\npath_data_root = '/data/home/yang-bj/collab_beauty/MobileNet_beautyGAN/'\ntrain_loader_A = util.data_load(path_data_root+ opt.dataset, opt.train_subfolder + 'A', transform, opt.batch_size, shuffle=True)#.cpu()\ntrain_loader_B = util.data_load(path_data_root+ opt.dataset, opt.train_subfolder + 'B', transform, opt.batch_size, shuffle=True)#.cpu()\ntest_loader_A = util.data_load(path_data_root+ opt.dataset, opt.test_subfolder + 'A', transform, opt.batch_size, shuffle=False)#.cpu()\ntest_loader_B = util.data_load(path_data_root+ opt.dataset, opt.test_subfolder + 'B', transform, opt.batch_size, shuffle=False)#.cpu()\n\n# network\nB_encoder = network.Big_encoder()\nB_decoder = network.Big_decoder()\nGs = network.generator_encoder_Small()\nD_A = network.discriminator()\nD_B = network.discriminator()\nG_state_dict = torch.load('/data/home/yang-bj/collab_beauty/Plan1_Multi-4-MobileNet_v1/model/img_generator_param_decompose.pkl')\nD_A_dict = torch.load('/data/home/yang-bj/collab_beauty/Plan1_Multi-4-MobileNet_v1/model/img_discriminatorA_param_decompose.pkl')\nD_B_dict = torch.load('/data/home/yang-bj/collab_beauty/Plan1_Multi-4-MobileNet_v1/model/img_discriminatorB_param_decompose.pkl')\nD_A_state_dict = D_A.state_dict()\nD_B_state_dict = D_B.state_dict()\nfor k,v in D_A_state_dict.items():\n D_A_state_dict[k].copy_(D_A_dict['module.'+k])\nfor k,v in D_B_state_dict.items():\n D_B_state_dict[k].copy_(D_B_dict['module.'+k])\n\nmodel_dict_de = B_decoder.state_dict()\nmodel_dict_en = B_encoder.state_dict()\nstate_dict_de = {k[7:]:v for k,v in G_state_dict.items() if k[7:] in model_dict_de.keys()}\nmodel_dict_de.update(state_dict_de)\nB_decoder.load_state_dict(model_dict_de)\n\nkeys_B_en = list(model_dict_en)\nkeys_G = list(G_state_dict)[0:len(keys_B_en)]\ni = 0\nwhile i < len(keys_B_en):\n model_dict_en[keys_B_en[i]].copy_(G_state_dict[keys_G[i]])\n i+=1\nB_encoder.load_state_dict(model_dict_en)\n\nGs.to(device)\nD_A.to(device)\nD_B.to(device)\nB_decoder.to(device)\nB_encoder.to(device)\n# D_A = DistributedDataParallel(D_A, find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n# D_B = DistributedDataParallel(D_B,find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n# Gs = DistributedDataParallel(Gs,find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n# B_decoder = DistributedDataParallel(B_decoder,find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n# B_encoder = DistributedDataParallel(B_encoder,find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n\nB_decoder.eval()\nB_encoder.eval()\nGs.train()\nD_A.eval()\nD_B.eval()\n\ndef decoder_forward(model, x):\n x,y = model(x)\n return x,y\n\n# loss\nBCE_loss = nn.BCELoss().to(device)\nMSE_loss = nn.MSELoss().to(device)\nL1_loss = nn.L1Loss().to(device)\n\nvgg = vgg16(pretrained=False)\nvgg.load_state_dict(torch.load(path_data_root + 'model/vgg16-397923af.pth'))\nvgg = vgg.features[:18]\nvgg.to(device)\nvgg.eval()\n# vgg = DistributedDataParallel(vgg,find_unused_parameters=True, device_ids=[local_rank], output_device=local_rank)\n\n# Face mask model\npredictor = dlib.shape_predictor(\n path_data_root + \"model/shape_predictor_68_face_landmarks.dat\")\ndetector = dlib.get_frontal_face_detector()\n\n# Adam optimizer\nGs_optimizer = optim.Adam(Gs.parameters(), lr=opt.lrG, betas=(opt.beta1, opt.beta2))\n\n# image store\nfakeA_store = util.ImagePool(50)\nfakeB_store = util.ImagePool(50)\n\ntrain_hist = {}\ntrain_hist['G_A_losses'] = []\ntrain_hist['G_B_losses'] = []\ntrain_hist['A_cycle_losses'] = []\ntrain_hist['B_cycle_losses'] = []\ntrain_hist['histogram_losses'] = []\ntrain_hist['perceptual_losses'] = []\ntrain_hist['idt_losses'] = []\ntrain_hist['feat_losses'] = []\ntrain_hist['G_losses'] = []\ntrain_hist['per_epoch_ptimes'] = []\ntrain_hist['total_ptime'] = []\n\nminloss = 100\nprint('training start!')\nstart_time = time.time()\ntorch.backends.cudnn.benchmark = True\nfor epoch in range(opt.train_epoch):\n G_A_losses = []\n G_B_losses = []\n A_cycle_losses = []\n B_cycle_losses = []\n histogram_losses = []\n perceptual_losses = []\n idt_losses = []\n feat_losses = []\n G_losses = []\n epoch_start_time = time.time()\n iter = 0\n\n if (epoch+1) > opt.decay_epoch: #start dacay lr\n Gs_optimizer.param_groups[0]['lr'] -= opt.lrG / (opt.train_epoch - opt.decay_epoch)\n\n for (img_A, _), (img_B, _) in zip(train_loader_A, train_loader_B):\n torch.cuda.empty_cache()\n\n realA = util.to_var(img_A, requires_grad=False)\n realB = util.to_var(img_B, requires_grad=False)\n\n\n resA = (realA.cpu().squeeze(0).permute(1, 2, 0).data.numpy()+1) * 127.5\n resB = (realB.cpu().squeeze(0).permute(1, 2, 0).data.numpy()+1) * 127.5\n\n input_A_mask = np.zeros((opt.batch_size, 4, 256, 256))\n res = util.get_mask(resA.astype(np.uint8), detector, predictor)\n if res is not None:\n input_A_mask[0][0] = np.equal(res[0], 255)\n input_A_mask[0][1] = np.equal(res[1], 255)\n input_A_mask[0][2] = np.equal(res[2], 255)\n input_A_mask[0][3] = np.equal(res[3], 255)\n else: continue\n input_A_mask = Variable(torch.as_tensor(input_A_mask).to(device),requires_grad=False)\n\n input_B_mask = np.zeros((opt.batch_size, 4, 256, 256))\n res = util.get_mask(resB.astype(np.uint8), detector, predictor)\n if res is not None:\n input_B_mask[0][0] = np.equal(res[0], 255)\n input_B_mask[0][1] = np.equal(res[1], 255)\n input_B_mask[0][2] = np.equal(res[2], 255)\n input_B_mask[0][3] = np.equal(res[3], 255)\n else:continue\n input_B_mask = Variable(torch.as_tensor(input_B_mask).to(device), requires_grad=False)\n # print(\"------------mask loaded-----------------\")\n\n ##########################################################\n ################# train generator ############\n ##########################################################\n torch.cuda.empty_cache()\n # middle_res = B_encoder(realA, realB)\n # fakeB_old, fakeA_old = B_decoder(torch.cat((middle_res[1],middle_res[3]),dim=1))\n # del middle_res\n\n out_Gs = Gs(realA, realB)\n out_En = B_encoder(realA, realB)\n\n feat_loss = 0\n for i in range(len(out_En)):\n feat_loss += nn.MSELoss()(out_Gs[i], out_En[i])\n fakeB, fakeA = decoder_forward(B_decoder, out_Gs[-1])\n #fakeB_big, fakeA_big = decoder_forward(B_decoder, torch.cat((out_En[1], out_En[3]),dim=1))\n del out_Gs,out_En\n torch.cuda.empty_cache()\n\n out_Gs = Gs(fakeA, fakeB)\n out_En = B_encoder(fakeA, fakeB)\n feat_loss_rec = 0\n for i in range(len(out_En)):\n feat_loss_rec += nn.MSELoss()(out_Gs[i], out_En[i])\n recB, recA = decoder_forward(B_decoder, out_Gs[-1])\n del out_Gs, out_En\n torch.cuda.empty_cache()\n\n\n\n feat_loss_sum = feat_loss + feat_loss_rec\n #del feat_loss, feat_loss_rec\n torch.cuda.empty_cache()\n\n Gs_optimizer.zero_grad()\n\n D_B_result = D_B(fakeB) \n G_A_loss = MSE_loss(D_B_result, Variable(torch.ones(D_B_result.size()).to(device)))\n del D_B_result\n torch.cuda.empty_cache()\n\n A_cycle_loss = L1_loss(recA, realA) * opt.lambdaA\n\n D_A_result = D_A(fakeA)\n G_B_loss = MSE_loss(D_A_result, Variable(torch.ones(D_A_result.size()).to(device)))\n del D_A_result\n torch.cuda.empty_cache()\n\n B_cycle_loss = L1_loss(recB, realB) * opt.lambdaB\n\n #########################################################################################\n ############ make up loss ######################\n ############ perceptual loss ######################\n #########################################################################################\n ####################### r ###############################\n torch.cuda.empty_cache()\n\n histogram_loss_lip = 0\n histogram_loss_eye = 0\n histogram_loss_eyebrow = 0\n histogram_loss_face = 0\n histogram_loss = 0\n\n\n for cur in range(opt.batch_size):\n temp_source = ((fakeB[cur, 0, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realB[cur, 0, :, :] + 1) * 127.5).to(torch.float32).to(device)\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][0],\n input_B_mask[cur][0])*opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][1],\n input_B_mask[cur][1])*opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][2],\n input_B_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][3],\n input_B_mask[cur][3])*opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n ##################### g #######################33\n temp_source = ((fakeB[cur, 1, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realB[cur, 1, :, :] + 1) * 127.5).to(torch.float32).to(device)\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][0],\n input_B_mask[cur][0])*opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][1],\n input_B_mask[cur][1])*opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][2],\n input_B_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][3],\n input_B_mask[cur][3]) *opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n ###################### b ########################\n temp_source = ((fakeB[cur, 2, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realB[cur, 2, :, :] + 1) * 127.5).to(torch.float32).to(device)\n\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][0],\n input_B_mask[cur][0])*opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][1],\n input_B_mask[cur][1])*opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][2],\n input_B_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_A_mask[cur][3],\n input_B_mask[cur][3])*opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n ####################### r #############################\n temp_source = ((fakeA[cur, 0, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realA[cur, 0, :, :] + 1) * 127.5).to(torch.float32).to(device)\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][0],\n input_A_mask[cur][0]) * opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][1],\n input_A_mask[cur][1]) * opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][2],\n input_A_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][3],\n input_A_mask[cur][3]) * opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n ##################### g #######################33\n temp_source = ((fakeA[cur, 1, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realA[cur, 1, :, :] + 1) * 127.5).to(torch.float32).to(device)\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][0],\n input_A_mask[cur][0]) * opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][1],\n input_A_mask[cur][1]) * opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][2],\n input_A_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][3],\n input_A_mask[cur][3]) * opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n ###################### b ########################\n temp_source = ((fakeA[cur, 2, :, :] + 1) * 127.5).to(torch.float32).to(device)\n temp_template = ((realA[cur, 2, :, :] + 1) * 127.5).to(torch.float32).to(device)\n\n histogram_loss_lip += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][0],\n input_A_mask[cur][0]) * opt.lambda_lip\n histogram_loss_eye += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][1],\n input_A_mask[cur][1]) * opt.lambda_eye\n histogram_loss_eyebrow += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][2],\n input_A_mask[cur][2])\n histogram_loss_face += util.histogram_loss_cal(temp_source, temp_template, input_B_mask[cur][3],\n input_A_mask[cur][3]) * opt.lambda_face\n del temp_source, temp_template\n torch.cuda.empty_cache()\n histogram_loss = (histogram_loss_lip + histogram_loss_eye + histogram_loss_eyebrow + histogram_loss_face)*0.5*0.1\n del histogram_loss_lip, histogram_loss_eye, histogram_loss_eyebrow, histogram_loss_face\n torch.cuda.empty_cache()\n\n del input_A_mask, input_B_mask\n torch.cuda.empty_cache()\n\n histogram_loss = histogram_loss.to(device)\n\n torch.cuda.empty_cache()\n\n up = nn.Upsample(size=224, mode='bilinear')\n perc_A = up((realA + 1) * 127.5).to(torch.float32)\n perc_fake_B = up((fakeB + 1) * 127.5).to(torch.float32)\n perc_B = up((realB + 1) * 127.5).to(torch.float32)\n perc_fake_A = up((fakeA + 1) * 127.5).to(torch.float32)\n\n\n perc_A = perc_A.detach()\n perc_fake_B = perc_fake_B.detach()\n perc_B = perc_B.detach()\n perc_fake_A = perc_fake_A.detach()\n\n\n # input into the pretrained VGG16 and standardize\n perc = vgg(torch.cat([perc_A, perc_B, perc_fake_B, perc_fake_A,], axis=0))\n # perc = vgg.features[:18](torch.cat([perc_A, perc_B, perc_fake_B, perc_fake_A,\n # cyc_perc_A, cyc_perc_B, cyc_perc_fake_B, cyc_perc_fake_A], axis=0)) # 18-->conv4_1\n del perc_A, perc_B, perc_fake_B, perc_fake_A\n torch.cuda.empty_cache()\n perc_mean = torch.mean(perc)\n\n perc = torch.div(perc, torch.add(perc_mean, 1e-5))\n del perc_mean\n torch.cuda.empty_cache()\n\n perceptual_loss = (torch.mean(torch.pow(torch.sub(perc[0], perc[2]), 2)) +\n torch.mean(torch.pow(torch.sub(perc[1], perc[3]), 2)) ).to(device)*0.025\n\n out_Gs = Gs(realA, realA)\n idt_A1, idt_A2 = decoder_forward(B_decoder, out_Gs[-1])\n out_Gs = Gs(realB, realB)\n idt_B1, idt_B2 = decoder_forward(B_decoder, out_Gs[-1])\n idt_loss_A1 = L1_loss(idt_A1, realA)\n idt_loss_A2 = L1_loss(idt_A2, realA)\n idt_loss_B1 = L1_loss(idt_B1, realB)\n idt_loss_B2 = L1_loss(idt_B2, realB)\n del idt_A1, idt_A2, idt_B1, idt_B2\n torch.cuda.empty_cache()\n idt_loss = (idt_loss_A1 + idt_loss_A2 + idt_loss_B1 + idt_loss_B2) * opt.lambdaA * 0.5 * 0.5\n del idt_loss_A1, idt_loss_A2, idt_loss_B1, idt_loss_B2\n torch.cuda.empty_cache()\n\n\n\n G_loss = G_A_loss + G_B_loss + A_cycle_loss + B_cycle_loss + histogram_loss + perceptual_loss + idt_loss + feat_loss_sum\n del realA, realB, fakeA, fakeB, recA, recB\n torch.cuda.empty_cache()\n\n G_loss.backward(retain_graph=True)\n Gs_optimizer.step()\n\n train_hist['G_A_losses'].append(G_A_loss.item())\n train_hist['G_B_losses'].append(G_B_loss.item())\n train_hist['A_cycle_losses'].append(A_cycle_loss.item())\n train_hist['B_cycle_losses'].append(B_cycle_loss.item())\n train_hist['histogram_losses'].append(histogram_loss.item())\n train_hist['perceptual_losses'].append(perceptual_loss.item())\n train_hist['idt_losses'].append(idt_loss.item())\n train_hist['feat_losses'].append(feat_loss_sum.item())\n train_hist['G_losses'].append(G_loss.item())\n\n G_A_losses.append(G_A_loss.item())\n G_B_losses.append(G_B_loss.item())\n A_cycle_losses.append(A_cycle_loss.item())\n B_cycle_losses.append(B_cycle_loss.item())\n histogram_losses.append(histogram_loss.item())\n perceptual_losses.append(perceptual_loss.item())\n idt_losses.append(idt_loss.item())\n feat_losses.append(feat_loss_sum.item())\n G_losses.append(G_loss.item())\n\n iter += 1\n if iter > 2000: break\n if iter%50==0:\n print(\n 'iter : %d , loss_G_A: %.3f, loss_G_B: %.3f,\\n '\n 'loss_A_cycle: %.3f, loss_B_cycle: %.3f, histogram_loss: %.3f, '\n 'perceptual_loss: %.3f, idt_loss: %.3f, G_loss: %.3f' % (\n (iter + 1), torch.mean(torch.FloatTensor(G_A_losses)),\n torch.mean(torch.FloatTensor(G_B_losses)), torch.mean(torch.FloatTensor(A_cycle_losses)),\n torch.mean(torch.FloatTensor(B_cycle_losses)), torch.mean(torch.FloatTensor(histogram_losses)),\n torch.mean(torch.FloatTensor(perceptual_losses)), torch.mean(torch.FloatTensor(idt_losses)),\n torch.mean(torch.FloatTensor(G_losses))))\n del G_A_loss, G_B_loss, A_cycle_loss, B_cycle_loss, histogram_loss, perceptual_loss, idt_loss, feat_loss_sum\n torch.cuda.empty_cache()\n if iter%10==0:\n print(\"iter=\",iter)\n\n epoch_end_time = time.time()\n per_epoch_ptime = epoch_end_time - epoch_start_time\n train_hist['per_epoch_ptimes'].append(per_epoch_ptime)\n print(\n '[%d/%d] - ptime: %.2f, loss_G_A: %.3f, loss_G_B: %.3f,\\n '\n 'loss_A_cycle: %.3f, loss_B_cycle: %.3f, histogram_loss: %.3f, '\n 'perceptual_loss: %.3f, idt_loss: %.3f, G_loss: %.3f' % (\n (epoch + 1), opt.train_epoch, per_epoch_ptime, torch.mean(torch.FloatTensor(G_A_losses)),\n torch.mean(torch.FloatTensor(G_B_losses)), torch.mean(torch.FloatTensor(A_cycle_losses)),\n torch.mean(torch.FloatTensor(B_cycle_losses)), torch.mean(torch.FloatTensor(histogram_losses)),\n torch.mean(torch.FloatTensor(perceptual_losses)), torch.mean(torch.FloatTensor(idt_losses)),\n torch.mean(torch.FloatTensor(G_losses))))\n \n if not os.path.isdir(root + 'epoch' + str(epoch)):\n os.mkdir(root + 'epoch' + str(epoch))\n if (epoch + 1) % 1 == 0 or torch.mean(torch.FloatTensor(G_losses))< minloss:\n if (epoch + 1) % 10 == 0 or torch.mean(torch.FloatTensor(G_losses))< minloss:\n minloss = torch.mean(torch.FloatTensor(G_losses))\n torch.save(Gs.state_dict(), root + 'epoch' + str(epoch) + '/' + model + 'generatorSmall_param.pkl')\n if epoch == 0:\n torch.save(B_decoder.state_dict(), root + 'epoch' + str(epoch) + '/' + model + 'B_decoder.pkl')\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!small!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\",minloss)\n # test A to B\n print(\"save!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!test\")\n iter = 0\n for (img_A, _), (img_B, _) in zip(test_loader_A, test_loader_B):\n iter += 1\n realA = util.to_var(img_A, requires_grad=False)\n realB = util.to_var(img_B, requires_grad=False)\n middle_res = B_encoder(realA, realB)\n genB, genA = B_decoder(torch.cat((middle_res[1], middle_res[3]), dim=1))\n # genB, genA = B_decoder(torch.cat((B_encoder(realA, realB)),dim=1))#G(realA, realB)\n out_Gs = Gs(realA, realB)\n #out_En = B_encoder(realA, realB)\n fakeB, fakeA = decoder_forward(B_decoder, out_Gs[-1])\n # fakeB_big, fakeA_big = decoder_forward(B_decoder, torch.cat((out_En[1], out_En[3]),dim=1))\n\n out_Gs = Gs(fakeA, fakeB)\n # out_En = B_encoder(fakeA, fakeB)\n recB, recA = decoder_forward(B_decoder, out_Gs[-1])\n\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'test_results'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'test_results')\n\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB')\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB/' + str(iter) + '_input.png'\n plt.imsave(path, (realA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB/' + str(iter) + 'genB.png'\n plt.imsave(path, (genB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB/' + str(iter) + 'fakeB.png'\n plt.imsave(path, (fakeB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/AtoB/' + str(iter) + '_recon.png'\n plt.imsave(path, (recA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'test_results/BtoA'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'test_results/BtoA')\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/BtoA/' + str(iter) + '_input.png'\n plt.imsave(path, (realB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/BtoA/' + str(iter) + '_output.png'\n plt.imsave(path, (fakeA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'test_results/BtoA/' + str(iter) + '_recon.png'\n plt.imsave(path, (recB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n\n \n\n util.show_train_hist(train_hist, save=True, path=root + model + 'train_hist.png')\n del realA, realB, fakeA, fakeB, recA, recB\n torch.cuda.empty_cache()\n\n else:\n print(\"save!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!train\")\n iter = 0\n for (img_A, _), (img_B, _) in zip(train_loader_A, train_loader_B):\n iter += 1\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'train_results'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'train_results')\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'train_results/makeup'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'train_results/makeup')\n \n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'train_results/AtoB'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'train_results/AtoB')\n realA = util.to_var(img_A, requires_grad=False)\n realB = util.to_var(img_B, requires_grad=False)\n middle_res = B_encoder(realA, realB)\n genB, genA = B_decoder(torch.cat((middle_res[1], middle_res[3]), dim=1))\n middle_res = B_encoder(genA, genB)\n recB, recA = B_decoder(torch.cat((middle_res[1], middle_res[3]), dim=1))\n # genB, genA = B_decoder(torch.cat((B_encoder(realA, realB)),dim=1))#G(realA, realB)\n # recB, recA = B_decoder(torch.cat((B_encoder(genA, genB)),dim=1))#G(genA, genB)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/makeup/' + str(iter) + '_inA.png'\n plt.imsave(path, (realA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/makeup/' + str(iter) + '_out.png'\n plt.imsave(path, (genB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/makeup/' + str(iter) + '_inB.png'\n plt.imsave(path, (realB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n\n\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/AtoB/' + str(iter) + '_input.png'\n plt.imsave(path, (realA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/AtoB/' + str(iter) + '_output.png'\n plt.imsave(path, (genB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/AtoB/' + str(iter) + '_recon.png'\n plt.imsave(path, (recA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n\n if not os.path.isdir(root + 'epoch' + str(epoch) + '/' + 'train_results/BtoA'):\n os.mkdir(root + 'epoch' + str(epoch) + '/' + 'train_results/BtoA')\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/BtoA/' + str(iter) + '_input.png'\n plt.imsave(path, (realB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/BtoA/' + str(iter) + '_output.png'\n plt.imsave(path, (genA[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n path = root + 'epoch' + str(epoch) + '/' + 'train_results/BtoA/' + str(iter) + '_recon.png'\n plt.imsave(path, (recB[0].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)\n if iter > 9:\n break\n\nend_time = time.time()\ntotal_ptime = end_time - start_time\ntrain_hist['total_ptime'].append(total_ptime)\n\nprint(\"Avg one epoch ptime: %.2f, total %d epochs ptime: %.2f\" % (torch.mean(torch.FloatTensor(train_hist['per_epoch_ptimes'])), opt.train_epoch, total_ptime))\nprint(\"Training finish!... save training results\")\ntorch.save(G_A.state_dict(), root + model + 'generatorA_param.pkl')\ntorch.save(G_B.state_dict(), root + model + 'generatorB_param.pkl')\ntorch.save(D_A.state_dict(), root + model + 'discriminatorA_param.pkl')\ntorch.save(D_B.state_dict(), root + model + 'discriminatorB_param.pkl')\nutil.show_train_hist(train_hist, save=True, path= root + model + 'train_hist.png')\n","repo_name":"Jian-danai/Decompose-Distill-BeautyGAN","sub_path":"Distill_BeautyGAN/beauty_gpu0.py","file_name":"beauty_gpu0.py","file_ext":"py","file_size_in_byte":31152,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"3"} +{"seq_id":"35268168241","text":"import math\nfrom random import random\n\nfrom keras.layers import Dense, initializers\nfrom keras.models import Sequential\n\nfrom bot import *\n\n\nclass mBot(bot):\n def __init__(self, x, y, entityManager, model):\n super().__init__(x, y, entityManager)\n\n self.eyes = [eye(400, radians(10), radians(4), self), eye(400, radians(10), radians(-4), self)]\n self.radar = radar(self)\n self.brain = brain(model)\n self.selfDestructTime = 10\n self.currentSelfDestructTime = 10\n\n\n def update(self, delta):\n super().update(delta)\n outputs = self.brain.getOutputs(self.getInputs(delta))[0]\n self.speed = outputs[0] * 200\n if (abs(self.speed) < 50):\n self.dealWithBadBehaviour(delta)\n self.direction += outputs[1] * delta * 3\n\n self.xSpeed = self.speed * cos(self.direction)\n self.ySpeed = self.speed * sin(self.direction)\n newX = self.x + self.xSpeed * delta\n newY = self.y + self.ySpeed * delta\n\n if (newX > 1000 - self.radius):\n newX = 1000 - self.radius\n self.dealWithBadBehaviour(delta)\n elif (newX < self.radius):\n newX = self.radius\n self.dealWithBadBehaviour(delta)\n\n if (newY > 800 - self.radius):\n newY = 800 - self.radius\n self.dealWithBadBehaviour(delta)\n elif (newY < self.radius):\n newY = self.radius\n self.dealWithBadBehaviour(delta)\n\n self.x = newX\n self.y = newY\n self.currentSelfDestructTime -= delta / 30\n self.reload(delta)\n if (sigmoid(outputs[2]) > 0):\n self.shoot()\n\n if (self.currentSelfDestructTime <= 0):\n self.em.killBot(self)\n\n def dealWithBadBehaviour(self, delta):\n self.currentSelfDestructTime -= delta\n self.score -= delta / 10\n\n def reset(self):\n super().reset()\n self.currentSelfDestructTime = self.selfDestructTime\n\n def draw(self, w):\n # self.drawEyes(w)\n pygame.draw.circle(w, (0, 0, 255), (int(self.x), int(self.y)), 13)\n pygame.draw.circle(w, (255, 0, 0),\n (int(self.x + 8 * cos(self.direction)), int(self.y + 8 * sin(self.direction))), 3)\n\n def drawEyes(self, w):\n for eye in self.eyes:\n eye.draw(w)\n\n def getInputs(self, delta):\n inputs = []\n\n # eyes\n c = 0\n for i in self.eyes:\n if (c < 2):\n sight = i.canSeeEnemy(self.em.bots)\n if (sight == 1):\n self.score += delta\n else:\n sight = i.canSeeEnemyBullet(self.em.bulletPool)\n\n inputs.append(sight)\n c += 1\n\n # Radar\n closestBullet = self.radar.getClosestBullet(self.em.bulletPool)\n if closestBullet is None:\n inputs.append(0)\n inputs.append(0)\n else:\n distFromBullet = math.sqrt((self.x - closestBullet[0]) ** 2 + (self.y - closestBullet[1]) ** 2)\n if (distFromBullet <= 200):\n inputs.append(1 - distFromBullet / 200)\n targetVector = np.subtract(closestBullet, self.getPos())\n dirUnitVector = (sin(self.direction), cos(self.direction))\n if np.dot(targetVector, dirUnitVector) < 0:\n inputs.append(-1)\n else:\n inputs.append(1)\n else:\n inputs.append(0)\n inputs.append(0)\n\n #Other\n if (self.currentCooldown <= 0):\n inputs.append(1)\n else:\n inputs.append(0)\n return inputs\n\n\n# I have no idea what I'm doing\nclass brain:\n def __init__(self, model):\n if (model == None):\n self.model = Sequential()\n self.model.add(\n Dense(8, activation=\"tanh\", input_dim=6,\n kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))\n self.model.add(\n Dense(3, activation=\"tanh\",\n kernel_initializer=initializers.RandomUniform(minval=-1, maxval=1, seed=None)))\n self.model.compile(loss='mean_squared_error', optimizer='adam')\n else:\n self.model = model\n\n def getOutputs(self, inputs):\n inputs.append(1)\n return self.model.predict(np.asarray([inputs]))\n\n def mutate(self, brain1, brain2):\n newBrain = []\n for i in range(0, len(self.model.get_weights()), 2):\n newWeights = []\n b1weights = brain1.get_weights()[i]\n b2weights = brain2.get_weights()[i]\n for n in range(len(b1weights)):\n w = []\n for m in range(len(b1weights[0])):\n r = random()\n k = 0\n if random() < 0.1:\n k = randint(-100, 100) / 100\n\n if (r < 0.4):\n w.append(b1weights[n][m] + k)\n elif r > 0.6:\n w.append(b2weights[n][m] + k)\n else:\n w.append((b1weights[n][m] + b2weights[n][m]) / 2 + k)\n\n newWeights.append(w)\n newBrain.append(newWeights)\n newBrain.append(self.model.get_weights()[i + 1])\n self.model.set_weights(newBrain)\n\n\n","repo_name":"L1nde/Gladiator-s-Bowl","sub_path":"mBot.py","file_name":"mBot.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38604717976","text":"import numpy as np\nimport cv2\nimport pandas as pd\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom scipy.spatial.distance import cdist\nfrom unl import unl_fourier\n\n\nclass UNLFClassifier(BaseEstimator, ClassifierMixin):\n def __init__(self, size=5):\n self.classes_ = None\n self.template_dict_ = None\n self.size = size\n\n def fit(self, X, y):\n self.classes_ = np.unique(y)\n\n unlf_descriptors = []\n labels = []\n\n for i in range(len(X)):\n im = cv2.imread(X[i], cv2.IMREAD_GRAYSCALE).astype(\"uint8\")\n unlf_desc = unl_fourier(im, self.size, whole=False)\n unlf_descriptors.append(unlf_desc)\n labels.append(y[i])\n\n data = {\"unlf\": unlf_descriptors, \"label\": labels}\n\n df = pd.DataFrame.from_dict(data)\n self.template_dict_ = df\n\n def predict(self, X):\n y_pred = []\n for i in range(len(X)):\n x = X[i]\n im = cv2.imread(x, cv2.IMREAD_GRAYSCALE).astype(\"uint8\")\n descriptors = unl_fourier(im, self.size, whole=False)\n y_pred.append(self.closest_template(descriptors))\n return y_pred\n\n def closest_template(self, descriptors):\n template_descriptors = self.template_dict_[\"unlf\"].tolist()\n distances = cdist([descriptors], template_descriptors).mean(axis=0)\n closest_label = self.template_dict_.iloc[distances.argmin()][\"label\"]\n return closest_label\n\n\ndef main():\n from simple_shape_descriptors import prepare_dataset\n\n X_train, y_train, X_test, y_test = prepare_dataset()\n\n sizes = [5, 20, 30, 35, 40]\n\n for size in sizes:\n clf = UNLFClassifier(size=size)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n from sklearn.metrics import accuracy_score\n\n acc = accuracy_score(y_test, y_pred)\n print(f\"Size: {size} Acc: {acc}\")\n\n\ndef experiment_30():\n from simple_shape_descriptors import prepare_dataset\n\n X_train, y_train, X_test, y_test = prepare_dataset()\n size = 5\n\n clf = UNLFClassifier(size=size)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n from sklearn.metrics import accuracy_score\n\n acc = accuracy_score(y_test, y_pred)\n print(f\"Size: {size} Acc: {acc}\")\n\n from sklearn.metrics import plot_confusion_matrix\n import matplotlib.pyplot as plt\n\n plot_confusion_matrix(clf, X_test, y_test)\n plt.xticks(rotation=90)\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n experiment_30()\n","repo_name":"karlosos/feature_extraction","sub_path":"05_image_descriptors/unl_classifier.py","file_name":"unl_classifier.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"620970927","text":"from behave import *\nfrom selenium import webdriver\n\nfrom SampleProject.Pages.Login_Test import Login_Test\nfrom SampleProject.Pages.Products_Test import Products_Test\n\n\n@given(u'User login into application')\ndef setUpClass(cls):\n options = webdriver.ChromeOptions()\n options.add_experimental_option(\"detach\", True)\n cls.driver = webdriver.Chrome(options=options)\n cls.driver.get(\"https://admin-demo.nopcommerce.com/login?ReturnUrl=%2Fadmin%2F\")\n cls.driver.maximize_window()\n driver = cls.driver;\n login = Login_Test(driver)\n login.enter_the_email(\"admin@yourstore.com\")\n login.enter_the_password(\"admin\")\n login.click_the_loginbutton()\n\n\n@when(u'User clicks on Catalog Section')\ndef step_impl(self):\n driver = self.driver;\n product = Products_Test(driver)\n product.click_the_Catelog()\n\n\n@when(u'User clicks on Product Section')\ndef step_impl(self):\n driver = self.driver;\n product = Products_Test(driver)\n product.Click_on_the_Products()\n\n\n@when(u'User enter the product name, SKU')\ndef step_impl(self):\n driver = self.driver;\n product = Products_Test(driver)\n product.enter_the_productname(\"Build your own computer\")\n product.enter_the_sku(\"COMP_CUST\")\n\n@then(u'User clicks on SearchButton')\ndef step_impl(self):\n driver = self.driver;\n product = Products_Test(driver)\n product.Click_on_the_SearchButton()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Rishi232538/Python-Automation","sub_path":"features/Steps/Products.py","file_name":"Products.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42403049079","text":"\nimport unittest\n\nimport classify\n\nclass InequalityTest(unittest.TestCase):\n\t\n\tdef test_equal(self):\n\t\ta = len(classify.getData('marks.dat'))\n\t\tb = len(classif.getData('marks.dat'))\n\t\tself.assertEqual(a,b)\n\t\t\n\n\nif __name__ == '__main__':\n\tunittest.TestClassify()\n","repo_name":"WizenKarma/classifymarks","sub_path":"tests/test_classify_1.py","file_name":"test_classify_1.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34942046087","text":"from sql_helper import SQLHelper\nfrom sis_mappings import *\nimport json\nfrom datetime import datetime\nimport os\nimport pytz\n\n\nclass JSONGenerator():\n def __init__(self, database_path, table_name, strm):\n self.database_path = database_path\n self.table_name = table_name\n self.strm = strm\n self.sql_helper = SQLHelper(self.database_path, self.table_name)\n self.save_dir = 'data/' + str(strm)\n\n\n\n # method to generate main catalog page\n def generate(self):\n # make sure the json directory exists\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n\n acad_groups = self.sql_helper.get_unique_acad_groups(self.strm)\n data_map = {}\n # all_orgs = []\n for group in acad_groups: # looping over the school level (e.g. College of Arts and Sciences, E-School)\n dict_key = f\"{acad_group_mapping[group]}\"\n orgs = []\n print(f\"Generating pages for {acad_group_mapping[group]}\")\n for org in self.sql_helper.get_acad_orgs_in_acad_group(self.strm, group): # looping over the department level (e.g. Computer Science, Biology)\n orgs.append({\n \"name\": f\"{acad_org_mapping[org]}\",\n \"abbr\": org})\n \n print(f\"\\t{acad_org_mapping[org]}\")\n self.generate_json_for_acad_org(org, self.strm)\n \n \n # get requirements for College of Arts and Sciences\n if group == 'CGAS':\n # loop over the requirements\n for req in new_requirement_mapping:\n # generate json for each requirement\n self.generate_json_for_attr(req, self.strm)\n # add the requirements to the orgs list\n orgs.append({\n \"name\": f\"{new_requirement_mapping[req]}\",\n \"abbr\": req})\n \n # generate json for engagment\n self.generate_json_for_engagement(self.strm)\n orgs.append({\n \"name\": f\"Engagement\",\n \"abbr\": 'EGMT'})\n \n \n # generate json for engagment\n\n\n # generate json for each requirement\n\n # add the requirements to the orgs list\n \n orgs.sort(key=lambda x: x['name'])\n data_map[dict_key] = orgs\n # all_orgs += orgs\n\n # all_orgs.sort(key=lambda x: x['name'])\n\n # save json object of data_map\n with open(f'{self.save_dir}/latest_sem.json', 'w') as f:\n json.dump(data_map, f, sort_keys=True)\n \n # with open(f'{self.save_dir}/departments.json', 'w') as f:\n # json.dump(all_orgs, f)\n\n # generate semester string\n semester = self.strm_to_str(self.strm)\n\n # Get the current time in GMT (UTC)\n current_time = datetime.utcnow()\n\n # Calculate the timestamp in seconds\n timestamp_seconds = int(current_time.timestamp())\n\n # Get the current time in GMT (UTC)\n utc_time = str(datetime.now(pytz.utc))\n\n metadata = {\n \"semester\": semester,\n \"last_updated\": utc_time\n }\n\n # Write the timestamp to a JSON file\n with open(f'{self.save_dir}/metadata.json', 'w') as f:\n json.dump(metadata, f)\n\n\n\n\n\n\n def convert_time_string(self, original_time):\n if original_time == \"\":\n return \"\"\n # Extract the time part (HH:MM:SS)\n time_part = original_time.split('-')[0]\n # Create a timezone object for Eastern Time (ET) with DST support\n eastern_tz = pytz.timezone('US/Eastern')\n # Convert the time string to a datetime object and localize it to ET\n time_obj = datetime.strptime(time_part, '%H.%M.%S.%f')\n localized_time = eastern_tz.localize(time_obj)\n # Convert to Eastern Time (ET) while considering DST\n est_time = localized_time.astimezone(eastern_tz)\n # Format the time in AM/PM notation\n est_time_formatted = est_time.strftime('%I:%M %p')\n return est_time_formatted\n \n\n\n def write_data_dict_to_json(self, data, json_filename):\n # writes data to json (in the form of files like CS.json, AAS.json, etc.)\n with open(json_filename, 'w') as json_file:\n # Write the opening bracket of the JSON array\n json_file.write('{')\n\n # Iterate through the elements in 'data' and write each one with a newline\n for department in data:\n json.dump(department, json_file)\n json_file.write(': [ \\n')\n for course in data[department]:\n json.dump(course, json_file)\n if course != data[department][-1]:\n json_file.write(', \\n')\n\n if department != list(data.keys())[-1]:\n json_file.write('], \\n')\n else:\n json_file.write('] \\n')\n\n # Write the closing bracket of the JSON array\n json_file.write('}')\n\n\n\n def generate_dict_for_class(self, catalog_number, subject_descr, session_list):\n session_list.sort(key=lambda x: x['display_order'])\n for session in session_list:\n session['meetings'] = eval(session['meetings'])\n for meeting in session['meetings']:\n meeting['start_time'] = self.convert_time_string(meeting['start_time'])\n meeting['end_time'] = self.convert_time_string(meeting['end_time'])\n session['instructors'] = eval(session['instructors'])\n \n class_dict = {\n 'catalog_number': catalog_number,\n 'subject_descr': subject_descr,\n 'subject': session_list[0]['subject'],\n 'sessions': session_list,\n 'descr': session_list[0]['descr'],\n 'topic': session_list[0]['topic'],\n 'units': session_list[0]['units'],\n }\n return class_dict\n\n\n\n def generate_json_for_engagement(self, strm):\n data = {'Engagement': []}\n catalog_numbers = self.sql_helper.catalog_numbers_for_subject(strm, 'CGASD', 'Engagement')\n for catalog_number in catalog_numbers:\n session_list = self.sql_helper.get_sessions_for_class_with_org(strm, 'CGASD', 'Engagement', catalog_number)\n class_dict = self.generate_dict_for_class(catalog_number, 'Engagement', session_list)\n data['Engagement'].append(class_dict)\n filename = f'{self.save_dir}/EGMT.json'\n self.write_data_dict_to_json(data, filename)\n\n\n def generate_json_for_attr(self, attr, strm):\n subjects = self.sql_helper.get_unique_subject_descr_with_attr(strm, attr)\n data = {subject: [] for subject in subjects}\n for subject_descr in subjects: # 'Computer Science' is considered a subject\n catalog_numbers = self.sql_helper.get_catalog_numbers_for_subject_with_attr(strm, attr, subject_descr)\n for catalog_number in catalog_numbers:\n session_list = self.sql_helper.get_sessions_for_class(strm, subject_descr, catalog_number)\n class_dict = self.generate_dict_for_class(catalog_number, subject_descr, session_list)\n data[subject_descr].append(class_dict)\n \n # write data to json\n json_filename = f'{self.save_dir}/{attr}.json'\n self.write_data_dict_to_json(data, json_filename)\n\n\n def generate_json_for_acad_org(self, acad_org, strm):\n subjects = self.sql_helper.get_unique_subjects_in_org(strm, acad_org)\n data = {subject: [] for subject in subjects}\n for subject_descr in subjects: # 'Computer Science' is considered a subject\n catalog_numbers = self.sql_helper.catalog_numbers_for_subject(strm, acad_org, subject_descr)\n for catalog_number in catalog_numbers:\n\n session_list = self.sql_helper.get_sessions_for_class_with_org(strm, acad_org, subject_descr, catalog_number)\n\n class_dict = self.generate_dict_for_class(catalog_number, subject_descr, session_list)\n data[subject_descr].append(class_dict)\n \n # write data to json\n stripped_acad_org = acad_org.lstrip(\"\\'\").strip(\"\\'\")\n json_filename = f'{self.save_dir}/{stripped_acad_org}.json'\n self.write_data_dict_to_json(data, json_filename)\n\n\n def strm_to_str(self, strm):\n year = str(strm)[1:3]\n season = str(strm)[3:]\n if season == '1':\n season = 'January'\n elif season == '2':\n season = 'Spring'\n elif season == '6':\n season = 'Summer'\n elif season == '8':\n season = 'Fall'\n \n # get the first two digits of the year\n current_year = datetime.now().year\n first_two_digits = str(current_year)[:2]\n\n return f'{season} {first_two_digits}{year}'\n\n\n\n\n","repo_name":"UVA-Course-Explorer/course-data","sub_path":"json_gen.py","file_name":"json_gen.py","file_ext":"py","file_size_in_byte":9058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7924958626","text":"# 텍스트 파일 생성 및 줄 입력 \nf = open(\"TextFileCreate.txt\", \"r\") # Read 읽기모드\n\nif f.mode == 'r':\n #data = f.read() # 전체 읽어서 출력\n #print(data)\n\n datas = f.readlines() # 한 줄씩 읽어서 출력\n for d in datas:\n print(d)\n\nf.close()\n","repo_name":"VisualAcademy/PythonNote","sub_path":"PythonNote/58_IO/TextFileRead.py","file_name":"TextFileRead.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"13028711761","text":"import numpy as np\nfrom tensorflow import keras\nfrom Part1 import X\nfrom Part1 import Y\n\n\n\n\n##############################################################\n#Build Keras Model to using layers and nodes\n\ndef model(layers,nodes):\n model = keras.models.Sequential()\n for i in range(layers):\n model.add(keras.layers.Dense(nodes, input_dim= 8 ,use_bias= True, activation= \"sigmoid\"))\n model.add(keras.layers.Dense(10,use_bias= True, activation=\"sigmoid\"))\n sft = keras.optimizers.SGD(lr=0.01)\n model.compile(optimizer= sft,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n return model\n\n#Run loops for layers and nodes and compute testing accuracy\ndef compute_test_accuracy(layers,nodes):\n for i in layers:\n for j in nodes:\n models = model(i,j)\n history = models.fit(x=X, y=Y, batch_size=1, epochs=100, validation_split=0.3, verbose= 0, shuffle= 0)\n a = np.array(history.history['val_acc'])\n final_val_acc = 1- a[99]\n print(\"Testing error for %f hidden layers and nodes %f is %f\" %(i,j,final_val_acc))\n\n\nlayers = [1,2,3]\nnodes = [3,6,9,12]\ncompute_test_accuracy(layers,nodes)\n\n\n\n\n\n\n\n\n\n","repo_name":"Zamiko/ECS171-Fall-2018","sub_path":"Homework2/Part5.py","file_name":"Part5.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73411796880","text":"\"\"\"\r\n依赖django的功能函数\r\n\"\"\"\r\n\r\nfrom .. import pure\r\nimport base64\r\nimport math\r\nfrom math import ceil\r\n\r\nimport pandas as pd\r\n\r\nfrom django.core.paginator import Paginator\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.pagination import PageNumberPagination\r\nfrom rest_framework.generics import GenericAPIView\r\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\r\nfrom rest_framework.request import Request\r\nfrom functools import wraps\r\nfrom django.db import connection\r\nfrom django.db.models import Q, F\r\nfrom django.db.models import QuerySet\r\nfrom django.db.models import Max\r\nfrom django.conf import settings\r\nfrom warnings import warn\r\nfrom django.core.management.color import no_style\r\nfrom django.db import connection\r\n\r\nfrom rest_framework.renderers import JSONRenderer\r\nfrom django.forms import model_to_dict\r\nfrom django.db.models import QuerySet\r\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin, DestroyModelMixin\r\nfrom rest_framework import status\r\nfrom django.db.models import Model\r\nfrom rest_framework.exceptions import APIException\r\nfrom rest_framework.exceptions import ErrorDetail\r\nimport re\r\nfrom django.contrib.contenttypes.models import ContentType\r\nimport json\r\nfrom rest_framework import serializers as s\r\nfrom django.http.request import QueryDict\r\nfrom .auth import get_my_api_error, my_api_assert_function\r\nimport re\r\nimport sys\r\nfrom .conf import PAGINATION_SETTINGS\r\nfrom django.db import models as m\r\nfrom django.db import connection\r\nfrom copy import deepcopy\r\nfrom .conf import db_engine\r\nimport numpy as np\r\nfrom django.core.cache import cache\r\nfrom ..pure import zip_string_by_md5\r\n\r\n\r\ndef get_list(query_dc, key):\r\n if isinstance(query_dc, QueryDict):\r\n ret = query_dc.getlist(key)\r\n elif isinstance(query_dc, dict):\r\n ret = query_dc.get(key, [])\r\n else:\r\n raise TypeError('query_dc类型不明!')\r\n\r\n if isinstance(ret, str):\r\n ret = [ret]\r\n\r\n if ret == ['']:\r\n ret = []\r\n\r\n return ret\r\n\r\n\r\ndef get_key_from_request_data_or_self_obj(request_data, self_obj, key, get_type=None):\r\n \"\"\"\r\n 优先检索request_data是否有key, 其次检索self_obj是否有key这个属性\r\n :param key: 变量名\r\n :return:\r\n \"\"\"\r\n query_dc = request_data\r\n\r\n # 让request携带的数据可以覆盖自身的key值\r\n if isinstance(self_obj, dict):\r\n ret_0 = self_obj.get(key)\r\n else:\r\n ret_0 = getattr(self_obj, key) if hasattr(self_obj, key) else None\r\n\r\n if get_type == 'list':\r\n value = get_list(query_dc, key)\r\n if '__None__' in value:\r\n ret = []\r\n else:\r\n ret = value if value not in [None, [], [''], \"\"] else ret_0\r\n return ret\r\n elif get_type == 'bool':\r\n value = query_dc.get(key)\r\n # ret_0 = getattr(self_obj, key) if hasattr(self_obj, key) else None\r\n ret_1 = pure.convert_query_parameter_to_bool(value) if value else None\r\n ret = ret_1 if ret_1 is not None else ret_0\r\n return bool(ret)\r\n else:\r\n value = query_dc.get(key)\r\n ret = value if value is not None else ret_0\r\n return ret\r\n\r\n\r\ndef set_query_dc_value(query_dc: (QueryDict, dict), new_dc: dict):\r\n assert isinstance(query_dc, (dict, QueryDict)), 'query_dc必须是`QueryDict`或`dict`类型!'\r\n if not hasattr(query_dc, \"_mutable\"):\r\n query_dc.update(new_dc)\r\n return query_dc\r\n\r\n query_dc._mutable = True\r\n for key, value in new_dc.items():\r\n ls = value if isinstance(value, (tuple, list)) else [value]\r\n query_dc.setlist(key, ls)\r\n query_dc._mutable = False\r\n return query_dc\r\n\r\n\r\ndef get_field_names_by_model(model_class, field_attr='name', exclude_fields=None):\r\n \"\"\"\r\n 获得`model_class.meta`中的字段名属性\r\n\r\n :param model_class: 目标模型\r\n :param field_attr: 可取[name, verbose_name]\r\n :param exclude_fields: 要排除的字段 # 差集 --> set(a).difference(set(b)), DRF在Meta中用exclude指定\r\n :return:\r\n \"\"\"\r\n if isinstance(model_class, m.QuerySet):\r\n model_class = get_base_model(model_class)\r\n assert hasattr(model_class, '_meta'), f'model_class没有`_meta`属性? type(model_class): {type(model_class)}'\r\n\r\n fields = model_class._meta.fields\r\n field_names = [getattr(field, field_attr) for field in fields]\r\n\r\n if exclude_fields:\r\n assert isinstance(exclude_fields, (list, tuple)), '`exclude_fields`必须为list或tuple类型!'\r\n field_names = pure.SetUtils.get_ls_a_different_ls_b(field_names, exclude_fields, keep_sort=True)\r\n return field_names\r\n\r\n\r\ndef get_base_serializer(base_model, base_fields='__all__', auto_generate_annotate_fields=None):\r\n \"\"\"\r\n 生成一个基础序列化器\r\n\r\n :param base_model: queryset或者base_model\r\n :param base_fields: 想要返回的字段\r\n :param auto_generate_annotate_fields: 指定将自动生成的annotate的字段, 为[True, '__all__']时自动替换为base_fields\r\n :return:\r\n \"\"\"\r\n if base_fields == ['__all__']:\r\n base_fields = '__all__'\r\n\r\n base_model = get_base_model(base_model)\r\n field_names = get_field_names_by_model(base_model)\r\n if base_fields != '__all__' and auto_generate_annotate_fields is None:\r\n if len(set(base_fields) - set(field_names)):\r\n auto_generate_annotate_fields = True\r\n\r\n if auto_generate_annotate_fields is None:\r\n class BaseSerializer(s.ModelSerializer):\r\n class Meta:\r\n model = base_model\r\n fields = base_fields\r\n\r\n base_serializer = BaseSerializer\r\n return base_serializer\r\n else:\r\n if auto_generate_annotate_fields is True or auto_generate_annotate_fields in ['__all__', ['__all__']]:\r\n auto_generate_annotate_fields = base_fields\r\n elif isinstance(auto_generate_annotate_fields, list):\r\n auto_generate_annotate_fields = auto_generate_annotate_fields + field_names\r\n else:\r\n raise ValueError(f\"auto_generate_annotate_fields[{auto_generate_annotate_fields}]取值错误!\")\r\n\r\n # --- 这里要把queryset里有, 但model.fields里没有的字段在serializers时自动加上\r\n new_dc_ls = []\r\n new_func_ls = []\r\n for field in auto_generate_annotate_fields:\r\n if field not in field_names and field not in '__all__':\r\n # 指定方法字段 SerializerMethodField 后, 再增加 get_field_function.\r\n field_method = s.SerializerMethodField()\r\n new_func_name = f'get_{field}'\r\n\r\n def get_field_value(self, obj):\r\n ret = None\r\n k_name = getattr(self, '__field_name__')\r\n if hasattr(obj, k_name):\r\n ret = getattr(obj, k_name)\r\n elif isinstance(obj, dict) and k_name in obj:\r\n ret = obj.get(k_name)\r\n elif '__' in k_name:\r\n qs_ls = get_base_queryset(obj)\r\n ret = qs_ls.filter(pk=obj.pk).values(k_name)[0].get(k_name)\r\n else:\r\n msg = f'自动生成的[{k_name}]字段值为空! --- from get_base_serializer'\r\n warn(msg)\r\n return ret\r\n\r\n new_func_cls = {\r\n '__field_name__': field,\r\n new_func_name: get_field_value,\r\n }\r\n new_func_cls = type(\"new_func_cls\", (object,), new_func_cls)\r\n new_func = getattr(new_func_cls(), new_func_name)\r\n\r\n new_dc_i = {\r\n field: field_method,\r\n }\r\n new_func_i = {\r\n new_func_name: new_func,\r\n }\r\n new_dc_ls.append(new_dc_i)\r\n new_func_ls.append(new_func_i)\r\n\r\n # --- 生成新序列化器base_serializer\r\n meta_dc = {\r\n 'model': base_model,\r\n 'fields': base_fields\r\n }\r\n Meta = type(\"Meta\", (object,), meta_dc)\r\n cls_dc = {\r\n 'Meta': Meta,\r\n }\r\n for i in range(len(new_dc_ls)):\r\n cls_dc.update(new_dc_ls[i])\r\n cls_dc.update(new_func_ls[i])\r\n base_serializer = type(\"BaseSerializer\", (s.ModelSerializer,), cls_dc)\r\n return base_serializer\r\n\r\n\r\ndef judge_is_obj_level_of_request(request):\r\n \"\"\"\r\n 判断本次访问是否为obj对象级, 否则就是model模型级\r\n \"\"\"\r\n if 'pk' in request._request.resolver_match.kwargs:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef conv_queryset_ls_to_serialzer_ls(qs_ls: list):\r\n \"\"\"\r\n qs_ls: 多个queryset数据的序列化, 手动转化为dc_ls\r\n \"\"\"\r\n dc_ls = []\r\n if not qs_ls:\r\n return dc_ls\r\n\r\n q = qs_ls[0]\r\n if not isinstance(q, dict):\r\n q = model_to_dict(q)\r\n kname_ls = list(q.keys())\r\n\r\n dc_ls = []\r\n for q in qs_ls:\r\n # print(q)\r\n for kname_i in kname_ls:\r\n dc = {\r\n kname_i: getattr(q, kname_i)\r\n }\r\n dc_ls.append(dc)\r\n return dc_ls\r\n\r\n\r\ndef conv_qs_ls_to_serializer_data(qs_ls):\r\n base_serializer = get_base_serializer(qs_ls)\r\n dc_ls = base_serializer(qs_ls, many=True).data\r\n return dc_ls\r\n\r\n\r\ndef get_field_type_in_db(model, field_name):\r\n \"\"\"根据模型和字段名, 获取这个字段在数据库中对应的类型\"\"\"\r\n tp = model._meta.get_field(field_name).get_internal_type()\r\n return tp\r\n\r\n\r\ndef convert_db_field_type_to_python_type(tp):\r\n tp = re.sub(r'\\(.*\\)', '', tp) # 删除括号内的内容, 如\"CharField(source='more_group.explain') \"\r\n if tp in ['TextField', 'CharField', 'DateField', 'DateTimeField', 'SerializerMethodField']:\r\n field_type = 'str'\r\n elif tp in ['IntegerField', 'AutoField', 'BigAutoField']:\r\n field_type = 'int'\r\n elif tp in ['FloatField']:\r\n field_type = 'float'\r\n elif tp == 'BooleanField':\r\n field_type = 'bool'\r\n elif '=' in tp:\r\n # 类, 一般返回一个dc_ls类型\r\n field_type = 'list'\r\n # elif tp == 'FileField':\r\n # field_type = 'file'\r\n else:\r\n field_type = tp\r\n return field_type\r\n\r\n\r\ndef get_field_type_in_py(model, field_name):\r\n \"\"\"根据模型和字段名, 获取这个字段在python中对应的类型\"\"\"\r\n tp = get_field_type_in_db(model, field_name)\r\n field_type = convert_db_field_type_to_python_type(tp)\r\n return field_type\r\n\r\n\r\ndef reset_db_sequence(model):\r\n \"\"\"重置数据库索引, 避免postgresql在手动导入csv/excel后出错.\"\"\"\r\n md = get_base_model(model)\r\n sequence_sql = connection.ops.sequence_reset_sql(no_style(), [md])\r\n with connection.cursor() as cursor:\r\n for sql in sequence_sql:\r\n cursor.execute(sql)\r\n cursor.close()\r\n\r\n\r\ndef APIResponse(ret=None, status=200, msg=None):\r\n if isinstance(ret, Response):\r\n ret = ret.data\r\n ret = pure.add_status_and_msg(ret, status=status, msg=msg)\r\n ret = Response(ret)\r\n return ret\r\n\r\n\r\nclass Pagination(PageNumberPagination):\r\n \"\"\"\r\n * 默认分页器参数设置\r\n\r\n - page_size: 每页16个\r\n - page_size_query_param: 前端控制每页数量时使用的参数名, 'page_size'\r\n - page_query_param: 页码控制参数名\"p\"\r\n - max_page_size: 最大1000页\r\n \"\"\"\r\n page_size = int(PAGINATION_SETTINGS.get('page_size'))\r\n page_size_query_param = PAGINATION_SETTINGS.get('page_size_query_param', 'page_size')\r\n page_query_param = PAGINATION_SETTINGS.get('page_query_param', 'p')\r\n max_page_size = int(PAGINATION_SETTINGS.get('max_page_size'))\r\n\r\n\r\nclass StateMsgResultJSONRenderer(JSONRenderer):\r\n\r\n def render(self, data, accepted_media_type=None, renderer_context=None):\r\n if 'status' not in data and 'msg' not in data:\r\n if 'detail' in data:\r\n e = data.pop('detail')\r\n\r\n msg = str(e)\r\n if data:\r\n msg += str(data)\r\n\r\n if isinstance(e, ErrorDetail) and e.code == 'permission_denied':\r\n status = 403\r\n else:\r\n print('! ************ 莫名返回格式 StateMsgResultJSONRenderer **************')\r\n status = 404\r\n try:\r\n msg = f\"[{str(e.code)}] {msg}\"\r\n except:\r\n pass\r\n data = {\r\n 'status': status,\r\n 'msg': msg,\r\n 'result': [],\r\n }\r\n else:\r\n pass\r\n return super(StateMsgResultJSONRenderer, self).render(data, accepted_media_type, renderer_context)\r\n\r\n\r\ndef get_base_model(obj) -> Model:\r\n \"\"\"判断是Queryset还是BaseModel\"\"\"\r\n if isinstance(obj, QuerySet):\r\n return obj.model\r\n else:\r\n if isinstance(obj, ContentType):\r\n base_model = obj.model_class()\r\n return base_model\r\n\r\n if hasattr(obj, 'objects'):\r\n # BaseModel\r\n return obj\r\n elif hasattr(obj.__class__, 'objects'):\r\n # 单个obj\r\n return obj.__class__\r\n else:\r\n return obj\r\n\r\n\r\ndef get_base_queryset(obj) -> QuerySet:\r\n \"\"\"\r\n 返回所有obj类型的QuerySet\r\n \"\"\"\r\n ret = get_base_model(obj)\r\n if ret:\r\n ret = ret.objects.all()\r\n return ret\r\n\r\n\r\ndef conv_to_queryset(obj) -> QuerySet:\r\n \"\"\"\r\n 强制转换为QuerySet\r\n \"\"\"\r\n if not isinstance(obj, QuerySet):\r\n ret = get_base_model(obj)\r\n ret = ret.objects.all()\r\n else:\r\n ret = obj\r\n return ret\r\n\r\n\r\ndef _paginate_qsls_to_dcls(qsls, serializer, page: int, per_page=16, context=None):\r\n \"\"\"\r\n * 手动分页函数\r\n\r\n - 指定模型的queryset_ls和serializer, 然后按给定的page和per_page参数获取分页后的数据\r\n \"\"\"\r\n page_size = int(per_page)\r\n\r\n if page_size == 0:\r\n page_dc = {\r\n \"count_items\": qsls.count(),\r\n \"total_pages\": None,\r\n \"page_size\": page_size,\r\n \"p\": int(page)\r\n }\r\n return [], page_dc\r\n\r\n p = Paginator(qsls, per_page)\r\n page_dc = {\r\n \"count_items\": int(p.count),\r\n \"total_pages\": int(p.num_pages),\r\n \"page_size\": page_size,\r\n \"p\": int(page)\r\n }\r\n\r\n page_obj = p.get_page(page)\r\n\r\n context = {} if not context else context # 避免序列化报错\r\n\r\n # --- 处理单个Model和多个Model的情况\r\n if serializer.__class__.__name__ == 'function':\r\n try:\r\n dc_ls = serializer(page_obj, context=context)\r\n except Exception as e:\r\n raise Exception(f'--- paginate_qsls_to_dcls错误!!! 2022/2/25, error: {e}')\r\n else:\r\n dc_ls = serializer(page_obj, many=True, context=context).data\r\n\r\n return dc_ls, page_dc\r\n\r\n\r\ndef get_md5_query_for_qs_ls(qs_ls, header=\"\"):\r\n \"\"\"\r\n 根据一个`qs_ls`, 获取其`query`对应的md5摘要\r\n \"\"\"\r\n _qs_ls = qs_ls.order_by().values(\"pk\")\r\n _query = f\"{header}{_qs_ls.query}\"\r\n query = zip_string_by_md5(_query)\r\n return query\r\n\r\n\r\ndef get_count(qs_ls, expired_time=None):\r\n \"\"\"\r\n 获取指定qs_ls.query的count\r\n :param qs_ls: 查询集\r\n :param expired_time: 缓存过期时间, None则不使用缓存\r\n :return:\r\n \"\"\"\r\n if expired_time is None:\r\n return qs_ls.count()\r\n\r\n query = get_md5_query_for_qs_ls(qs_ls, header=\"count__\")\r\n\r\n count = cache.get(query)\r\n if count is None:\r\n count = qs_ls.count()\r\n\r\n if expired_time:\r\n cache.set(query, count, expired_time)\r\n\r\n # print(f\"--- count: {count}, query: [{query}]\")\r\n # print(f\"~~~ _query: [{_query}]\")\r\n\r\n return count\r\n\r\n\r\ndef paginate_qsls_to_dcls(qs_ls, serializer, page: int, per_page=16, context=None, cache_expired_time=0, get_page_dc=True):\r\n \"\"\"\r\n 性能优化分页器\r\n\r\n :param qs_ls: queryset查询集\r\n :param serializer: 序列化器\r\n :param page: 页码\r\n :param per_page: 每页数量\r\n :param context: 上下文\r\n :param cache_expired_time: `get_count`的缓存时间\r\n :param get_page_dc: 是否获取page_dc\r\n :return: 分页后的数据dc_ls\r\n \"\"\"\r\n\r\n page_size = int(per_page)\r\n\r\n count_items = get_count(qs_ls, expired_time=cache_expired_time)\r\n total_pages = ceil(count_items / page_size)\r\n\r\n # if get_page_dc: # 性能问题\r\n # count_items = get_count(qs_ls, expired_time=cache_expired_time)\r\n # total_pages = ceil(count_items / page_size)\r\n # else:\r\n # count_items = 0\r\n # total_pages = None\r\n\r\n if page_size == 0:\r\n page_dc = {\r\n \"count_items\": count_items,\r\n \"total_pages\": None,\r\n \"page_size\": page_size,\r\n \"p\": int(page)\r\n }\r\n return [], page_dc\r\n\r\n p = int(page)\r\n page_dc = {\r\n \"count_items\": count_items,\r\n \"total_pages\": total_pages,\r\n # \"num_pages\": total_pages,\r\n \"page_size\": page_size,\r\n \"p\": p\r\n }\r\n # print(page_dc)\r\n\r\n start_i = (p - 1) * page_size\r\n start_i = 0 if start_i < 0 else start_i\r\n end_i = p * page_size\r\n end_i = count_items if end_i > count_items else end_i\r\n page_obj = qs_ls[start_i: end_i]\r\n\r\n context = {} if not context else context # 避免序列化报错\r\n\r\n # --- 处理单个Model和多个Model的情况\r\n if serializer.__class__.__name__ == 'function':\r\n try:\r\n dc_ls = serializer(page_obj, context=context)\r\n except Exception as e:\r\n raise Exception(f'*** paginate_qsls_to_dcls错误! error: {e}')\r\n else:\r\n dc_ls = serializer(page_obj, many=True, context=context).data\r\n\r\n return dc_ls, page_dc\r\n\r\n\r\ndef conv_queryset_to_dc_ls(queryset: QuerySet):\r\n dc_ls = []\r\n for q in queryset:\r\n dc_ls.append(q)\r\n return dc_ls\r\n\r\n\r\ndef order_qs_ls_by_id(qs_ls, sort_by='id', ascending=True):\r\n df = pd.DataFrame(qs_ls)\r\n if df.empty:\r\n return []\r\n df = df.sort_values(by=sort_by, ascending=ascending)\r\n\r\n cols = df.columns\r\n dc_ls = []\r\n for i, row in df.iterrows():\r\n dc = {}\r\n for j in range(len(cols)):\r\n _dc = {cols[j]: row.get(cols[j])}\r\n dc.update(_dc)\r\n dc_ls.append(dc)\r\n return dc_ls\r\n\r\n\r\ndef order_by_order_type_ls(queryset, order_type_ls) -> QuerySet:\r\n \"\"\"\r\n 根据传入的order_type_ls参数, 对queryset进行排序.\r\n\r\n 若order_type_ls包含__None__, 则清空queryset的排序规则.\r\n \"\"\"\r\n if '__None__' in order_type_ls:\r\n ret = queryset.order_by()\r\n elif order_type_ls is None or isinstance(order_type_ls, str):\r\n ret = order_by_order_type(queryset, order_type_ls)\r\n else:\r\n ls = []\r\n for order_type in order_type_ls:\r\n ls.append(order_type)\r\n # if isinstance(order_type, str):\r\n # if order_type.startswith('-'):\r\n # order_type1 = order_type[1:]\r\n # ls.append(F(order_type1).desc(nulls_last=True)) # 这个`nulls_last`之类操作极大影响性能!\r\n # else:\r\n # ls.append(F(order_type).asc(nulls_first=True))\r\n # else:\r\n # ls.append(order_type)\r\n ret = queryset.order_by(*ls)\r\n return ret\r\n\r\n\r\ndef order_by_order_type(queryset, order_type=None):\r\n ret = queryset\r\n if order_type:\r\n if order_type.startswith('-'):\r\n order_type1 = order_type[1:]\r\n ret = queryset.order_by(F(order_type1).desc(nulls_last=True))\r\n else:\r\n ret = queryset.order_by(F(order_type).asc(nulls_first=True))\r\n\r\n return ret\r\n\r\n\r\ndef api_decorator(func):\r\n \"\"\"\r\n * API装饰器\r\n\r\n - 如果运行出错, 将格式化输出错误的信息, 并返回给前端, 而不会报错.\r\n - 自动处理postgresql中idle状态connection过多的情况\r\n \"\"\"\r\n @wraps(func)\r\n def wrapped_function(*args, **kwargs):\r\n try:\r\n return func(*args, **kwargs)\r\n except Exception as e:\r\n print('--- API Error! ---')\r\n print(e)\r\n msg = f'Error! {str(e)}'\r\n\r\n e_str = str(e)\r\n if 'client' in e_str:\r\n msg += '!!!!!! 可能出现postgresql的idle链接状况???'\r\n print(msg)\r\n # --- postgres的idle链接需要解决, 关闭旧链接(以下使用), 或单线程运行`manage.py runserver --nothreading`\r\n from django.db import close_old_connections\r\n from django.db import connection\r\n close_old_connections()\r\n with connection.cursor() as cursor:\r\n sql = \"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE state = 'idle'\"\r\n cursor.execute(sql)\r\n row = cursor.fetchall()\r\n print(sql)\r\n print(row)\r\n my_api_assert_function(False, msg=msg, status='404')\r\n return wrapped_function\r\n\r\n\r\ndef get_model_max_id_in_db(model):\r\n \"\"\"\r\n 仅适用于postgresql, mysql直接忽略就行.\r\n \"\"\"\r\n meta = model._meta\r\n if not callable(model):\r\n model = type(model)\r\n ordering = meta.ordering\r\n\r\n assert sum([1 if f.name == 'id' and f.primary_key is True else 0 for f in meta.fields]), \"Model的主键必须是id!\"\r\n\r\n if ordering:\r\n if ordering[0] == '-id':\r\n obj = model.objects.first()\r\n max_id = obj.id if obj else 0\r\n ret = max_id + 1 if isinstance(max_id, int) else 1\r\n return ret\r\n if ordering[0] == 'id':\r\n obj = model.objects.last()\r\n max_id = obj.id if obj else 0\r\n ret = max_id + 1 if isinstance(max_id, int) else 1\r\n return ret\r\n qs = model.objects.all()\r\n try:\r\n max_id = qs.aggregate(max_id=Max('id')).get('max_id') if qs.count() else 0\r\n ret = max_id + 1 if isinstance(max_id, int) else 1\r\n except Exception as e:\r\n warn(e)\r\n ret = 1\r\n return ret\r\n\r\n\r\ndef old_get_model_max_id_in_db(model):\r\n meta = model._meta\r\n\r\n assert sum([1 if f.name == 'id' and f.primary_key is True else 0 for f in meta.fields]), \"Model的主键必须是id!\"\r\n\r\n cursor = connection.cursor()\r\n # db_prefix = meta.__str__().split('.')[0]\r\n\r\n # --- 先尝试创建id_seq\r\n id_seq = f\"{meta.app_label}_{meta.db_table}_id_seq\"\r\n\r\n try:\r\n sql = f\"\"\"CREATE SEQUENCE IF NOT EXISTS {id_seq}\"\"\"\r\n cursor.execute(sql)\r\n except Exception as e:\r\n # mysql不执行本函数也能正常运行\r\n print(e)\r\n print('不是PostgreSQL无法运行CREATE SEQUENCE语句! 请确认数据库类型!')\r\n\r\n # --- 找出最大的id\r\n sql = f\"\"\"select setval('{id_seq}', (select max(id) from \"{meta.db_table}\")+1);\"\"\"\r\n print('---', sql)\r\n # sql = '(select max(id) from \"{meta.db_table}\")'\r\n # print('sql---', sql)\r\n cursor.execute(sql)\r\n row = cursor.fetchall()\r\n curr_id = row[0][0]\r\n ret = 0 if curr_id is None else curr_id\r\n cursor.close()\r\n return ret\r\n\r\n\r\ndef get_abs_order_type_ls(order_type_ls):\r\n if isinstance(order_type_ls, str):\r\n order_type_ls = [order_type_ls]\r\n ret = [re.sub(r'^-', '', field_name) for field_name in order_type_ls]\r\n return ret\r\n\r\n\r\ndef get_executable_sql(queryset):\r\n \"\"\"\r\n 输入queryset, 获得可直接执行的sql语句\r\n \"\"\"\r\n cursor = connection.cursor()\r\n sql, params = queryset.query.sql_with_params()\r\n prefix = 'EXPLAIN '\r\n cursor.execute(prefix + sql, params)\r\n sql: str = cursor.db.ops.last_executed_query(cursor, sql, params)\r\n sql = sql[len(prefix):]\r\n cursor.close()\r\n return sql\r\n\r\n\r\ndef get_MySubQuery(my_model, field_name, function_name, output_field=m.IntegerField, alias=None):\r\n \"\"\"\r\n # 获取子查询\r\n\r\n ## 简介\r\n - 主要用在进行`qs_ls.all().order_by().order_by(field_name).distinct(field_name)`后, 再进行`annotate`操作.\r\n - 普通的`m.SubQuery`操作将在`distinct`后的`annotate`中报错!\r\n\r\n ## 参考\r\n - [django文档_1](https://django-orm-cookbook-zh-cn.readthedocs.io/zh_CN/latest/subquery.html)\r\n - [django文档_2](https://docs.djangoproject.com/zh-hans/4.0/ref/models/expressions/)\r\n\r\n :param my_model: 指定模型, 用以获取模型基本属性`meta`. 若为空, 则认为是annotate字段, 使用默认的field_name.\r\n :param field_name: 用来进行计算的字段名\r\n :param function_name: 要在数据库中调用的函数名\r\n :param output_field: 使用Query计算后, 输出的字段类型\r\n :param alias: 计算后储存结果变量名, 默认为`tmp`\r\n :return: 子查询类`MySubQuery`\r\n\r\n ## eg:\r\n SQCount = get_MySubQuery(my_model=classification_qs_ls_0, field_name=foreign_key_name, function_name='Count', output_field=m.IntegerField)\r\n classification_qs_ls = classification_qs_ls_0.annotate(\r\n # 每个学科有多少种\r\n count=SQCount(\r\n classification_qs_ls_0.filter(**{key: m.OuterRef(key)})\r\n ),\r\n )\r\n \"\"\"\r\n base_model = get_base_model(my_model)\r\n meta = base_model._meta\r\n\r\n field_names = [field.name for field in meta.fields]\r\n db_column_names = [field.db_column if field.db_column else field.name for field in meta.fields]\r\n field_dc = dict(zip(field_names, db_column_names))\r\n db_column_name = field_dc.get(field_name) # 获取字段在db中的列名\r\n db_column_name = db_column_name if db_column_name else field_name # 没有的话, 就用默认field_name\r\n\r\n alias = 'tmp' if not alias else alias\r\n my_template = f\"(SELECT {function_name}({db_column_name}) FROM (%(subquery)s) {alias})\"\r\n my_output_field = output_field\r\n\r\n class MySubQuery(m.Subquery):\r\n template = my_template\r\n output_field = my_output_field() if isinstance(my_output_field, type) else my_output_field\r\n\r\n return MySubQuery\r\n\r\n\r\ndef get_obj_by_content_type(obj_id, model_name, app_label):\r\n ct_qs_ls = ContentType.objects.filter(app_label=app_label, model=model_name)\r\n assert ct_qs_ls.count() == 1, f'ContentType数量不为1! current_value: {ct_qs_ls.count()}'\r\n ct_qs_i = ct_qs_ls[0]\r\n base_model = ct_qs_i.model_class()\r\n obj = base_model.objects.get(id=obj_id)\r\n return obj\r\n\r\n\r\ndef get_QS_by_dc(dc, add_type):\r\n \"\"\"\r\n 根据dc返回QS\r\n :param dc: 过滤条件\r\n :param add_type: 合并逻辑\r\n :return: QS\r\n \"\"\"\r\n QS = Q()\r\n for k, v in dc.items():\r\n d = {k: v}\r\n QS.add(Q(**d), add_type)\r\n return QS\r\n\r\n\r\ndef get_model_verbose_name_dc():\r\n \"\"\"\r\n 获得model_verbose_name对应的ContentType的id\r\n \"\"\"\r\n ct_qs_ls = ContentType.objects.all()\r\n dc = {}\r\n for ct_qs_i in ct_qs_ls:\r\n base_model = ct_qs_i.model_class()\r\n if base_model is not None:\r\n k = base_model._meta.verbose_name\r\n # v = ct_qs_i.model\r\n v = ct_qs_i\r\n dc.update({k: v})\r\n return dc\r\n\r\n\r\ndef get_user_ip(request):\r\n context = request.parser_context\r\n if 'HTTP_X_FORWARDED_FOR' in context[\"request\"].META:\r\n user_ip = context[\"request\"].META['HTTP_X_FORWARDED_FOR']\r\n else:\r\n user_ip = context[\"request\"].META['REMOTE_ADDR']\r\n return user_ip\r\n\r\n\r\ndef update_none_to_zero_by_field_name(qs_ls, field_name):\r\n \"\"\"\r\n 将qs_ls中field_name字段的None改为0\r\n \"\"\"\r\n filter_dc = {\r\n f'{field_name}__isnull': True\r\n }\r\n update_dc = {\r\n field_name: 0\r\n }\r\n qs_ls.filter(**filter_dc).update(**update_dc)\r\n\r\n\r\ndef get_df_by_freq_and_year(\r\n queryset,\r\n frequency_cname=None,\r\n aggregate_method_name='Sum',\r\n output_col_name=None,\r\n year_field_name='year',\r\n complete_year_ls=False,\r\n year_range_ls=None,\r\n):\r\n \"\"\"\r\n 获取加权的年份分布图\r\n\r\n * 这个比较快\r\n\r\n - aggregate_method_name, frequency_cname: 用来aggregate的方法和字段\r\n - complete_year_ls: 补全中间年份\r\n - output_col_name: 输出列名\r\n - year_field_name: 年份字段名\r\n \"\"\"\r\n year_qsv_ls = queryset.values(year_field_name).distinct(year_field_name).order_by(year_field_name)\r\n year_ls = [dc.get(year_field_name) for dc in year_qsv_ls]\r\n assert hasattr(m, aggregate_method_name), f'django.db.models不存在[{aggregate_method_name}]方法!'\r\n aggregate_method = getattr(m, aggregate_method_name)\r\n\r\n output_col_name = output_col_name if output_col_name else frequency_cname\r\n\r\n if complete_year_ls:\r\n if year_range_ls:\r\n year_min, year_max = year_range_ls\r\n else:\r\n year_min, year_max = min(year_ls), max(year_ls)\r\n year_range = list(range(year_min, year_max + 1))\r\n else:\r\n year_range = year_ls\r\n\r\n year_distribution_dc_ls = []\r\n for year in year_range:\r\n aggregate_dc = {'tmp': aggregate_method(frequency_cname)}\r\n value = queryset.filter(year=year).aggregate(**aggregate_dc)\r\n value = 0 if value is None or value.get('tmp') is None else value.get('tmp')\r\n\r\n year_distribution_dc = {\r\n year_field_name: year,\r\n output_col_name: value,\r\n }\r\n year_distribution_dc_ls.append(year_distribution_dc)\r\n return year_distribution_dc_ls\r\n\r\n\r\ndef judge_db_is_migrating():\r\n if 'makemigrations' in sys.argv or 'migrate' in sys.argv:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef get_total_occurrence_times_by_keywords(total_qs_ls, search_field_ls=None, keywords=None, get_frequence=True, topK=5, rank_field_name=None, rank__gte=None, search_weight_ls=None, search_conf=None):\r\n \"\"\"\r\n # 关键词次数统计\r\n - 统计keywords在qs_ls的search_field_ls中是否出现, 以及出现次数.\r\n - 可以出现次数作为相关性排序依据\r\n\r\n :param total_qs_ls: 用来统计的queryset\r\n :param search_field_ls: 要匹配的字段\r\n :param search_weight_ls: 字段对应权重\r\n :param search_conf: 检索配置\r\n :param keywords: 用来检索的关键词\r\n :param get_frequence: 是否精确计算`keywords`在字段中出现的次数\r\n :param topK: 提取关键词的个数\r\n :param rank_field_name: annotate出来的排序字段名, 默认`searh_rank`\r\n :param rank__gte: 出现次数过滤的最小阈值\r\n :return: queryset, 且annotate出现次数, 存在`rank_field_name`字段中\r\n \"\"\"\r\n from django.db import models as m\r\n from django.db.models import functions\r\n\r\n assert keywords is not None, '`search_keywords`不能为空!'\r\n\r\n if search_conf:\r\n search_field_ls = list(search_conf.keys())\r\n search_weight_ls = list(search_conf.values())\r\n\r\n rank_field_name = rank_field_name if rank_field_name else 'search_rank'\r\n rank__gte = rank__gte if rank__gte is not None else 0.00001\r\n\r\n if isinstance(keywords, str):\r\n from bddjango.tools.extract_keyword import extract_keywords\r\n keywords = extract_keywords.handle(keywords, cut_all=False, topK=topK)\r\n # print('--- extract_keywords:', keywords)\r\n\r\n assert isinstance(keywords, list), 'keywords的类型必须为str或list!'\r\n\r\n search_dc = {}\r\n occurance_times_ls = []\r\n\r\n for k in range(len(keywords)):\r\n # k = 1\r\n kw = keywords[k]\r\n kw_l = len(kw)\r\n for sf_i in search_field_ls:\r\n if get_frequence:\r\n k_name = f'k{k}_in_{sf_i}'\r\n\r\n # 统计每个`keyword`的出现次数\r\n dc = {\r\n k_name: (functions.Length(sf_i) - functions.Length(\r\n functions.Replace(sf_i, m.Value(kw), m.Value('')))) / kw_l\r\n }\r\n else:\r\n k_name = f'k{k}_in_{sf_i}'\r\n dc = {\r\n k_name: m.Exists(total_qs_ls.filter(pk=m.OuterRef('pk')).filter(**{f'{sf_i}__contains': kw})), # 判断是否在title中\r\n }\r\n occurance_times_ls.append(k_name)\r\n search_dc.update(dc)\r\n res_qs_ls = total_qs_ls.annotate(**search_dc)\r\n\r\n # from bddjango import show_json, show_ls\r\n # show_ls(res_qs_ls.values(*(['id'] + search_field_ls + list(search_dc.keys())))[:3])\r\n # print(f'--- 检索字段: {search_field_ls}')\r\n # show_ls(res_qs_ls.values(*(['id'] + list(search_dc.keys())))[:3])\r\n\r\n f_ls = 0\r\n for i in range(len(occurance_times_ls)):\r\n # i = 1\r\n k_name = occurance_times_ls[i]\r\n\r\n if search_weight_ls:\r\n assert len(search_weight_ls) == len(search_field_ls), '`search_weight_ls`和`search_field_ls`长度不一致!'\r\n keyword_i = k_name.split('_in_')[-1]\r\n _i = search_field_ls.index(keyword_i)\r\n weight_i = search_weight_ls[_i]\r\n _then = m.F(k_name) * weight_i if get_frequence else m.Value(weight_i)\r\n else:\r\n _then = m.F(k_name) if get_frequence else m.Value(1)\r\n\r\n if get_frequence:\r\n # f_ls += m.F(i)\r\n f_ls += m.Case(\r\n m.When(**{f'{k_name}__isnull': False}, then=_then),\r\n default=m.Value(0),\r\n output_field=m.FloatField()\r\n )\r\n else:\r\n f_ls += m.Case(\r\n m.When(**{k_name: True}, then=_then),\r\n default=0,\r\n output_field=m.FloatField()\r\n )\r\n\r\n ret_qs_ls = res_qs_ls.annotate(**{rank_field_name: f_ls})\r\n ret_qs_ls = ret_qs_ls.filter(**{f'{rank_field_name}__gte': rank__gte})\r\n ret_qs_ls = ret_qs_ls.order_by(*[f'-{rank_field_name}', 'pk'])\r\n ret = ret_qs_ls\r\n return ret\r\n\r\n\r\ndef get_statistic_fields_result(queryset, statistic_fields, statistic_size=5, descend=1, order_config_dc_ls=None):\r\n \"\"\"\r\n # 统计字段 fields 的值各出现了几次\r\n - statistic_fields 为迭代型时(如[name, id]), 只统计第一个字段, 然后将第二个字段用filter补上一个值\r\n - descend: {0: `counts`顺序, 1:`counts`倒叙, 2: `self`顺序, 3: `self`倒叙}\r\n \"\"\"\r\n from bddjango import conv_queryset_to_dc_ls\r\n from bddjango import conv_df_to_serializer_data\r\n\r\n assert isinstance(statistic_fields, list), 'statistic_fields必须为list类型!'\r\n statistic_size = int(statistic_size)\r\n\r\n # if descend:\r\n # ordering = ['-counts']\r\n # else:\r\n # ordering = ['counts']\r\n\r\n if descend == 0:\r\n ordering = ['counts']\r\n elif descend == 1:\r\n ordering = ['-counts']\r\n elif descend == 2: # self顺序\r\n ordering = descend\r\n elif descend == 3: # self倒序\r\n ordering = descend\r\n else:\r\n raise ValueError('ordering必须在[0, 3]之间, 取值: {0: `counts`顺序, 1:`counts`倒叙, 2: `self`顺序, 3: `self`倒叙}')\r\n\r\n order_df = None\r\n if order_config_dc_ls:\r\n _order_config_dc_i = {\r\n 'name': None,\r\n 'ordering': None,\r\n 'pop_name_ls': None,\r\n 'loc_ls': None,\r\n }\r\n order_config_dc_ls.insert(0, _order_config_dc_i)\r\n order_df = pd.DataFrame(order_config_dc_ls)\r\n order_df = order_df.set_index('name')\r\n # Drop rows with a NaN index\r\n order_df = order_df.drop(order_df.index[order_df.index.isna()])\r\n\r\n statistic_dc = {}\r\n for field in statistic_fields:\r\n # break\r\n if isinstance(field, (tuple, list)):\r\n field_qsv = queryset.values(field[0])\r\n dc_name = field[0]\r\n else:\r\n field_qsv = queryset.values(field)\r\n dc_name = field\r\n\r\n if not isinstance(ordering, list):\r\n assert ordering in [2, 3], 'descend为2, 则按自身顺序排序, 为3则按自身倒序排序'\r\n ordering = dc_name if ordering == 2 else f'-{dc_name}'\r\n ordering = [ordering]\r\n\r\n if order_df is not None and dc_name in order_df.index:\r\n _ordering = order_df.loc[dc_name][0]\r\n _ordering = [_ordering] if isinstance(_ordering, str) else _ordering\r\n _ordering = ordering if isinstance(_ordering, float) and pd.isna(_ordering) else _ordering\r\n\r\n pop_name_ls = order_df.loc[dc_name][1]\r\n pop_name_ls = [pop_name_ls] if isinstance(pop_name_ls, str) else pop_name_ls\r\n pop_name_ls = None if isinstance(pop_name_ls, float) and pd.isna(pop_name_ls) else pop_name_ls\r\n\r\n loc_ls = order_df.loc[dc_name][2]\r\n loc_ls = [loc_ls] if isinstance(loc_ls, str) else loc_ls\r\n loc_ls = None if isinstance(loc_ls, float) and pd.isna(loc_ls) else loc_ls\r\n\r\n if pop_name_ls:\r\n from bddjango import set_utils\r\n _pop_name_ls = set_utils.get_ls_a_sub_b(pop_name_ls, loc_ls)\r\n if _pop_name_ls:\r\n field_qsv = field_qsv.exclude(**{f'{dc_name}__in': _pop_name_ls})\r\n statistic_qsv = field_qsv.annotate(counts=m.Count('pk')).order_by(*_ordering)\r\n if loc_ls:\r\n dc_ls = conv_queryset_to_dc_ls(statistic_qsv)\r\n df = pd.DataFrame(dc_ls)\r\n key_series = df[dc_name]\r\n new_df = pd.DataFrame([], columns=df.columns)\r\n for loc_i in loc_ls:\r\n if loc_i in key_series.values:\r\n index = np.where(key_series == loc_i)[0][0]\r\n _v = df.iloc[index:index+1, :]\r\n new_df = new_df.append(df.iloc[index])\r\n new_df.index = list(range(len(new_df)))\r\n else:\r\n _v = pd.DataFrame([[loc_i, 0]], columns=df.columns)\r\n new_df = new_df.append(_v)\r\n\r\n df = new_df\r\n statistic_qsv = conv_df_to_serializer_data(df)\r\n else:\r\n statistic_qsv = field_qsv.annotate(counts=m.Count('pk')).order_by(*ordering)\r\n\r\n dc_ls = list(statistic_qsv[:statistic_size])\r\n\r\n for dc in dc_ls:\r\n if isinstance(field, (tuple, list)):\r\n dc['name'] = dc.pop(field[0])\r\n for f_name in field[1:]:\r\n # f_name = field[1]\r\n obj = queryset.filter(**{field[0]: dc['name']}).order_by(f_name).values(f_name)[0]\r\n f_value = obj.get(f_name)\r\n dc[f_name] = f_value\r\n else:\r\n dc['name'] = dc.pop(field)\r\n dc = {\r\n dc_name: dc_ls\r\n }\r\n statistic_dc.update(dc)\r\n return statistic_dc\r\n\r\n\r\nclass DjangoUtils:\r\n @staticmethod\r\n def distinct(qs_ls, fields):\r\n \"\"\"\r\n 对不同数据库的distince方法进行兼容\r\n \"\"\"\r\n if isinstance(fields, str):\r\n fields = [fields]\r\n if 'postgresql' in db_engine:\r\n ret = qs_ls.distinct(*fields)\r\n else:\r\n ret = qs_ls.distinct()\r\n return ret\r\n\r\n @staticmethod\r\n def get_serializer_context_with_no_host_prefix_to_media_url(self):\r\n \"\"\"\r\n 将媒体文件的url去除host前缀\r\n \"\"\"\r\n return {\r\n 'format': self.format_kwarg,\r\n 'view': self\r\n }\r\n\r\n\r\ndistinct = DjangoUtils.distinct\r\n\r\n","repo_name":"bode135/bddjango","sub_path":"bddjango/django/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":39410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23270593464","text":"class Stack:\n\tdef __init__(self):\n\t\tself.items = []\t# 데이터 저장을 위한 리스트 준비\n\tdef push(self, val):\n\t\tself.items.append(val)\n\tdef pop(self):\n\t\ttry:\t# pop할 아이템이 없으면\n\t\t\treturn self.items.pop()\n\t\texcept IndexError:\t# indexError 발생\n\t\t\tprint(\"Stack is empty\")\n\tdef top(self):\n\t\ttry:\n\t\t\treturn self.items[-1]\n\t\texcept IndexError:\n\t\t\tprint(\"Stack is empty\")\n\tdef __len__(self):\t# len()로 호출하면 stack의 item 수 반환\n \t\treturn len(self.items)\n\tdef isEmpty(self):\n\t\treturn self.__len__() == 0\n\ndef get_token_list(expr):\n token = []\n ops = '+-*/^'\n i = 0\n while i < len(expr):\n c = expr[i]\n if c.isdigit() or c == \".\":\n val, i = get_value(expr, i)\n token.append(val)\n elif c in ops:\n token.append(c)\n i += 1\n elif c.isspace():\n i += 1\n else:\n print(\"Invalid Expression!\")\n break\n return token\n\ndef infix_to_postfix(token_list):\n\topstack = Stack()\n\toutstack = []\n\t# 연산자의 우선순위 설정\n\tprec = {}\n\tprec['('] = 0\n\tprec['+'] = 1\n\tprec['-'] = 1\n\tprec['*'] = 2\n\tprec['/'] = 2\n\tprec['^'] = 3\n\t\n\tfor token in token_list:\n\t\tif token == '(':\n\t\t\topstack.push(token)\n\t\telif token == ')':\n\t\t\twhile opstack.top() != '(':\n\t\t\t\toutstack.append(opstack.pop())\n\t\t\topstack.pop()\n\t\telif token in '+-/*^':\n\t\t\tif opstack.isEmpty() == True :\n\t\t\t\topstack.push(token)\n\t\t\telif prec[token] > prec[opstack.top()]:\n\t\t\t\topstack.push(token)\n\t\t\telse:\n\t\t\t\twhile prec[opstack.top()] >= prec[token]:\n\t\t\t\t\toutstack.append(opstack.pop())\n\t\t\t\t\tif opstack.isEmpty() == True :\n\t\t\t\t\t\tbreak\n\t\t\t\topstack.push(token)\n\t\telse:\n\t\t\toutstack.append(token)\n\t\t\t\n\twhile opstack.isEmpty() == False:\n\t\toutstack.append(opstack.pop())\n\tpostfix_expr = \" \".join(outstack)\n\treturn postfix_expr\n\n\ndef compute_postfix(postfix):\n\toperand_s = Stack()\n\tif len(postfix) <= 1 :\n\t\treturn postfix[0]\n\telif len(postfix) > 1:\n\t\tfor op in postfix:\n\t\t\tif op in '+-*/^':\n\t\t\t\toperand_1 = float(operand_s.pop())\n\t\t\t\toperand_2 = float(operand_s.pop())\n\t\t\t\tif op == '+':\n\t\t\t\t\tcalculated_operand = operand_1 + operand_2\n\t\t\t\telif op == '-':\n\t\t\t\t\tcalculated_operand = operand_2 - operand_1\n\t\t\t\telif op == '*':\n\t\t\t\t\tcalculated_operand = operand_1 * operand_2\n\t\t\t\telif op == '/':\n\t\t\t\t\tcalculated_operand = operand_2 / operand_1\n\t\t\t\telif op == '^':\n\t\t\t\t\tcalculated_operand = operand_2 ** operand_1\n\t\t\t\toperand_s.push(calculated_operand)\n\t\t\telse:\n\t\t\t\toperand_s.push(op)\n\t\treturn operand_s.pop()\n \ndef get_value(E, i):\n j = i + 1\n while j < len(E) and (E[j].isdigit() or E[j] == '.'):\n j += 1\n return E[i:j], j\n \nexpr = input()\nvalue = compute_postfix(infix_to_postfix(get_token_list(expr)))\nprint(value)\n","repo_name":"skyla2692/DataStructue","sub_path":"2.calculator.py","file_name":"2.calculator.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11695292128","text":"# import subprocess\n\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom .tele_bot import *\n\n\ndef home(request):\n users = TeleBot.objects.all()\n return render(request, 'home/home.html', {'users': users})\n\n\ndef bot_run(request):\n updater = Updater(API_KEY, use_context=True)\n dp = updater.dispatcher\n\n # Commands\n dp.add_handler(CommandHandler('start', start_command))\n dp.add_handler(CommandHandler('help', help_command))\n dp.add_handler(CommandHandler('custom', custom_command))\n\n # Messages\n dp.add_handler(MessageHandler(Filters.text, handle_message))\n\n # Log all errors\n dp.add_error_handler(error)\n\n # Run the bot\n updater.start_polling(0)\n updater.idle()\n","repo_name":"rasheedkotoor/Telegram_bot","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18471910457","text":"import pygame\r\nimport constantes\r\n\r\n\"\"\"LÓGICA DO JOGO EM FORMA DE MATRIZ\r\n0 - ESPAÇO VAZIO OU BLOCO QUEBRAVEL\r\n1 - BLOCO FIXO\r\n4 - ESPAÇO PARA O TIMER\r\n5 - PLAYER1\r\n6 - PLAYER2 \r\n7 - MOEDA\r\n9 - LUGARES QUE NÃO PODEM SER BLOO QUEBRAVEIS\"\"\"\r\n\r\n\r\nclass GerenciadorLayout:\r\n def __init__(self):\r\n self.LAYOUT = [\r\n [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\r\n [1, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 6, 1],\r\n [1, 0, 1, 8, 1, 0, 1, 0, 1, 0, 1, 0, 1, 9, 1],\r\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\r\n [1, 0, 1, 0, 1, 0, 1, 2, 1, 0, 1, 0, 1, 0, 1],\r\n [1, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 1],\r\n [1, 7, 1, 0, 1, 0, 1, 7, 1, 0, 1, 0, 1, 7, 1],\r\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\r\n [1, 0, 1, 0, 1, 0, 1, 2, 1, 0, 1, 8, 1, 0, 1],\r\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\r\n [1, 9, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],\r\n [1, 5, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ]\r\n\r\n\r\n# TELAS FINAIS DO JOGO\r\nfloresta = pygame.image.load('assets/floresta.png')\r\nfloresta = pygame.transform.scale(floresta, (constantes.WIDTH, constantes.HEIGHT))\r\ncemiterio = pygame.image.load('assets/cemiterio2.png')\r\ncemiterio = pygame.transform.scale(cemiterio, (constantes.WIDTH, constantes.HEIGHT))\r\ntela_inicial = pygame.image.load('assets/menu_inicial.png')\r\n\r\n\r\n#SPRITES DO JOGO\r\nplayer1_img = pygame.image.load('assets/kiriku.png')\r\nkiriku = 'assets/kiriku.png'\r\nesqueleto = 'assets/esqueleto brabo.png'\r\nplayer1_img = pygame.transform.scale(player1_img, (constantes.BRICK_WIDTH, constantes.BRICK_HEIGHT))\r\nplayer2_img = pygame.image.load('assets/esqueleto brabo.png')\r\nplayer2_img = pygame.transform.scale(player2_img, (constantes.BRICK_WIDTH, constantes.BRICK_HEIGHT))\r\ncoin_img = pygame.image.load('assets/coin.png')\r\ncoin_img = pygame.transform.scale(coin_img, (constantes.BRICK_WIDTH, constantes.BRICK_HEIGHT))\r\nfreeze_img=pygame.image.load('assets/freeze.png')\r\nfreeze_img=pygame.transform.scale(freeze_img, (constantes.BRICK_WIDTH, constantes.BRICK_HEIGHT))\r\nadd_time_img=pygame.image.load('assets/add_time.png')\r\nadd_time_img = pygame.transform.scale(add_time_img, (constantes.BRICK_WIDTH, constantes.BRICK_HEIGHT))\r\n","repo_name":"ata1de/AtomiKingdom","sub_path":"layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"25974705131","text":"from itertools import count\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS\nfrom PIL import Image\nfrom io import BytesIO\nimport cv2\nfrom predict import predict\nimport summarize\nfrom scrape import scrape_bbc, scrape_hitwada, scrape_toi\nimport base64\nfrom keras.models import load_model\nimport numpy as np\nfrom keras.applications import ResNet50\nfrom keras.optimizers import Adam\nfrom keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\nfrom keras.models import Sequential, Model\nfrom keras.utils import np_utils\nfrom keras.preprocessing import image, sequence\nimport cv2\nfrom keras_preprocessing.sequence import pad_sequences\nfrom tqdm import tqdm\nimport urllib.request\n\nvocab = np.load('vocab.npy', allow_pickle=True)\n\nvocab = vocab.item()\n\ninv_vocab = {v: k for k, v in vocab.items()}\n\n\nprint(\"+\"*50)\nprint(\"vocabulary loaded\")\n\n\nembedding_size = 128\nvocab_size = len(vocab)\nmax_len = 40\n\n\nimage_model = Sequential()\n\nimage_model.add(Dense(embedding_size, input_shape=(2048,), activation='relu'))\nimage_model.add(RepeatVector(max_len))\n\n\nlanguage_model = Sequential()\n\nlanguage_model.add(Embedding(input_dim=vocab_size,\n output_dim=embedding_size, input_length=max_len))\nlanguage_model.add(LSTM(256, return_sequences=True))\nlanguage_model.add(TimeDistributed(Dense(embedding_size)))\n\n\nconca = Concatenate()([image_model.output, language_model.output])\nx = LSTM(128, return_sequences=True)(conca)\nx = LSTM(512, return_sequences=False)(x)\nx = Dense(vocab_size)(x)\nout = Activation('softmax')(x)\nmodel = Model(inputs=[image_model.input, language_model.input], outputs=out)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='RMSprop', metrics=['accuracy'])\n\nmodel.load_weights('mine_model_weights.h5')\n\nprint(\"=\"*150)\nprint(\"MODEL LOADED\")\n\nresnet = ResNet50(include_top=False, weights='imagenet',\n input_shape=(224, 224, 3), pooling='avg')\n\n\n# resnet = load_model('model.h5')\n\nprint(\"=\"*150)\nprint(\"RESNET MODEL LOADED\")\n\n\napp = Flask(__name__)\nCORS(app)\n\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1\ncount_api = 0\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef after():\n\n global model, resnet, vocab, inv_vocab\n\n result = predict(model, resnet, vocab, inv_vocab, request.json, count_api)\n\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n\n return response\n\n\n@app.route('/text')\ndef generate_text_summarization():\n text = request.json['text']\n caption = summarize.text_summarize(text, count_api)\n result = {\n 'caption': caption,\n 'length': len(caption)\n }\n return jsonify(result)\n\n\n@app.route('/scrape', methods=['GET', 'POST'])\ndef scrape():\n url = request.json['url']\n website = request.json['website']\n err_msg = 'wrong URL entered'\n success_msg = 'Data scraped successfully'\n global model, resnet, vocab, inv_vocab, count_api\n\n if website == 'BBC':\n response = scrape_bbc(url)\n if response == None:\n result = {\n 'error': err_msg\n }\n else:\n result = {\n 'data': response,\n 'message': success_msg\n }\n elif website == 'TOI':\n response = scrape_toi(url)\n if response == None:\n result = {\n 'error': err_msg\n }\n else:\n result = {\n 'data': response,\n 'message': success_msg\n }\n else:\n response = scrape_hitwada(url)\n if response == None:\n result = {\n 'error': err_msg\n }\n else:\n result = {\n 'data': response,\n 'message': success_msg\n }\n\n result['call'] = True\n data = predict(model, resnet, vocab, inv_vocab, result, count_api)\n result['predict'] = data\n result['data']['count'] = len(result['data']['image_alts'].split())\n count_api += 1\n response = jsonify(result)\n response.headers.add('Access-Control-Allow-Origin', '*')\n\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"AyushSolanki123/captic-model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19614813528","text":"import os\nimport json\nimport itertools\nimport math\nfrom typing import Optional, List, Iterable, Dict\nimport ubjson\nimport cbor2\nimport numpy as np\nimport scipy as sp\nimport scipy.stats\nfrom colorama import Fore, Style\n\nYELLOW = f\"{Fore.YELLOW}{Style.BRIGHT}\"\nBLUE = f\"{Fore.BLUE}{Style.BRIGHT}\"\nGREEN = f\"{Fore.GREEN}\"\nRED = f\"{Fore.RED}{Style.BRIGHT}\"\n\n\ndef describe_dict(dictionary: dict, prefix=()):\n \"\"\"Return an hierarchical structure of the dictionary.\"\"\"\n for key, value in dictionary.items():\n path = prefix + (key,)\n yield path\n if isinstance(value, dict):\n yield from describe_dict(value, path)\n\n\ndef compare_dicts_structure(dict1, dict2):\n \"\"\"Compare if two dictionaries structures matches themselves.\"\"\"\n return sorted(describe_dict(dict1)) == sorted(describe_dict(dict2))\n\n\ndef confidence_interval(data, confidence=0.95):\n \"\"\"Return data_array values interval for data_array given confidence.\"\"\"\n data_array = 1.0 * np.array(data)\n length = len(data_array)\n std_err = scipy.stats.sem(data_array)\n h = std_err * sp.stats.t._ppf((1 + confidence) / 2.0, length - 1)\n return h\n\n\ndef flip(items, ncol: int):\n \"\"\"Swap columns with rows in respect to ncol columns.\"\"\"\n return itertools.chain(*[items[i::ncol] for i in range(ncol)])\n\n\ndef cols_number(items):\n \"\"\"Determine number of columns used by the items.\n\n Determine number of columns while trying assign equal number of\n items in rows.\n \"\"\"\n items_number = len(items)\n max_per_column = 5\n if items_number <= max_per_column:\n return items_number\n if items_number <= 2 * max_per_column:\n return math.ceil(items_number / 2)\n return max_per_column\n\n\ndef get_traffic_classes_sizes(scenario):\n \"\"\"Extract from the scenario all sizes of requests.\"\"\"\n tc_sizes = {}\n for tc_id, tc_data in scenario[\"traffic_classes\"].items():\n if tc_id[0] == \"_\":\n continue\n tc_sizes[int(tc_id)] = tc_data[\"size\"]\n return (tc_sizes, scenario)\n\n\ndef is_group_in_layer(group_name, layer_name):\n \"\"\"Check if a certain group has been included in a given layer.\n\n Layer name is created by analytic model.\n \"\"\"\n if layer_name[0] != \"L\":\n return False\n return group_name in layer_name_to_groups(layer_name)\n\n\ndef is_layer_name(layer_name):\n \"\"\"Check if the group has been treated as a whole layer.\"\"\"\n return layer_name[0] == \"L\"\n\n\ndef layer_name_to_groups(layer_name: str) -> Iterable[str]:\n \"\"\"Convert layer name to a list of collection of group names.\"\"\"\n return filter(None, layer_name.split(\":\", 1)[1].split(\";\"))\n\n\ndef is_in_groups(group_name: str, groups: List[str]):\n \"\"\"Check if the group name is in the set of groups.\n\n If the group name is a layer name, check if any layer subgroups\n is in groups set.\n \"\"\"\n if is_layer_name(group_name):\n return any(is_group_in_layer(group, group_name)\n for group in groups)\n return group_name in groups\n\n\ndef get_valid_group(group_name: str, groups) -> Optional[str]:\n \"\"\"Find and return a valid group.\n\n Find and return a valid group that is an intersection of\n group_name and groups. group_name can be a layer name.\n \"\"\"\n if is_layer_name(group_name):\n layer_name = group_name\n return next(\n (group\n for group in layer_name_to_groups(layer_name)\n if group in groups),\n None,\n )\n if group_name in groups:\n return group_name\n return None\n\n\ndef get_corresponding_group(needle_group, groups):\n \"\"\"Find an group among all groups or layers.\"\"\"\n for group in groups:\n if is_layer_name(group):\n layer_name = group\n layer_groups = layer_name_to_groups(layer_name)\n return next(\n (layer_name\n for layer_group in layer_groups\n if needle_group == layer_group\n ),\n None,\n )\n if needle_group == group:\n return group\n return None\n\n\ndef load_data(filename: str) -> Dict[str, Dict[str, dict]]:\n \"\"\"Load data from JSON file.\"\"\"\n ext = os.path.splitext(filename)[1]\n with open(filename, \"rb\") as data_file:\n if ext == \".json\":\n data = json.load(data_file)\n elif ext == \".cbor\":\n data = cbor2.load(data_file)\n elif ext == \".ubjson\":\n data = ubjson.load(data_file)\n return dict(sorted(data.items()))\n\n\ndef filter_data(indices: List[int], data: dict):\n \"\"\"Leave only results from request list of indices.\"\"\"\n if not indices:\n return data\n\n keys: List[str] = list(data.keys())\n return {k: data[k]\n for k in [keys[i] for i in indices]\n }\n\n\ndef print_scenarios(title: str, data):\n \"\"\"Print an enumerate list of scenario names.\"\"\"\n print(f\"{YELLOW}{title}:\")\n for index, scenario_name in enumerate(data.keys()):\n print(f\" {index}: {scenario_name}\")\n\n\ndef remove_prefix(text: str, prefix: str) -> str:\n \"\"\"Remove prefix from the text\"\"\"\n if text.startswith(prefix):\n return text[len(prefix):]\n return text\n","repo_name":"przemkovv/MutOSim","sub_path":"tools/MutOSim/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25560160154","text":"# if :\n # Kjør koden her hvis betingelsen er oppfyllt\n# elif :\n # Kjør koden her hvis betingelsen er oppfyllt (og IKKE den i den første if'en er oppfylt)\n# else:\n # Kjør denne koden hvis betingelsen ikke er oppfyllt\n\n\nnumber = 3\nif number > 0:\n print(f\"{number} is a positive number.\")\nelif number < 0:\n print(f\"{number} is a negative number.\")\nelif number < 0:\n print(f\"{number} is a negative number.\")\nelse:\n print(f\"{number} is zero.\")\n\nprint(f\"End of program?\")\n\n\ndolphins_sleep_with_one_eye_open = True\n\nif dolphins_sleep_with_one_eye_open:\n print(\"\\nIt is true, dolphins DO sleep with one eye open.\")\nelse:\n print(\"\\nIt was a lie, dolphins don't sleep at all!\")\n\n\nx = 50\ny = 10\nis_x_bigger_than_y = y < x\nif is_x_bigger_than_y:\n print(f\"\\nx ({x}) is bigger than y ({y}), let's fix that! Over 9000!\")\n y += 9001\nelse:\n print(f\"\\nx ({x}) is smaller than y ({y}), everything is OK!\")\n\n\nfirst_name = \"Lars Emil\"\nanother_first_name = \"lars emil\"\n\nif first_name.lower() == another_first_name.lower():\n print(f\"\\nThe names {first_name} and {another_first_name} are equal\\n\")\nelse:\n print(f\"\\nThe names {first_name} and {another_first_name} are NOT equal\\n\")\n\n\nnumber = \"ten\"\nif number == \"ten\":\n number = 10\nprint(number)\n\n\nprint(number < 9000 and number != 42)\n\nnumber = 42\nprint(number < 9000 and number != 42)\n\nprint(\"Over 9000\" if number > 9000 else \"Under 9000\")\n\na, b = 5, 10\n\nsmallest = a if a < b else b\nprint(smallest)","repo_name":"larseknu/programmering1_h2021","sub_path":"Forelesning 04 - if-statements/if_statements.py","file_name":"if_statements.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"10876622906","text":"from copy import deepcopy\n\nclass Arguments(object):\n \"\"\"\n Unifies the access to both keywords and simple/positional arguments.\n\n Takes the arguments and keyword arguments and makes them all available through dot notation.\n One can access simple/positional arguments as ``args.arg__0`` for the first argument,\n or generally ``args.arg__n``.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.__args = args\n self.__kwargs = kwargs\n\n self.__unify_args()\n self.__unify_kwargs()\n\n self.all_args = deepcopy(self.__dict__)\n\n def __unify_args(self):\n for idx, value in enumerate(self.__args):\n setattr(self, \"arg__{}\".format(idx), value)\n \n del self.__args\n\n def __unify_kwargs(self):\n for name, value in self.__kwargs.items():\n setattr(self, name, value)\n\n del self.__kwargs\n","repo_name":"AlexandruBurlacu/pycontracts","sub_path":"pycontracts/arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"4866748570","text":"\"\"\"\nCreated by: louisenaud on 5/5/22 at 12:57 PM for python_toold.\n\"\"\"\nimport glob\n\nfrom PIL import Image\n\n\ndef make_gif(frame_folder):\n frame_list = sorted(glob.glob(f\"{frame_folder}/*.jpg\"))\n frames = [Image.open(image) for image in frame_list]\n frame_one = frames[0]\n frame_one.save(\"my_awesome.gif\", format=\"GIF\", append_images=frames,\n save_all=True, duration=100, loop=0)\n\n\nif __name__ == \"__main__\":\n make_gif(\"./frames\")","repo_name":"louisenaud/python_tools","sub_path":"create_gif.py","file_name":"create_gif.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10341509363","text":"import os\n\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing import image\n\n\nclass CustomCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if logs.get('accuracy') > 0.999:\n print(\"\\nReached 99.9% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n\ncallbaks = [\n CustomCallback()\n]\n\nmodel = keras.models.Sequential(\n [\n keras.layers.Conv2D(16, (3, 3), input_shape=(150, 150, 3), activation=keras.activations.relu),\n keras.layers.MaxPooling2D((2, 2)),\n keras.layers.Conv2D(32, (3, 3), activation=keras.activations.relu),\n keras.layers.MaxPooling2D((2, 2)),\n keras.layers.Conv2D(64, (3, 3), activation=keras.activations.relu),\n keras.layers.MaxPooling2D((2, 2)),\n keras.layers.Conv2D(64, (3, 3), activation=keras.activations.relu),\n keras.layers.MaxPooling2D((2, 2)),\n keras.layers.Flatten(),\n keras.layers.Dense(512, activation=keras.activations.relu),\n keras.layers.Dense(1, activation=keras.activations.sigmoid)\n ]\n)\n\nmodel.compile(\n loss=keras.losses.BinaryCrossentropy(),\n optimizer=keras.optimizers.RMSprop(learning_rate=0.001),\n metrics=['accuracy']\n)\n\ntrain_datagen = image.ImageDataGenerator(rescale=1/255)\ntrain_generator = train_datagen.flow_from_directory(\n directory=os.path.join(os.getcwd(), 'datasets'),\n target_size=(150, 150),\n batch_size=20,\n class_mode='binary'\n)\n\nhistory = model.fit(\n train_generator,\n epochs=50,\n steps_per_epoch=2,\n verbose=2,\n callbacks=callbaks\n)","repo_name":"german-mesa/template","sub_path":"src/introduction/Exercise_Happy_or_Sad_CNN.py","file_name":"Exercise_Happy_or_Sad_CNN.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14246334717","text":"import os\nimport sys\nimport time\nimport math\nimport traceback\n\nimport maya.mel as mel\nimport maya.cmds as cmds\n\nRS2RPR_CONVERTER_VERSION = \"3.0.0\"\n\n# log functions\ndef write_converted_property_log(rpr_name, rs_name, rpr_attr, rs_attr):\n\ttry:\n\t\tfile_path = cmds.file(q=True, sceneName=True) + \".log\"\n\t\twith open(file_path, 'a') as f:\n\t\t\tf.write(u\" property {}.{} is converted to {}.{} \\r\\n\".format(rs_name, rs_attr, rpr_name, rpr_attr).encode('utf-8'))\n\texcept:\n\t\tpass\n\n\ndef write_own_property_log(text):\n\ttry:\n\t\tfile_path = cmds.file(q=True, sceneName=True) + \".log\"\n\t\twith open(file_path, 'a') as f:\n\t\t\tf.write(\" {} \\r\\n\".format(text))\n\texcept:\n\t\tpass\n\n\ndef start_log(rs, rpr):\n\ttry:\n\t\ttext = u\"Found node: \\r\\n name: {} \\r\\n\".format(rs).encode('utf-8')\n\t\ttext += \"type: {} \\r\\n\".format(cmds.objectType(rs))\n\t\ttext += u\"Converting to: \\r\\n name: {} \\r\\n\".format(rpr).encode('utf-8')\n\t\ttext += \"type: {} \\r\\n\".format(cmds.objectType(rpr))\n\t\ttext += \"Conversion details: \\r\\n\"\n\n\t\tfile_path = cmds.file(q=True, sceneName=True) + \".log\"\n\t\twith open(file_path, 'a') as f:\n\t\t\tf.write(text)\n\texcept:\n\t\tpass\n\t\n\ndef end_log(rs):\n\ttry:\n\t\ttext = u\"Conversion of {} is finished.\\n\\n \\r\\n\".format(rs).encode('utf-8')\n\n\t\tfile_path = cmds.file(q=True, sceneName=True) + \".log\"\n\t\twith open(file_path, 'a') as f:\n\t\t\tf.write(text)\n\texcept:\n\t\tpass\n\t\t\n\ndef validateStringType(text):\n\tif sys.version_info.major == 3:\n\t\treturn type(text) == str\n\telse:\n\t\treturn type(text) == unicode\n\n\n# additional fucntions\ndef copyProperty(rpr_name, conv_name, rpr_attr, conv_attr):\n\n\t# full name of attribute\n\tconv_field = conv_name + \".\" + conv_attr\n\trpr_field = rpr_name + \".\" + rpr_attr\n\trs_type = type(getProperty(conv_name, conv_attr))\n\trpr_type = type(getProperty(rpr_name, rpr_attr))\n\n\ttry:\n\t\tlistConnections = cmds.listConnections(conv_field)\n\t\t# connection convert\n\t\tif listConnections and cmds.objectType(listConnections[0]) not in (\"transform\", \"RedshiftBumpBlender\"):\n\t\t\tobj, channel = cmds.connectionInfo(conv_field, sourceFromDestination=True).split('.')\n\t\t\tsource_name, source_attr = convertMaterial(obj, channel).split('.')\n\t\t\tconnectProperty(source_name, source_attr, rpr_name, rpr_attr)\n\t\t# complex color conversion for each channel (RGB/XYZ/HSV)\n\t\telif not listConnections and rpr_type == rs_type == tuple:\n\n\t\t\t# RGB \n\t\t\tif cmds.objExists(conv_field + \"R\") and cmds.objExists(rpr_field + \"R\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"R\", conv_attr + \"R\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"G\", conv_attr + \"G\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"B\", conv_attr + \"B\")\n\t\t\telif cmds.objExists(conv_field + \"R\") and cmds.objExists(rpr_field + \"X\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"X\", conv_attr + \"R\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Y\", conv_attr + \"G\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Z\", conv_attr + \"B\")\n\t\t\telif cmds.objExists(conv_field + \"R\") and cmds.objExists(rpr_field + \"H\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"H\", conv_attr + \"R\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"S\", conv_attr + \"G\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"V\", conv_attr + \"B\")\n\t\t\t# XYZ \n\t\t\telif cmds.objExists(conv_field + \"X\") and cmds.objExists(rpr_field + \"R\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"R\", conv_attr + \"X\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"G\", conv_attr + \"Y\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"B\", conv_attr + \"Z\")\n\t\t\telif cmds.objExists(conv_field + \"X\") and cmds.objExists(rpr_field + \"X\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"X\", conv_attr + \"X\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Y\", conv_attr + \"Y\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Z\", conv_attr + \"Z\")\n\t\t\telif cmds.objExists(conv_field + \"X\") and cmds.objExists(rpr_field + \"H\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"H\", conv_attr + \"X\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"S\", conv_attr + \"Y\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"V\", conv_attr + \"Z\")\n\t\t\t# HSV \n\t\t\telif cmds.objExists(conv_field + \"H\") and cmds.objExists(rpr_field + \"R\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"R\", conv_attr + \"H\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"G\", conv_attr + \"S\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"B\", conv_attr + \"V\")\n\t\t\telif cmds.objExists(conv_field + \"H\") and cmds.objExists(rpr_field + \"X\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"X\", conv_attr + \"H\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Y\", conv_attr + \"S\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"Z\", conv_attr + \"V\")\n\t\t\telif cmds.objExists(conv_field + \"H\") and cmds.objExists(rpr_field + \"H\"):\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"H\", conv_attr + \"H\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"S\", conv_attr + \"S\")\n\t\t\t\tcopyProperty(rpr_name, conv_name, rpr_attr + \"V\", conv_attr + \"V\")\n\t\t\telse:\n\t\t\t\tprint(\"[ERROR] Failed to find right variant for {}.{} conversion\".format(conv_name, conv_attr))\n\n\t\t# field conversion\n\t\telse:\n\t\t\tif rs_type == rpr_type or validateStringType(rs_type):\n\t\t\t\tsetProperty(rpr_name, rpr_attr, getProperty(conv_name, conv_attr))\n\t\t\telif rs_type == tuple and rpr_type == float:\n\t\t\t\tif cmds.objExists(conv_field + \"R\"):\n\t\t\t\t\tconv_attr += \"R\"\n\t\t\t\telif cmds.objExists(conv_field + \"X\"):\n\t\t\t\t\tconv_attr += \"X\"\n\t\t\t\telif cmds.objExists(conv_field + \"H\"):\n\t\t\t\t\tconv_attr += \"H\"\n\t\t\t\tsetProperty(rpr_name, rpr_attr, getProperty(conv_name, conv_attr))\n\t\t\telif rs_type == float and rpr_type == tuple:\n\t\t\t\tif cmds.objExists(rpr_field + \"R\"):\n\t\t\t\t\trpr_attr1 = rpr_attr + \"R\"\n\t\t\t\t\trpr_attr2 = rpr_attr + \"G\"\n\t\t\t\t\trpr_attr3 = rpr_attr + \"B\"\n\t\t\t\telif cmds.objExists(rpr_field + \"X\"):\n\t\t\t\t\trpr_attr1 = rpr_attr + \"X\"\n\t\t\t\t\trpr_attr2 = rpr_attr + \"Y\"\n\t\t\t\t\trpr_attr3 = rpr_attr + \"Z\"\n\t\t\t\telif cmds.objExists(conv_field + \"H\"):\n\t\t\t\t\trpr_attr1 = rpr_attr + \"H\"\n\t\t\t\t\trpr_attr2 = rpr_attr + \"S\"\n\t\t\t\t\trpr_attr3 = rpr_attr + \"V\"\n\t\t\t\tsetProperty(rpr_name, rpr_attr1, getProperty(conv_name, conv_attr))\n\t\t\t\tsetProperty(rpr_name, rpr_attr2, getProperty(conv_name, conv_attr))\n\t\t\t\tsetProperty(rpr_name, rpr_attr3, getProperty(conv_name, conv_attr))\n\n\t\t\twrite_converted_property_log(rpr_name, conv_name, rpr_attr, conv_attr)\n\texcept Exception as ex:\n\t\ttraceback.print_exc()\n\t\tprint(u\"[ERROR] Failed to copy parameters from {} to {}\".format(conv_field, rpr_field).encode('utf-8'))\n\t\twrite_own_property_log(u\"[ERROR] Failed to copy parameters from {} to {}\".format(conv_field, rpr_field).encode('utf-8'))\n\n\ndef setProperty(rpr_name, rpr_attr, value):\n\n\t# full name of attribute\n\trpr_field = rpr_name + \".\" + rpr_attr\n\n\ttry:\n\t\t# break existed connection\n\t\tif not mapDoesNotExist(rpr_name, rpr_attr):\n\t\t\tsource = cmds.connectionInfo(rpr_field, sourceFromDestination=True)\n\t\t\tcmds.disconnectAttr(source, rpr_field)\n\n\t\tif type(value) == tuple:\n\t\t\tcmds.setAttr(rpr_field, value[0], value[1], value[2])\n\t\telif type(value) == str or validateStringType(value):\n\t\t\tcmds.setAttr(rpr_field, value, type=\"string\")\n\t\telse:\n\t\t\tcmds.setAttr(rpr_field, value)\n\t\twrite_own_property_log(u\"Set value {} to {}.\".format(value, rpr_field).encode('utf-8'))\n\texcept Exception as ex:\n\t\ttraceback.print_exc()\n\t\tprint(u\"[ERROR] Set value {} to {} is failed. Check the values and their boundaries. \".format(value, rpr_field).encode('utf-8'))\n\t\twrite_own_property_log(u\"[ERROR] Set value {} to {} is failed. Check the values and their boundaries. \".format(value, rpr_field).encode('utf-8'))\n\n\ndef getProperty(material, attr, size=False):\n\n\t# full name of attribute\n\tfield = material + \".\" + attr\n\ttry:\n\n\t\tif size:\n\t\t\tvalue = cmds.getAttr(field, size=True)\n\t\telse:\n\t\t\tvalue = cmds.getAttr(field)\n\t\t\t# used for color. it has [(),(),()] structure.\n\t\t\tif type(value) == list:\n\t\t\t\tvalue = value[0]\n\texcept Exception as ex:\n\t\tprint(u\"[ERROR] Failed to get information about {} field in {} node.\".format(attr, material).encode('utf-8'))\n\t\twrite_own_property_log(u\"[ERROR] Failed to get information about {} field in {} node.\".format(attr, material).encode('utf-8'))\n\t\treturn\n\n\treturn value\n\n\ndef mapDoesNotExist(rs_name, rs_attr):\n\n\t# full name of attribute\n\trs_field = rs_name + \".\" + rs_attr\n\n\ttry:\n\t\tif cmds.listConnections(rs_field):\n\t\t\treturn 0\n\t\telif cmds.objExists(rs_field + \"R\"):\n\t\t\tif cmds.listConnections(rs_field + \"R\") or cmds.listConnections(rs_field + \"G\") or cmds.listConnections(rs_field + \"B\"):\n\t\t\t\treturn 0\n\t\telif cmds.objExists(rs_field + \"X\"):\n\t\t\tif cmds.listConnections(rs_field + \"X\") or cmds.listConnections(rs_field + \"Y\") or cmds.listConnections(rs_field + \"Z\"):\n\t\t\t\treturn 0\n\t\telif cmds.objExists(rs_field + \"H\"):\n\t\t\tif cmds.listConnections(rs_field + \"H\") or cmds.listConnections(rs_field + \"S\")\tor cmds.listConnections(rs_field + \"V\"):\n\t\t\t\treturn 0\n\texcept Exception as ex:\n\t\ttraceback.print_exc()\n\t\tprint(u\"[ERROR] There is no {} field in this node. Check the field and try again. \".format(rs_field).encode('utf-8'))\n\t\twrite_own_property_log(u\"[ERROR] There is no {} field in this node. Check the field and try again. \".format(rs_field).encode('utf-8'))\n\t\treturn\n\n\treturn 1\n\n\ndef connectProperty(source_name, source_attr, rpr_name, rpr_attr):\n\n\t# full name of attribute\n\tsource = source_name + \".\" + source_attr\n\trpr_field = rpr_name + \".\" + rpr_attr\n\n\ttry:\n\t\tsource_type = type(getProperty(source_name, source_attr))\n\t\tdest_type = type(getProperty(rpr_name, rpr_attr))\n\n\t\tif rpr_attr in (\"surfaceShader\", \"volumeShader\"):\n\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\n\t\telif cmds.objExists(source_name + \".outAlpha\") and cmds.objExists(source_name + \".outColor\"):\n\n\t\t\tif cmds.objectType(source_name) == \"file\":\n\t\t\t\tsetProperty(source_name, \"ignoreColorSpaceFileRules\", 1)\n\n\t\t\tif source_type == dest_type:\n\t\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\t\t\telif source_type == tuple and dest_type == float:\n\t\t\t\tsource = source_name + \".outAlpha\"\n\t\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\t\t\telif source_type == float and dest_type == tuple:\n\t\t\t\tsource = source_name + \".outColor\"\n\t\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\n\t\telse:\n\t\t\tif source_type == dest_type:\n\t\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\t\t\telif source_type == tuple and dest_type == float:\n\t\t\t\tif cmds.objExists(source + \"R\"):\n\t\t\t\t\tsource += \"R\"\n\t\t\t\telif cmds.objExists(source + \"X\"):\n\t\t\t\t\tsource += \"X\"\n\t\t\t\telif cmds.objExists(source + \"X\"):\n\t\t\t\t\tsource += \"H\"\n\t\t\t\tcmds.connectAttr(source, rpr_field, force=True)\n\t\t\telif source_type == float and dest_type == tuple:\n\t\t\t\tif cmds.objExists(rpr_field + \"R\"):\n\t\t\t\t\trpr_field1 = rpr_field + \"R\"\n\t\t\t\t\trpr_field2 = rpr_field + \"G\"\n\t\t\t\t\trpr_field3 = rpr_field + \"B\"\n\t\t\t\telif cmds.objExists(rpr_field + \"X\"):\n\t\t\t\t\trpr_field1 = rpr_field + \"X\"\n\t\t\t\t\trpr_field2 = rpr_field + \"Y\"\n\t\t\t\t\trpr_field3 = rpr_field + \"Z\"\n\t\t\t\telif cmds.objExists(rpr_field + \"H\"):\n\t\t\t\t\trpr_field1 = rpr_field + \"H\"\n\t\t\t\t\trpr_field2 = rpr_field + \"S\"\n\t\t\t\t\trpr_field3 = rpr_field + \"V\"\n\t\t\t\tcmds.connectAttr(source, rpr_field1, force=True)\n\t\t\t\tcmds.connectAttr(source, rpr_field2, force=True)\n\t\t\t\tcmds.connectAttr(source, rpr_field3, force=True)\n\t\twrite_own_property_log(u\"Created connection from {} to {}.\".format(source, rpr_field).encode('utf-8'))\n\texcept Exception as ex:\n\t\ttraceback.print_exc()\n\t\tprint(u\"[ERROR] Connection {} to {} is failed.\".format(source, rpr_field).encode('utf-8'))\n\t\twrite_own_property_log(u\"[ERROR] Connection {} to {} is failed.\".format(source, rpr_field).encode('utf-8'))\n\n\ndef invertValue(rpr_name, conv_name, rpr_attr, conv_attr):\n\tconnection = cmds.listConnections(conv_name + \".\" + conv_attr)\n\tif connection and cmds.objectType(connection[0]) == \"reverse\":\n\t\tif mapDoesNotExist(connection[0], \"input\"):\n\t\t\tsetProperty(rpr_name, rpr_attr, getProperty(connection[0], \"input\"))\n\t\telse:\n\t\t\tif cmds.listConnections(connection[0] + \".input\"):\n\t\t\t\tcopyProperty(rpr_name, connection[0], rpr_attr, \"input\")\n\t\t\telif cmds.listConnections(connection[0] + \".inputX\"):\n\t\t\t\tcopyProperty(rpr_name, connection[0], rpr_attr, \"inputX\")\n\t\t\telif cmds.listConnections(connection[0] + \".inputY\"):\n\t\t\t\tcopyProperty(rpr_name, connection[0], rpr_attr, \"inputY\")\n\t\t\telif cmds.listConnections(connection[0] + \".inputZ\"):\n\t\t\t\tcopyProperty(rpr_name, connection[0], rpr_attr, \"inputZ\")\n\telif connection:\n\t\treverse_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\treverse_arith = cmds.rename(reverse_arith, \"Reverse_arithmetic\")\n\t\tsetProperty(reverse_arith, \"operation\", 1)\n\t\tsetProperty(reverse_arith, \"inputA\", (1, 1, 1))\n\t\tcopyProperty(reverse_arith, conv_name, \"inputB\", conv_attr)\n\t\tconnectProperty(reverse_arith, \"out\", rpr_name, rpr_attr)\n\telse:\n\t\tconv_value = getProperty(conv_name, conv_attr)\n\t\tif type(conv_value) == float:\n\t\t\tsetProperty(rpr_name, rpr_attr, 1 - conv_value)\n\t\telif type(conv_value) == tuple:\n\t\t\tsetProperty(rpr_name, rpr_attr, (1 - conv_value[0], 1 - conv_value[1], 1 - conv_value[2]))\n\n\t\n# displacement conversion\ndef convertDisplacement(displacement, displacement_file, rs_material, rpr_material):\n\n\t# get all shapes\n\tcmds.hyperShade(objects=rs_material)\n\tshapes = cmds.ls(sl=True)\n\n\tif len(shapes) > 1:\n\t\tfor shape in shapes:\n\t\t\trsEnableSubdivision = getProperty(shape, \"rsEnableSubdivision\")\n\t\t\trsEnableDisplacement = getProperty(shape, \"rsEnableDisplacement\")\n\t\t\tfeatureDisplacement = getProperty(shape, \"featureDisplacement\")\n\t\t\tif (rsEnableSubdivision and rsEnableDisplacement) or featureDisplacement: \n\t\t\t\trprMaterial = convertMaterial(rs_material, \"displacement_copy\")\n\t\t\t\trpr_sg = cmds.listConnections(rprMaterial, type=\"shadingEngine\")[0]\n\n\t\t\t\tcmds.select(cl=True)\n\t\t\t\tcmds.select(shape, r=True)\n\t\t\t\tcmds.sets(forceElement=rpr_sg)\n\n\t\t\t\tsetProperty(rprMaterial, \"displacementEnable\", 1)\n\t\t\t\tconnectProperty(displacement_file, \"outColor\", rprMaterial, \"displacementMap\")\n\n\t\t\t\tif featureDisplacement:\n\t\t\t\t\tcopyProperty(rprMaterial, shape, \"displacementSubdiv\", \"renderSmoothLevel\")\n\t\t\t\telse:\n\t\t\t\t\trsMaxTessellationSubdivs = getProperty(shape, \"rsMaxTessellationSubdivs\")\n\t\t\t\t\tif rsMaxTessellationSubdivs > 7:\n\t\t\t\t\t\trsMaxTessellationSubdivs = 7\n\t\t\t\t\tsetProperty(rprMaterial, \"displacementSubdiv\", rsMaxTessellationSubdivs)\n\n\t\t\t\t\tosdVertBoundary = getProperty(shape, \"osdVertBoundary\")\n\t\t\t\t\tdisplacementBoundary = remap_value(osdVertBoundary, 2, 1, 1, 0)\n\t\t\t\t\tsetProperty(rprMaterial, \"displacementBoundary\", displacementBoundary)\n\n\t\t\t\t\tdisplacementMax = getProperty(shape, \"rsDisplacementScale\") * getProperty(displacement, \"scale\")\n\t\t\t\t\tsetProperty(rprMaterial, \"displacementMax\", displacementMax)\n\n\telse:\n\t\tsetProperty(rpr_material, \"displacementEnable\", 1)\n\t\tconnectProperty(displacement_file, \"outColor\", rpr_material, \"displacementMap\")\n\t\tcopyProperty(rpr_material, displacement, \"displacementMax\", \"scale\")\n\n\t\trsEnableSubdivision = getProperty(shapes[0], \"rsEnableSubdivision\")\n\t\trsEnableDisplacement = getProperty(shapes[0], \"rsEnableDisplacement\")\n\t\tif rsEnableSubdivision and rsEnableDisplacement: \n\t\t\tcopyProperty(rpr_material, shapes[0], \"displacementSubdiv\", \"rsMaxTessellationSubdivs\")\n\n\ndef convertbump2d(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\tbumpConnect = cmds.listConnections(rs + \".bumpValue\")\n\t\tif bumpConnect:\n\t\t\tinput_type = cmds.objectType(bumpConnect[0])\n\t\t\tif input_type == \"RedshiftRoundCorners\":\n\t\t\t\trpr = convertUnsupportedNode(rs, source, \"_UNSUPPORTED_BUMP\")\n\t\t\t\treturn rpr\n\t\telse:\n\t\t\trpr = convertUnsupportedNode(rs, source, \"_UNSUPPORTED_BUMP\")\n\t\t\treturn rpr\n\n\t\tbump_type = getProperty(rs, \"bumpInterp\")\n\t\tif not bump_type:\n\t\t\trpr = cmds.shadingNode(\"RPRBump\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\t\telse:\n\t\t\trpr = cmds.shadingNode(\"RPRNormal\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"color\", \"bumpValue\")\n\t\tcopyProperty(rpr, rs, \"strength\", \"bumpDepth\")\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outNormal\": \"out\",\n\t\t\"outNormalX\": \"outX\",\n\t\t\"outNormalY\": \"outY\",\n\t\t\"outNormalZ\": \"outZ\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertBlendColors(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"inputA\", \"color1\")\n\t\tcopyProperty(rpr, rs, \"inputB\", \"color2\")\n\t\tcopyProperty(rpr, rs, \"weight\", \"blender\")\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"output\": \"out\",\n\t\t\"outputR\": \"outR\",\n\t\t\"outputG\": \"outG\",\n\t\t\"outputB\": \"outB\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertLuminance(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"inputA\", \"value\")\n\t\tsetProperty(rpr, \"inputB\", (0, 0, 0))\n\t\tsetProperty(rpr, \"operation\", 19)\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outValue\": \"outX\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertColorComposite(rs, source):\n\n\toperation = getProperty(rs, \"operation\")\n\tif operation == 2:\n\t\tif cmds.objExists(rs + \"_rpr\"):\n\t\t\trpr = rs + \"_rpr\"\n\t\telse:\n\t\t\trpr = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\tcopyProperty(rpr, rs, \"weight\", \"factor\")\n\t\t\t\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\tconversion_map = {\n\t\t\t\"outAlpha\": \"outR\"\n\t\t}\n\n\t\trpr += \".\" + conversion_map[source]\n\t\treturn rpr\n\n\telse:\n\n\t\tif cmds.objExists(rs + \"_rpr\"):\n\t\t\trpr = rs + \"_rpr\"\n\t\telse:\n\t\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tif operation in (0, 4, 5):\n\t\t\t\tsetProperty(rpr, \"operation\", 0)\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\telif operation == 1:\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tif mapDoesNotExist(rs, \"alphaA\") and mapDoesNotExist(rs, \"alphaB\"):\n\t\t\t\t\t\talphaA = getProperty(rs, alphaA)\n\t\t\t\t\t\talphaB = getProperty(rs, alphaB)\n\t\t\t\t\t\tif alphaA > alphaB:\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaB\")\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaA\")\n\t\t\t\t\telif mapDoesNotExist(rs, \"alphaA\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\t\telif mapDoesNotExist(rs, \"alphaB\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaB\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaA\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\telse:\n\t\t\t\t\tif mapDoesNotExist(rs, \"colorA\") and mapDoesNotExist(rs, \"colorB\"):\n\t\t\t\t\t\tcolorA = getProperty(rs, alphaA)\n\t\t\t\t\t\tcolorB = getProperty(rs, colorB)\n\t\t\t\t\t\tif colorA[0] > colorB[0] or colorA[1] > colorB[1] or colorA[2] > colorB[2]:\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorB\")\n\t\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorA\")\n\t\t\t\t\telif mapDoesNotExist(rs, \"colorA\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\t\t\telif mapDoesNotExist(rs, \"colorB\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorB\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorA\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\telif operation == 3:\n\t\t\t\tsetProperty(rpr, \"operation\", 2)\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\telif operation == 6:\n\t\t\t\tsetProperty(rpr, \"operation\", 1)\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tif mapDoesNotExist(rs, \"alphaA\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaB\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\telse:\n\t\t\t\t\tif mapDoesNotExist(rs, \"alphaA\"):\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorB\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\t\t\telif operation == 7:\n\t\t\t\tsetProperty(rpr, \"operation\", 25)\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaB\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaA\")\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorB\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorA\")\n\t\t\telif operation == 8:\n\t\t\t\tsetProperty(rpr, \"operation\", 20)\n\t\t\t\tif source == \"outAlpha\":\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"alphaA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"alphaB\")\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputA\", \"colorA\")\n\t\t\t\t\tcopyProperty(rpr, rs, \"inputB\", \"colorB\")\n\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\tconversion_map = {\n\t\t\t\"outAlpha\": \"outX\",\n\t\t\t\"outColor\": \"out\",\n\t\t\t\"outColorR\": \"outX\",\n\t\t\t\"outColorG\": \"outY\",\n\t\t\t\"outColorB\": \"outZ\"\n\t\t}\n\n\t\trpr += \".\" + conversion_map[source]\n\t\treturn rpr\n\n\ndef convertReverse(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tsetProperty(rpr, \"inputA\", (1, 1, 1))\n\t\tcopyProperty(rpr, rs, \"inputB\", \"input\")\n\t\tsetProperty(rpr, \"operation\", 1)\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"output\": \"out\",\n\t\t\"outputX\": \"outX\",\n\t\t\"outputY\": \"outY\",\n\t\t\"outputZ\": \"outZ\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertPreMultiply(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"inputA\", \"inColor\")\n\t\talpha = getProperty(rs, \"inAlpha\")\n\t\tsetProperty(rpr, \"inputB\", (alpha, alpha, alpha))\n\t\tsetProperty(rpr, \"operation\", 2)\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outAlpha\": \"outX\",\n\t\t\"outColor\": \"out\",\n\t\t\"outColorR\": \"outX\",\n\t\t\"outColorG\": \"outY\",\n\t\t\"outColorB\": \"outZ\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertVectorProduct(rs, source):\n\n\toperation = getProperty(rs, \"operation\")\n\tif operation in (1, 2):\n\t\tif cmds.objExists(rs + \"_rpr\"):\n\t\t\trpr = rs + \"_rpr\"\n\t\telse:\n\t\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tif operation == 1:\n\t\t\t\tsetProperty(rpr, \"operation\", 11)\n\t\t\telif operation == 2:\n\t\t\t\tsetProperty(rpr, \"operation\", 12)\n\n\t\t\tcopyProperty(rpr, rs, \"inputA\", \"input1\")\n\t\t\tcopyProperty(rpr, rs, \"inputB\", \"input2\")\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\tconversion_map = {\n\t\t\t\"output\": \"out\",\n\t\t\t\"outputX\": \"outX\",\n\t\t\t\"outputY\": \"outY\",\n\t\t\t\"outputZ\": \"outZ\"\n\t\t}\n\n\t\trpr += \".\" + conversion_map[source]\n\t\treturn rpr\n\telse:\n\t\trs += \".\" + source\n\t\treturn rs\n\n\ndef convertChannels(rs, source):\n\n\tif \"outColor\" in source:\n\n\t\tif cmds.objExists(rs + \"_color_rpr\"):\n\t\t\trpr = rs + \"_color_rpr\"\n\t\telse:\n\n\t\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_color_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tcopyProperty(rpr, rs, \"inputA\", \"inColor\")\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\tconversion_map = {\n\t\t\t\"outColor\": \"out\",\n\t\t\t\"outColorR\": \"outX\",\n\t\t\t\"outColorG\": \"outY\",\n\t\t\t\"outColorB\": \"outZ\"\n\t\t}\n\n\t\trpr += \".\" + conversion_map[source]\n\t\treturn rpr\n\n\telif \"outAlpha\" in source:\n\n\t\tif cmds.objExists(rs + \"_alpha_rpr\"):\n\t\t\trpr = rs + \"_alpha_rpr\"\n\t\telse:\n\n\t\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_alpha_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tcopyProperty(rpr, rs, \"inputA\", \"inAlpha\")\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\tconversion_map = {\n\t\t\t\"outAlpha\": \"outX\"\n\t\t}\n\n\t\trpr += \".\" + conversion_map[source]\n\t\treturn rpr\n\n\ndef convertmultiplyDivide(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\toperation = getProperty(rs, \"operation\")\n\t\toperation_map = {\n\t\t\t1: 2,\n\t\t\t2: 3,\n\t\t\t3: 15\n\t\t}\n\t\tsetProperty(rpr, \"operation\", operation_map[operation])\n\t\tcopyProperty(rpr, rs, \"inputA\", \"input1\")\n\t\tcopyProperty(rpr, rs, \"inputB\", \"input2\")\n\t\t\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"output\": \"out\",\n\t\t\"outputX\": \"outX\",\n\t\t\"outputY\": \"outY\",\n\t\t\"outputZ\": \"outZ\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\n# re-convert is not fully supported for this node (only scale field)\ndef convertRedshiftNormalMap(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRNormal\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\t\tfile = cmds.shadingNode(\"file\", asTexture=True, isColorManaged=True)\n\t\ttexture = cmds.shadingNode(\"place2dTexture\", asUtility=True)\n\n\t\tconnectProperty(texture, \"coverage\", file, \"coverage\")\n\t\tconnectProperty(texture, \"translateFrame\", file, \"translateFrame\")\n\t\tconnectProperty(texture, \"rotateFrame\", file, \"rotateFrame\")\n\t\tconnectProperty(texture, \"mirrorU\", file, \"mirrorU\")\n\t\tconnectProperty(texture, \"mirrorV\", file, \"mirrorV\")\n\t\tconnectProperty(texture, \"stagger\", file, \"stagger\")\n\t\tconnectProperty(texture, \"wrapU\", file, \"wrapU\")\n\t\tconnectProperty(texture, \"wrapV\", file, \"wrapV\")\n\t\tconnectProperty(texture, \"repeatUV\", file, \"repeatUV\")\n\t\tconnectProperty(texture, \"offset\", file, \"offset\")\n\t\tconnectProperty(texture, \"rotateUV\", file, \"rotateUV\")\n\t\tconnectProperty(texture, \"noiseUV\", file, \"noiseUV\")\n\t\tconnectProperty(texture, \"vertexUvOne\", file, \"vertexUvOne\")\n\t\tconnectProperty(texture, \"vertexUvTwo\", file, \"vertexUvTwo\")\n\t\tconnectProperty(texture, \"vertexUvThree\", file, \"vertexUvThree\")\n\t\tconnectProperty(texture, \"vertexCameraOne\", file, \"vertexCameraOne\")\n\t\tconnectProperty(texture, \"outUV\", file, \"uv\")\n\t\tconnectProperty(texture, \"outUvFilterSize\", file, \"uvFilterSize\")\n\t\tcopyProperty(texture, rs, \"repeatU\", \"repeats0\")\n\t\tcopyProperty(texture, rs, \"repeatV\", \"repeats1\")\n\n\t\tif getProperty(rs, \"flipY\"):\n\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic, \"inputA\", (1, 1, 1))\n\t\t\tsetProperty(arithmetic, \"operation\", 1)\n\t\t\tconnectProperty(file, \"outColorG\", arithmetic, \"inputBY\")\n\t\t\tconnectProperty(arithmetic, \"outY\", rpr, \"colorG\")\n\t\t\tconnectProperty(file, \"outColorR\", rpr, \"colorR\")\n\t\t\tconnectProperty(file, \"outColorB\", rpr, \"colorB\")\n\t\telse:\n\t\t\tconnectProperty(file, \"outColor\", rpr, \"color\")\n\n\t\tsetProperty(file, \"colorSpace\", \"Raw\")\n\t\tsetProperty(file, \"fileTextureName\", getProperty(rs, \"tex0\"))\n\t\t\t\n\t\t# Logging to file (start)\n\t\tstart_log(rs, rpr)\n\n\t\tcopyProperty(rpr, rs, \"strength\", \"scale\")\n\n\t\t# Logging to file (end)\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outDisplacementVector\": \"out\",\n\t\t\"outDisplacementVectorR\": \"outR\",\n\t\t\"outDisplacementVectorG\": \"outG\",\n\t\t\"outDisplacementVectorB\": \"outB\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertRedshiftNoise(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\tnoiseType = getProperty(rs, \"noise_type\")\n\t\t\n\t\tif noiseType == 0:\n\t\t\trpr = cmds.shadingNode(\"simplexNoise\", asUtility=True)\n\t\telif noiseType == 2:\n\t\t\trpr = cmds.shadingNode(\"fractal\", asUtility=True)\n\t\telif noiseType == 3:\n\t\t\trpr = cmds.shadingNode(\"noise\", asUtility=True)\n\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\ttexture = cmds.shadingNode(\"place2dTexture\", asUtility=True)\n\n\t\tconnectProperty(texture, \"outUV\", rpr, \"uv\")\n\t\tconnectProperty(texture, \"outUvFilterSize\", rpr, \"uvFilterSize\")\n\t\tsetProperty(texture, \"repeatU\", getProperty(rs, \"coord_scale_global\") * getProperty(rs, \"coord_scale0\"))\n\t\tsetProperty(texture, \"repeatV\", getProperty(rs, \"coord_scale_global\") * getProperty(rs, \"coord_scale1\"))\n\t\tcopyProperty(texture, rs, \"offsetU\", \"coord_offset0\")\n\t\tcopyProperty(texture, rs, \"offsetV\", \"coord_offset1\")\n\n\t\t# Logging to file (start)\n\t\tstart_log(rs, rpr)\n\n\t\tsetProperty(rpr, \"amplitude\", getProperty(rs, \"noise_gain\") / 2)\n\n\t\tif noiseType == 0:\n\t\t\tsetProperty(rpr, \"noiseType\", 1)\n\t\t\tcopyProperty(rpr, rs, \"octaves\", \"noise_complexity\")\n\t\t\tcopyProperty(rpr, rs, \"frequency\", \"noise_scale\")\n\t\t\tcopyProperty(rpr, rs, \"distortionU\", \"distort\")\n\t\t\tcopyProperty(rpr, rs, \"distortionV\", \"distort\")\n\t\t\tcopyProperty(rpr, rs, \"distortionRatio\", \"distort_scale\")\n\t\telif noiseType == 2:\n\t\t\tcopyProperty(rpr, rs, \"frequencyRatio\", \"noise_scale\")\n\t\telif noiseType == 3:\n\t\t\tcopyProperty(rpr, rs, \"depthMax\", \"noise_complexity\")\n\t\t\tcopyProperty(rpr, rs, \"frequencyRatio\", \"noise_scale\")\n\n\t\t# Logging to file (end)\n\t\tend_log(rs)\n\n\trpr += \".\" + source\n\treturn rpr\n\n\ndef convertRedshiftAmbientOcclusion(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRAmbientOcclusion\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"unoccludedColor\", \"bright\")\n\t\tcopyProperty(rpr, rs, \"occludedColor\", \"dark\")\n\t\tcopyProperty(rpr, rs, \"radius\", \"spread\")\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outColor\": \"output\",\n\t\t\"outColorR\": \"outputR\",\n\t\t\"outColorG\": \"outputG\",\n\t\t\"outColorB\": \"outputB\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\n# re-convert for ior in unsupported\ndef convertRedshiftFresnel(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\t\t\n\t\tfresnel = cmds.shadingNode(\"RPRFresnel\", asUtility=True)\n\t\tfresnel = cmds.rename(fresnel, rs + \"_rpr\")\n\n\t\tconnectProperty(fresnel, \"out\", rpr, \"weight\")\n\t\tcopyProperty(fresnel, rs, \"ior\", \"ior\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, rs, \"inputA\", \"facing_color\")\n\t\tcopyProperty(rpr, rs, \"inputB\", \"perp_color\")\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tconversion_map = {\n\t\t\"outColor\": \"out\",\n\t\t\"outColorR\": \"outR\",\n\t\t\"outColorG\": \"outG\",\n\t\t\"outColorB\": \"outB\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertRedshiftBumpMap(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\tbumpConnect = cmds.listConnections(rs + \".input\")\n\t\tif bumpConnect:\n\t\t\tinput_type = cmds.objectType(bumpConnect[0])\n\t\t\tif input_type == \"RedshiftRoundCorners\":\n\t\t\t\trpr = convertUnsupportedNode(rs, source, \"_UNSUPPORTED_BUMP\")\n\t\t\t\treturn rpr\n\t\telse:\n\t\t\trpr = convertUnsupportedNode(rs, source, \"_UNSUPPORTED_BUMP\")\n\t\t\treturn rpr\n\n\t\tinputType = getProperty(rs, \"inputType\")\n\t\tif inputType == 0:\n\t\t\trpr = cmds.shadingNode(\"RPRBump\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tsetProperty(rpr, \"strength\", getProperty(rs, \"scale\") * 4)\n\t\t\tcopyProperty(rpr, rs, \"color\", \"input\")\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\telif inputType == 1:\n\t\t\trpr = cmds.shadingNode(\"RPRNormal\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\n\t\t\t# Fields conversion\n\t\t\tcopyProperty(rpr, rs, \"strength\", \"scale\")\n\t\t\tcopyProperty(rpr, rs, \"color\", \"input\")\n\n\t\t\t# Logging to file\n\t\t\tend_log(rs)\n\n\t\telif inputType == 2:\n\t\t\trpr = cmds.shadingNode(\"RPRNormal\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_UNSUPPORTED_NORMAL\")\n\t\t\t# Logging to file\n\t\t\tstart_log(rs, rpr)\n\t\t\tend_log(rs)\n\t\t\n\trpr += \".\" + source\n\treturn rpr\n\n\ndef convertRedshiftColorLayer(rs, source):\n\n\tif cmds.objExists(rs + \"_rpr\"):\n\t\trpr = rs + \"_rpr\"\n\telse:\n\t\tlayer1_blend_mode = getProperty(rs, \"layer1_blend_mode\")\n\t\tif layer1_blend_mode in (2, 3, 4, 15):\n\t\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\t\telse:\n\t\t\trpr = cmds.shadingNode(\"RPRBlendMaterial\", asShader=True)\n\t\t\trpr = cmds.rename(rpr, rs + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(rs, rpr)\n\n\t\t# Fields conversion\n\t\tif cmds.objectType(rpr) == \"RPRArithmetic\":\n\t\t\tconversion_map_operation = {\n\t\t\t\t2: 0,\n\t\t\t\t3: 1,\n\t\t\t\t4: 2,\n\t\t\t\t15: 3\n\t\t\t}\n\t\t\tsetProperty(rpr, \"operation\", conversion_map_operation[layer1_blend_mode])\n\t\t\tcopyProperty(rpr, rs, \"inputA\", \"base_color\")\n\t\t\tcopyProperty(rpr, rs, \"inputB\", \"layer1_color\")\n\t\telse:\n\t\t\tcopyProperty(rpr, rs, \"color0\", \"base_color\")\n\t\t\tcopyProperty(rpr, rs, \"color1\", \"layer1_color\")\n\t\t\tcopyProperty(rpr, rs, \"weight\", \"layer1_mask\")\n\n\t\t# Logging to file\n\t\tend_log(rs)\n\n\tif cmds.objectType(rpr) == \"RPRArithmetic\":\n\t\tconversion_map = {\n\t\t\t\"outColor\": \"out\",\n\t\t\t\"outColorR\": \"outR\",\n\t\t\t\"outColorG\": \"outG\",\n\t\t\t\"outColorB\": \"outB\"\n\t\t}\n\t\tsource = conversion_map[source]\n\n\trpr += \".\" + source\n\treturn rpr\n\n\ndef convertRedshiftBumpBlender(rsMaterial, rs, source):\n\t\n\trprMaterial = convertMaterial(rsMaterial, \"inside_blender_bump_conversion\")\n\trprMaterial = rprMaterial.split(\".\")[0]\n\n\t# Collect all bumps\n\tbump_maps = []\n\n\tif not mapDoesNotExist(rs, \"baseInput\"):\n\t\tbump_maps.append(\"baseInput\")\n\t\n\tif not mapDoesNotExist(rs, \"bumpInput0\"):\n\t\tbump_maps.append(\"bumpInput0\")\n\n\tif not mapDoesNotExist(rs, \"bumpInput1\"):\n\t\tbump_maps.append(\"bumpInput1\")\n\t\n\tif not mapDoesNotExist(rs, \"bumpInput2\"):\n\t\tbump_maps.append(\"bumpInput2\")\n\n\n\tdef activateNormalMap(rprMaterial):\n\t\tsetProperty(rprMaterial, \"normalMapEnable\", 1)\n\t\tsetProperty(rprMaterial, \"useShaderNormal\", 1)\n\t\tsetProperty(rprMaterial, \"reflectUseShaderNormal\", 1)\n\t\tsetProperty(rprMaterial, \"refractUseShaderNormal\", 1)\n\t\tsetProperty(rprMaterial, \"coatUseShaderNormal\", 1)\n\n\n\tif len(bump_maps) == 0:\n\t\treturn rprMaterial\n\n\telif len(bump_maps) == 1:\n\t\tactivateNormalMap(rprMaterial)\n\t\tcopyProperty(rprMaterial, rs, \"normalMap\", bump_maps[0])\n\t\treturn rprMaterial\n\n\telif len(bump_maps) > 1:\n\t\tblend_material = cmds.shadingNode(\"RPRBlendMaterial\", asShader=True)\n\n\t\tactivateNormalMap(rprMaterial)\n\t\tcopyProperty(rprMaterial, rs, \"normalMap\", bump_maps[0])\n\t\tconnectProperty(rprMaterial, \"outColor\", blend_material, \"color0\")\n\n\t\trprMaterial_copy = cmds.duplicate(rprMaterial)[0]\n\t\trprMaterial_copy = cmds.rename(rprMaterial_copy, rprMaterial + \"_copy\")\n\t\tcopyProperty(rprMaterial_copy, rs, \"normalMap\", bump_maps[1])\n\t\tconnectProperty(rprMaterial_copy, \"outColor\", blend_material, \"color1\")\n\t\tcopyProperty(blend_material, rs, \"weight\", bump_maps[1].replace(\"Input\", \"Weight\"))\n\n\t\tif len(bump_maps) > 2:\n\t\t\told_blend_material = blend_material\n\t\t\tblend_material = cmds.shadingNode(\"RPRBlendMaterial\", asShader=True)\n\n\t\t\tconnectProperty(old_blend_material, \"outColor\", blend_material, \"color0\")\n\n\t\t\trprMaterial_copy2 = cmds.duplicate(rprMaterial)[0]\n\t\t\trprMaterial_copy2 = cmds.rename(rprMaterial_copy2, rprMaterial + \"_copy2\")\n\t\t\tcopyProperty(rprMaterial_copy2, rs, \"normalMap\", bump_maps[2])\n\t\t\tconnectProperty(rprMaterial_copy2, \"outColor\", blend_material, \"color1\")\n\t\t\tcopyProperty(blend_material, rs, \"weight\", bump_maps[2].replace(\"Input\", \"Weight\"))\n\n\t\t\tif len(bump_maps) > 3:\n\n\t\t\t\told_blend_material = blend_material\n\t\t\t\tblend_material = cmds.shadingNode(\"RPRBlendMaterial\", asShader=True)\n\n\t\t\t\tconnectProperty(old_blend_material, \"outColor\", blend_material, \"color0\")\n\n\t\t\t\trprMaterial_copy3 = cmds.duplicate(rprMaterial)[0]\n\t\t\t\trprMaterial_copy3 = cmds.rename(rprMaterial_copy3, rprMaterial + \"_copy3\")\n\t\t\t\tcopyProperty(rprMaterial_copy3, rs, \"normalMap\", bump_maps[3])\n\t\t\t\tconnectProperty(rprMaterial_copy3, \"outColor\", blend_material, \"color1\")\n\t\t\t\tcopyProperty(blend_material, rs, \"weight\", bump_maps[3].replace(\"Input\", \"Weight\"))\n\n\t\treturn blend_material\n\n\ndef convertRedshiftUserDataScalar(vr, source):\n\n\tif cmds.objExists(vr + \"_rpr\"):\n\t\trpr = vr + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"floatConstant\", asUtility=True)\n\t\trpr = cmds.rename(rpr, vr + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(vr, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, vr, \"inFloat\", \"default\")\n\t\t\n\t\t# Logging to file\n\t\tend_log(vr)\n\n\tconversion_map = {\n\t\t\"out\": \"outFloat\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertRedshiftUserDataInteger(vr, source):\n\n\tif cmds.objExists(vr + \"_rpr\"):\n\t\trpr = vr + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"floatConstant\", asUtility=True)\n\t\trpr = cmds.rename(rpr, vr + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(vr, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, vr, \"inFloat\", \"default\")\n\t\t\n\t\t# Logging to file\n\t\tend_log(vr)\n\n\tconversion_map = {\n\t\t\"out\": \"outFloat\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\ndef convertRedshiftUserDataColor(vr, source):\n\n\tif cmds.objExists(vr + \"_rpr\"):\n\t\trpr = vr + \"_rpr\"\n\telse:\n\t\trpr = cmds.shadingNode(\"colorConstant\", asUtility=True)\n\t\trpr = cmds.rename(rpr, vr + \"_rpr\")\n\n\t\t# Logging to file\n\t\tstart_log(vr, rpr)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rpr, vr, \"inColor\", \"default\")\n\t\t\n\t\t# Logging to file\n\t\tend_log(vr)\n\n\n\tconversion_map = {\n\t\t\"out\": \"outColor\",\n\t\t\"outR\": \"outColorR\",\n\t\t\"outG\": \"outColorG\",\n\t\t\"outB\": \"outColorB\"\n\t}\n\n\trpr += \".\" + conversion_map[source]\n\treturn rpr\n\n\n# standart utilities\ndef convertStandartNode(rsMaterial, source):\n\n\tnot_converted_list = (\"materialInfo\", \"defaultShaderList\", \"shadingEngine\", \"place2dTexture\")\n\ttry:\n\t\tfor attr in cmds.listAttr(rsMaterial):\n\t\t\tconnection = cmds.listConnections(rsMaterial + \".\" + attr)\n\t\t\tif connection:\n\t\t\t\tif cmds.objectType(connection[0]) not in not_converted_list and attr not in (source, \"message\"):\n\t\t\t\t\tobj, channel = cmds.connectionInfo(rsMaterial + \".\" + attr, sourceFromDestination=True).split('.')\n\t\t\t\t\tsource_name, source_attr = convertMaterial(obj, channel).split('.')\n\t\t\t\t\tconnectProperty(source_name, source_attr, rsMaterial, attr)\n\texcept:\n\t\tpass\n\n\treturn rsMaterial + \".\" + source\n\n\n# unsupported utilities\ndef convertUnsupportedNode(rsMaterial, source, postfix=\"_UNSUPPORTED_NODE\"):\n\n\tif cmds.objExists(rsMaterial + postfix):\n\t\trpr = rsMaterial + postfix\n\telse:\n\t\trpr = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\trpr = cmds.rename(rpr, rsMaterial + postfix)\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rpr)\n\n\t\t# 2 connection save\n\t\ttry:\n\t\t\tsetProperty(rpr, \"operation\", 0)\n\t\t\tunsupported_connections = 0\n\t\t\tfor attr in cmds.listAttr(rsMaterial):\n\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".\" + attr)\n\t\t\t\tif connection:\n\t\t\t\t\tif cmds.objectType(connection[0]) not in (\"materialInfo\", \"defaultShaderList\", \"shadingEngine\") and attr not in (source, \"message\"):\n\t\t\t\t\t\tif unsupported_connections < 2:\n\t\t\t\t\t\t\tobj, channel = cmds.connectionInfo(rsMaterial + \".\" + attr, sourceFromDestination=True).split('.')\n\t\t\t\t\t\t\tsource_name, source_attr = convertMaterial(obj, channel).split('.')\n\t\t\t\t\t\t\tvalueType = type(getProperty(rsMaterial, attr))\n\t\t\t\t\t\t\tif valueType == tuple:\n\t\t\t\t\t\t\t\tif unsupported_connections < 1:\n\t\t\t\t\t\t\t\t\tconnectProperty(source_name, source_attr, rpr, \"inputA\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tconnectProperty(source_name, source_attr, rpr, \"inputB\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif unsupported_connections < 1:\n\t\t\t\t\t\t\t\t\tconnectProperty(source_name, source_attr, rpr, \"inputAX\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tconnectProperty(source_name, source_attr, rpr, \"inputBX\")\n\t\t\t\t\t\t\tunsupported_connections += 1\n\t\texcept:\n\t\t\tpass\n\n\t\t# Logging to file\n\t\tend_log(rsMaterial)\n\n\tsourceType = type(getProperty(rsMaterial, source))\n\tif sourceType == tuple:\n\t\trpr += \".out\"\n\telse:\n\t\trpr += \".outX\"\n\n\treturn rpr\n\n\n# Create default uber material for unsupported material\ndef convertUnsupportedMaterial(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_UNSUPPORTED_MATERIAL\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# set green color\n\t\tsetProperty(rprMaterial, \"diffuseColor\", (0, 1, 0))\n\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n######################## \n## RedshiftArchitectural \n########################\n\ndef convertRedshiftArchitectural(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\n\t# If we are not in convertBumpBlender function \n\tif source != \"inside_blender_bump_conversion\":\n\t\t# Try to find in input connections BumpBlender\n\t\tblender_bump = cmds.listConnections(rsMaterial, type=\"RedshiftBumpBlender\")\n\t\t\n\t\tif blender_bump and assigned:\n\t\t\tif len(blender_bump) > 1:\n\t\t\t\tprint(\"[ERROR] Multiple RedshiftBlenderBump nodes isn't supported.\")\n\n\t\t\trprMaterial = convertRedshiftBumpBlender(rsMaterial, blender_bump[0], source)\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t\treturn rprMaterial + \".\" + source\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned and source != \"inside_blender_bump_conversion\":\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\t\t\t\n\t\t# Enable properties, which are default in RedShift\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"diffuse\", \"diffuse_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"reflections\", \"reflectivity\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"refraction\", \"transparency\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"clearCoat\", \"refl_base\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# diffuse\n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseColor\", \"diffuse\") \n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseWeight\", \"diffuse_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseRoughness\", \"diffuse_roughness\")\n\t\t\n\t\t# primary reflection (reflection)\n\t\t\n\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\tif connection:\n\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectColor\", \"refl_color\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectWeight\", \"reflectivity\")\n\n\t\tif getProperty(rsMaterial, \"brdf_fresnel\"):\n\t\t\tior = getProperty(rsMaterial, \"brdf_fresnel_ior\")\n\t\t\tif ior > 10:\n\t\t\t\tsetProperty(rprMaterial, \"reflectIOR\", 10)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"reflectIOR\", ior)\n\t\telse:\n\t\t\trefl = getProperty(rsMaterial, \"brdf_0_degree_refl\")\n\t\t\tior = -1 * (refl + 1 + 2 * math.sqrt(refl) / (refl - 1))\n\t\t\tif ior > 10:\n\t\t\t\tsetProperty(rprMaterial, \"reflectIOR\", 10)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"reflectIOR\", ior)\n\n\t\tinvertValue(rprMaterial, rsMaterial, \"reflectRoughness\", \"refl_gloss\")\n\t\tsetProperty(rprMaterial, \"reflectAnisotropy\", getProperty(rsMaterial, \"anisotropy\") * 2)\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectAnisotropyRotation\", \"anisotropy_rotation\")\n\n\t\tsetProperty(rprMaterial, \"reflectMetalMaterial\", getProperty(rsMaterial, \"refl_is_metal\"))\n\n\t\tbrdf_fresnel_type = getProperty(rsMaterial, \"brdf_fresnel_type\")\n\t\tif brdf_fresnel_type: # conductor\n\t\t\tbrdf_extinction_coeff = getProperty(rsMaterial, \"brdf_extinction_coeff\")\n\t\t\tif brdf_extinction_coeff > 2:\n\t\t\t\tsetProperty(rprMaterial, \"reflectMetalMaterial\", 1)\n\t\t\t\tsetProperty(rprMaterial, \"reflectMetalness\", 1)\n\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"diffuse_weight\"):\n\t\t\t\t\tsetProperty(rprMaterial, \"diffuseWeight\", 0)\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"reflectivity\"):\n\t\t\t\t\tsetProperty(rprMaterial, \"reflectWeight\", 1)\n\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputA\", \"diffuse\")\n\t\t\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\t\t\tif connection:\n\t\t\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"refl_color\")\n\t\t\t\tmetalMaterial = getProperty(rsMaterial, \"refl_is_metal\")\n\t\t\t\tif metalMaterial:\n\t\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\telse:\n\t\t\t\t\tsetProperty(arithmetic, \"operation\", 20)\n\t\t\t\tconnectProperty(arithmetic, \"out\", rprMaterial, \"reflectColor\")\n\n\t\t# sec reflection (Coat)\n\t\tsetProperty(rprMaterial, \"coatWeight\", getProperty(rsMaterial, \"refl_base\") / 4)\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatColor\", \"refl_base_color\")\n\n\t\tinvertValue(rprMaterial, rsMaterial, \"coatRoughness\", \"refl_base_gloss\")\n\n\t\tif getProperty(rsMaterial, \"brdf_base_fresnel\"):\n\t\t\tif getProperty(rsMaterial, \"brdf_base_fresnel_type\"):\n\t\t\t\tcoat_ior = getProperty(rsMaterial, \"brdf_base_fresnel_ior\") + getProperty(rsMaterial, \"brdf_base_extinction_coeff\")\n\t\t\telse:\n\t\t\t\tcoat_ior = getProperty(rsMaterial, \"brdf_base_fresnel_ior\")\n\n\t\t\tif coat_ior > 10:\n\t\t\t\tsetProperty(rprMaterial, \"coatIor\", 10)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"coatIor\", coat_ior)\n\t\telse:\n\t\t\trefl = getProperty(rsMaterial, \"brdf_base_0_degree_refl\")\n\t\t\tior = -1 * (refl + 1 + 2 * math.sqrt(refl) / (refl - 1))\n\t\t\tif ior > 10:\n\t\t\t\tsetProperty(rprMaterial, \"coatIor\", 10)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"coatIor\", ior)\n\t\t\t\n\t\t# refraction\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractColor\", \"refr_color\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractWeight\", \"transparency\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractThinSurface\", \"thin_walled\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractIor\", \"refr_ior\")\n\n\t\tinvertValue(rprMaterial, rsMaterial, \"refractRoughness\", \"refr_gloss\")\n\t\t\t\t\n\t\tfog_enable = getProperty(rsMaterial, \"refr_falloff_on\")\n\t\tif fog_enable:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"refractAbsorptionDistance\", \"refr_falloff_dist\")\n\t\t\n\t\tend_color_enable = getProperty(rsMaterial, \"refr_falloff_color_on\")\n\t\tif end_color_enable:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"refractAbsorbColor\", \"refr_falloff_color\") \n\t\telse: \n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"refractAbsorbColor\", \"refr_color\")\n\t\t\n\t\tsetProperty(rprMaterial, \"refractAllowCaustics\", getProperty(rsMaterial, \"do_refractive_caustics\"))\n\t\t\t\n\t\t# emissive\n\t\temissive_weight = getProperty(rsMaterial, \"incandescent_scale\")\n\t\temissive_color = getProperty(rsMaterial, \"additional_color\")\n\t\tif emissive_weight > 0 and (emissive_color[0] > 0 or emissive_color[1] > 0 or emissive_color[2] > 0):\n\t\t\tsetProperty(rprMaterial, \"emissive\", True)\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveColor\", \"additional_color\")\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveIntensity\", \"incandescent_scale\")\n\n\n\t\tif getProperty(rsMaterial, \"refr_translucency\"):\n\t\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", 1)\n\n\t\t\tif mapDoesNotExist(rsMaterial, \"refr_trans_weight\"):\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"refr_trans_color\"):\n\t\t\t\t\ttransl_weight = getProperty(rsMaterial, \"refr_trans_weight\")\n\t\t\t\t\ttransl_color = getProperty(rsMaterial, \"refr_trans_color\")\n\t\t\t\t\tavg_color = sum(transl_color) / 3.0\n\t\t\t\t\tif transl_weight <= 0.5:\n\t\t\t\t\t\tif avg_color < transl_weight:\n\t\t\t\t\t\t\tbackscatteringWeight = avg_color\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbackscatteringWeight = transl_weight\n\t\t\t\t\telif transl_weight > 0.5:\n\t\t\t\t\t\tif avg_color < transl_weight and avg_color * 2 <= 1:\n\t\t\t\t\t\t\tbackscatteringWeight = avg_color * 2\n\t\t\t\t\t\telif transl_weight * 2 <= 1:\n\t\t\t\t\t\t\tbackscatteringWeight = transl_weight * 2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbackscatteringWeight = 1\n\n\t\t\t\t\tif mapDoesNotExist(rsMaterial, \"cutout_opacity\"):\n\t\t\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", backscatteringWeight)\n\t\t\t\t\telse:\n\t\t\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\t\t\tsetProperty(arithmetic, \"inputAX\", backscatteringWeight)\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputBX\", \"cutout_opacity\")\n\t\t\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\n\t\t\t\telse:\n\t\t\t\t\tif mapDoesNotExist(rsMaterial, \"cutout_opacity\"):\n\t\t\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.5 * getProperty(rsMaterial, \"refr_trans_weight\"))\n\t\t\t\t\telse:\n\t\t\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputAX\", \"refr_trans_weight\")\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputBX\", \"cutout_opacity\")\n\t\t\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputAX\", \"refr_trans_weight\")\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"cutout_opacity\"):\n\t\t\t\t\tsetProperty(arithmetic, \"inputB\", (0.5, 0.5, 0.5))\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"cutout_opacity\")\n\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\n\t\t\t# trans color\n\t\t\tarithmetic1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic1, \"operation\", 2)\n\t\t\tcopyProperty(arithmetic1, rsMaterial, \"inputA\", \"refr_trans_color\")\n\t\t\tsetProperty(arithmetic1, \"inputB\", (2.2, 2.2, 2.2))\n\n\t\t\tarithmetic2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic2, \"operation\", 2)\n\t\t\tcopyProperty(arithmetic2, rsMaterial, \"inputA\", \"diffuse\")\n\t\t\tsetProperty(arithmetic2, \"inputB\", (2.2, 2.2, 2.2))\n\n\t\t\tarithmetic3 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic3, \"operation\", 20)\n\t\t\tconnectProperty(arithmetic1, \"out\", arithmetic3, \"inputA\")\n\t\t\tconnectProperty(arithmetic2, \"out\", arithmetic3, \"inputB\")\n\n\t\t\tconnectProperty(arithmetic3, \"out\", rprMaterial, \"backscatteringColor\")\n\n\t\topacity = getProperty(rsMaterial, \"cutout_opacity\")\n\t\tif not mapDoesNotExist(rsMaterial, \"cutout_opacity\") or opacity < 1:\n\t\t\tinvertValue(rprMaterial, rsMaterial, \"transparencyLevel\", \"cutout_opacity\")\n\t\t\tsetProperty(rprMaterial, \"transparencyEnable\", 1)\n\n\t\tbumpConnections = cmds.listConnections(rsMaterial + \".bump_input\")\n\t\tif bumpConnections:\n\t\t\tsetProperty(rprMaterial, \"normalMapEnable\", 1)\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"normalMap\", \"bump_input\")\n\t\t\tsetProperty(rprMaterial, \"useShaderNormal\", not getProperty(rsMaterial, \"no_diffuse_bump\"))\n\t\t\tsetProperty(rprMaterial, \"reflectUseShaderNormal\", not getProperty(rsMaterial, \"no_refl0_bump\"))\n\t\t\tsetProperty(rprMaterial, \"refractUseShaderNormal\", not getProperty(rsMaterial, \"no_refr_bump\"))\n\t\t\tsetProperty(rprMaterial, \"coatUseShaderNormal\", not getProperty(rsMaterial, \"no_refl1_bump\"))\n\t\t\t\t\n\t\t# Logging in file\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n#######################\n## RedshiftSprite \n#######################\n\ndef convertRedshiftSprite(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\tinput_material = cmds.listConnections(rsMaterial + \".input\")[0]\n\t\trprMaterial = convertRedshiftMaterial(input_material, \"\")\n\t\tsg = rprMaterial + \"SG\"\n\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\t\t\t\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# Fields conversion\n\n\t\t# convert map\n\t\tif getProperty(rsMaterial, \"tex0\"):\n\n\t\t\tfile = cmds.shadingNode(\"file\", asTexture=True, isColorManaged=True)\n\t\t\ttexture = cmds.shadingNode(\"place2dTexture\", asUtility=True)\n\n\t\t\tconnectProperty(texture, \"coverage\", file, \"coverage\")\n\t\t\tconnectProperty(texture, \"translateFrame\", file, \"translateFrame\")\n\t\t\tconnectProperty(texture, \"rotateFrame\", file, \"rotateFrame\")\n\t\t\tconnectProperty(texture, \"mirrorU\", file, \"mirrorU\")\n\t\t\tconnectProperty(texture, \"mirrorV\", file, \"mirrorV\")\n\t\t\tconnectProperty(texture, \"stagger\", file, \"stagger\")\n\t\t\tconnectProperty(texture, \"wrapU\", file, \"wrapU\")\n\t\t\tconnectProperty(texture, \"wrapV\", file, \"wrapV\")\n\t\t\tconnectProperty(texture, \"repeatUV\", file, \"repeatUV\")\n\t\t\tconnectProperty(texture, \"offset\", file, \"offset\")\n\t\t\tconnectProperty(texture, \"rotateUV\", file, \"rotateUV\")\n\t\t\tconnectProperty(texture, \"noiseUV\", file, \"noiseUV\")\n\t\t\tconnectProperty(texture, \"vertexUvOne\", file, \"vertexUvOne\")\n\t\t\tconnectProperty(texture, \"vertexUvTwo\", file, \"vertexUvTwo\")\n\t\t\tconnectProperty(texture, \"vertexUvThree\", file, \"vertexUvThree\")\n\t\t\tconnectProperty(texture, \"vertexCameraOne\", file, \"vertexCameraOne\")\n\t\t\tconnectProperty(texture, \"outUV\", file, \"uv\")\n\t\t\tconnectProperty(texture, \"outUvFilterSize\", file, \"uvFilterSize\")\n\t\t\tcopyProperty(texture, rsMaterial, \"repeatU\", \"repeats0\")\n\t\t\tcopyProperty(texture, rsMaterial, \"repeatV\", \"repeats1\")\n\n\t\t\tsetProperty(file, \"fileTextureName\", getProperty(rsMaterial, \"tex0\"))\n\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic, \"operation\", 1)\n\t\t\tsetProperty(arithmetic, \"inputA\", (1, 1, 1))\n\t\t\tconnectProperty(file, \"outColor\", arithmetic, \"inputB\")\n\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"transparencyLevel\")\t\n\t\t\tsetProperty(rprMaterial, \"transparencyEnable\", 1)\n\n\n\t\t# Logging in file\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n#######################\n## RedshiftCarPaint \n#######################\n\ndef convertRedshiftCarPaint(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Enable properties, which are default in Redshift\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"diffuse\", \"diffuse_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"reflections\", \"spec_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"clearCoat\", \"clearcoat_weight\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# Fields conversion\n\n\t\t# Mixing diffuse color\n\t\tincident_lookup = cmds.shadingNode(\"RPRLookup\", asUtility=True)\n\t\tincident_lookup = cmds.rename(incident_lookup, \"incident_lookup\")\n\t\tsetProperty(incident_lookup, \"type\", 3)\n\n\t\tnormal_lookup = cmds.shadingNode(\"RPRLookup\", asUtility=True)\n\t\tnormal_lookup = cmds.rename(normal_lookup, \"normal_lookup\")\n\t\tsetProperty(normal_lookup, \"type\", 1)\n\n\t\tdot_product = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tdot_product = cmds.rename(dot_product, \"dot_product\")\n\t\tsetProperty(dot_product, \"operation\", 11)\n\t\tconnectProperty(normal_lookup, \"out\", dot_product, \"inputA\")\n\t\tconnectProperty(incident_lookup, \"out\", dot_product, \"inputB\")\n\n\t\tabsolute = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tabsolute = cmds.rename(absolute, \"absolute\")\n\t\tsetProperty(absolute, \"operation\", 25)\n\t\tconnectProperty(dot_product, \"out\", absolute, \"inputA\")\n\t\tsetProperty(absolute, \"inputB\", (0, 0, 0))\n\n\t\treverse = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\treverse = cmds.rename(reverse, \"reverse\")\n\t\tsetProperty(reverse, \"operation\", 1)\n\t\tsetProperty(reverse, \"inputA\", (1, 1, 1))\n\t\tconnectProperty(absolute, \"out\", reverse, \"inputB\")\n\n\t\tpow_curvefactor = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tpow_curvefactor = cmds.rename(pow_curvefactor, \"pow_curvefactor\")\n\t\tsetProperty(pow_curvefactor, \"operation\", 15)\n\t\tconnectProperty(reverse, \"out\", pow_curvefactor, \"inputA\")\n\t\tcopyProperty(pow_curvefactor, rsMaterial, \"inputB\", \"edge_color_bias\")\n\n\t\tblend_pigment_edge = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\t\tblend_pigment_edge = cmds.rename(blend_pigment_edge, \"blend_pigment_edge\")\n\t\tcopyProperty(blend_pigment_edge, rsMaterial, \"inputA\", \"base_color\")\n\t\tcopyProperty(blend_pigment_edge, rsMaterial, \"inputB\", \"edge_color\")\n\t\tconnectProperty(pow_curvefactor, \"out\", blend_pigment_edge, \"weight\")\n\t\tconnectProperty(blend_pigment_edge, \"out\", rprMaterial, \"diffuseColor\")\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseWeight\", \"diffuse_weight\")\n\t\tsetProperty(rprMaterial, \"diffuseRoughness\", 0.5)\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectColor\", \"spec_color\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectWeight\", \"spec_weight\")\n\t\tinvertValue(rprMaterial, rsMaterial, \"reflectRoughness\", \"spec_gloss\")\n\n\t\tinvertValue(rprMaterial, rsMaterial, \"coatRoughness\", \"clearcoat_gloss\")\n\n\t\tclearcoat_facingweight = getProperty(rsMaterial, \"clearcoat_facingweight\")\n\t\tcoat_ior = -1 * (clearcoat_facingweight + 1 + 2 * math.sqrt(clearcoat_facingweight)) / (clearcoat_facingweight - 1)\n\t\tsetProperty(rprMaterial, \"coatIor\", coat_ior)\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatWeight\", \"clearcoat_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatColor\", \"clearcoat_color\")\n\n\t\tbumpConnections = cmds.listConnections(rsMaterial + \".bump_input\")\n\t\tif bumpConnections:\n\t\t\tsetProperty(rprMaterial, \"normalMapEnable\", 1)\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"normalMap\", \"bump_input\")\n\n\t\t\tif getProperty(rsMaterial, \"no_baselayer_bump\"):\n\t\t\t\tsetProperty(rprMaterial, \"useShaderNormal\", 0)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"useShaderNormal\", 1)\n\n\t\t\tif getProperty(rsMaterial, \"no_clearcoat_bump\"):\n\t\t\t\tsetProperty(rprMaterial, \"coatUseShaderNormal\", 0)\n\t\t\telse:\n\t\t\t\tsetProperty(rprMaterial, \"coatUseShaderNormal\", 1)\n\n\t\t\tsetProperty(rprMaterial, \"reflectUseShaderNormal\", 1)\n\t\t\tsetProperty(rprMaterial, \"refractUseShaderNormal\", 1)\n\n\t\t# Logging in file\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n######################## \n## RedshiftIncandescent \n########################\n\ndef convertRedshiftIncandescent(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Enable properties, which are default in RedShift\n\t\tsetProperty(rprMaterial, \"diffuse\", 0)\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"emissive\", \"intensity\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# Fields conversion\n\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveIntensity\", \"intensity\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveWeight\", \"alpha\")\n\n\t\tsetProperty(rprMaterial, \"emissiveDoubleSided\", getProperty(rsMaterial, \"doublesided\"))\n\n\t\topacity = getProperty(rsMaterial, \"alpha\")\n\t\tif not mapDoesNotExist(rsMaterial, \"alpha\") or opacity < 1:\n\t\t\tinvertValue(rprMaterial, rsMaterial, \"transparencyLevel\", \"alpha\")\n\t\t\tsetProperty(rprMaterial, \"transparencyEnable\", 1)\n\n\t\t# converting temperature to emissive color\n\t\t# no_rpr_analog\n\t\tcolor_mode = getProperty(rsMaterial, \"colorMode\")\n\t\tif color_mode:\n\t\t\tcolor = convertTemperature(getProperty(rsMaterial, \"temperature\"))\n\t\t\tsetProperty(rprMaterial, \"emissiveColor\", color)\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveColor\", \"color\")\n\n\t\t# Logging to file\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n\n###################### \n## RedshiftMaterial \n###################### \n\ndef convertRedshiftMaterial(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\n\t# If we are not in convertBumpBlender function \n\tif source != \"inside_blender_bump_conversion\":\n\t\t# Try to find in input connections BumpBlender\n\t\tblender_bump = cmds.listConnections(rsMaterial, type=\"RedshiftBumpBlender\")\n\n\t\tif blender_bump and assigned:\n\t\t\tif len(blender_bump) > 1:\n\t\t\t\tprint(\"[ERROR] Multiple RedshiftBlenderBump nodes isn't supported.\")\n\n\t\t\trprMaterial = convertRedshiftBumpBlender(rsMaterial, blender_bump[0], source)\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t\treturn rprMaterial + \".\" + source\n\n\tif cmds.objExists(rsMaterial + \"_rpr\") and source != \"displacement_copy\":\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned and source != \"inside_blender_bump_conversion\":\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t\t# displacement conversion\n\t\t\tif source != \"displacement_copy\":\n\t\t\t\trs_sg = cmds.listConnections(rsMaterial, type=\"shadingEngine\")\n\t\t\t\tdisplacement = cmds.listConnections(rs_sg, type=\"RedshiftDisplacement\")\n\t\t\t\tif displacement:\n\t\t\t\t\tdisplacement_file = cmds.listConnections(displacement[0], type=\"file\")\n\t\t\t\t\tif displacement_file:\n\t\t\t\t\t\tconvertDisplacement(displacement[0], displacement_file[0], rsMaterial, rprMaterial)\n\n\t\t# Enable properties, which are default in RedShift.\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"diffuse\", \"diffuse_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"reflections\", \"refl_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"refraction\", \"refr_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"clearCoat\", \"coat_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"emissive\", \"emission_weight\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"sssEnable\", \"ms_amount\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# Fields conversion\n\t\toverall_color = getProperty(rsMaterial, \"overall_color\")\n\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\tdiffuse_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(diffuse_arith, \"operation\", 2)\n\t\t\tcopyProperty(diffuse_arith, rsMaterial, \"inputA\", \"diffuse_color\")\n\t\t\tcopyProperty(diffuse_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\tconnectProperty(diffuse_arith, \"out\", rprMaterial, \"diffuseColor\")\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseColor\", \"diffuse_color\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseWeight\", \"diffuse_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"diffuseRoughness\", \"diffuse_roughness\")\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectWeight\", \"refl_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectRoughness\", \"refl_roughness\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectAnisotropy\", \"refl_aniso\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectAnisotropyRotation\", \"refl_aniso_rotation\")\n\n\t\t# Fresnel type conversion\n\t\trefl_reflectivity = getProperty(rsMaterial, \"refl_reflectivity\")\n\t\trefl_fr_mode = getProperty(rsMaterial, \"refl_fresnel_mode\" )\n\n\t\tif refl_fr_mode == 3:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectIOR\", \"refl_ior\")\n\t\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\t\tif connection:\n\t\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\n\t\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\t\trefl_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(refl_arith, \"operation\", 2)\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputA\", \"refl_color\")\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\t\tconnectProperty(refl_arith, \"out\", rprMaterial, \"reflectColor\")\n\t\t\telse:\n\t\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectColor\", \"refl_color\")\n\n\t\telif refl_fr_mode == 2:\n\n\t\t\tblend_value = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\n\t\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\t\trefl_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(refl_arith, \"operation\", 2)\n\t\t\t\tconnectProperty(blend_value, \"out\", refl_arith, \"inputA\")\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\t\tconnectProperty(refl_arith, \"out\", rprMaterial, \"reflectColor\")\n\t\t\telse:\n\t\t\t\tconnectProperty(blend_value, \"out\", rprMaterial, \"reflectColor\")\n\n\t\t\t# blend color from diffuse and reflectivity to reflect color\n\t\t\t# no_rpr_analog\n\n\t\t\tcopyProperty(blend_value, rsMaterial, \"inputA\", \"refl_reflectivity\")\n\t\t\tcopyProperty(blend_value, rsMaterial, \"inputB\", \"diffuse_color\")\n\t\t\tcopyProperty(blend_value, rsMaterial, \"weight\", \"refl_metalness\")\n\n\t\t\tmetalness = getProperty(rsMaterial, \"refl_metalness\")\n\t\t\tif metalness > 0:\n\t\t\t\tsetProperty(rprMaterial, \"reflectMetalMaterial\", 1)\n\t\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectMetalness\", \"refl_metalness\")\n\n\t\t# no_rpr_analog\n\t\telif refl_fr_mode == 1:\n\n\t\t\tedge_tint = getProperty(rsMaterial, \"refl_edge_tint\")\n\n\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\n\t\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\t\trefl_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(refl_arith, \"operation\", 2)\n\t\t\t\tconnectProperty(arithmetic, \"out\", refl_arith, \"inputA\")\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\t\tconnectProperty(refl_arith, \"out\", rprMaterial, \"reflectColor\")\n\t\t\telse:\n\t\t\t\tconnectProperty(arithmetic, \"out\", rprMaterial, \"reflectColor\")\n\n\t\t\tblend_value = cmds.shadingNode(\"RPRBlendValue\", asUtility=True)\n\t\t\tconnectProperty(blend_value, \"out\", arithmetic, \"inputB\")\n\n\t\t\tfresnel = cmds.shadingNode(\"RPRFresnel\", asUtility=True)\n\t\t\tconnectProperty(fresnel, \"out\", blend_value, \"weight\")\n\n\t\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\t\tif connection:\n\t\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputA\", \"refl_color\")\n\n\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\n\t\t\tsetProperty(fresnel, \"ior\", 1.5)\n\n\t\t\tif edge_tint[0] or edge_tint[1] or edge_tint[2]:\n\n\t\t\t\tcopyProperty(blend_value, rsMaterial, \"inputA\", \"refl_reflectivity\")\n\t\t\t\tcopyProperty(blend_value, rsMaterial, \"inputB\", \"refl_edge_tint\")\n\n\t\t\t\tsetProperty(rprMaterial, \"reflectMetalMaterial\", 1)\n\t\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectMetalness\", \"refl_metalness\")\n\t\t\t\tif not getProperty(rprMaterial, \"reflectMetalness\"):\n\t\t\t\t\tsetProperty(rprMaterial, \"reflectMetalness\", 1)\n\n\t\t\telse:\n\n\t\t\t\tcopyProperty(blend_value, rsMaterial, \"inputA\", \"refl_reflectivity\")\n\n\t\t\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\t\t\tif connection:\n\t\t\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\t\t\t\tcopyProperty(blend_value, rsMaterial, \"inputB\", \"refl_color\")\n\n\t\t\t\tmax_refl = max(refl_reflectivity)\n\t\t\t\tif max_refl == 1:\n\t\t\t\t\tmax_refl = 0.9999\n\t\t\t\telif max_refl == 0:\n\t\t\t\t\tmax_refl = 0.0001\n\n\t\t\t\tior = -1 * (max_refl + 1 + 2 * math.sqrt(max_refl) / (max_refl - 1))\n\t\t\t\tif ior > 10:\n\t\t\t\t\tior = 10\n\n\t\t\t\tsetProperty(rprMaterial, \"reflectIOR\", ior)\n\t\t\t\t\n\n\t\telse:\n\t\t\t# advanced ior\n\t\t\t# no_rpr_analog\n\t\t\t# take one channel from advanced ior ti rpr ior\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectIOR\", \"refl_ior30\")\n\t\t\tif not mapDoesNotExist(rsMaterial, \"refl_color\"):\n\t\t\t\tconnection = cmds.listConnections(rsMaterial + \".refl_color\", type=\"file\")\n\t\t\t\tif connection:\n\t\t\t\t\tsetProperty(connection[0], \"colorSpace\", \"Raw\")\n\n\t\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\t\trefl_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(refl_arith, \"operation\", 2)\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputA\", \"refl_color\")\n\t\t\t\tcopyProperty(refl_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\t\tconnectProperty(refl_arith, \"out\", rprMaterial, \"reflectColor\")\n\t\t\telse:\n\t\t\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectColor\", \"refl_color\")\n\n\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\trefr_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(refr_arith, \"operation\", 2)\n\t\t\tcopyProperty(refr_arith, rsMaterial, \"inputA\", \"refr_color\")\n\t\t\tcopyProperty(refr_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\tconnectProperty(refr_arith, \"out\", rprMaterial, \"refractColor\")\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"refractColor\", \"refr_color\")\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractWeight\", \"refr_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractRoughness\", \"refr_roughness\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractIor\", \"refr_ior\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractLinkToReflect\", \"refr_use_base_IOR\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"refractThinSurface\", \"refr_thin_walled\")\n\n\t\t# maps doesn't support ( will work incorrectly )\n\t\tss_unitsMode = getProperty(rsMaterial, \"ss_unitsMode\")\n\t\tif ss_unitsMode:\n\t\t\tsetProperty(rprMaterial, \"diffuse\", 1)\n\t\t\tsetProperty(rprMaterial, \"diffuseWeight\", 1)\n\n\t\t\tarith1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arith1, \"operation\", 1)\n\t\t\tsetProperty(arith1, \"inputA\", (1, 1, 1))\n\t\t\tcopyProperty(arith1, rsMaterial, \"inputB\", \"ss_extinction_coeff\")\n\t\t\tconnectProperty(arith1, \"out\", rprMaterial, \"refractAbsorbColor\")\n\n\t\t\tss_ext_coeff = getProperty(rsMaterial, \"ss_extinction_coeff\")\n\t\t\tif ss_ext_coeff[0] > 1 or ss_ext_coeff[1] > 1 or ss_ext_coeff[2] > 1:\n\t\t\t\tsetProperty(rprMaterial, \"refraction\", 0)\n\t\t\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", 1)\n\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.5)\n\t\t\t\tconnectProperty(arith1, \"out\", rprMaterial, \"backscatteringColor\")\n\n\t\t\tif getProperty(rsMaterial, \"ss_extinction_scale\"):\n\t\t\t\tarith2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arith2, \"operation\", 3)\n\t\t\t\tsetProperty(arith2, \"inputA\", (1, 1, 1))\n\t\t\t\tcopyProperty(arith2, rsMaterial, \"inputB\", \"ss_extinction_scale\")\n\t\t\t\tconnectProperty(arith2, \"out\", rprMaterial, \"refractAbsorptionDistance\")\n\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"refractAbsorbColor\", \"refr_transmittance\")\n\t\t\tif mapDoesNotExist(rsMaterial, \"refr_absorption_scale\"):\n\t\t\t\tif getProperty(rsMaterial, \"refr_absorption_scale\"):\n\t\t\t\t\tabsorption = 1 / getProperty(rsMaterial, \"refr_absorption_scale\")\n\t\t\t\t\tsetProperty(rprMaterial, \"refractAbsorptionDistance\", absorption)\n\n\t\tif overall_color != (1.0, 1.0, 1.0):\n\t\t\tcoat_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(coat_arith, \"operation\", 2)\n\t\t\tcopyProperty(coat_arith, rsMaterial, \"inputA\", \"coat_color\")\n\t\t\tcopyProperty(coat_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\tconnectProperty(coat_arith, \"out\", rprMaterial, \"coatColor\")\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"coatColor\", \"coat_color\")\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatWeight\", \"coat_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatRoughness\", \"coat_roughness\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatTransmissionColor\", \"coat_transmittance\")\n\n\t\tcoat_fr_mode = getProperty(rsMaterial, \"coat_fresnel_mode\")\n\t\tif coat_fr_mode == 3:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"coatIor\", \"coat_ior\")\n\n\t\tif getProperty(rsMaterial, \"overallAffectsEmission\"):\n\t\t\temissive_arith = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(emissive_arith, \"operation\", 2)\n\t\t\tcopyProperty(emissive_arith, rsMaterial, \"inputA\", \"emission_color\")\n\t\t\tcopyProperty(emissive_arith, rsMaterial, \"inputB\", \"overall_color\")\n\t\t\tconnectProperty(emissive_arith, \"out\", rprMaterial, \"emissiveColor\")\n\t\telse:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveColor\", \"emission_color\")\n\n\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveWeight\", \"emission_weight\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"emissiveIntensity\", \"emission_weight\")\n\n\t\tif not ss_unitsMode:\n\t\t\tcopyProperty(rprMaterial, rsMaterial, \"backscatteringWeight\", \"ms_amount\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"sssWeight\", \"ms_amount\")\n\n\t\tbackscatteringWeight = getProperty(rsMaterial, \"transl_weight\")\n\n\t\t# SSS\n\t\tms_amount = getProperty(rsMaterial, \"ms_amount\")\n\t\tif ms_amount:\n\t\t\tif not backscatteringWeight:\n\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.5)\n\t\t\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", 0)\n\n\t\t\t# first layer\n\t\t\tarithmetic1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic1, \"operation\", 2)\n\t\t\t# input A\n\t\t\tif mapDoesNotExist(rsMaterial, \"ms_color0\"):\n\t\t\t\tcopyProperty(arithmetic1, rsMaterial, \"inputA\", \"ms_color0\")\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 15)\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputA\", \"ms_color0\")\n\t\t\t\tsetProperty(arithmetic, \"inputB\", (2, 2, 2))\n\t\t\t\tconnectProperty(arithmetic, \"out\", arithmetic1, \"inputA\")\n\t\t\t# input B\n\t\t\tfactor1 = 2 * getProperty(rsMaterial, \"ms_weight0\") * getProperty(rsMaterial, \"ms_radius0\") * getProperty(rsMaterial, \"ms_radius_scale\")\n\t\t\tsetProperty(arithmetic1, \"inputB\", (factor1, factor1, factor1))\n\n\t\t\t# second layer\n\t\t\t# divide L2 by 2\n\t\t\tarithmetic_divide1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_divide1, \"operation\", 3)\n\t\t\tcopyProperty(arithmetic_divide1, rsMaterial, \"inputA\", \"ms_color1\")\n\t\t\tsetProperty(arithmetic_divide1, \"inputB\", (2, 2, 2))\n\n\t\t\t# pow 2\n\t\t\tarithmetic2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic2, \"operation\", 2)\n\t\t\t# input A\n\t\t\tif mapDoesNotExist(rsMaterial, \"ms_color1\"):\n\t\t\t\tconnectProperty(arithmetic_divide1, \"out\", arithmetic2, \"inputA\")\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 15)\n\t\t\t\tconnectProperty(arithmetic_divide1, \"out\", arithmetic, \"inputA\")\n\t\t\t\tsetProperty(arithmetic, \"inputB\", (2, 2, 2))\n\t\t\t\tconnectProperty(arithmetic, \"out\", arithmetic2, \"inputA\")\n\t\t\t# input B\n\t\t\tfactor2 = 2 * getProperty(rsMaterial, \"ms_weight1\") * getProperty(rsMaterial, \"ms_radius1\") * getProperty(rsMaterial, \"ms_radius_scale\")\n\t\t\tsetProperty(arithmetic2, \"inputB\", (factor2, factor2, factor2))\t\n\n\t\t\t# third layer\n\t\t\t# divide L3 by 4\n\t\t\tarithmetic_divide2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_divide2, \"operation\", 3)\n\t\t\tcopyProperty(arithmetic_divide2, rsMaterial, \"inputA\", \"ms_color2\")\n\t\t\tsetProperty(arithmetic_divide2, \"inputB\", (4, 4, 4))\n\n\t\t\t# pow 2\n\t\t\tarithmetic3 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic3, \"operation\", 2)\n\t\t\t# input A\n\t\t\tif mapDoesNotExist(rsMaterial, \"ms_color2\"):\n\t\t\t\tconnectProperty(arithmetic_divide2, \"out\", arithmetic3, \"inputA\")\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 15)\n\t\t\t\tconnectProperty(arithmetic_divide2, \"out\", arithmetic3, \"inputA\")\n\t\t\t\tsetProperty(arithmetic, \"inputB\", (2, 2, 2))\n\t\t\t\tconnectProperty(arithmetic, \"out\", arithmetic3, \"inputA\")\n\t\t\t# input B\n\t\t\tfactor3 = 2 * getProperty(rsMaterial, \"ms_weight2\") * getProperty(rsMaterial, \"ms_radius2\") * getProperty(rsMaterial, \"ms_radius_scale\")\n\t\t\tsetProperty(arithmetic3, \"inputB\", (factor3, factor3, factor3))\n\n\t\t\tarithmetic_mix_1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_mix_1, \"operation\", 20)\n\t\t\tconnectProperty(arithmetic1, \"out\", arithmetic_mix_1, \"inputA\")\n\t\t\tconnectProperty(arithmetic2, \"out\", arithmetic_mix_1, \"inputB\")\n\n\t\t\tarithmetic_mix_2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_mix_2, \"operation\", 20)\n\t\t\tconnectProperty(arithmetic_mix_1, \"out\", arithmetic_mix_2, \"inputA\")\n\t\t\tconnectProperty(arithmetic3, \"out\", arithmetic_mix_2, \"inputB\")\n\t\t\tconnectProperty(arithmetic_mix_2, \"out\", rprMaterial, \"subsurfaceRadius\")\n\n\t\t\tarithmetic_mix_3 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_mix_3, \"operation\", 20)\n\t\t\tcopyProperty(arithmetic_mix_3, rsMaterial, \"inputA\", \"ms_color0\")\n\t\t\tconnectProperty(arithmetic_divide1, \"out\", arithmetic_mix_3, \"inputB\")\n\n\t\t\tarithmetic_mix_4 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(arithmetic_mix_4, \"operation\", 20)\n\t\t\tconnectProperty(arithmetic_mix_3, \"out\", arithmetic_mix_4, \"inputA\")\n\t\t\tconnectProperty(arithmetic_divide2, \"out\", arithmetic_mix_4, \"inputB\")\n\t\t\tconnectProperty(arithmetic_mix_4, \"out\", rprMaterial, \"volumeScatter\")\n\n\t\t# transl\n\t\tif backscatteringWeight:\n\t\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", 1)\n\n\t\t\tif mapDoesNotExist(rsMaterial, \"transl_weight\"):\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"transl_color\"):\n\t\t\t\t\ttransl_weight = getProperty(rsMaterial, \"transl_weight\")\n\t\t\t\t\ttransl_color = getProperty(rsMaterial, \"transl_color\")\n\t\t\t\t\tavg_color = sum(transl_color) / 3.0\n\t\t\t\t\tif transl_weight <= 0.5:\n\t\t\t\t\t\tif avg_color < transl_weight:\n\t\t\t\t\t\t\tbackscatteringWeight = avg_color\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbackscatteringWeight = transl_weight\n\t\t\t\t\telif transl_weight > 0.5:\n\t\t\t\t\t\tif avg_color < transl_weight and avg_color * 2 <= 1:\n\t\t\t\t\t\t\tbackscatteringWeight = avg_color * 2\n\t\t\t\t\t\telif transl_weight * 2 <= 1:\n\t\t\t\t\t\t\tbackscatteringWeight = transl_weight * 2\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbackscatteringWeight = 1\n\n\t\t\t\t\tif mapDoesNotExist(rsMaterial, \"opacity_color\"):\n\t\t\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", backscatteringWeight)\n\t\t\t\t\telse:\n\t\t\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\t\t\tsetProperty(arithmetic, \"inputAX\", backscatteringWeight)\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"opacity_color\")\n\t\t\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\n\t\t\t\telse:\n\t\t\t\t\tif mapDoesNotExist(rsMaterial, \"opacity_color\"):\n\t\t\t\t\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.5 * getProperty(rsMaterial, \"transl_weight\"))\n\t\t\t\t\telse:\n\t\t\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputAX\", \"transl_weight\")\n\t\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"opacity_color\")\n\t\t\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 2)\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputAX\", \"transl_weight\")\n\t\t\t\tif mapDoesNotExist(rsMaterial, \"opacity_color\"):\n\t\t\t\t\tsetProperty(arithmetic, \"inputB\", (0.5, 0.5, 0.5))\n\t\t\t\telse:\n\t\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"opacity_color\")\n\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"backscatteringWeight\")\n\n\t\t\tif mapDoesNotExist(rsMaterial, \"transl_color\"):\n\t\t\t\ttransl_color = getProperty(rsMaterial, \"transl_color\")\n\t\t\t\tarithmetic1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic1, \"operation\", 0)\n\t\t\t\tsetProperty(arithmetic1, \"inputA\", transl_color)\n\t\t\t\tremap_color = []\n\t\t\t\tfor i in range(len(transl_color)):\n\t\t\t\t\tremap_color.append(remap_value(transl_color[i], 1.0, 0.0, 0.0, 0.7))\n\t\t\t\tsetProperty(arithmetic1, \"inputB\", tuple(remap_color))\n\n\t\t\t\tarithmetic2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic2, \"operation\", 2)\n\t\t\t\tsetProperty(arithmetic2, \"inputA\", transl_color)\n\t\t\t\tsetProperty(arithmetic2, \"inputB\", (2.2, 2.2, 2.2))\n\n\t\t\t\tarithmetic3 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic3, \"operation\", 2)\n\t\t\t\tconnectProperty(arithmetic1, \"out\", arithmetic3, \"inputA\")\n\t\t\t\tconnectProperty(arithmetic2, \"out\", arithmetic3, \"inputB\")\n\n\t\t\t\tconnectProperty(arithmetic3, \"out\", rprMaterial, \"backscatteringColor\")\n\t\t\telse:\n\t\t\t\tarithmetic1 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic1, \"operation\", 0)\n\t\t\t\tcopyProperty(arithmetic1, rsMaterial, \"inputA\", \"transl_color\")\n\t\t\t\tcopyProperty(arithmetic1, rprMaterial, \"inputBX\", \"backscatteringWeight\")\n\t\t\t\tcopyProperty(arithmetic1, rprMaterial, \"inputBY\", \"backscatteringWeight\")\n\t\t\t\tcopyProperty(arithmetic1, rprMaterial, \"inputBZ\", \"backscatteringWeight\")\n\n\t\t\t\tarithmetic2 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic2, \"operation\", 2)\n\t\t\t\tcopyProperty(arithmetic2, rsMaterial, \"inputA\", \"transl_color\")\n\t\t\t\tsetProperty(arithmetic2, \"inputB\", (1.5, 1.5, 1.5))\n\n\t\t\t\tarithmetic3 = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic3, \"operation\", 2)\n\t\t\t\tconnectProperty(arithmetic1, \"out\", arithmetic3, \"inputA\")\n\t\t\t\tconnectProperty(arithmetic2, \"out\", arithmetic3, \"inputB\")\n\n\t\t\t\tconnectProperty(arithmetic3, \"out\", rprMaterial, \"backscatteringColor\")\n\n\t\tif getProperty(rsMaterial, \"opacity_color\") != (1, 1, 1):\n\t\t\tif mapDoesNotExist(rsMaterial, \"opacity_color\"):\n\t\t\t\ttransparency = 1 - max(getProperty(rsMaterial, \"opacity_color\"))\n\t\t\t\tsetProperty(rprMaterial, \"transparencyLevel\", transparency)\n\t\t\telse:\n\t\t\t\tarithmetic = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\t\tsetProperty(arithmetic, \"operation\", 1)\n\t\t\t\tsetProperty(arithmetic, \"inputA\", (1, 1, 1))\n\t\t\t\tcopyProperty(arithmetic, rsMaterial, \"inputB\", \"opacity_color\")\n\t\t\t\tconnectProperty(arithmetic, \"outX\", rprMaterial, \"transparencyLevel\")\n\t\t\tsetProperty(rprMaterial, \"transparencyEnable\", 1)\n\n\t\t# duct tape\n\t\tif source != \"bump_blender\":\n\t\t\tbumpConnections = cmds.listConnections(rsMaterial + \".bump_input\")\n\t\t\tif bumpConnections:\n\t\t\t\tsetProperty(rprMaterial, \"normalMapEnable\", 1)\n\t\t\t\tcopyProperty(rprMaterial, rsMaterial, \"normalMap\", \"bump_input\")\n\t\t\t\tsetProperty(rprMaterial, \"useShaderNormal\", 1)\n\t\t\t\tsetProperty(rprMaterial, \"reflectUseShaderNormal\", 1)\n\t\t\t\tsetProperty(rprMaterial, \"refractUseShaderNormal\", 1)\n\t\t\t\tsetProperty(rprMaterial, \"coatUseShaderNormal\", 1)\n\t\t\n\t\t# Logging to file\n\t\tend_log(rsMaterial)\n\n\tif source and source not in (\"bump_blender\", \"displacement_copy\"):\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n##########################\n## RedshiftMaterialBlender \n##########################\n\ndef convertRedshiftMaterialBlender(rsMaterial, source): \n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRBlendMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial) \n\n\t\t# Fields conversion\n\t\tcopyProperty(rprMaterial, rsMaterial, \"color0\", \"baseColor\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"color1\", \"layerColor1\")\n\n\t\t# weight conversion\n\t\tweight = cmds.listConnections(rsMaterial + \".blendColor1\")\n\t\tif weight:\n\t\t\tconnectProperty(weight[0], \"outAlpha\", rprMaterial, \"weight\")\n\n\t\t# Logging to file\n\t\tend_log(rsMaterial) \n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n#######################\n## RedshiftSkin\n#######################\n\ndef convertRedshiftSkin(rsMaterial, source):\n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Enable properties, which are default in Redshift\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"reflections\", \"refl_weight0\")\n\t\tdefaultEnable(rprMaterial, rsMaterial, \"clearCoat\", \"refl_weight1\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial)\n\n\t\t# Fields conversion\n\t\tsetProperty(rprMaterial, \"diffuseWeight\", 1)\n\t\tsetProperty(rprMaterial, \"diffuseRoughness\", 0)\n\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", True)\n\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.4)\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectColor\", \"refl_color0\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectWeight\", \"refl_weight0\")\n\t\tsetProperty(rprMaterial, \"reflectRoughness\", (1 - getProperty(rsMaterial, \"refl_gloss0\")))\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectIOR\", \"refl_ior0\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatColor\", \"refl_color1\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatWeight\", \"refl_weight1\")\n\t\tsetProperty(rprMaterial, \"coatRoughness\", (1 - getProperty(rsMaterial, \"refl_gloss1\")))\n\t\tcopyProperty(rprMaterial, rsMaterial, \"coatIor\", \"refl_ior1\")\n\n\t\t# shallow radius * radius scale\n\t\tshallow_raduis_x_radius_scale = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tshallow_raduis_x_radius_scale = cmds.rename(shallow_raduis_x_radius_scale, \"shallow_raduis_x_radius_scale\")\n\t\tsetProperty(shallow_raduis_x_radius_scale, \"operation\", 2)\n\t\tcopyProperty(shallow_raduis_x_radius_scale, rsMaterial, \"inputAX\", \"shallow_radius\")\n\t\tcopyProperty(shallow_raduis_x_radius_scale, rsMaterial, \"inputBX\", \"radius_scale\")\n\n\t\t# shallow radius * weight\n\t\tshallow_raduis_x_weight = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tshallow_raduis_x_weight = cmds.rename(shallow_raduis_x_weight, \"shallow_raduis_x_weight\")\n\t\tsetProperty(shallow_raduis_x_weight, \"operation\", 2)\n\t\tcopyProperty(shallow_raduis_x_weight, rsMaterial, \"inputAX\", \"shallow_weight\")\n\t\tconnectProperty(shallow_raduis_x_radius_scale, \"out\", shallow_raduis_x_weight, \"inputB\")\n\n\t\t# Mult by OverallScale\n\t\tmult_by_overall_scale = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_overall_scale = cmds.rename(mult_by_overall_scale, \"mult_by_overall_scale\")\n\t\tsetProperty(mult_by_overall_scale, \"operation\", 2)\n\t\tcopyProperty(mult_by_overall_scale, rsMaterial, \"inputAX\", \"overall_scale\")\n\t\tconnectProperty(shallow_raduis_x_weight, \"out\", mult_by_overall_scale, \"inputB\")\n\n\t\t# Mult by Shallow Color\n\t\tmult_by_shallow_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_shallow_color = cmds.rename(mult_by_shallow_color, \"mult_by_shallow_color\")\n\t\tsetProperty(mult_by_shallow_color, \"operation\", 2)\n\t\tif mapDoesNotExist(rsMaterial, \"shallow_color\"):\n\t\t\tcopyProperty(mult_by_shallow_color, rsMaterial, \"inputA\", \"shallow_color\")\n\t\telse:\n\t\t\tshallow_color_map = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(shallow_color_map, \"operation\", 15)\n\t\t\tcopyProperty(shallow_color_map, rsMaterial, \"inputA\", \"shallow_color\")\n\t\t\tsetProperty(shallow_color_map, \"inputB\", (2.2, 2.2, 2.2))\n\t\t\tconnectProperty(shallow_color_map, \"out\", mult_by_shallow_color, \"inputA\")\n\t\tconnectProperty(mult_by_overall_scale, \"out\", mult_by_shallow_color, \"inputB\")\n\n\t\t# middle radius * radius scale\n\t\tmid_raduis_x_radius_scale = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmid_raduis_x_radius_scale = cmds.rename(mid_raduis_x_radius_scale, \"mid_raduis_x_radius_scale\")\n\t\tsetProperty(mid_raduis_x_radius_scale, \"operation\", 2)\n\t\tcopyProperty(mid_raduis_x_radius_scale, rsMaterial, \"inputAX\", \"mid_radius\")\n\t\tcopyProperty(mid_raduis_x_radius_scale, rsMaterial, \"inputBX\", \"radius_scale\")\n\n\t\t# middle radius * weight\n\t\tmid_raduis_x_weight = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmid_raduis_x_weight = cmds.rename(mid_raduis_x_weight, \"mid_raduis_x_weight\")\n\t\tsetProperty(mid_raduis_x_weight, \"operation\", 2)\n\t\tcopyProperty(mid_raduis_x_weight, rsMaterial, \"inputAX\", \"mid_weight\")\n\t\tconnectProperty(mid_raduis_x_radius_scale, \"out\", mid_raduis_x_weight, \"inputB\")\n\n\t\t# Mult by OverallScaleMiddle\n\t\tmult_by_overall_scale_middle = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_overall_scale_middle = cmds.rename(mult_by_overall_scale_middle, \"mult_by_overall_scale_middle\")\n\t\tsetProperty(mult_by_overall_scale_middle, \"operation\", 2)\n\t\tcopyProperty(mult_by_overall_scale_middle, rsMaterial, \"inputAX\", \"overall_scale\")\n\t\tconnectProperty(mid_raduis_x_weight, \"out\", mult_by_overall_scale_middle, \"inputB\")\n\n\t\t# Mult by Middle Color\n\t\tmult_by_middle_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_middle_color = cmds.rename(mult_by_middle_color, \"mult_by_middle_color\")\n\t\tsetProperty(mult_by_middle_color, \"operation\", 2)\n\t\tif mapDoesNotExist(rsMaterial, \"mid_color\"):\n\t\t\tcopyProperty(mult_by_middle_color, rsMaterial, \"inputA\", \"mid_color\")\n\t\telse:\n\t\t\tmid_color_map = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(mid_color_map, \"operation\", 15)\n\t\t\tcopyProperty(mid_color_map, rsMaterial, \"inputA\", \"mid_color\")\n\t\t\tsetProperty(mid_color_map, \"inputB\", (2.2, 2.2, 2.2))\n\t\t\tconnectProperty(mid_color_map, \"out\", mult_by_middle_color, \"inputA\")\n\t\tconnectProperty(mult_by_overall_scale_middle, \"out\", mult_by_middle_color, \"inputB\")\n\n\t\t# Mix ShallowBiasedColor and MiddleBiasedColor\n\t\tshallow_biased_color_mix_middle_biased_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tshallow_biased_color_mix_middle_biased_color = cmds.rename(shallow_biased_color_mix_middle_biased_color, \"shallow_biased_color_mix_middle_biased_color\")\n\t\tsetProperty(shallow_biased_color_mix_middle_biased_color, \"operation\", 20)\n\t\tconnectProperty(mult_by_shallow_color, \"out\", shallow_biased_color_mix_middle_biased_color, \"inputA\")\n\t\tconnectProperty(mult_by_middle_color, \"out\", shallow_biased_color_mix_middle_biased_color, \"inputB\")\n\n\t\t# deep radius * radius scale\n\t\tdeep_raduis_x_radius_scale = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tdeep_raduis_x_radius_scale = cmds.rename(deep_raduis_x_radius_scale, \"deep_raduis_x_radius_scale\")\n\t\tsetProperty(deep_raduis_x_radius_scale, \"operation\", 2)\n\t\tcopyProperty(deep_raduis_x_radius_scale, rsMaterial, \"inputAX\", \"deep_radius\")\n\t\tcopyProperty(deep_raduis_x_radius_scale, rsMaterial, \"inputBX\", \"radius_scale\")\n\n\t\t# deep radius * weight\n\t\tdeep_raduis_x_weight = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tdeep_raduis_x_weight = cmds.rename(deep_raduis_x_weight, \"deep_raduis_x_weight\")\n\t\tsetProperty(deep_raduis_x_weight, \"operation\", 2)\n\t\tcopyProperty(deep_raduis_x_weight, rsMaterial, \"inputAX\", \"deep_weight\")\n\t\tconnectProperty(deep_raduis_x_radius_scale, \"out\", deep_raduis_x_weight, \"inputB\")\n\n\t\t# Mult by OverallScaleDeep\n\t\tmult_by_overall_scale_deep = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_overall_scale_deep = cmds.rename(mult_by_overall_scale_deep, \"mult_by_overall_scale_deep\")\n\t\tsetProperty(mult_by_overall_scale_deep, \"operation\", 2)\n\t\tcopyProperty(mult_by_overall_scale_deep, rsMaterial, \"inputAX\", \"overall_scale\")\n\t\tconnectProperty(deep_raduis_x_weight, \"out\", mult_by_overall_scale_deep, \"inputB\")\n\n\t\t# Mult by Deep Color\n\t\tmult_by_deep_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmult_by_deep_color = cmds.rename(mult_by_deep_color, \"mult_by_deep_color\")\n\t\tsetProperty(mult_by_deep_color, \"operation\", 20)\n\t\tif mapDoesNotExist(rsMaterial, \"deep_color\"):\n\t\t\tcopyProperty(mult_by_deep_color, rsMaterial, \"inputA\", \"deep_color\")\n\t\telse:\n\t\t\tdeep_color_map = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\t\tsetProperty(deep_color_map, \"operation\", 15)\n\t\t\tcopyProperty(deep_color_map, rsMaterial, \"inputA\", \"deep_color\")\n\t\t\tsetProperty(deep_color_map, \"inputB\", (2.2, 2.2, 2.2))\n\t\t\tconnectProperty(deep_color_map, \"out\", mult_by_deep_color, \"inputA\")\n\t\tconnectProperty(mult_by_overall_scale_deep, \"out\", mult_by_deep_color, \"inputB\")\n\n\t\t# Mix DeepBiasColor\n\t\tmix_deep_bias_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmix_deep_bias_color = cmds.rename(mix_deep_bias_color, \"mix_deep_bias_color\")\n\t\tsetProperty(mix_deep_bias_color, \"operation\", 20)\n\t\tconnectProperty(shallow_biased_color_mix_middle_biased_color, \"out\", mix_deep_bias_color, \"inputA\")\n\t\tconnectProperty(mult_by_deep_color, \"out\", mix_deep_bias_color, \"inputB\")\n\n\t\t# SSS radius result\n\t\tconnectProperty(mix_deep_bias_color, \"out\", rprMaterial, \"subsurfaceRadius\")\n\n\t\t# Mix ShallowColor and MiddleColor\n\t\tshallow_color_mix_middle_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tshallow_color_mix_middle_color = cmds.rename(shallow_color_mix_middle_color, \"shallow_color_mix_middle_color\")\n\t\tsetProperty(shallow_color_mix_middle_color, \"operation\", 20)\n\t\tif mapDoesNotExist(rsMaterial, \"shallow_color\"):\n\t\t\tcopyProperty(shallow_color_mix_middle_color, rsMaterial, \"inputA\", \"shallow_color\")\n\t\telse:\n\t\t\tconnectProperty(shallow_color_map, \"out\", shallow_color_mix_middle_color, \"inputA\")\n\t\tif mapDoesNotExist(rsMaterial, \"mid_color\"):\n\t\t\tcopyProperty(shallow_color_mix_middle_color, rsMaterial, \"inputB\", \"mid_color\")\n\t\telse:\n\t\t\tconnectProperty(mid_color_map, \"out\", shallow_color_mix_middle_color, \"inputB\")\n\n\t\t# Mix DeepColor\n\t\tmix_deep_color = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tmix_deep_color = cmds.rename(mix_deep_color, \"mix_deep_color\")\n\t\tsetProperty(mix_deep_color, \"operation\", 20)\n\t\tconnectProperty(shallow_color_mix_middle_color, \"out\", mix_deep_color, \"inputA\")\n\t\tif mapDoesNotExist(rsMaterial, \"deep_color\"):\n\t\t\tcopyProperty(mix_deep_color, rsMaterial, \"inputB\", \"deep_color\")\n\t\telse:\n\t\t\tconnectProperty(deep_color_map, \"out\", mix_deep_color, \"inputB\")\n\t\t\n\n\t\t# volume scatter\n\t\tconnectProperty(mix_deep_color, \"out\", rprMaterial, \"volumeScatter\")\n\n\t\t# Color Correction\n\t\tcolor_correction = cmds.shadingNode(\"RPRArithmetic\", asUtility=True)\n\t\tcolor_correction = cmds.rename(color_correction, \"color_correction\")\n\t\tsetProperty(color_correction, \"operation\", 2)\n\t\tconnectProperty(mix_deep_color, \"out\", color_correction, \"inputA\")\n\t\tsetProperty(color_correction, \"inputB\", (1.3, 1.3, 1.3))\n\n\t\t# backscattering color & diffuse color\n\t\tconnectProperty(color_correction, \"out\", rprMaterial, \"diffuseColor\")\n\t\tconnectProperty(color_correction, \"out\", rprMaterial, \"backscatteringColor\")\n\n\t\t# Logging in file\n\t\tend_log(rsMaterial)\n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n#############################\n## RedshiftMatteShadowCatcher \n#############################\n\ndef convertRedshiftMatteShadowCatcher(rsMaterial, source): \n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRMatteMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial) \n\n\t\t# Fields conversion\n\t\tcopyProperty(rprMaterial, rsMaterial, \"bgIsEnv\", \"backgroundIsEnv\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"shadowTransp\", \"transparency\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"bgColor\", \"background\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"shadowColor\", \"shadows\")\n\t\t\n\t\t# Logging to file\n\t\tend_log(rsMaterial) \n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\n############################\n## RedshiftSubSurfaceScatter \n############################ \n\ndef convertRedshiftSubSurfaceScatter(rsMaterial, source): \n\n\tassigned = checkAssign(rsMaterial)\n\t\n\tif cmds.objExists(rsMaterial + \"_rpr\"):\n\t\trprMaterial = rsMaterial + \"_rpr\"\n\telse:\n\t\t# Creating new Uber material\n\t\trprMaterial = cmds.shadingNode(\"RPRUberMaterial\", asShader=True)\n\t\trprMaterial = cmds.rename(rprMaterial, rsMaterial + \"_rpr\")\n\n\t\t# Check shading engine in rsMaterial\n\t\tif assigned:\n\t\t\tsg = rprMaterial + \"SG\"\n\t\t\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\t\t\tconnectProperty(rprMaterial, \"outColor\", sg, \"surfaceShader\")\n\t\t\t \n\t\t# Enable properties, which are default in RedShift\n\t\tif getProperty(rsMaterial, \"scatter_radius\") >= 0.01:\n\t\t\tsetProperty(rprMaterial, \"sssEnable\", 1)\n\n\t\tscatter_color = getProperty(rsMaterial, \"scatter_color\")\n\t\tif sum(scatter_color) / len(scatter_color) >= 0.01:\n\t\t\tsetProperty(rprMaterial, \"separateBackscatterColor\", 1)\n\n\t\tsetProperty(rprMaterial, \"reflections\", 1)\n\t\t\t\n\t\t# Logging to file\n\t\tstart_log(rsMaterial, rprMaterial) \n\n\t\t# Fields conversion\n\t\tsetProperty(rprMaterial, \"diffuseWeight\", 1)\n\t\tsetProperty(rprMaterial, \"diffuseRoughness\", 0)\n\t\tsetProperty(rprMaterial, \"backscatteringWeight\", 0.5)\n\t\tcopyProperty(rprMaterial, rsMaterial, \"reflectIOR\", \"ior\")\n\t\tcopyProperty(rprMaterial, rsMaterial, \"volumeScatter\", \"sub_surface_color\")\n\n\t\tsub_surface_color = getProperty(rsMaterial, \"sub_surface_color\")\n\t\tdiffuseColor = (clampValue(sub_surface_color[0] * 1.59, 0, 1), clampValue(sub_surface_color[1] * 1.59, 0, 1), clampValue(sub_surface_color[2] * 1.59, 0, 1))\n\t\tsetProperty(rprMaterial, \"diffuseColor\", diffuseColor)\n\t\t\n\t\tif sum(sub_surface_color) / len(sub_surface_color) < 0.255:\n\t\t\tsetProperty(rprMaterial, \"backscatteringColor\", (sub_surface_color[0] * 3.5, sub_surface_color[1] * 3.5, sub_surface_color[2] * 3.5))\n\t\telse:\n\t\t\tsetProperty(rprMaterial, \"backscatteringColor\", (sub_surface_color[0] * 1.59, sub_surface_color[1] * 1.59, sub_surface_color[2] * 1.59))\n\n\t\tif mapDoesNotExist(rsMaterial, \"scatter_color\"): \n\t\t\tradius = getProperty(rsMaterial, \"scatter_radius\")\n\t\t\tscatterColor= getProperty(rsMaterial, \"scatter_color\")\n\t\t\tsssRadius = [radius + scatterColor[0] * 1.5, radius + scatterColor[1], radius + scatterColor[2]]\n\t\t\tsetProperty(rprMaterial, \"subsurfaceRadius\", tuple(sssRadius))\n\n\t\tinvertValue(rprMaterial, rsMaterial, \"reflectRoughness\", \"refl_gloss\")\n\t\t \n\t\t# Logging to file\n\t\tend_log(rsMaterial) \n\n\tif source:\n\t\trprMaterial += \".\" + source\n\treturn rprMaterial\n\n\ndef convertRedshiftPhysicalSky(rsSky):\n\t\n\t# create RPRSky node\n\tskyNode = cmds.createNode(\"RPRSky\", n=\"RPRSkyShape\")\n \n\t# Logging to file\n\tstart_log(rsSky, skyNode)\n\n\t# Copy properties from rsPhysicalSky\n\tsetProperty(skyNode, \"intensity\", getProperty(rsSky, \"multiplier\") * 2)\n\tcopyProperty(skyNode, rsSky, \"turbidity\", \"haze\")\n\tcopyProperty(skyNode, rsSky, \"groundColor\", \"ground_color\")\n\tcopyProperty(skyNode, rsSky, \"filterColor\", \"night_color\")\n\tcopyProperty(skyNode, rsSky, \"sunDiskSize\", \"sun_disk_scale\")\n\tcopyProperty(skyNode, rsSky, \"sunGlow\", \"sun_glow_intensity\")\n\n\t# Logging to file\n\tend_log(rsSky) \n\n\ndef convertRedshiftPhysicalSun(rsSun):\n\n\tsunTransfrom = cmds.listRelatives(rsSun, p=True)[0]\n\tdirectionalLight = cmds.createNode(\"RPRPhysicalLight\", n=\"RPRPhysicalLightShape\")\n\tdirectionalLightTransform = cmds.listRelatives(directionalLight, p=True)[0]\n\n\t# Logging to file\n\tstart_log(rsSun, directionalLight)\n\n\tcopyProperty(directionalLightTransform, sunTransfrom, \"translate\", \"translate\")\n\tcopyProperty(directionalLightTransform, sunTransfrom, \"rotate\", \"rotate\")\n\tcopyProperty(directionalLightTransform, sunTransfrom, \"scale\", \"scale\")\n\n\tskyNode = cmds.listConnections(rsSun, type=\"RedshiftPhysicalSky\")[0]\n\tsetProperty(directionalLight, \"lightType\", 3)\n\tsetProperty(directionalLight, \"intensityUnits\", 3)\n\tsetProperty(directionalLight, \"intensity\", getProperty(skyNode, \"multiplier\") * 400)\n\tsetProperty(directionalLight, \"color\", (1, 1, 1))\n\n\tallUberMaterials = cmds.ls(type=\"RPRUberMaterial\")\n\tfor uber in allUberMaterials:\n\t\tif getProperty(uber, \"refraction\"):\n\t\t\tsetProperty(uber, \"refractAllowCaustics\", True)\n\n\t# Logging to file\n\tend_log(rsSun) \n\n\ndef convertRedshiftEnvironment(env):\n\n\tif cmds.objExists(\"RPRIBL\"):\n\t\tiblShape = \"RPRIBLShape\"\n\t\tiblTransform = \"RPRIBL\"\n\telse:\n\t\t# create IBL node\n\t\tiblShape = cmds.createNode(\"RPRIBL\", n=\"RPRIBLShape\")\n\t\tiblTransform = cmds.listRelatives(iblShape, p=True)[0]\n\t\tsetProperty(iblTransform, \"scale\", (1001.25663706144, 1001.25663706144, 1001.25663706144))\n\n\t# Logging to file \n\tstart_log(env, iblShape)\n \n\t# Copy properties from rsEnvironment\n\texposure = getProperty(env, \"exposure0\")\n\tsetProperty(iblShape, \"intensity\", 1 * 2 ** exposure)\n\n\tcopyProperty(iblShape, env, \"display\", \"backPlateEnabled\")\n\n\ttexMode = getProperty(env, \"texMode\")\n\tif texMode == 0: # default\n\t\tcopyProperty(iblShape, env, \"filePath\", \"tex0\")\n\n\tenvTransform = cmds.listConnections(env, type=\"place3dTexture\")[0]\n\tcopyProperty(iblTransform, envTransform, \"rotate\", \"rotate\")\n\n\t# Logging to file\n\tend_log(env) \n\n\ndef convertRedshiftDomeLight(dome_light):\n\n\tif cmds.objExists(\"RPRIBL\"):\n\t\tiblShape = \"RPRIBLShape\"\n\t\tiblTransform = \"RPRIBL\"\n\telse:\n\t\t# create IBL node\n\t\tiblShape = cmds.createNode(\"RPRIBL\", n=\"RPRIBLShape\")\n\t\tiblTransform = cmds.listRelatives(iblShape, p=True)[0]\n\t\tsetProperty(iblTransform, \"scale\", (1001.25663706144, 1001.25663706144, 1001.25663706144))\n\n\t# Logging to file \n\tstart_log(dome_light, iblShape)\n\n\t# display IBL option\n\texposure = getProperty(dome_light, \"exposure0\")\n\tsetProperty(iblShape, \"intensity\", 1 * 2 ** exposure)\n\n\tcopyProperty(iblShape, dome_light, \"display\", \"background_enable\")\n\tcopyProperty(iblShape, dome_light, \"filePath\", \"tex0\")\n\t\n\tdomeTransform = cmds.listRelatives(dome_light, p=True)[0]\n\trotateY = getProperty(domeTransform, \"rotateY\") - 90\n\tsetProperty(iblTransform, \"rotateY\", rotateY)\n\n\t# back plane\n\tif getProperty(dome_light, \"backPlateEnabled\"):\n\t\timgPlane = cmds.imagePlane()\n\t\tcopyProperty(imgPlane, dome_light, \"imageName\", \"tex1\")\n\t\tcameras = cmds.ls(type=\"camera\")\n\t\tfor cam in cameras:\n\t\t\tconnectProperty(imgPlane[1], \"message\", cam, \"imagePlane[0]\")\n\n\t# Logging to file\n\tend_log(dome_light) \n\n\ndef convertRedshiftPhysicalLight(rs_light):\n\n\t# Redshift light transform\n\tsplited_name = rs_light.split(\"|\")\n\trsTransform = \"|\".join(splited_name[0:-1])\n\tgroup = \"|\".join(splited_name[0:-2])\n\n\tif cmds.objExists(rsTransform + \"_rpr\"):\n\t\trprTransform = rsTransform + \"_rpr\"\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\telse: \n\t\trprLightShape = cmds.createNode(\"RPRPhysicalLight\", n=\"RPRPhysicalLightShape\")\n\t\trprLightShape = cmds.rename(rprLightShape, splited_name[-1] + \"_rpr\")\n\t\trprTransform = cmds.listRelatives(rprLightShape, p=True)[0]\n\t\trprTransform = cmds.rename(rprTransform, splited_name[-2] + \"_rpr\")\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\n\t\tif group:\n\t\t\tcmds.parent(rprTransform, group)\n\n\t\trprTransform = group + \"|\" + rprTransform\n\t\trprLightShape = rprTransform + \"|\" + rprLightShape\n\t\t\n\t# Logging to file \n\tstart_log(rs_light, rprLightShape)\n\n\t# Copy properties from rsLight\n\tcopyProperty(rprTransform, rsTransform, \"translate\", \"translate\")\n\tcopyProperty(rprTransform, rsTransform, \"rotate\", \"rotate\")\n\tcopyProperty(rprTransform, rsTransform, \"scale\", \"scale\")\n\n\tlightType = getProperty(rs_light, \"lightType\")\n\tlight_type_map = {\n\t\t0:0, # area\n\t\t1:2, # point\n\t\t2:1, # spot\n\t\t3:3 # directional\n\t}\n\tsetProperty(rprLightShape, \"lightType\", light_type_map[lightType])\n\t\n\tareaShape = getProperty(rs_light, \"areaShape\")\n\tif lightType == 0: #area\n\t\tarea_shape_map = {\n\t\t\t0:3, # rectangle\n\t\t\t1:0, # disc\n\t\t\t2:2, # sphere\n\t\t\t3:1, # cylinder\n\t\t\t4:4 # mesh \n\t\t}\n\t\tsetProperty(rprLightShape, \"areaLightShape\", area_shape_map[areaShape])\n\n\tintensity = getProperty(rs_light, \"intensity\")\n\texposure = getProperty(rs_light, \"exposure\")\n\tunitsType = getProperty(rs_light, \"unitsType\")\n\tif unitsType == 0: # image \n\t\tscale_multiplier = getProperty(rsTransform, \"scaleX\") * getProperty(rsTransform, \"scaleY\")\n\t\tif lightType == 0: # area #image -> lumen\n\t\t\tif areaShape in (0, 1): # rectangle or disk\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 2500 * scale_multiplier)\n\t\t\telif areaShape == 2: # sphere\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 1000 * scale_multiplier)\n\t\t\telif areaShape == 3: # cylinder\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleX\", \"scaleZ\")\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleZ\", \"scaleX\")\n\t\t\t\tsetProperty(rprTransform, \"rotateY\", getProperty(rsTransform, \"rotateY\") + 90)\n\t\t\t\tsetProperty(rprTransform, \"rotateX\", 0)\n\t\t\t\tscale_multiplier = getProperty(rsTransform, \"scaleX\") * getProperty(rsTransform, \"scaleY\")\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 335 * scale_multiplier)\n\t\t\telif areaShape == 4: # mesh\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 1000 * scale_multiplier)\n\t\telif lightType == 1: # point #image -> lumen\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\tsetProperty(rprLightShape, \"intensity\", (intensity * 2 ** exposure) / (2500 * (1 + intensity * 2 ** exposure / 10000)))\n\t\telif lightType == 2: # spot #image -> lumen\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\tsetProperty(rprLightShape, \"intensity\", (intensity * 2 ** exposure) / (3000 * (1 + intensity * 2 ** exposure / 10000)))\n\t\telif lightType == 3: # directional #image -> luminance\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 3.3333)\n\telif unitsType == 1: # luminous \n\t\tif lightType == 0: #area \n\t\t\tif areaShape in (0, 1, 2): # rectangle disk sphere\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 30000)\n\t\t\telif areaShape == 3: # cylinder\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleX\", \"scaleZ\")\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleZ\", \"scaleX\")\n\t\t\t\tsetProperty(rprTransform, \"rotateY\", getProperty(rsTransform, \"rotateY\") + 90)\n\t\t\t\tsetProperty(rprTransform, \"rotateX\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 15000)\n\t\t\telif areaShape == 4: # mesh\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 15000)\n\t\telif lightType in (1, 2): # point and spot #luminous -> lumen\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 10000)\n\t\telif lightType == 3: # directional #luminous -> luminance\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure)\n\telif unitsType == 2: # luminance -> luminance\n\t\tif lightType == 0: # area \n\t\t\tif areaShape == 0: # rectangle \n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 6666.66)\n\t\t\telif areaShape in (1, 2): # disk sphere\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 8333.33)\n\t\t\telif areaShape == 3: # cylinder\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleX\", \"scaleZ\")\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleZ\", \"scaleX\")\n\t\t\t\tsetProperty(rprTransform, \"rotateY\", getProperty(rsTransform, \"rotateY\") + 90)\n\t\t\t\tsetProperty(rprTransform, \"rotateX\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 5000)\n\t\t\telif areaShape == 4: # mesh\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 5000)\n\t\telif lightType == 1: # point #luminous -> lumen\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 30000000)\n\t\telif lightType == 2: # spot #luminous -> lumen\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 0)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 10000)\n\t\telif lightType == 3: # directional\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 3000)\n\telif unitsType == 3: # radiant power -> watts\n\t\tif lightType == 0: # area \n\t\t\tif areaShape in (0, 1, 2): # rectangle disk sphere\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 2)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 45)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\t\telif areaShape == 3: # cylinder\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleX\", \"scaleZ\")\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleZ\", \"scaleX\")\n\t\t\t\tsetProperty(rprTransform, \"rotateY\", getProperty(rsTransform, \"rotateY\") + 90)\n\t\t\t\tsetProperty(rprTransform, \"rotateX\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 2)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 20)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\t\telif areaShape == 4: # mesh\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 2)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 20)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\telif lightType in (1, 2): # point and spot # radiant power -> watts\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 2)\n\t\t\tsetProperty(rprLightShape, \"intensity\", (intensity * 2 ** exposure) / (15 * (0.92 + intensity * 2 ** exposure / 10000)))\n\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\telif lightType == 3: # directional #radiant power -> luminance\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure * 20)\n\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\telif unitsType == 4: # radiance - > radiance\n\t\tif lightType == 0: #area \n\t\t\tif areaShape == 0: # rectangle\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 3)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 10)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\t\telif areaShape in (1, 2): # disk sphere\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 3)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 12.5)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\t\telif areaShape == 3: # cylinder\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleX\", \"scaleZ\")\n\t\t\t\tcopyProperty(rprTransform, rsTransform, \"scaleZ\", \"scaleX\")\n\t\t\t\tsetProperty(rprTransform, \"rotateY\", getProperty(rsTransform, \"rotateY\") + 90)\n\t\t\t\tsetProperty(rprTransform, \"rotateX\", 0)\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 3)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 9)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\t\telif areaShape == 4: # mesh\n\t\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 3)\n\t\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 9)\n\t\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\n\t\telif lightType in (1, 2): #point and spot #radiance - > watts\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 2)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 44444.44444)\n\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\t\n\t\telif lightType == 3: #directional #radiance - > radiance\n\t\t\tsetProperty(rprLightShape, \"intensityUnits\", 3)\n\t\t\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure / 5)\n\t\t\tcopyProperty(rprLightShape, rs_light, \"luminousEfficacy\", \"lumensperwatt\")\t\n\n\tif lightType == 0:\n\t\tcopyProperty(rprLightShape, rs_light, \"areaLightVisible\", \"areaVisibleInRender\")\n\telif lightType == 2:\n\t\tangle = getProperty(rs_light, \"spotConeAngle\")\n\t\tfalloffAngle = getProperty(rs_light, \"spotConeFalloffAngle\")\n\t\tfalloffCurve = getProperty(rs_light, \"spotConeFalloffCurve\")\n\n\t\tif falloffAngle*falloffCurve < falloffAngle*math.cos(falloffAngle):\n\t\t\tsetProperty(rprLightShape, \"spotLightOuterConeFalloff\", angle + falloffAngle*falloffCurve)\n\t\t\tsetProperty(rprLightShape, \"spotLightInnerConeAngle\", angle - falloffAngle*falloffCurve)\n\t\telif falloffAngle*falloffCurve < 2*angle:\n\t\t\touterConeFalloff = angle + falloffAngle*math.cos(falloffAngle)\n\t\t\tsetProperty(rprLightShape, \"spotLightOuterConeFalloff\", outerConeFalloff)\n\t\t\tinnerConeAngle = angle - falloffAngle*falloffCurve\n\t\t\tif innerConeAngle < 0:\n\t\t\t\tinnerConeAngle = 0\n\t\t\telif innerConeAngle > outerConeFalloff:\n\t\t\t\tinnerConeAngle = outerConeFalloff\n\t\t\tsetProperty(rprLightShape, \"spotLightInnerConeAngle\", innerConeAngle)\n\t\telse:\n\t\t\touterConeFalloff = angle + falloffAngle*math.cos(falloffAngle)\n\t\t\tsetProperty(rprLightShape, \"spotLightOuterConeFalloff\", outerConeFalloff)\n\t\t\tsetProperty(rprLightShape, \"spotLightInnerConeAngle\", outerConeFalloff / 2)\n\n\tcopyProperty(rprLightShape, rs_light, \"color\", \"color\")\n\tcopyProperty(rprLightShape, rs_light, \"temperature\", \"temperature\")\n\n\tcolor_mode = getProperty(rs_light, \"colorMode\")\n\tif color_mode in (0, 2):\n\t\tsetProperty(rprLightShape, \"colorMode\", 0)\n\telse:\n\t\tsetProperty(rprLightShape, \"colorMode\", 1)\n\n\t# Logging to file\n\tend_log(rs_light) \n\n\ndef convertRedshiftPortalLight(rs_light):\n\n\t# Redshift light transform\n\tsplited_name = rs_light.split(\"|\")\n\trsTransform = \"|\".join(splited_name[0:-1])\n\tgroup = \"|\".join(splited_name[0:-2])\n\n\tif cmds.objExists(rsTransform + \"_rpr\"):\n\t\trprTransform = rsTransform + \"_rpr\"\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\telse: \n\t\trprLightShape = cmds.createNode(\"RPRPhysicalLight\", n=\"RPRPhysicalLightShape\")\n\t\trprLightShape = cmds.rename(rprLightShape, splited_name[-1] + \"_rpr\")\n\t\trprTransform = cmds.listRelatives(rprLightShape, p=True)[0]\n\t\trprTransform = cmds.rename(rprTransform, splited_name[-2] + \"_rpr\")\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\n\t\tif group:\n\t\t\tcmds.parent(rprTransform, group)\n\n\t\trprTransform = group + \"|\" + rprTransform\n\t\trprLightShape = rprTransform + \"|\" + rprLightShape\n\n\t# Logging to file \n\tstart_log(rs_light, rprLightShape)\n\n\t# Copy properties from rsLight\n\n\tsetProperty(rprLightShape, \"lightType\", 0)\n\n\tintensity = getProperty(rs_light, \"multiplier\")\n\texposure = getProperty(rs_light, \"exposure\")\n\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure)\n\tsetProperty(rprLightShape, \"intensityUnits\", 1)\n\t\n\tcopyProperty(rprLightShape, rs_light, \"color\", \"tint_color\")\n\n\tvisible = getProperty(rs_light, \"transparency\")\n\tif (visible[0] or visible[1] or visible[2]): \n\t\tsetProperty(rprLightShape, \"areaLightVisible\", 0)\n\telse:\n\t\tsetProperty(rprLightShape, \"areaLightVisible\", 1)\n\t\n\tcopyProperty(rprTransform, rsTransform, \"translate\", \"translate\")\n\tcopyProperty(rprTransform, rsTransform, \"rotate\", \"rotate\")\n\tcopyProperty(rprTransform, rsTransform, \"scale\", \"scale\")\n\n\t# Logging to file\n\tend_log(rs_light) \n\n\ndef convertRedshiftIESLight(rs_light): \n\n\t# Redshift light transform\n\tsplited_name = rs_light.split(\"|\")\n\trsTransform = \"|\".join(splited_name[0:-1])\n\tgroup = \"|\".join(splited_name[0:-2])\n\n\tif cmds.objExists(rsTransform + \"_rpr\"):\n\t\trprTransform = rsTransform + \"_rpr\"\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\telse: \n\t\trprLightShape = cmds.createNode(\"RPRIES\", n=\"RPRIESLight\")\n\t\trprLightShape = cmds.rename(rprLightShape, splited_name[-1] + \"_rpr\")\n\t\trprTransform = cmds.listRelatives(rprLightShape, p=True)[0]\n\t\trprTransform = cmds.rename(rprTransform, splited_name[-2] + \"_rpr\")\n\t\trprLightShape = cmds.listRelatives(rprTransform)[0]\n\n\t\tif group:\n\t\t\tcmds.parent(rprTransform, group)\n\n\t\trprTransform = group + \"|\" + rprTransform\n\t\trprLightShape = rprTransform + \"|\" + rprLightShape\n\n\t# Logging to file \n\tstart_log(rs_light, rprLightShape)\n\n\t# Copy properties from rsLight\n\tintensity = getProperty(rs_light, \"multiplier\")\n\texposure = getProperty(rs_light, \"exposure\")\n\tsetProperty(rprLightShape, \"intensity\", intensity * 2 ** exposure)\n\tcopyProperty(rprLightShape, rs_light, \"color\", \"color\")\n\tsetProperty(rprLightShape, \"iesFile\", getProperty(rs_light, \"profile\"))\n\t\n\tcopyProperty(rprTransform, rsTransform, \"translate\", \"translate\")\n\tsetProperty(rprTransform, \"rotateX\", getProperty(rsTransform, \"rotateX\") + 180)\n\tcopyProperty(rprTransform, rsTransform, \"rotateY\", \"rotateY\")\n\tcopyProperty(rprTransform, rsTransform, \"rotateZ\", \"rotateZ\")\n\tcopyProperty(rprTransform, rsTransform, \"scale\", \"scale\")\n\n\t# Logging to file\n\tend_log(rs_light) \n\n\ndef convertRedshiftVolumeScattering(rsVolumeScattering):\n\n\t# Creating new Volume material\n\trprMaterial = cmds.shadingNode(\"RPRVolumeMaterial\", asShader=True)\n\trprMaterial = cmds.rename(rprMaterial, rsVolumeScattering + \"_rpr\")\n\t\n\tsg = rprMaterial + \"SG\"\n\tcmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=sg)\n\tconnectProperty(rprMaterial, \"outColor\", sg, \"volumeShader\")\n\n\t# create sphere\n\tcmds.polySphere(n=\"Volume\")\n\tsetProperty(\"Volume\", \"scale\", (999, 999, 999))\n\n\t# assign material\n\tcmds.select(cl=True)\n\tcmds.select(\"Volume\")\n\tcmds.sets(e=True, forceElement=sg)\n\n\t# Logging to file \n\tstart_log(rsVolumeScattering, rprMaterial) \n\n\t# Fields conversion\n\tcopyProperty(rprMaterial, rsVolumeScattering, \"scatterColor\", \"tint\")\n\tcopyProperty(rprMaterial, rsVolumeScattering, \"scatteringDirection\", \"phase\")\n\tcopyProperty(rprMaterial, rsVolumeScattering, \"emissionColor\", \"fogAmbient\")\n\n\tdensity = getProperty(rsVolumeScattering, \"scatteringAmount\") * 8\n\tsetProperty(rprMaterial, \"density\", density)\n\t\n\t# Logging to file\n\tend_log(rsVolumeScattering) \n\n\ndef convertTemperature(temperature):\n\ttemperature = temperature / 100\n\n\tif temperature <= 66:\n\t\tcolorR = 255\n\telse:\n\t\tcolorR = temperature - 60\n\t\tcolorR = 329.698727446 * colorR ** -0.1332047592\n\t\tif colorR < 0:\n\t\t\tcolorR = 0\n\t\tif colorR > 255:\n\t\t\tcolorR = 255\n\n\n\tif temperature <= 66:\n\t\tcolorG = temperature\n\t\tcolorG = 99.4708025861 * math.log(colorG) - 161.1195681661\n\t\tif colorG < 0:\n\t\t\tcolorG = 0\n\t\tif colorG > 255:\n\t\t\tcolorG = 255\n\telse:\n\t\tcolorG = temperature - 60\n\t\tcolorG = 288.1221695283 * colorG ** -0.0755148492\n\t\tif colorG < 0:\n\t\t\tcolorG = 0\n\t\tif colorG > 255:\n\t\t\tcolorG = 255\n\n\n\tif temperature >= 66:\n\t\tcolorB = 255\n\telif temperature <= 19:\n\t\tcolorB = 0\n\telse:\n\t\tcolorB = temperature - 10\n\t\tcolorB = 138.5177312231 * math.log(colorB) - 305.0447927307\n\t\tif colorB < 0:\n\t\t\tcolorB = 0\n\t\tif colorB > 255:\n\t\t\tcolorB = 255\n\n\tcolorR = colorR / 255\n\tcolorG = colorG / 255\n\tcolorB = colorB / 255\n\n\treturn (colorR, colorG, colorB)\n\n\n# Convert material. Returns new material name.\ndef convertMaterial(rsMaterial, source):\n\n\trs_type = cmds.objectType(rsMaterial)\n\n\tconversion_func = {\n\n\t\t# Redshift materials\n\t\t\"RedshiftArchitectural\": convertRedshiftArchitectural,\n\t\t\"RedshiftCarPaint\": convertRedshiftCarPaint,\n\t\t\"RedshiftHair\": convertUnsupportedMaterial,\n\t\t\"RedshiftIncandescent\": convertRedshiftIncandescent,\n\t\t\"RedshiftMaterial\": convertRedshiftMaterial,\n\t\t\"RedshiftMaterialBlender\": convertRedshiftMaterialBlender,\n\t\t\"RedshiftMatteShadowCatcher\": convertRedshiftMatteShadowCatcher,\n\t\t\"RedshiftShaderSwitch\": convertUnsupportedMaterial,\n\t\t\"RedshiftSkin\": convertRedshiftSkin,\n\t\t\"RedshiftSprite\": convertRedshiftSprite,\n\t\t\"RedshiftSubSurfaceScatter\": convertRedshiftSubSurfaceScatter,\n\n\t\t# Redshift utility can be connected to SG\n\t\t\"RedshiftRaySwitch\": convertUnsupportedMaterial,\n\t\t\"RedshiftVolume\": convertUnsupportedMaterial,\n\n\t\t# Standard utilities\n\t\t\"clamp\": convertUnsupportedNode,\n\t\t\"colorCondition\": convertUnsupportedNode,\n\t\t\"colorComposite\": convertColorComposite,\n\t\t\"blendColors\": convertBlendColors,\n\t\t\"luminance\": convertLuminance,\n\t\t\"reverse\": convertReverse,\n\t\t\"bump2d\": convertbump2d,\n\t\t\"premultiply\": convertPreMultiply,\n\t\t\"channels\": convertChannels,\n\t\t\"vectorProduct\": convertVectorProduct,\n\t\t\"multiplyDivide\": convertmultiplyDivide,\n\n\t\t# Redshift utilities\n\t\t\"RedshiftBumpMap\": convertRedshiftBumpMap,\n\t\t\"RedshiftNormalMap\": convertRedshiftNormalMap,\n\t\t\"RedshiftAmbientOcclusion\": convertRedshiftAmbientOcclusion,\n\t\t\"RedshiftFresnel\": convertRedshiftFresnel,\n\t\t\"RedshiftColorLayer\": convertRedshiftColorLayer,\n\t\t# Conversion is in materials\n\t\t\"RedshiftBumpBlender\": convertUnsupportedNode,\n\t\t\"RedshiftNoise\": convertRedshiftNoise,\n\t\t\"RedshiftUserDataInteger\": convertRedshiftUserDataInteger,\n\t\t\"RedshiftUserDataScalar\": convertRedshiftUserDataScalar,\n\t\t\"RedshiftUserDataColor\": convertRedshiftUserDataColor\n\t\t\n\t}\n\n\tif rs_type in conversion_func:\n\t\trpr = conversion_func[rs_type](rsMaterial, source)\n\telse:\n\t\tif isRedshiftType(rsMaterial):\n\t\t\trpr = convertUnsupportedNode(rsMaterial, source)\n\t\telse:\n\t\t\trpr = convertStandartNode(rsMaterial, source)\n\n\treturn rpr\n\n\n# Convert light. Returns new light name.\ndef convertLight(light):\n\n\trs_type = cmds.objectType(light)\n\n\tconversion_func = {\n\t\t\"RedshiftPhysicalLight\": convertRedshiftPhysicalLight,\n\t\t\"RedshiftDomeLight\": convertRedshiftDomeLight,\n\t\t\"RedshiftPortalLight\": convertRedshiftPortalLight,\n\t\t\"RedshiftIESLight\": convertRedshiftIESLight,\n\t\t\"RedshiftPhysicalSun\": convertRedshiftPhysicalSun\n\t}\n\n\tconversion_func[rs_type](light)\n\n\ndef isRedshiftType(obj):\n\n\tif cmds.objExists(obj):\n\t\tif \"Redshift\" in cmds.objectType(obj):\n\t\t\treturn 1\n\treturn 0\n\n\ndef cleanScene():\n\n\tlistMaterials= cmds.ls(materials=True)\n\tfor material in listMaterials:\n\t\tif isRedshiftType(material):\n\t\t\tshEng = cmds.listConnections(material, type=\"shadingEngine\")\n\t\t\ttry:\n\t\t\t\tif shEng:\n\t\t\t\t\tcmds.delete(shEng[0])\n\t\t\t\tcmds.delete(material)\n\t\t\texcept:\n\t\t\t\tpass\n\n\tlistLights = cmds.ls(l=True, type=[\"RedshiftDomeLight\", \"RedshiftIESLight\", \"RedshiftPhysicalLight\", \"RedshiftPhysicalSun\", \"RedshiftPortalLight\"])\n\tfor light in listLights:\n\t\ttransform = cmds.listRelatives(light, p=True)\n\t\ttry:\n\t\t\tcmds.delete(light)\n\t\t\tcmds.delete(transform[0])\n\t\texcept:\n\t\t\tpass\n\n\tlistObjects = cmds.ls(l=True)\n\tfor obj in listObjects:\n\t\tif isRedshiftType(object):\n\t\t\ttry:\n\t\t\t\tcmds.delete(obj)\n\t\t\texcept:\n\t\t\t\tpass\n\n\ndef remap_value(value, maxInput, minInput, maxOutput, minOutput):\n\n\tvalue = maxInput if value > maxInput else value\n\tvalue = minInput if value < minInput else value\n\n\tinputDiff = maxInput - minInput\n\toutputDiff = maxOutput - minOutput\n\n\tremapped_value = minOutput + ((float(value - minInput) / float(inputDiff)) * outputDiff)\n\n\treturn remapped_value\n\n\ndef clampValue(value, minValue, maxValue):\n\treturn max(min(value, maxValue), minValue)\n\n\ndef checkAssign(material):\n\n\tif isRedshiftType(material):\n\t\tmaterialSG = cmds.listConnections(material, type=\"shadingEngine\")\n\t\tif materialSG:\n\t\t\tcmds.hyperShade(objects=material)\n\t\t\tassigned = cmds.ls(sl=True)\n\t\t\tif assigned:\n\t\t\t\treturn 1\n\treturn 0\n\n\ndef defaultEnable(RPRmaterial, rsMaterial, enable, value):\n\n\tweight = getProperty(rsMaterial, value)\n\tif weight > 0:\n\t\tsetProperty(RPRmaterial, enable, 1)\n\telse:\n\t\tsetProperty(RPRmaterial, enable, 0)\n\n\ndef repathScene():\n\tscene_workspace = cmds.workspace(q=True, dir=True)\n\tprint('Your workspace located in {}'.format(scene_workspace))\n\tunresolved_files = cmds.filePathEditor(query=True, listFiles=\"\", unresolved=True, attributeOnly=True)\n\tif unresolved_files:\n\t\tfor item in unresolved_files:\n\t\t\tprint(\"Repathing node {}\".format(item, os.path.join(item, scene_workspace)))\n\t\t\tcmds.filePathEditor(item, repath=scene_workspace, recursive=True, ra=1)\n\n\ndef convertScene():\n\n\t# Disable caching\n\tmaya_version = cmds.about(apiVersion=True)\n\tif maya_version > 20190200:\n\t\tfrom maya.plugin.evaluator.cache_preferences import CachePreferenceEnabled\n\t\tcache_preference_enabled = CachePreferenceEnabled().get_value()\n\t\tif cache_preference_enabled:\n\t\t\tCachePreferenceEnabled().set_value(False)\n\n\t# Repath paths in scene files (filePathEditor)\n\trepathScene()\n\n\t# Check plugins\n\tif not cmds.pluginInfo(\"redshift4maya\", q=True, loaded=True):\n\t\ttry:\n\t\t\tcmds.loadPlugin(\"redshift4maya\", quiet=True)\n\t\texcept Exception as ex:\n\t\t\tresponse = cmds.confirmDialog(title=\"Error\",\n\t\t\t\t\t\t\t message=(\"Redshift plugin is not installed.\\nInstall Redshift plugin before conversion.\"),\n\t\t\t\t\t\t\t button=[\"OK\"],\n\t\t\t\t\t\t\t defaultButton=\"OK\",\n\t\t\t\t\t\t\t cancelButton=\"OK\",\n\t\t\t\t\t\t\t dismissString=\"OK\")\n\t\t\texit(\"Redshift plugin is not installed\")\n\n\tif not cmds.pluginInfo(\"RadeonProRender\", q=True, loaded=True):\n\t\ttry:\n\t\t\tcmds.loadPlugin(\"RadeonProRender\", quiet=True)\n\t\texcept Exception as ex:\n\t\t\tresponse = cmds.confirmDialog(title=\"Error\",\n\t\t\t\t\t\t\t message=(\"RadeonProRender plugin is not installed.\\nInstall RadeonProRender plugin before conversion.\"),\n\t\t\t\t\t\t\t button=[\"OK\"],\n\t\t\t\t\t\t\t defaultButton=\"OK\",\n\t\t\t\t\t\t\t cancelButton=\"OK\",\n\t\t\t\t\t\t\t dismissString=\"OK\")\n\t\t\texit(\"RadeonProRender plugin is not installed\")\n\n\t# redshift engine set before conversion\n\tsetProperty(\"defaultRenderGlobals\", \"currentRenderer\", \"redshift\")\n\n\t# Convert RedshiftEnvironment\n\tenv = cmds.ls(type=\"RedshiftEnvironment\")\n\tif env:\n\t\ttry:\n\t\t\tconvertRedshiftEnvironment(env[0])\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\t\t\tprint(\"Error while converting environment. \")\n\n\t# Convert RedshiftPhysicalSky\n\tsky = cmds.ls(type=\"RedshiftPhysicalSky\")\n\tif sky:\n\t\ttry:\n\t\t\tconvertRedshiftPhysicalSky(sky[0])\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\t\t\tprint(\"Error while converting physical sky. \\n\")\n\n\t# Convert RedshiftAtmosphere\n\tatmosphere = cmds.ls(type=\"RedshiftVolumeScattering\")\n\tif atmosphere:\n\t\ttry:\n\t\t\tconvertRedshiftVolumeScattering(atmosphere[0])\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\t\t\tprint(\"Error while converting volume scattering environment.\")\n\n\t# Get all lights from scene\n\tlistLights = cmds.ls(l=True, type=[\"RedshiftDomeLight\", \"RedshiftIESLight\", \"RedshiftPhysicalLight\", \"RedshiftPhysicalSun\", \"RedshiftPortalLight\"])\n\n\t# Convert lights\n\tfor light in listLights:\n\t\ttry:\n\t\t\tconvertLight(light)\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\t\t\tprint(\"Error while converting {} light. \\n\".format(light))\n\t\t\n\n\t# Get all materials from scene\n\tlistMaterials = cmds.ls(materials=True)\n\tmaterialsDict = {}\n\tfor each in listMaterials:\n\t\tif checkAssign(each):\n\t\t\tmaterialsDict[each] = convertMaterial(each, \"\")\n\n\tfor rs, rpr in materialsDict.items():\n\t\ttry:\n\t\t\tcmds.hyperShade(objects=rs)\n\t\t\trpr_sg = cmds.listConnections(rpr, type=\"shadingEngine\")[0]\n\t\t\tcmds.sets(forceElement=rpr_sg)\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\t\t\tprint(\"Error while converting {} material. \\n\".format(rs))\n\t\n\t# globals conversion\n\ttry:\n\t\tsetProperty(\"defaultRenderGlobals\",\"currentRenderer\", \"FireRender\")\n\t\tsetProperty(\"defaultRenderGlobals\", \"imageFormat\", 8)\n\n\t\tsetProperty(\"RadeonProRenderGlobals\", \"completionCriteriaSeconds\", 0)\n\t\tif getProperty(\"redshiftOptions\", \"progressiveRenderingEnabled\"):\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"adaptiveThreshold\", 0)\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"completionCriteriaIterations\", getProperty(\"redshiftOptions\", \"progressiveRenderingNumPasses\") * 1.5)\n\t\telse:\n\t\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"adaptiveThreshold\", \"unifiedAdaptiveErrorThreshold\")\n\t\t\tif getProperty(\"redshiftOptions\", \"unifiedMinSamples\") >= 16:\n\t\t\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"completionCriteriaMinIterations\", \"unifiedMinSamples\")\n\t\t\telse:\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"completionCriteriaMinIterations\", 16)\n\t\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"completionCriteriaIterations\", \"unifiedMaxSamples\")\n\n\t\tsetProperty(\"RadeonProRenderGlobals\", \"giClampIrradiance\", 1)\n\t\tsetProperty(\"RadeonProRenderGlobals\", \"giClampIrradianceValue\", 5)\n\n\t\trsSubSurfaceScatter = cmds.ls(type=\"RedshiftSubSurfaceScatter\")\n\t\tif rsSubSurfaceScatter:\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"maxDepthDiffuse\", 12)\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"maxRayDepth \", 12)\n\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"maxDepthGlossy\", \"reflectionMaxTraceDepth\")\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"maxDepthRefraction\", \"refractionMaxTraceDepth\")\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"maxRayDepth\", \"combinedMaxTraceDepth\")\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"filter\", \"unifiedFilterType\")\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"motionBlur\", \"motionBlurEnable\")\n\t\tcopyProperty(\"RadeonProRenderGlobals\", \"redshiftOptions\", \"motionBlurScale\", \"motionBlurFrameDuration\")\n\n\t\tcameras = cmds.ls(type=\"camera\")\n\t\tfor cam in cameras:\n\t\t\tsetProperty(cam, \"mask\", 0)\n\n\texcept:\n\t\tpass\n\n\tmatteShadowCatcher = cmds.ls(materials=True, type=\"RedshiftMatteShadowCatcher\")\n\tif matteShadowCatcher:\n\t\ttry:\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"aovOpacity\", 1)\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"aovBackground\", 1)\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"aovShadowCatcher\", 1)\n\t\texcept Exception as ex:\n\t\t\ttraceback.print_exc()\n\n\trsPostEffects = cmds.listConnections(\"redshiftOptions\", type=\"RedshiftPostEffects\")\n\tif rsPostEffects:\n\t\tif getProperty(rsPostEffects[0], \"tonemapEnable\"):\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingType\", 2)\n\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearSensitivity\", getProperty(rsPostEffects[0], \"tonemapFilmSpeed\") / 100.0)\n\t\t\tcopyProperty(\"RadeonProRenderGlobals\", rsPostEffects[0], \"toneMappingPhotolinearFstop\", \"tonemapFstop\")\n\n\t\t\treinhardFactor = getProperty(rsPostEffects[0], \"tonemapReinhardFactor\")\n\t\t\tshutterRatio = getProperty(rsPostEffects[0], \"tonemapShutterRatio\")\n\t\t\tif shutterRatio >= 800:\n\t\t\t\texposure = (3.3 * (10 / (shutterRatio + 400) ** 0.5) / math.log((shutterRatio - 770) ** 0.7)) * 2 ** reinhardFactor\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\", exposure)\n\t\t\telif shutterRatio < 800 and shutterRatio >= 43:\n\t\t\t\texposure = (10 / math.log10(shutterRatio - 28) ** 3) * 2 ** reinhardFactor\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\", exposure)\n\t\t\telse:\n\t\t\t\texposure = (10.5 / math.log10(shutterRatio + 1.25)) * 2 ** reinhardFactor\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\", exposure)\n\n\trsBokeh = cmds.ls(type=\"RedshiftBokeh\")\n\tif rsBokeh:\n\t\tif getProperty(rsBokeh[0], \"dofOn\"):\n\t\t\tdofUseBokehImage = getProperty(rsBokeh[0], \"dofUseBokehImage\")\n\t\t\tdofBokehNormalizationMode = getProperty(rsBokeh[0], \"dofBokehNormalizationMode\")\n\t\t\tif dofUseBokehImage == 0 or (dofUseBokehImage == 1 and dofBokehNormalizationMode != 0):\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\", getProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\") / 2)\n\t\t\telif dofUseBokehImage == 1 and dofBokehNormalizationMode == 0:\n\t\t\t\tsetProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\", getProperty(\"RadeonProRenderGlobals\", \"toneMappingPhotolinearExposure\") / 10)\n\n\tif maya_version > 20190200:\n\t\tif cache_preference_enabled:\n\t\t\tCachePreferenceEnabled().set_value(True)\n\n\ndef auto_launch():\n\tconvertScene()\n\tcleanScene()\n\ndef manual_launch():\n\tprint(\"Conversion start! Converter version: {}\".format(RS2RPR_CONVERTER_VERSION))\n\tstartTime = 0\n\ttestTime = 0\n\tstartTime = time.time()\n\tconvertScene()\n\ttestTime = time.time() - startTime\n\tprint(\"Conversion was finished! Elapsed time: {}\".format(round(testTime, 3)))\n\n\tresponse = cmds.confirmDialog(title=\"Completed\",\n\t\t\t\t\t\t\t message=(\"Scene conversion took {} seconds.\\nWould you like to delete all Redshift objects?\".format(round(testTime, 3))),\n\t\t\t\t\t\t\t button=[\"Yes\", \"No\"],\n\t\t\t\t\t\t\t defaultButton=\"Yes\",\n\t\t\t\t\t\t\t cancelButton=\"No\",\n\t\t\t\t\t\t\t dismissString=\"No\")\n\n\tif response == \"Yes\":\n\t\tcleanScene()\n\n\ndef onMayaDroppedPythonFile(empty):\n\tmanual_launch()\n\nif __name__ == \"__main__\":\n\tmanual_launch()\n\n\n\n","repo_name":"luxteam/Redshift2RPRConvertTool-Maya","sub_path":"convertRS2RPR.py","file_name":"convertRS2RPR.py","file_ext":"py","file_size_in_byte":131650,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"7634656347","text":"import numpy as np\r\nfrom sklearn import datasets, tree, linear_model\r\nfrom sklearn.kernel_ridge import KernelRidge\r\nfrom sklearn.model_selection import cross_val_score\r\nimport timeit\r\n\r\ndef mytraining(X,Y):\r\n \"\"\"\r\n # Kernel Ridge Regression\r\n reg = KernelRidge(kernel='rbf', gamma=0.1,alpha=0.0003)\r\n reg.fit(X,Y)\r\n Ykr = reg.predict(X)\r\n \"\"\"\r\n reg = KernelRidge(kernel='polynomial', gamma=0.0003, alpha=0.1)\r\n reg.fit(X,Y)\r\n Ykr = reg.predict(X) \r\n \r\n return reg\r\n \r\ndef mytrainingaux(X,Y,par):\r\n \r\n reg.fit(X,Y)\r\n \r\n return reg\r\n\r\ndef myprediction(X,reg):\r\n\r\n Ypred = reg.predict(X)\r\n\r\n return Ypred\r\n\r\n\r\ndef secondRegressionMethod(X,Y):\r\n reg = KernelRidge(kernel='polynomial', gamma=1, alpha=0.1)\r\n reg.fit(X,Y)\r\n Ykr = reg.predict(X)\r\n \r\n return reg","repo_name":"regouga/IA-2P-3A1S-1718","sub_path":"proj2alunos/P2/regsol.py","file_name":"regsol.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73019748882","text":"import numpy as np\n#import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom blag import Blag\nfrom linucb import Linucb\nfrom linear import Linear\nimport random\nimport sys\n\n\nm = int(sys.argv[1])\nepsilon = 0.1\nf = []\nprob_0 = []\nfor i in range(m):\n\tf.append(random.randint(1,1000))\n\tprob_0.append(random.uniform(0,1))\n\nActionset = []\n\ndef initial():\n\t#Filling the ActionSet\n\twhile True:\n\t\taction = [0 for i in range(m)]\n\t\tfor itr in range(m-1):\n\t\t\tfor itr2 in range(itr+1,m):\n\t\t\t\tprobability = random.uniform(0,0.5)\n\t\t\t\taction[itr] = probability\n\t\t\t\taction[itr2] = -probability\n\t\t\t\tActionset.append(action)\n\t\t\t\taction = [0 for i in range(m)]\n\t\t\t\taction[itr] = probability\n\t\t\t\taction[itr2] = -probability\n\t\t\t\tActionset.append(action)\n\t\t\t\taction = [0 for i in range(m)]\n\t\tbreak\n\ninitial()\n\nTIME = int(sys.argv[2])\n\nucb_trial = Linucb(m, f, prob_0, Actionset, 0)\nucb_trial.bandit(TIME)\nucb_plot = ucb_trial.getLinucb()/500/int(sys.argv[1])\n\nblag_trial = Blag(m, f, prob_0, Actionset, 0.1)\nblag_trial.bandit(TIME)\nblag_plot = blag_trial.getBlag()/500/int(sys.argv[1])\n\n\noutfile = open('comparison.txt','w+')\noutfile.write('\\nExperiment setting: m='+sys.argv[1]+' TIME='+sys.argv[2]+':\\n')\noutfile.write('BLAG: '+sys(blag_plot))\noutfile.write('UCB: '+sys(ucb_plot))\noutfile.close()\n","repo_name":"EugeneLYC/BLAG","sub_path":"Python_API/reward.py","file_name":"reward.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35254720852","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rospy\nimport numpy\nimport tf\n\nfrom os.path import expanduser\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs import point_cloud2 as pc2\nfrom sensor_msgs.msg import Image, PointCloud2\nfrom std_msgs.msg import Float32\nimport cv2\nimport numpy as np \n\nclass Floor_Object_Pose:\n\tdef thresh(self,gray):\n\t ret,imgt=cv2.threshold(gray,127,255,cv2.THRESH_BINARY)\n\t imgt = cv2.bitwise_not(imgt)\n\t imgt = cv2.erode(imgt, None, iterations=4)\n\t imgt = cv2.dilate(imgt, None, iterations=6)\n\t return imgt\n\n\n\tdef matchandfind(self,imgt):\n\t offset=20\n\t _,cnts, _ = cv2.findContours(imgt, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t cnts = sorted(cnts, key=cv2.contourArea, reverse=True)\n\t if len(cnts): \n\t c = cnts[0] \n\t rect = cv2.minAreaRect(c)\n\t box = np.int0(cv2.boxPoints(rect))\n\t cv2.drawContours(self.scene, [box], -1, (0, 240, 0), 3)\n\t \n\t return rect, imgt\n\n\n\tdef run(self):\n\t\twhile not rospy.is_shutdown():\n\t\t\tif self.current_image is not None:\n\t\t\t\ttry:\n\t\t\t\t\t#(trans,_) = self.tf_listener.lookupTransform('/camera_link', rospy.Time(0))\n\t\t\t\t\tself.scene = self.bridge.imgmsg_to_cv2(self.current_image, 'passthrough')\n\t\t\t\t\trect, image = self.detect_object(self.scene)\n\t\t\t\t\tself.imagepub.publish(self.bridge.cv2_to_imgmsg(self.scene, 'rgb8'))\n\t\t\t\t\tcv2.imshow('Test',self.scene)\n\t\t\t\t\tif cv2.waitKey(1) &0xFF == ord('q'):\n\t\t\t\t\t break\n\t\t\t\t\tangle = Float32()\n\t\t\t\t\tangle.data = rect[2]\n\t\t\t\t\tself.anglepub.publish(angle)\n\n\t\t\t\t\tcenter = rect[0]\n\t\t\t\t\tif self.current_pc is None:\n\t\t\t\t\t\trospy.loginfo('No point cloud information available')\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tpc_list = list(pc2.read_points(self.current_pc, skip_nans=True, field_names=('x', 'y', 'z'), uvs=[(int(center[0]), int(center[1]))]))\n\n\t\t\t\t\t\tif len(pc_list) > 0:\n\t\t\t\t\t\t\ttf_id = 'floor_object'\n\t\t\t\t\t\t\tpoint_x, point_y, point_z = pc_list[0]\n\n\t\t\t\t\t\t\tobject_tf = [point_z, -point_x, -point_y]\n\t\t\t\t\t\t\tframe = '/camera_link'\n\n\t\t\t\t\t\t\t#object_tf = numpy.array(trans) + object_tf\n\t\t\t\t\t\t\tself.tfpub.sendTransform((object_tf),tf.transformations.quaternion_from_euler(0,0,0), rospy.Time.now(), tf_id, frame)\n\n\t\t\t\texcept CvBridgeError as e:\n\t\t\t\t\tprint(e)\n\t\t\t\texcept (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:\n\t\t\t\t\tprint(e)\n\n\n\tdef detect_object(self, img):\n\t\tgray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) \n\t\timgr=self.thresh(gray)\n\t\trect,image = self.matchandfind(imgr)\n\t\treturn rect, image\n\n\n\tdef __init__(self):\n\t\tself.tf_listener = tf.TransformListener()\n\t\tself.bridge = CvBridge()\n\t\tself.current_image = None\n\t\tself.current_pc = None\n\t\tself.scene = None\n\t\tself.tfpub = tf.TransformBroadcaster()\n\t\trospy.Subscriber('/camera/rgb/image_raw', Image, self.image_callback)\n\t\trospy.Subscriber('/camera/depth/points', PointCloud2, self.pc_callback)\n\t\tself.imagepub = rospy.Publisher('/floor_object/image', Image, queue_size=10)\n\t\tself.anglepub = rospy.Publisher('/floor_object/angle', Float32, queue_size=10)\n\n\n\tdef image_callback(self, image):\n\t\tself.current_image = image\n\n\n\tdef pc_callback(self, pc):\n\t\tself.current_pc = pc\n\n\n\nif __name__ == '__main__':\n\trospy.init_node('floor_object_pose_publisher', log_level = rospy.INFO)\n\n\ttry:\n\t\tf = Floor_Object_Pose()\n\t\tf.run()\n\texcept KeyboardInterrupt:\n\t\trospy.loginfo('Shutting down')\n\n\n\n\n\n\n\n'''\nelement = cv2.getStructuringElement(cv2.MORPH_RECT,(3, 3))\ncv2.namedWindow('Test')\ncap = cv2.VideoCapture(1)\nwhile cap.isOpened():\n ret,img = cap.read()\n img = cv2.addWeighted(img,1,np.zeros(img.shape,img.dtype),0,85)\n img=img[50:430, 50:590]\n main(img)\n cv2.imshow('Test',img)\n if cv2.waitKey(1) &0xFF == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n'''","repo_name":"adubredu/rascapp_robot","sub_path":"bill_ws/src/bill_perception/src/shape_detector.py","file_name":"shape_detector.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41448816897","text":"#!/usr/bin/python\n\nimport feedparser\nimport os\nimport sys\nfrom datetime import datetime\n\nfeed_list = []\nfeed_list.append([\"Ekstrabladet\", \"https://ekstrabladet.dk/rssfeed/all/\"] )\nfeed_list.append([\"Dr Nyheder\", \"https://www.dr.dk/nyheder/service/feeds/allenyheder\"] )\nfeed_list.append([\"Nyborg\", \"https://fyens.dk/feed/nyborg\"])\nfeed_list.append([\"BBC Europe\", \"http://feeds.bbci.co.uk/news/world/europe/rss.xml\"])\nfeed_list.append([\"BT\", \"https://www.bt.dk/bt/seneste/rss\"])\n#feed_list.append([\"Version2\", \"https://www.version2.dk/it-nyheder/rss\"])\n\ndef found_month(init_month):\n \n months = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"June\", \"July\", \"Aug\",\"Sept\", \"Oct\", \"Nov\", \"Dec\"]\n count = 0\n for m in months:\n\n if m == init_month:\n \n if count < 10:\n return \"%s%s\" % (\"0\",count + 1)\n \n else:\n return count + 1\n \n count = count + 1\n \n return 0\n\ndef get_feeds():\n found_feeds = []\n count = 1\n feeds = mix_feeds()\n\n for f in feeds:\n \n date, title, link, feed_name = f\n found_feeds.append([count, date,title,link,feed_name])\n count = count + 1\n\n return found_feeds\n\ndef convert_date(input_string):\n\n split_string = input_string.split(\",\")[1].split(\" \")\n time_split = split_string[4].split(\":\")\n\n day = int(split_string[1])\n month = int(found_month(split_string[2]))\n year = int(split_string[3])\n \n hours = int(time_split[0])\n minutes = int(time_split[1])\n seconds = int(time_split[2])\n\n return datetime(year,month, day, hours,minutes,seconds)\n\ndef is_today(date_string):\n \n now_date = str(datetime.now()).split(\" \")[0].split(\"-\")\n input_date = str(date_string).split(\" \")[0].split(\"-\")\n \n if (now_date[0] == input_date[0]) and (now_date[1] == input_date[1]) and (now_date[2] == input_date[2]):\n return True\n \n \n #print(now_date)\n #print(input_date)\n\n return False\n\ndef mix_feeds():\n\n found_feeds = []\n\n for one_feed in feed_list:\n\n item_list = feedparser.parse(one_feed[1])[\"entries\"]\n\n for item in item_list:\n\n feed_date = convert_date(item[\"published\"])\n \n if (is_today(feed_date)):\n\n found_feeds.append([feed_date,item[\"title\"], item[\"link\"], one_feed[0]])\n \n return sorted(found_feeds, reverse=False)\n\ndef show_feed_list(f_list):\n\n index = 0\n\n for f in f_list:\n\n count, date, title, link, feed_name = f \n print(\"%s | %s | %s | %s\" % (count,date,title,feed_name ))\n\ndef display_menu():\n\n #try:\n f_list = get_feeds()\n \n command = None\n\n show_feed_list(f_list)\n\n while command != 'q':\n\n print(\"? for help :>\", end='')\n command = input()\n\n if command == \"list\":\n\n show_feed_list(f_list)\n \n elif command.split(\" \")[0] == \"read\":\n\n number = int(command.split(\" \")[1])\n\n os.system(\"links %s\" % (f_list[number - 1][3]))\n #print(\"number is %s\" % number)\n \n elif command == \"?\":\n\n print(\"list : get the feedlist\")\n print(\"read : read a feed\")\n print(\"q : quit the program\")\n \n #except Exception as ex:\n \n # print(ex)\n \nif __name__ == \"__main__\":\n\n #result = is_today(\"2021-04-18\")\n\n #print(result)\n \n display_menu()\n #print(datetime.now())","repo_name":"bart-nathan/bizfeed","sub_path":"bizfeed.py","file_name":"bizfeed.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39156092413","text":"import re\n\nfile = open(\"input_data.txt\", \"r\")\n\nlist = file.readlines()\n\ndef ex_coord(line, a, b):\n coord = line.split(\" -> \")[a].split(\",\")[b]\n return int(coord.strip())\n\ncoordinates = [[[ex_coord(l, 0,0),ex_coord(l, 0,1)],[ex_coord(l, 1,0),ex_coord(l, 1,1)]] for l in list]\n\ndef increment_coordinate(coord, dx, dy):\n return [coord[0]+dx, coord[1]+dy]\n\nvent_coords = {}\n\nfor line in coordinates:\n if line[0][0] == line[1][0]:\n # print(\"line {} works\".format(line))\n start = int(line[0][1] > line[1][1])\n # print(\"{} -> {}\".format(line, line[start]))\n for dy in range(abs(line[0][1] - line[1][1])+1):\n coord = increment_coordinate(line[start], 0, dy)\n coord_key = \"[{}, {}]\".format(coord[0],coord[1])\n if coord_key not in vent_coords.keys():\n vent_coords[coord_key] = 1\n # print(\"Line {} added {}\".format(line, coord_key))\n else:\n vent_coords[coord_key] += 1\n # print(\"Line {} increased {} to {}\".format(line, coord_key, vent_coords[coord_key]))\n\n elif line[0][1] == line[1][1]:\n # print(\"line {} works\".format(line))\n start = line[0][0] > line[1][0]\n # print(\"{} -> {}\".format(line, line[start]))\n for dx in range(abs(line[0][0] - line[1][0]) +1):\n coord = increment_coordinate(line[start], dx, 0)\n coord_key = \"[{}, {}]\".format(coord[0],coord[1])\n if coord_key not in vent_coords.keys():\n # print(\"Line {} added {}\".format(line, coord_key))\n vent_coords[coord_key] = 1\n else:\n vent_coords[coord_key] += 1\n # print(\"Line {} increased {} to {}\".format(line, coord_key, vent_coords[coord_key]))\n\n else:\n # print(\"line {} skipped\".format(line))\n continue\n\ncount = 0\nfor key in vent_coords.keys():\n if vent_coords[key] >= 2:\n count += 1\n\n# print(vent_coords)\nprint(count)\n","repo_name":"makah21803/Advent_of_code","sub_path":"2021/Day5/day5_basic.py","file_name":"day5_basic.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"36540831372","text":"\nimport math\n\ndef isprime(n):\n if(n<=1):\n return \"Not Prime\"\n \n '''instead running a loop nearly n times\n we know we can get factors by running loop sqrt(n) times\n if any number divides n we return not prime else prime\n '''\n \n s=int(math.sqrt(n))\n for i in range(2,s+1):\n if(n%i==0):\n return \"Not prime\"\n return \"Prime\"\n\n\nn=int(input())\nresult=isprime(n)\nprint(result)","repo_name":"devcommunity2025/basic_math_problems","sub_path":"Prime_Number/optimal.py","file_name":"optimal.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6780573203","text":"import string\nfrom typing import List, Tuple\nfrom hypothesis import given, assume\nimport hypothesis.strategies as st\nfrom more_itertools import flatten\nfrom renard.pipeline.quote_detection import Quote, QuoteDetector\n\n\n@given(\n prev_text=st.lists(st.text(alphabet=string.ascii_letters)),\n after_text=st.lists(st.text(alphabet=string.ascii_letters)),\n quote_content=st.lists(st.text(alphabet=string.ascii_letters)),\n quote_pair=st.sampled_from(QuoteDetector.DEFAULT_QUOTE_PAIRS),\n)\ndef test_quote_is_extracted(\n prev_text: List[str],\n after_text: List[str],\n quote_content: List[str],\n quote_pair: Tuple[str, str],\n):\n quote = [quote_pair[0]] + quote_content + [quote_pair[1]]\n text = prev_text + quote + after_text\n\n quote_detector = QuoteDetector()\n should_detect_quote = Quote(len(prev_text), len(text) - len(after_text), quote)\n\n detected = quote_detector(tokens=text)[\"quotes\"]\n\n assert len(detected) == 1\n assert detected[0] == should_detect_quote\n\n\n@given(text=st.lists(st.text()))\ndef test_quote_is_not_extracted(text: List[str]):\n all_quotes = list(flatten(QuoteDetector.DEFAULT_QUOTE_PAIRS))\n assume(all([not c in text for c in all_quotes]))\n quote_detector = QuoteDetector()\n assert len(quote_detector(tokens=text)[\"quotes\"]) == 0\n","repo_name":"CompNet/Renard","sub_path":"tests/test_quote_detection.py","file_name":"test_quote_detection.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7909369745","text":"import tkinter as tk\n\nwin = tk.Tk()\n\nfor i in range(10):\n for j in range(10):\n frame = tk.Frame(\n master = win\n )\n frame.grid(row=i, column=j)\n label = tk.Label(master=frame, text=f\"Row {i}\\nColumn {j}\")\n label.pack()\n\nwin.mainloop()","repo_name":"DaminiKhatik/Python_REPO","sub_path":"chapter62/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17331705460","text":"#!/usr/bin/env python\n# pylint: disable=C0116,W0613\n# This program is dedicated to the public domain under the CC0 license.\n\n\"\"\"\nPrimero, se definen unas funciones de callback. Después, esas funciones se le pasan\nal Dispatcher y se registran en sus respectivos lugares.\nEntonces, el bot se arranca y corre hasta que se presione Ctrl-C en la línea de comandos.\nUso:\nEnvía /start para iniciar la conversación.\nPresiona Ctrl-C en la línea de comandos o envía una señal para parar el proceso del bot.\n\"\"\"\n\nimport logging\nimport analyze\nfrom csv import DictReader\nfrom telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n ConversationHandler,\n CallbackContext,\n)\n\n# Habilitamos logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\n# Creamos los estados del chat\nDECISION, MVOZ, AAUDIO, LOCALIZACION, PROCESAR, START = range(6)\n\n# Creamos las variables del usuario\nlatitud = -1\nlongitud = -1\n\n# Función que contiene el estado inicial del bot y pasa al siguiente estado\ndef start(update: Update, context: CallbackContext) -> int:\n update.message.reply_text(\n 'Hola! Soy un bot que analiza audios buscando sonidos de aves.\\n'\n 'Envia /salir para cancelar la conversación conmigo.\\n'\n 'Para empezar necesito que me envíes tu ubicación.\\n'\n 'Hacerlo me ayudará a determinar mejor los resultados. Si no quieres envíame /saltar'\n )\n\n return LOCALIZACION\n\n# Función que contiene el estado después de elegir nota de voz del bot y pasa al siguiente estado en funcion de la respuesta\ndef eaudio(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"%s ha elegido enviarme: %s\", user.first_name, update.message.text)\n update.message.reply_text(\n '¡Perfecto! Envíame un mensaje de voz y lo analizo',\n reply_markup=ReplyKeyboardRemove(),\n )\n\n return MVOZ\n\n# Función que contiene el estado de procesamiento de la nota de voz y pasa al siguiente estado en función de la respuesta\ndef paudio(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n id_file = update.message.voice.file_id\n user_voz = context.bot.get_file(id_file)\n user_voz.download('./voice.ogg')\n logger.info(\"%s me ha enviado un mensaje de voz\", user.first_name)\n logger.info(\"El mensaje de voz dura: %s\", update.message.voice.duration)\n if update.message.voice.duration >= 3:\n update.message.reply_text(\n '¡Perfecto! Me pongo a analizarlo, dame un momento.',\n )\n analyze.clasifica('./voice.ogg', latitud, longitud)\n with open('result.csv', 'r') as doc:\n csv_reader = DictReader(doc, delimiter=';')\n for row in csv_reader:\n if float(row['Confidence']) > 0.4:\n update.message.reply_text(\n 'Distingo un ' + row['ScientificName'] + ' con una seguridad de un ' + str(float(row['Confidence']) * 100) + '%',\n )\n especie = row['ScientificName']\n update.message.reply_text(\n 'Si quieres mas información sobre los ' + row[\n 'ScientificName'] + ' puedes seguir el siguiente enlace: ' + 'https://es.wikipedia.org/wiki/' + especie.replace(' ', '_'),\n )\n update.message.reply_text(\n 'Eso fue todo lo que pude distinguir.\\n'\n 'Gracias por pasar pasar un rato conmigo.\\n'\n 'Si quieres analizar otro archivo envía /start pero si no, '\n 'espero que hayas tenido una buena experiencia y pasa un buen día', reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n else:\n update.message.reply_text(\n 'No llega a los 3 segundos. Por favor, envíame uno de al menos 3 segundos para que pueda analizarlo',\n )\n\n return MVOZ\n\n# Función que contiene el estado después de utilizar el atajo en la nota de voz del bot escribiendo \"archivo\"\n# y pasa al siguiente estado en funcion de la respuesta\ndef paudioej(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"Ha usado el atajo del ejemplo\")\n update.message.reply_text('¡Perfecto! Me pongo a analizarlo, dame un momento.',)\n analyze.clasifica('./XC563936Soundscape.mp3', latitud, longitud)\n with open('result.csv', 'r') as doc:\n csv_reader = DictReader(doc, delimiter=';')\n for row in csv_reader:\n if float(row['Confidence']) > 0.4:\n update.message.reply_text('Distingo un ' + row['ScientificName'] + ' con una seguridad de un ' + str(float(row['Confidence']) * 100) + '%',)\n especie = row['ScientificName']\n update.message.reply_text(\n 'Si quieres mas información sobre los ' + row[\n 'ScientificName'] + ' puedes seguir el siguiente enlace: ' + 'https://es.wikipedia.org/wiki/' + especie.replace(' ', '_'),\n )\n update.message.reply_text(\n 'Eso fue todo lo que pude distinguir.\\n'\n 'Gracias por pasar pasar un rato conmigo.\\n'\n 'Si quieres analizar otro archivo envía /start pero si no, '\n 'espero que hayas tenido una buena experiencia y pasa un buen día', reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n\n# Función que contiene el estado en el cual el usuario no ha enviado la nota de voz al bot y vuelve a repetir\n# el estado anterior\ndef error_audio(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"%s no me ha enviado el mensaje de voz. Envió otra cosa\", user.first_name)\n update.message.reply_text(\n 'Habías quedado en enviarme un mensaje de voz.\\n'\n 'Envíame un mensaje de voz o envíame /salir para cancelar la conversación',\n )\n\n return MVOZ\n\n# Función que contiene el estado después de elegir el archivo .mp3 del bot y pasa al siguiente estado en funcion de la respuesta\ndef earchivo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"%s ha elegido enviarme: %s\", user.first_name, update.message.text)\n update.message.reply_text(\n '¡Perfecto! Envíame un archivo de audio en formato mp3 y lo analizo',\n )\n\n return AAUDIO\n\n# Función que contiene el estado de procesamiento del archivo .mp3 y pasa al siguiente estado en función de la respuesta\ndef parchivo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n id_file = update.message.audio.file_id\n user_archivo = context.bot.get_file(id_file)\n user_archivo.download('./voice.mp3')\n logger.info(\"%s me ha enviado un archivo de audio\", user.first_name)\n logger.info(\"El archivo dura: %s\", update.message.audio.duration)\n if update.message.audio.duration >= 3:\n update.message.reply_text(\n '¡Perfecto! Me pongo a analizarlo, dame un momento.',\n )\n \"\"\"Llamamos al BirdNet con el archivo y la ubicación (por defecto si no la quiso facilitar)\"\"\"\n analyze.clasifica('./voice.mp3', latitud, longitud)\n \"\"\"Leemos el csv resultado\"\"\"\n with open('result.csv', 'r') as doc:\n csv_reader = DictReader(doc, delimiter=';')\n for row in csv_reader:\n if float(row['Confidence']) > 0.4:\n update.message.reply_text(\n 'Distingo un ' + row['ScientificName'] + ' con una seguridad de un ' + str(\n float(row['Confidence']) * 100) + '%',\n )\n especie = row['ScientificName']\n update.message.reply_text(\n 'Si quieres mas información sobre los ' + row[\n 'ScientificName'] + ' puedes seguir el siguiente enlace: ' + 'https://es.wikipedia.org/wiki/' + especie.replace(\n ' ', '_'),\n )\n update.message.reply_text(\n 'Eso fue todo lo que pude distinguir.\\n'\n 'Gracias por pasar pasar un rato conmigo.\\n'\n 'Si quieres analizar otro archivo envía /start pero si no, '\n 'espero que hayas tenido una buena experiencia y pasa un buen día', reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n else:\n update.message.reply_text(\n 'No llega a los 3 segundos. Por favor, envíame uno de al menos 3 segundos para que pueda analizarlo',\n )\n\n\n return AAUDIO\n\n# Función que contiene el estado después de utilizar el atajo en el archivo .mp3 del bot escribiendo \"archivo\"\n# y pasa al siguiente estado en funcion de la respuesta\ndef parchivoej(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"Ha utilizado el atajo del ejemplo\")\n update.message.reply_text('¡Perfecto! Me pongo a analizarlo, dame un momento.',)\n \"\"\"Llamamos al BirdNet con el archivo y la ubicación (por defecto si no la quiso facilitar)\"\"\"\n analyze.clasifica('./XC558716Soundscape.mp3', latitud, longitud)\n \"\"\"Leemos el csv resultado\"\"\"\n with open('result.csv', 'r') as doc:\n csv_reader = DictReader(doc, delimiter=';')\n for row in csv_reader:\n if float(row['Confidence']) > 0.4:\n update.message.reply_text(\n 'Distingo un ' + row['ScientificName'] + ' con una seguridad de un ' + str(\n float(row['Confidence']) * 100) + '%',\n )\n especie = row['ScientificName']\n update.message.reply_text(\n 'Si quieres mas información sobre los ' + row[\n 'ScientificName'] + ' puedes seguir el siguiente enlace: ' + 'https://es.wikipedia.org/wiki/' + especie.replace(\n ' ', '_'),\n )\n update.message.reply_text(\n 'Eso fue todo lo que pude distinguir.\\n'\n 'Gracias por pasar pasar un rato conmigo.\\n'\n 'Si quieres analizar otro archivo envía /start pero si no, '\n 'espero que hayas tenido una buena experiencia y pasa un buen día', reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n\n# Función que contiene el estado en el cual el usuario no ha enviado el archivo .mp3 al bot y vuelve a repetir\n# el estado anterior\ndef error_archivo(update: Update, context: CallbackContext) -> int:\n \"\"\"Bucle cuando no envía un archivo como dijo antes.\"\"\"\n user = update.message.from_user\n logger.info(\"%s no me ha enviado el archivo de audio. Envió otra cosa\", user.first_name)\n update.message.reply_text(\n 'Habías quedado en enviarme un archivo de audio en formato mp3.\\n'\n 'Envíame un archivo de audio o envíame /salir para cancelar la conversación',\n )\n\n return AAUDIO\n\n# Función que contiene el estado en el cual el usuario no ha enviado ubicción o saltado el paso y vuelve a repetir\n# el estado anterior\ndef error_location(update: Update, context: CallbackContext) -> int:\n \"\"\"Bucle cuando no envía una ubicacion o lo salta.\"\"\"\n user = update.message.from_user\n logger.info(\"%s Ni saltó la ubicación ni la ha enviado\", user.first_name)\n update.message.reply_text(\n 'Por favor, envíame una ubicación, /saltar para continuar o /salir para cancelar la conversación.',\n )\n\n return START\n\n# Función que contiene el estado en el cual el usuario ha enviado la ubicación al bot y pasa al siguiente estado\ndef location(update: Update, context: CallbackContext) -> int:\n \"\"\"Guarda la ubicación y pide el tipo de archivo a analizar.\"\"\"\n reply_keyboard = [['Voz', 'Archivo']]\n user = update.message.from_user\n longitud = update.message.location.longitude\n latitud = update.message.location.latitude\n logger.info(\n \"Ubicación de %s: %f / %f\", user.first_name, latitud, longitud\n )\n update.message.reply_text(\n '¡Gracias! Tener tu ubicación me ayuda a reducir las posibilidades.\\n'\n '¿Ahora que vas a enviarme, un mensaje de voz o un archivo de audio en formato mp3?\\n '\n 'Recuerda que tienen que durar al menos 3 segundos para que pueda reconocerlo.\\n',\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard, one_time_keyboard=True, input_field_placeholder='Voz o Archivo?'\n ),\n )\n\n return DECISION\n# Función que contiene el estado en el cual el usuario ha saltado el paso de la ubicación al bot y pasa al siguiente estado\ndef skip_location(update: Update, context: CallbackContext) -> int:\n reply_keyboard = [['Voz', 'Archivo']]\n user = update.message.from_user\n logger.info(\"El usuario %s no ha querido facilitar su localización.\", user.first_name)\n update.message.reply_text(\n 'Bueno, tendré que arreglármelas sin ella.\\n'\n 'Te recuerdo que esto disminuye los valores de confianza en las respuestas.\\n'\n '¿Ahora que vas a enviarme, un mensaje de voz o un archivo de audio en formato mp3?\\n'\n 'Recuerda que tienen que durar al menos 3 segundos para que pueda reconocerlo.\\n',\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard, one_time_keyboard=True, input_field_placeholder='Voz o Archivo?'\n ),\n )\n\n return DECISION\n\n# Función que contiene el estado en el cual el usuario cancela la conversación\ndef cancel(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n logger.info(\"El usuario %s ha cancelado la conversación.\", user.first_name)\n update.message.reply_text(\n 'Gracias por pasar pasar un rato conmigo.\\n'\n 'Espero que hayas tenido una buena experiencia y pasa un buen día', reply_markup=ReplyKeyboardRemove()\n )\n\n return ConversationHandler.END\n\n\n# Función que arranca y mantiene el bot\ndef main() -> None:\n \"\"\"Arranca el bot.\"\"\"\n # Crear el updater y pasarle el token del bot.\n updater = Updater(\"xxxxxxxxxxxxxxx\")\n\n # Preparar el dispatcher para registrar los handlers\n dispatcher = updater.dispatcher\n\n # Añadir los conversation handler con los estados necesarios para el funcionamiento del bot\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n START: [MessageHandler(Filters.location, location),\n CommandHandler('saltar', skip_location),\n CommandHandler('salir', cancel),\n MessageHandler(~Filters.location, error_location)],\n LOCALIZACION: [MessageHandler(Filters.location, location),\n CommandHandler('saltar', skip_location),\n CommandHandler('salir', cancel),\n MessageHandler(~Filters.location and ~Filters.regex('^(/saltar)$'), error_location)],\n DECISION: [MessageHandler(Filters.regex('^(Voz)$'), eaudio),\n CommandHandler('salir', cancel),\n MessageHandler(Filters.regex('^(Archivo)$'), earchivo)],\n MVOZ: [MessageHandler(Filters.voice, paudio),\n CommandHandler('salir', cancel),\n MessageHandler(Filters.regex('^(ejemplo)$'), paudioej),\n MessageHandler(~Filters.voice and ~Filters.regex('^(ejemplo)$'), error_audio)],\n AAUDIO: [MessageHandler(Filters.audio, parchivo),\n CommandHandler('salir', cancel),\n MessageHandler(Filters.regex('^(ejemplo)$'), parchivoej),\n MessageHandler(~Filters.audio and ~Filters.regex('^(ejemplo)$'), error_archivo)],\n },\n fallbacks=[CommandHandler('salir', cancel)],\n )\n\n dispatcher.add_handler(conv_handler)\n\n # Arrancar el Bot\n updater.start_polling()\n\n # El bot estará corriendo hasta que se pulse Ctrl-C o el proceso reciba SIGINT,\n # SIGTERM or SIGABRT. Esto se debe usar, desde que start_polling() es no bloqueante y parará el bot\n updater.idle()\n\n\nif __name__ == '__main__':\n main()","repo_name":"uo229632/BirdSoundsBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":16309,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10974899127","text":"from enum import Enum\n\nfrom gym_tak.read_only import read_only_enum\n\n\n@read_only_enum('blocks', 'ignores_block', 'forms_road', 'value', 'string')\nclass Types(Enum):\n CAPSTONE = True, True, True, 1, 'C'\n FLAT_STONE = False, False, True, 2, 'F'\n STANDING_STONE = True, False, False, 3, 'S'\n\n def __new__(cls, *args, **kwargs):\n value = len(cls.__members__) + 1\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n\n def __init__(self, blocks: bool, ignores_block: bool, forms_road: bool, value: int, string: str) -> None:\n self.blocks = blocks\n self.ignores_block = ignores_block\n self.forms_road = forms_road\n self.int_value = value\n self.string = string\n\n @classmethod\n def from_int(cls, value: int) -> 'Types':\n value = abs(value)\n\n for type_ in cls:\n if type_.value == value:\n return type_\n\n def can_move(self, to_top_piece_type: 'Types') -> bool:\n if self is self.CAPSTONE:\n return to_top_piece_type is not self.CAPSTONE\n return self.ignores_block or not to_top_piece_type.blocks\n\n","repo_name":"DrSmugleaf/gym-tak","sub_path":"gym_tak/tak/piece/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35351617164","text":"#------------------------------------------------------------------------------\n# PURPOSE: Helper functions from athlete_views.py\n# FILES: ./athlete_views.py\n#------------------------------------------------------------------------------\nfrom .utils import *\nfrom .models import *\nfrom .athlete_forms import *\n\nfrom forecastiopy import *\nfrom geopy.geocoders import Nominatim\nimport time\n\ndef wear_help(location):\n\n geolocator = Nominatim()\n location = geolocator.geocode(location)\n if location == None:\n return None\n lat = location.latitude\n lon = location.longitude\n\n fio = ForecastIO.ForecastIO(\"31d0c8f0c1036505d4f8541000fcc555\", latitude=lat, longitude=lon)\n current = FIOCurrently.FIOCurrently(fio)\n\n tights_CO = 45\n temp = current.temperature\n #------------------------tights-----------------------------\n #adjust for wind\n if current.windSpeed > 5 and current.windSpeed < 10:\n tights_CO -= 5\n elif current.windSpeed > 10:\n tights_CO -= 10\n\n #calculate tights\n if temp > tights_CO:\n tights = \"No tights today.\"\n else:\n tights = \"Wear tights.\"\n\n #----------------------tops--------------------------------\n tops = \"\"\n adj_temp = temp - current.windSpeed\n\n if .85 < current.precipProbability <= 1 and current.icon == \"rain\":\n if adj_temp > 48:\n tops = \"it will probably rain, but it is too warm to matter.\"\n else:\n tops = \"It will probably rain, and it's pretty cold. Throw a windbreaker on top.\"\n\n if adj_temp > 60:\n tops += \"Don't wear any shirt. It's time to get ur tan on.\"\n elif adj_temp > 48:\n tops += \"A T-shirt will probably do it\"\n elif adj_temp > 40:\n tops += \"A long sleeve will work just fine.\"\n elif adj_temp > 34:\n tops += \"It's a two shirt kind of day.\"\n elif adj_temp > 24:\n tops += \"T-shirt, long sleeve, jacket.\"\n else:\n tops += \"Wear at least two layers on top, one of which should be substantial. \"\n\n #------------------------hat/glasses--------------------------\n if adj_temp < 30:\n hat = \"Wear a winter hat.\"\n else:\n hat = \"It's pretty cloudy. No need for a baseball hat or sunglasses.\"\n if current.cloudCover < .3:\n if current.windSpeed > 10:\n hat = \"A hat might blow off today, but sunglasses are a good idea\"\n else:\n hat = \"Hats and sunglasses should be worn today\"\n\n if current.icon == \"snow\":\n tops += \" It is snowing, so glasses and gloves are the move\"\n\n #-----------------------storm warning------------------------\n storm = \"Unavailable\"\n if \"nearestStormDistance\" in current.get().keys():\n storm = \"Just so you know, the nearest storm is {0} miles away\".format(current.nearestStormDistance)\n\n return {\n 'location': location,\n 'tights': tights,\n 'tops': tops,\n 'hat': hat,\n 'storm': storm\n }\n\ndef get_prs(athlete):\n activities = Activity.objects.filter(act_type='Event', athlete=athlete)\n prs = {}\n for e in activities:\n if str(e.distance) in prs:\n if e.duration < prs[str(e.distance)].duration:\n prs[str(e.distance)] = e\n else:\n prs[str(e.distance)] = e\n return prs\n\ndef make_duration_chartable(duration):\n \"\"\"-------------------------------------------------------\n Given a duration object, turn it into a list of the format\n [hours, minutes, seconds, milliseconds]\n -------------------------------------------------------\"\"\"\n days, seconds = duration.days, duration.seconds\n hours = days * 24 + seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60\n milliseconds = duration.microseconds / 1000\n return [hours, minutes, seconds, milliseconds]\n\ndef get_interval_graph_data(reps):\n graph_data = [['Date', 'Miles', {'role':'style'}]]\n for rep in reps:\n # [place, [hour, minute, second, millisecond]]\n graph_data.append([rep.position, make_duration_chartable(rep.duration), 'color:#f7c331'])\n return graph_data\n\n\ndef get_label(d, num_dates):\n if num_dates < 10:\n return d.strftime('%a %b %d')\n if num_dates < 15:\n return d.strftime('%a %d')\n if num_dates < 370:\n return d.strftime('%b %d')\n return str(d)\n\ndef build_graph_data(dates, athlete):\n total = 0\n colors = {\n 'NormalRun':'#6b7a8f',\n 'IntervalRun':'#f7c331',\n 'CrossTrain':'#dcc7aa',\n 'Event':'#dcc7aa',\n 'OffDay':'#111111' #immaterial, because days off have no color.\n }\n indexes = {\n 'NormalRun':1,\n 'IntervalRun':2,\n 'CrossTrain':3,\n 'Event':4,\n }\n\n data = [['Date', 'Normal Run', 'Interval Run', 'Cross Train', 'Race', {'role':'style'}, 'Link']]\n date_iterator = 0\n athlete_acts = Activity.objects.filter(athlete=athlete)\n for d in dates:\n activities = athlete_acts.filter(\n date=d,\n act_type__in=['NormalRun', 'Event', 'IntervalRun', 'CrossTrain']\n )\n prep = [get_label(d,len(dates)), 0, 0, 0, 0,'color:'+colors['OffDay'], 'nolink']\n for a in activities:\n # Don't include cross training miles\n if a.act_type != 'CrossTrain':\n miles = get_miles(a)\n else:\n miles = 0\n prep[indexes[a.act_type]] += miles\n total += miles\n prep[-1] = \"/log/athlete/activity_detail/\"+str(a.id)+\"/\"\n data.append(prep)\n return data, round(total, 2)\n\n\ndef set_total_distance(activity):\n \"\"\"---------------------------------------------------------\n used to set the total distance attribute of inerval runs.\n\t---------------------------------------------------------\"\"\"\n reps = Rep.objects.filter(activity=activity)\n total = 0\n for r in reps:\n total += get_miles(r, rep=True)\n\n #Calculate warm up distance\n if activity.wu_units == 'Miles':\n total += float(activity.warmup)\n elif activity.wu_units == 'Kilometers':\n total += kilometers_to_miles(activity.warmup)\n elif activity.wu_units == 'Meters':\n total += meters_to_miles(activity.warmup)\n\n #Calculate cool down distance\n if activity.cd_units == 'Miles':\n total += float(activity.cooldown)\n elif activity.cd_units == 'Kilometers':\n total += kilometers_to_miles(activity.cooldown)\n elif activity.cd_units == 'Meters':\n total += meters_to_miles(activity.cooldown)\n\n activity.distance = round(total, 2)\n activity.save()\n","repo_name":"mreading/keeptrack","sub_path":"log/athlete_utils.py","file_name":"athlete_utils.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"7984773049","text":"#!/bin/python3\r\nfrom itertools import combinations\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the birthday function below.\r\ndef birthday(s, d, m):\r\n k=[]\r\n \r\n l=list(combinations(s,m))\r\n l=set(l)\r\n for i in l:\r\n \r\n if(sum(i)==d):\r\n print(i)\r\n k.append(i)\r\n t=set(k)\r\n return len(t)\r\n\r\n\r\n \r\n\r\nn = int(input().strip())\r\n\r\ns = list(map(int, input().rstrip().split()))\r\n\r\ndm = input().rstrip().split()\r\n\r\nd = int(dm[0])\r\n\r\nm = int(dm[1])\r\n\r\nresult = birthday(s, d, m)\r\n\r\nprint(result)\r\n","repo_name":"DiptoChakrabarty/Python","sub_path":"Hacker-rank/birthdaybar.py","file_name":"birthdaybar.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26911311764","text":"class Employee:\n # constructor\n def __init__(self,name,salary,department):\n self.name = name\n self.salary = salary\n self.department = department\n\n \n def showData(self):\n print(\"name = {}\".format(self.name))\n print(\"salary = {}\".format(self.salary))\n print(\"department = {}\".format(self.department))\n\n def __del__(self):\n print(\"Call Destructor\")\n\n# object\nobj1 = Employee(\"pin\",200000,\"programer\")\nobj1.name = \"pinzaa\"\nobj1.salary = 500000\nobj1.showData()\n\nobj2 = Employee(\"parn\",100000,\"ceo\")\nobj2.showData()\n\nobj3 = Employee(\"pook\",20000,\"boy\")\nobj3.showData()","repo_name":"vorasit/python-project","sub_path":"oop/basicoop2.py","file_name":"basicoop2.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17238997298","text":"# File: ISBN.py\n\n# Description: This program determines the validity of ISBN codes \n\n# Student Name: Derek Orji\n\n# Student UT EID: dao584\n\n# Course Name: CS 303E\n\n# Unique Number: 51845\n\n# Date Created: 4/13/17\n\n# Date Last Modified: 4/14/17\n\n# Function to determine validity \ndef validisbn(line2):\n\ta = []\n\ts1 = []\n\ts2 = []\n\tfor i in line2:\n\t\tif i == \"X\" or i == \"x\":\n\t\t\ta.append(10)\n\t\telse:\n\t\t\ta.append(int(i))\n\t\n\tfor i in range(0, len(a)):\n\t\ts1.append(sum(a[0:(i+1)]))\n\tfor j in range(0, len(s1)):\n\t\ts2.append(sum(s1[0:(j+1)]))\n\tif (s2[len(s2)-1]) % 11 == 0:\n\t\treturn True \n\telse:\n\t\treturn False\n\n\t\n\n\n# Main function\ndef main():\n\t# Opening Files \n\tinitialfile = open(\"isbn.txt\", \"r\")\n\toutputfile = open(\"isbnOut.txt\", \"w\")\n\t# Checks that only digits, hyphens, and X's are available\n\tfor line in initialfile:\n\t\tline1 = line.strip()\n\t\tvalid = True\n\t\tfor i in range(0,len(line1)):\n\t\t\tif line1[i] != \"0\" and line1[i] != \"1\" and line1[i] != \"2\" and line1[i] != \"3\" and line1[i] != \"4\" and line1[i] != \"5\" and line1[i] != \"6\" and line1[i] != \"7\" and line1[i] != \"8\" and line1[i] != \"9\" and line1[i] != \"-\" and line1[i] != \"X\" and line1[i] != \"x\":\n\t\t\t\toutputfile.write(line1 + \" invalid\\n\")\n\t\t\t\tvalid = False\n\t\t\t\tbreak\n\t\t\t\t\n\t\tif valid == True: \n\t\t\t\n\t\t\t\n\t\t\t#Checks for 9 digits and checks last character \n\t\t\tcounter = 0\n\t\t\tfor i in range(0, (len(line1)-1)):\n\t\t\t\tif line1[i] == \"0\" or line1[i] == \"1\" or line1[i] == \"2\" or line1[i] == \"3\" or line1[i] == \"4\" or line1[i] == \"5\" or line1[i] == \"6\" or line1[i] == \"7\" or line1[i] == \"8\" or line1[i] == \"9\":\n\t\t\t\t\tcounter += 1\n\t\t\tif counter != 9 or (line1[len(line1)-1] != \"0\" and line1[len(line1)-1] != \"1\" and line1[len(line1)-1] != \"2\" and line1[len(line1)-1] != \"3\" and line1[len(line1)-1] != \"4\" and line1[len(line1)-1] != \"5\" and line1[len(line1)-1] != \"6\" and line1[len(line1)-1] != \"7\" and line1[len(line1)-1] != \"8\" and line1[len(line1)-1] != \"9\" and line1[len(line1)-1] != \"X\" and line1[len(line1)-1] != \"x\"):\n\t\t\t\toutputfile.write(line1 + \" invalid\\n\")\n\t\t\telse:\n\t\t\t\tline2 = line1.replace(\"-\", \"\")\n\t\t\t\tif validisbn(line2) == True:\n\t\t\t\t\toutputfile.write(line1 + \" valid\\n\")\n\t\t\t\telse:\n\t\t\t\t\toutputfile.write(line1 + \" invalid\\n\")\n\t#Closing Files \n\tinitialfile.close()\n\toutputfile.close()\n\n\n\nmain()\n","repo_name":"Arinze95/CS303E","sub_path":"Documents/CS303E/CS303E-master/ISBN.py","file_name":"ISBN.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30175933515","text":"import sys\r\nimport pygame\r\nimport os\r\n\r\nsize = weight, height = 800, 501\r\nscreen = pygame.display.set_mode(size)\r\n\r\n\r\ndef load_image(name, colorkey=None): # функция обработки картинки\r\n fullname = os.path.join('data', name)\r\n if not os.path.isfile(fullname): # обработка отсутствия картинки\r\n print(f\"Файл с изображением '{fullname}' не найден\")\r\n sys.exit()\r\n image = pygame.image.load(fullname)\r\n if colorkey is not None: # замена выбранного цвета на прозрачный\r\n image = image.convert()\r\n if colorkey == -1:\r\n colorkey = image.get_at((0, 0))\r\n image.set_colorkey(colorkey)\r\n else:\r\n image = image.convert_alpha()\r\n return image\r\n\r\n\r\nclass Mountain(pygame.sprite.Sprite):\r\n image = load_image(\"mountains.png\")\r\n\r\n def __init__(self):\r\n super().__init__(all_sprites)\r\n self.image = Mountain.image\r\n self.rect = self.image.get_rect()\r\n # вычисляем маску для эффективного сравнения\r\n self.mask = pygame.mask.from_surface(self.image)\r\n # располагаем горы внизу\r\n self.rect.bottom = height\r\n\r\n\r\nclass Landing(pygame.sprite.Sprite):\r\n image = load_image(\"pt.png\")\r\n\r\n def __init__(self, pos):\r\n super().__init__(all_sprites)\r\n self.image = Landing.image\r\n self.rect = self.image.get_rect()\r\n # вычисляем маску для эффективного сравнения\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.rect.x = pos[0]\r\n self.rect.y = pos[1]\r\n\r\n def update(self):\r\n if not pygame.sprite.collide_mask(self, mountain):\r\n self.rect = self.rect.move(0, 1)\r\n\r\n\r\nif __name__ == '__main__':\r\n all_sprites = pygame.sprite.Group()\r\n\r\n pygame.display.set_caption('Landing')\r\n mountain = Mountain()\r\n running = True\r\n clock = pygame.time.Clock()\r\n while running:\r\n screen.fill((0, 0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n Landing(event.pos)\r\n clock.tick(50)\r\n all_sprites.update()\r\n all_sprites.draw(screen)\r\n pygame.display.flip()\r\n pygame.quit()\r\n","repo_name":"kotosusl/Landing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42545059238","text":"\nfrom django.shortcuts import render,redirect,HttpResponse,Http404\nfrom firstapp.models import People,Article,Topics,Tag,Comment,Ticket\nfrom django.template import Context,Template\nfrom django import forms\nfrom firstapp.form import CommentForm\nfrom firstapp.aheadline import trans_headline\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom django.contrib.auth import authenticate,login\nfrom firstapp.form import LoginForm\nfrom django.contrib.auth.forms import UserCreationForm,AuthenticationForm\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Create your views here.\n\n\ndef index(request):\n\n print(request)\n print('==='*30)\n print(dir(request))\n print('==='*30)\n print(type(request))\n queryset = request.GET.get('tag')\n print(queryset)\n orderset = request.GET.get('order')\n print(orderset)\n dateset = request.GET.get('pubdate')\n\n if queryset:\n article_list = Article.objects.filter(tag=queryset)\n else:\n article_list = Article.objects.all()\n\n if orderset:\n article_list = Article.objects.order_by('-watchnumber')\n else:\n pass\n\n if dateset:\n article_list = Article.objects.order_by('-pub_date')\n else:\n pass\n\n\n for article in article_list: #将文件名中的空格替换为-, 并储存在 headline_2 字段\n if article.headline_2:\n pass\n else:\n article.headline_2 = article.headline.replace(' ','-')\n # print ('headline_2 is: ',article.headline_2)\n article.save()\n\n # for article in article_list:\n # print (article.headline, ' transfer to' ,article.headline_2)\n\n context = {}\n\n topic_list = Topics.objects.all()\n tag_list = Tag.objects.all()\n\n context['article_list'] = article_list\n context['topic_list']= topic_list\n context['tag_list']= tag_list\n\n index_page = render(request,'huoyan_homepage.html',context)\n return index_page\n\n\ndef detail(request,head_line,error_form = None):\n # print('get head_line:', head_line)\n context = {}\n form = CommentForm\n article = Article.objects.get(headline_2 = head_line)\n print ('article is:',article)\n\n voter_id = request.user.profile.id\n user_ticket_for_this_article = []\n try:\n user_ticket_for_this_article = Ticket.objects.get(voter_id=voter_id,article=article)\n best_comment = Comment.objects.filter(is_best = 'True',belong_to=article)\n user_ticket_for_this_article.save()\n except :\n new_ticket = Ticket(voter_id = voter_id,article_id = id,choice = request.POST['vote'])\n new_ticket.save()\n\n # print ('user_ticket is:', user_ticket_for_this_article)\n context['article']= article\n context['user_ticket']=user_ticket_for_this_article\n\n if best_comment:\n context['best_comment']=best_comment[0]\n if error_form is not None:\n context['form']= error_form\n else:\n context['form'] = form\n\n return render(request,'huoyan_article.html',context)\n\ndef detail_comment(request,head_line):\n print ('type is post')\n form = CommentForm(request.POST)\n print(form)\n # 把通过验证的信息储存为Comment 实例\n if form.is_valid():\n name = form.cleaned_data['name']\n comment = form.cleaned_data['comment']\n article = Article.objects.get(headline_2 = head_line)\n # article = Article.objects.get(headline = head_line)\n c = Comment(name=name, comment = comment,belong_to=article)\n c.save()\n print ('c is :',c)\n else:\n return detail(request,head_line,error_form = form)\n return redirect(to='detail',head_line= head_line)\n\n\ndef detail_vote(request,head_line):\n voter_id = request.user.profile.id\n article = Article.objects.get(headline_2 = head_line)\n\n try:\n user_ticket_for_this_article = Ticket.objects.get(voter_id = voter_id,article = article)\n user_ticket_for_this_article.choice = request.POST['vote']\n user_ticket_for_this_article.save()\n except ObjectDoesNotExist:\n new_ticket = Ticket(voter_id = voter_id,article = article, choice = request.POST['vote'])\n new_ticket.save()\n\n return redirect(to = 'detail',head_line = head_line)\n\n\n\n\ndef form(request):\n\n form_page = render(request,'form.html')\n return form_page\n\ndef listing(request,cate=None):\n print('cate is :',cate)\n context = {}\n if cate is None:\n article_list = Article.objects.all()\n user_ticket = Ticket.objects.all()\n else :\n article_list = Article.objects.filter(editorchoice = True)\n print ('this is listing, the cate is:', cate)\n\n\n page_robot = Paginator(article_list,5)\n page_num = request.GET.get('page')\n try:\n article_list = page_robot.page(page_num)\n except EmptyPage:\n article_list = page_robot.page(page_robot.num_pages)\n # raise Http404('EmptyPage!')\n except PageNotAnInteger:\n article_list = page_robot.page(1)\n\n\n\n context['page_robot']=page_robot\n context['article_list']= article_list\n\n return render (request,'listing.html',context)\n\n# 登录表单\ndef index_login(request):\n context = {}\n if request.method == 'GET':\n form = AuthenticationForm\n\n if request.method=='POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n\n login(request,form.get_user())\n return redirect(to='listing')\n\n else:\n return HttpResponse('

NOT A USER!

')\n\n context['form']=form\n return render(request,'login_register.html',context)\n\n# 注册表单\n\ndef index_register(request):\n print ('this is register')\n context = {}\n if request.method == 'GET':\n form = UserCreationForm\n if request.method=='POST':\n form = UserCreationForm(request.POST)\n print ('test if form is valid...',form.is_valid())\n print(form)\n if form.is_valid():\n print(' form is valid')\n form.save()\n return redirect(to='login')\n else:\n print('form is not valid...')\n\n context['form']=form\n\n return render(request,'register.html',context)\n","repo_name":"miaozaiye/pythonpractice","sub_path":"Desktop/Python 全栈学习/root/firstsite/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27252134943","text":"from __future__ import print_function, division\nfrom pyxl320 import ServoSerial\nfrom pyxl320.Packet import le, makeReadPacket\nimport argparse\nimport simplejson as json\nfrom serial import SerialException\nimport sys\nfrom quadruped.packetDecoder import PacketDecoder\n\n\ndef writeToFile(data, filename='data.json'):\n\twith open(filename, 'w') as outfile:\n\t\tjson.dump(data, outfile)\n\n\ndef pktToDict(p):\n\t# print('pkt', pkt)\n\t# print('len(pkt)', len(pkt))\n\tans = {}\n\tans['ID'] = p.id\n\tans['Present Position'] = p.angle()\n\tans['Present Voltage'] = p.voltage()\n\tans['Present Load'] = '{:>5.1f}% {}'.format(*p.load())\n\tans['Present Temperature'] = p.temperature(PacketDecoder.F)\n\tans['Hardware Error Status'] = p.hw_error()\n\n\treturn ans\n\n\nDESCRIPTION = \"\"\"\nReturns limited info for each leg servo.\n\n./get_leg_info.py /dev/tty.usbserial-AL034G2K\nOpened /dev/tty.usbserial-AL034G2K @ 1000000\n\nServos: 1 - 12\n--------------------------------------------------\nServo: 1 \t\tHW Error: 0\nPosition [deg]: 139.6 Load: 0.0% CCW\nVoltage [V] 7.0 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 2 \t\tHW Error: 0\nPosition [deg]: 178.9 Load: 4.5% CW\nVoltage [V] 7.1 Temperature [F]: 86.0\n--------------------------------------------------\nServo: 3 \t\tHW Error: 0\nPosition [deg]: 119.1 Load: 0.0% CCW\nVoltage [V] 7.1 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 4 \t\tHW Error: 0\nPosition [deg]: 146.6 Load: 0.8% CCW\nVoltage [V] 7.3 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 5 \t\tHW Error: 0\nPosition [deg]: 275.4 Load: 0.8% CCW\nVoltage [V] 7.1 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 6 \t\tHW Error: 0\nPosition [deg]: 104.1 Load: 0.0% CCW\nVoltage [V] 7.3 Temperature [F]: 82.4\n--------------------------------------------------\nServo: 7 \t\tHW Error: 0\nPosition [deg]: 163.9 Load: 0.0% CCW\nVoltage [V] 7.2 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 8 \t\tHW Error: 0\nPosition [deg]: 279.5 Load: 0.0% CCW\nVoltage [V] 7.1 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 9 \t\tHW Error: 0\nPosition [deg]: 100.3 Load: 0.0% CCW\nVoltage [V] 7.1 Temperature [F]: 84.2\n--------------------------------------------------\nServo: 10 \t\tHW Error: 0\nPosition [deg]: 156.3 Load: 0.0% CCW\nVoltage [V] 7.1 Temperature [F]: 82.4\n--------------------------------------------------\nServo: 11 \t\tHW Error: 0\nPosition [deg]: 280.6 Load: 0.0% CCW\nVoltage [V] 7.2 Temperature [F]: 80.6\n--------------------------------------------------\nServo: 12 \t\tHW Error: 0\nPosition [deg]: 97.7 Load: 0.0% CCW\nVoltage [V] 7.1 Temperature [F]: 84.2\n--------------------------------------------------\n\"\"\"\n\n\ndef handleArgs():\n\tparser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)\n\tparser.add_argument('port', help='serial port or \\'dummy\\' for testing', type=str)\n\tparser.add_argument('-j', '--json', metavar='FILENAME', help='save info to a json file: --json my_file.json', type=str)\n\n\targs = vars(parser.parse_args())\n\treturn args\n\n\ndef getSingle(ID, ser):\n\tpkt = makeReadPacket(ID, 37, le(50-37+1))\n\t# print('made packet:', pkt)\n\n\tans = ser.sendPkt(pkt)\n\tif ans:\n\t\tans = ans[0]\n\t\tpd = PacketDecoder(ans, 37) # data packet starts at register 37\n\t\t# pd.printPacket()\n\t\tif pd.checkError():\n\t\t\traise Exception('Crap!')\n\t\tans = pktToDict(pd)\n\telse:\n\t\tans = None\n\n\treturn ans\n\n\ndef printServo(s):\n\tprint('-'*50)\n\tprint('Servo: {} \\t\\tHW Error: {}'.format(s['ID'], s['Hardware Error Status']))\n\tprint('Position [deg]: {:5.1f} Load: {}'.format(s['Present Position'], s['Present Load']))\n\tprint('Voltage [V] {:4.1f} Temperature [F]: {:5.1f}'.format(s['Present Voltage'], s['Present Temperature']))\n\n\ndef main():\n\targs = handleArgs()\n\tport = args['port']\n\n\ts = ServoSerial(port=port)\n\n\t# open serial port\n\ttry:\n\t\ts.open()\n\texcept SerialException as e:\n\t\tprint('-'*20)\n\t\tprint(sys.argv[0], 'encountered an error')\n\t\tprint(e)\n\t\texit(1)\n\n\tids = range(1, 13)\n\n\tresp = {}\n\tfor k in ids:\n\t\tresp[k] = None\n\n\t# get servo data\n\ttry:\n\t\tfor i in ids:\n\t\t\tdata = getSingle(i, s)\n\t\t\tresp[i] = data\n\texcept Exception as e:\n\t\tprint(e)\n\t\texit(1)\n\n\tcnt = 10\n\twhile cnt:\n\t\tcnt = 0\n\t\tfor k, v in resp.items():\n\t\t\t# search through and find servos w/o responses (i.e., None)\n\t\t\tif v is None:\n\t\t\t\tcnt += 1 # found a None\n\t\t\t\tans = getSingle(k, s)\n\t\t\t\tresp[k] = ans\n\n\tprint('')\n\tprint('Servos: 1 - 12')\n\tfor i in range(1, 13):\n\t\tprintServo(resp[i])\n\tprint('-' * 50)\n\tprint('')\n\n\tif args['json']:\n\t\tprint('Saving servo angle info to {}'.format(args['json']))\n\t\twriteToFile(resp, args['json'])\n\n\ts.close()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"MultipedRobotics/multiped","sub_path":"multiped/bin/get_leg_info.py","file_name":"get_leg_info.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"70096144057","text":"\n\nclass BinaryTree:\n def binary_search_recursive(self, arr, target, left, right):\n if left > right:\n # input : sorted arr [1,2,3,5,6,8,10] , target =8 , left =0 , right = 7\n # return index if present (5) else -1\n\n mid = left+(right-left) // 2\n print(\"The number at {} : {} \".format(mid, arr[mid]))\n\n if arr[mid] == target: # if target found\n return mid\n\n if target > arr[mid]: # element present in right\n return self.binary_search_recursive(arr, target, mid+1, right)\n else: # element present in left\n return self.binary_search_recursive(arr, target, left, mid)\n else:\n return -1\n\n def binary_search_iter(self, arr, target):\n left = 0\n right = len(arr)\n\n while left < right:\n mid = left+(right-left)//2\n\n if target == arr[mid]:\n return mid\n\n if target > arr[mid]:\n left = mid + 1\n else:\n right = mid\n\n return -1\n\n\narr = [1, 2, 3, 5, 6, 8, 10]\n\nans = BinaryTree()\nprint(ans.binary_search_recursive(arr, 10, 0, len(arr)))\n\nprint(ans.binary_search_iter(arr, 10))\n","repo_name":"ashritdeebadi/ds_leetcode","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72196696376","text":"from data_structures.linked_list import LinkedList\n\n\ndef zip_lists(list1, list2):\n current1 = list1.head\n current2 = list2.head\n if current1 == None:\n return list2\n if current2 == None:\n return list1\n new_list = LinkedList()\n while current1 and current2:\n if current1:\n new_list.append(current1.value)\n current1 = current1._next\n if current2:\n new_list.append(current2.value)\n current2 = current2._next\n\n while current1:\n new_list.append(current1.value)\n current1 = current1._next\n\n # This is where we will append any remaining elements from list2\n while current2:\n new_list.append(current2.value)\n current2 = current2._next\n\n\n return new_list\n","repo_name":"DanRQuinn/data-structures-and-algorithms","sub_path":"python/code_challenges/linked_list_zip.py","file_name":"linked_list_zip.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25261567137","text":"from datetime import datetime\nfrom django.test import TestCase\nfrom django.utils.dateparse import parse_datetime\nfrom restclients.exceptions import DataFailureException\nfrom restclients.models.bridge import BridgeUser, BridgeCustomField,\\\n BridgeUserRole\nfrom restclients.test import fdao_pws_override\n\n\nclass TestBridgeModel(TestCase):\n\n def test_bridge_user_role(self):\n role = BridgeUserRole(role_id='user', name='user')\n self.assertEqual(role.to_json(),\n {\"id\": \"user\", \"name\": \"user\"})\n\n def test_bridge_custom_field(self):\n bcf = BridgeCustomField(value_id=\"1\",\n field_id=\"5\",\n name=\"REGID\",\n value=\"787\")\n self.assertEqual(bcf.to_json(),\n {'id': '1',\n 'value': '787',\n 'name': 'REGID',\n 'custom_field_id': '5'})\n self.assertTrue(bcf.is_regid())\n\n bcf = BridgeCustomField(field_id=\"5\",\n name=\"REGID\")\n self.assertEqual(bcf.to_json(),\n {'name': 'REGID',\n 'custom_field_id': '5',\n 'value': None})\n self.assertIsNotNone(str(bcf))\n\n def test_bridge_user(self):\n bcf = BridgeCustomField(\n field_id=\"5\",\n name=\"REGID\",\n value=\"12345678901234567890123456789012\")\n user = BridgeUser()\n user.netid = \"iamstudent\"\n user.full_name = \"Iam Student\"\n user.first_name = \"Iam A\"\n user.last_name = \"Student\"\n user.email = \"iamstudent@uw.edu\"\n user.custom_fields.append(bcf)\n user.updated_at = parse_datetime(\"2016-08-08T13:58:20.635-07:00\")\n self.assertEqual(\n user.to_json_post(),\n {'users': [\n {'custom_fields': [\n {'custom_field_id': '5',\n 'name': 'REGID',\n 'value': '12345678901234567890123456789012'}],\n 'uid': 'iamstudent@uw.edu',\n 'email': 'iamstudent@uw.edu',\n 'first_name': 'Iam A',\n 'full_name': 'Iam Student',\n 'last_name': 'Student'\n }]})\n self.assertIsNotNone(str(user))\n self.assertFalse(user.has_course_summary())\n self.assertFalse(user.no_learning_history())\n self.assertEqual(user.get_uid(), \"iamstudent@uw.edu\")\n user = BridgeUser()\n user.netid = \"iamstudent\"\n user.full_name = \"Iam Student\"\n user.email = \"iamstudent@uw.edu\"\n user.custom_fields.append(bcf)\n self.assertEqual(\n user.to_json_post(),\n {'users': [\n {'custom_fields': [\n {'custom_field_id': '5',\n 'name': 'REGID',\n 'value': '12345678901234567890123456789012'}],\n 'email': 'iamstudent@uw.edu',\n 'full_name': 'Iam Student',\n 'uid': 'iamstudent@uw.edu'}]})\n\n user.bridge_id = 123\n self.assertEqual(\n user.to_json_post(),\n {'users': [\n {'custom_fields': [\n {'custom_field_id': '5',\n 'name': 'REGID',\n 'value': '12345678901234567890123456789012'}],\n 'id': 123,\n 'email': 'iamstudent@uw.edu',\n 'full_name': 'Iam Student',\n 'uid': 'iamstudent@uw.edu'}]})\n user.completed_courses_count = 3\n self.assertTrue(user.has_course_summary())\n self.assertFalse(user.no_learning_history())\n self.assertIsNotNone(str(user))\n","repo_name":"btb/uw-restclients","sub_path":"restclients/test/bridge/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41671785701","text":"import sys\nimport torch.onnx\nfrom caffe2.python.onnx.backend import Caffe2Backend as c2\nimport onnx\n\ndef convert_to_caffe2(net):\n model_path = f\"models/{net_type}.onnx\"\n init_net_path = f\"models/{net_type}_init_net.pb\"\n init_net_txt_path = f\"models/{net_type}_init_net.pbtxt\"\n predict_net_path = f\"models/{net_type}_predict_net.pb\"\n predict_net_txt_path = f\"models/{net_type}_predict_net.pbtxt\"\n\n dummy_input = torch.randn(1, 3, 300, 300)\n torch.onnx.export(net, dummy_input, model_path, verbose=False, output_names=['scores', 'boxes'])\n\n model = onnx.load(model_path)\n init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)\n\n print(f\"Save the model in binary format to the files {init_net_path} and {predict_net_path}.\")\n\n with open(init_net_path, \"wb\") as fopen:\n fopen.write(init_net.SerializeToString())\n with open(predict_net_path, \"wb\") as fopen:\n fopen.write(predict_net.SerializeToString())\n\n print(f\"Save the model in txt format to the files {init_net_txt_path} and {predict_net_txt_path}. \")\n with open(init_net_txt_path, 'w') as f:\n f.write(str(init_net))\n\n with open(predict_net_txt_path, 'w') as f:\n f.write(str(predict_net))\n","repo_name":"tyunist/SSD_Pytorch_from_Caffe","sub_path":"conversions/convert_pytorch_to_caffe2.py","file_name":"convert_pytorch_to_caffe2.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"12579110218","text":"from gourauds import *\nfrom projection import *\nfrom phong import *\n\n\ndef rasterize(p2d, rows, cols, H, W):\n # Depicts the coordinates of the Camera system with plane dimensions of H*W onto pixel positions of an image with\n # dimensions of rows*cols. The axis of the camera passes through the center of the orthogonal H*W, while the image\n # indexing starts from down to up and from left to right.\n # - p2d: 2*N numpy array with the 2D coordinates after projection from p3d\n # - rows: number of rows in the image n2d\n # - cols: number of columns in the image n2d\n # - H, W: height and width of the camera plane\n\n # Create an empty n2d array of size rows*cols\n n2d = np.zeros((p2d.shape[1], 2))\n\n # Calculate the scaling factors for mapping p2d coordinates to pixel positions\n width = cols / W\n height = rows / H\n\n # Iterate over each projected 2D coordinate and map it to the corresponding pixel position\n for i in range(p2d.shape[1]):\n n2d[i, 0] = np.around((p2d[0, i] + H / 2) * height + 0.5)\n n2d[i, 1] = np.around((-p2d[1, i] + W / 2) * width + 0.5)\n\n return n2d\n\n\ndef render_object(shader, focal, eye, lookat, up, bg_color, M, N, H, W, verts,\n vert_colors, faces, mat, lights, light_amb, lighting):\n # renders an object made of a specific material, placed in a scene with light sources and a camera; calculates how\n # light is reflected onto the object and its final color at each point.\n # - shader: string {\"gouraud\", \"phong\"} deciding the coloring function\n # - focal: the distance of the projection from the centre of the camera measured in the units used by the camera\n # coordinate system\n # - eye: 3 × 1 vector with the coordinates of the centre of the camera\n # - lookat: 3 × 1 vector with the coordinates of the camera target point\n # - up: 3 × 1 unit \"up\" vector of the camera\n # - bg_color: 3 × 1 vector with the colour components of the background\n # - M, N: height, width of the generated image in pixels\n # - H, W: physical height and width of the camera lens in the units used by the camera coordinate system\n # - verts: 3 × N_v matrix with the coordinates of the vertices of the object\n # - vert_colors: 3 × N_v matrix with the colour components of each vertex of the object\n # - faces: 3*N_t (number of triangles) matrix describing the triangles; the k-th column of faces contains\n # the serial numbers of the vertices of the k-th triangle of the object, 1 ≤ k ≤ NT;\n # the order of juxtaposition of the vertices marks by the right-handed screw rule the\n # direction of the normal vector and therefore also in which direction is the outer side of the object\n # mat: object of type PhongMaterial\n # - lights: list of objects of type PointLight\n # - light_amb: 3 × 1 vector with the components of the ambient radiation intensity in the interval [0, 1]\n # - img: the image with the rendered object\n\n assert shader in [\"gouraud\", \"phong\"]\n\n # Calculate normals for each vertex of each triangle\n normals = calculate_normals(verts, faces.T)\n\n # Project vertices onto the camera plane\n verts_projected, depth = camera_looking_at(focal, eye, lookat, up, verts)\n\n # Rasterize the projected vertices\n verts2d = rasterize(verts_projected, M, N, H, W).astype(int)\n\n # Initialize image\n image_shape = (M, N, 3)\n img = np.full(image_shape, bg_color)\n\n # Average depth of every triangle\n depth_order = np.mean(depth[faces], axis=1)\n\n # Sort triangles by depth\n sorted_triangles = np.flip(np.argsort(depth_order))\n\n for triangle in sorted_triangles:\n triangle_vertices_indices = faces[triangle]\n triangle_verts2d = verts2d[triangle_vertices_indices].T\n triangle_vcolors = vert_colors[triangle_vertices_indices].T\n bcoords = np.mean(verts[:, triangle_vertices_indices], axis=0).T\n\n if shader == \"gouraud\":\n img = shade_gouraud(triangle_verts2d, normals[:, triangle_vertices_indices], triangle_vcolors,\n bcoords, eye, mat, lights, light_amb, img, lighting)\n elif shader == \"phong\":\n img = shade_phong(triangle_verts2d, normals[:, triangle_vertices_indices], triangle_vcolors,\n bcoords, eye, mat, lights, light_amb, img, lighting)\n return img\n","repo_name":"dimitrisAlexo/Computer_Graphics","sub_path":"Assignment_3/src/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11537517921","text":"#https://www.acmicpc.net/problem/1157\nimport sys\nimport collections\nword = sys.stdin.readline().strip().upper()\n#모두 대문자로 바꿔줌\ncounter = collections.Counter(word).most_common()\n#각 문자별 나온 횟수 세줌, 그리고 most_commnon은 가장 많이 나온 순서대로 정렬\nif len(word) > 1 and (counter[0][1] == counter[1][1]):\n print(\"?\")\n exit()\nprint(counter[0][0])\n\n","repo_name":"albtraum/algorithm_study","sub_path":"public_content/1 week/이동은/ex_07.py","file_name":"ex_07.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"35675135485","text":"import json\nimport socket\nimport os\nimport subprocess\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.contrib.auth.models import User\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom . import models, forms\n\n## Path to user's AI files\nAIDBPath = \"../../AIDataBase/\"\n\n@csrf_exempt\n## Handler of request of index page\n# @param request - user request\ndef index(request):\n print(\"Index view started\")\n\n userList = []\n for user in User.objects.all():\n userList.append(user)\n\n profileList = []\n for profile in models.Profile.objects.all():\n profileList.append(profile)\n\n context = {\n #'userList': userList,\n #'profileList': profileList,\n 'result': \"\",\n }\n\n template = loader.get_template('backendPart/index.html')\n\n return HttpResponse(template.render(context, request))\n\n## Read game json from file\n# Method for reading game server's json from file\n# @param fileName = name of json's file\ndef readDirectlyFromFile(fileName):\n filePath = 'backendPart/static/backendPart/'\n file = open(filePath + fileName)\n data = json.load(file)\n return data\n\n## Read game json from socket\n# Method for reading game server's json from socket\n# @param blockSize - size of blocks to read per request\ndef readFromSocket(blockSize):\n port = 8000\n data = b\"\"\n # Try to open socket\n try:\n sock = socket.socket() # Do not know when file closes to know then to close socket, so now open and close it in every request\n sock.connect(('localhost', port))\n except ConnectionRefusedError:\n return False, data\n else: # if socket opened\n sock.setblocking(0)\n # Try to read from socket\n try:\n tmp = sock.recv(blockSize)\n while tmp:\n data += tmp\n tmp = sock.recv(blockSize)\n except socket.error:\n return False, data\n sock.close()\n\n\n@csrf_exempt\n## Get game map json\n# @param request - user request\ndef getGameMapJson(request):\n fileName = 'game_map .json'\n # Getting JSON with TCP from game server\n blockSize = 1024 # Get JSON partly\n readed, data = readFromSocket(blockSize)\n if not readed:\n print(\"read directly\")\n data = readDirectlyFromFile(fileName)\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\n@csrf_exempt\n## Get game objects json\n# @param request - user request\ndef getObjectsJson(request):\n fileName = 'objects.json'\n # Getting JSON with TCP from game server\n blockSize = 128 # Get JSON partly\n readed, data = readFromSocket(blockSize)\n if not readed:\n data = readDirectlyFromFile(fileName)\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n## Handler of request of registration page\n# @param request - user request\ndef registration(request):\n template = loader.get_template('backendPart/registration.html')\n return HttpResponse(template.render({}, request))\n\n\n@csrf_exempt\n## Handler of request to register the user\n# @param request - user request\ndef registerUser(request):\n print(\"start registering view\")\n result = \"\"\n if request.method == 'POST':\n form = forms.SignUpForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n email = form.cleaned_data['email']\n\n print(username, password, email)\n try:\n user = User.objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n if user is not None:\n print(\"Unsuccessful registration: such username exist! Please choose other username.\")\n result = \"Unsuccessful registration: such user exist! Please choose other username.\"\n else:\n newUser = User.objects.create_user(username, email, password)\n newUser.save()\n\n # create user directory\n path = AIDBPath + username\n os.makedirs(path)\n # add profile in database\n person = models.Profile()\n person.user = newUser\n person.aiFolderPath = path\n person.save()\n\n print(\"\\nSign up was successful!\")\n result = \"Sign up was successful!\"\n else:\n print(\"Input data is not valid\")\n result = \"Input data is not valid\"\n else:\n print(\"request is not POST\")\n return render(request, 'backendPart/registration.html', {'result': result})\n\n\n@csrf_exempt\n## Handler of request to log in the user\n# @param request - user request\ndef logIn(request):\n # template = loader.get_template('backendPart/index.html')\n print(\"log in view started\")\n result = \"\"\n if request.method == 'POST':\n form = forms.logInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n print(\"Have such user\")\n login(request, user)\n else:\n print(\"Username or password is not valid\")\n result = \"Username or password is not valid\"\n else:\n print(\"Input data is not valid\")\n result = \"Username or password is not valid\"\n else:\n print(\"request is not POST\")\n return render(request, 'backendPart/index.html', {'result': result})\n\n\n@csrf_exempt\n## Handler of request to log out the user\n# @param request - user request\ndef logOut(request):\n print(\"log out view started\")\n logout(request)\n return render(request, 'backendPart/index.html', {'result': \"\"})\n\n\n@csrf_exempt\n## Handler of request to upload user files (AI files)\n# @param request - user request\n# permitted file's extensions: .cpp, .c, .h\ndef uploadFile(request):\n permittedExtensions = [\"cpp\", \"h\", \"c\"]\n permittedFileSize = 1e5\n\n result = \"Files were uploaded\" # Default result\n if request.method == 'POST':\n print(\"upload view started\")\n form = forms.UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n # Get game name\n gameName = request.POST['gameName']\n\n # Create game path for user\n filesPath = AIDBPath + \"/\" + request.user.username + \"/\" + gameName + \"/\"\n if os.path.exists(filesPath):\n existingFilesList = [f for f in os.listdir(filesPath)]\n for f in existingFilesList:\n os.remove(os.path.join(filesPath, f)) # delete existing files\n else:\n os.makedirs(filesPath)\n\n # Iterating throw uploaded files\n uploadedfilesList = request.FILES.getlist('FileName')\n for file in uploadedfilesList:\n fileName = file.name\n # Check file extensions (permitted only .cpp, .h) and size\n extension = fileName.split(\".\")[-1]\n if extension not in permittedExtensions:\n result = \"Not permitted extension: \" + fileName\n break\n if file.size > permittedFileSize:\n result = \"Not permitted file size: \" + fileName\n break\n\n # Write sent file\n with open(filesPath + fileName, 'wb+') as destination:\n for chunk in file.chunks():\n destination.write(chunk)\n # Static check AI files\n # ...\n return render(request, 'backendPart/games/game' + gameName + '.html', {'result': result})\n return render(request, 'backendPart/gameGallery.html', {})\n\n## Handler of request of game gallery page\n# @param request - user request\ndef gameGallery(request):\n template = loader.get_template('backendPart/gameGallery.html')\n return HttpResponse(template.render({}, request))\n\n## Handler of request of game PickItUp page\n# @param request - user request\ndef gamePickItUp(request):\n template = loader.get_template('backendPart/games/gamePickItUp.html')\n return HttpResponse(template.render({}, request))\n\n## Handler of request of game MakeItYours page\n# @param request - user request\ndef gameMakeItYours(request):\n template = loader.get_template('backendPart/games/gameMakeItYours.html')\n return HttpResponse(template.render({}, request))\n\n## Handler of request of game BattleTanks page\n# @param request - user request\ndef gameBattleTanks(request):\n template = loader.get_template('backendPart/games/gameBattleTanks.html')\n return HttpResponse(template.render({}, request))\n\n\n@csrf_exempt\n## Handler of request to start game\n# @param request - user request\ndef startGame(request):\n if request.method == 'POST':\n form = forms.StartGameForm(request.POST)\n if form.is_valid():\n gameName = form.cleaned_data['gameName']\n # start game with name = gameName\n pathToBuilds = '../../builds/'\n subprocess.Popen(\n pathToBuilds + 'x64/Debug/BattleServer.exe -p ..\\\\..\\\\src\\\\ClientBackend\\\\backendPart\\\\static\\\\backendPart\\\\')\n subprocess.Popen(pathToBuilds + 'x64/Debug/AI_1.exe')\n subprocess.Popen(pathToBuilds + 'x64/Debug/AI_2.exe')\n return render(request, 'backendPart/gameGallery.html', {})\n\n# Handler of request to get user guide\n# @param request - user request\ndef getUserGuide(request):\n filename = '..\\\\..\\\\documents\\\\user_guide.docx'\n data = open(filename, \"rb\").read()\n print(data)\n response = HttpResponse(data, content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')\n response['Content-Length'] = os.path.getsize(filename)\n return response\n\n","repo_name":"unknownoperation/aibattlespace","sub_path":"src/ClientBackend/backendPart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10072,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"13853675257","text":"import math\n\nfrom bokeh.core.properties import Instance, Float\nfrom bokeh.core.enums import TextBaseline, Orientation, TextAlign\n\nfrom bokeh.embed import components\nfrom bokeh.layouts import column, row\nfrom bokeh.models import LayoutDOM, Select, Plot, \\\n ColumnDataSource, DataTable, \\\n TableColumn, Range1d, ColorBar, BasicTicker, \\\n LinearColorMapper, DateFormatter, DatetimeTickFormatter, MultiSelect\nfrom bokeh.palettes import RdYlGn11\nfrom bokeh.plotting import figure\nfrom bokeh.resources import INLINE\nfrom inqbus.graphdemo import constants\nfrom inqbus.graphdemo.bokeh_extension.helpers import get_min_value, \\\n get_max_value\nfrom inqbus.graphdemo.bokeh_extension.helpers_xy import get_diagram_data, \\\n get_plot_data_python\nfrom inqbus.graphdemo.constants import UPLOAD_PATH, X_MIN_CONTOUR, \\\n X_MAX_CONTOUR, Y_MIN_CONTOUR, Y_MAX_CONTOUR, DISPLAY_STD, X_AXIS_DATES, \\\n USE_DATA_FILTER, OPTIONS_FOR_DATAFILTER, COLUMN_FOR_DATAFILTER\nfrom inqbus.graphdemo.views.overview import get_files_by_path\n\n\nclass XYPlotJSLayout(LayoutDOM):\n \"\"\"\n Testlayout using JS callbacks\n \"\"\"\n\n __implementation__ = \"coffeescripts/jslayout.coffee\"\n __javascript__ = \"http://underscorejs.org/underscore-min.js\"\n\n table_select = Instance(Select)\n\n x_axis = Instance(Select)\n\n y_axis = Instance(Select)\n\n y_axis2 = Instance(Select)\n\n plot = Instance(Plot)\n\n data = Instance(ColumnDataSource)\n\n source = Instance(ColumnDataSource)\n\n table_plot = Instance(DataTable)\n\n data_filter = Instance(MultiSelect)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n create all parts of the layout in their initialised way with default data.\n Changes will be handled by jslayout.coffee\n \"\"\"\n super(XYPlotJSLayout, self).__init__(*args, **kwargs)\n\n self.initialize_select_boxes()\n\n x = []\n y = []\n\n self.source = ColumnDataSource(data=dict(\n x=x,\n y=y,\n y_below=y,\n y_above=y,\n index=x,\n y2=y,\n ))\n\n\n self.plot = figure(\n webgl=constants.WEBGL,\n )\n\n if X_AXIS_DATES:\n self.plot.xaxis.formatter=DatetimeTickFormatter(formats=dict(\n seconds=[\"%d.%m.%y %H:%M:%S\"],\n minutes=[\"%d.%m.%y %H:%M:%S\"],\n hourmin=[\"%d.%m.%y %H:%M:%S\"],\n hours=[\"%d.%m.%y %Hh\"],\n days=[\"%d.%m.%y\"],\n months=[\"%b %y\"],\n years=[\"%b %y\"]))\n self.plot.xaxis.major_label_orientation=math.pi/2\n self.plot.xaxis.major_label_text_baseline=TextBaseline.top\n self.plot.xaxis.major_label_text_align=TextAlign.left\n\n if USE_DATA_FILTER:\n self.data_filter = MultiSelect(\n options = OPTIONS_FOR_DATAFILTER,\n value = OPTIONS_FOR_DATAFILTER,\n title = \"Filter on %s\" % COLUMN_FOR_DATAFILTER\n )\n else:\n self.data_filter = MultiSelect(\n options = [],\n value = [],\n title = 'Filtering is disabled'\n )\n\n self.plot.x_range = Range1d(start=0.0, end=10.0)\n self.plot.y_range = Range1d(start=0.0, end=10.0)\n\n self.plot.line(\n x='x',\n y='y',\n source=self.source,\n color='blue',\n line_width=2)\n\n self.plot.line(\n x='x',\n y='y2',\n source=self.source,\n color='green',\n line_width=2)\n\n if DISPLAY_STD:\n self.plot.line(\n x='x',\n y='y_below',\n source=self.source,\n color='red',\n line_width=1)\n self.plot.line(\n x='x',\n y='y_above',\n source=self.source,\n color='red',\n line_width=1)\n\n self.table_plot = DataTable(\n source=self.source,\n columns=[\n TableColumn(\n field='x',\n title='x average of slided data'),\n TableColumn(\n field='y',\n title='y average of slided data'),\n TableColumn(\n field='y2',\n title='Second Line y2'),\n TableColumn(\n field='y_above',\n title='y + standard derivation'),\n TableColumn(\n field='y_below',\n title='y - standard derivation')])\n\n def get_colums(self, data, table):\n if table in data:\n columns = data[table]\n columns.append('No Selection')\n else:\n columns = []\n return columns\n\n def initialize_select_boxes(self):\n if self.data:\n data = dict(self.data.data)\n else:\n data = {}\n\n tables = self.get_tables(data)\n if not tables:\n tables = ['No table found']\n columns = self.get_colums(data, tables[0])\n if not columns:\n columns = ['No column found']\n\n self.table_select = Select(\n options=tables,\n title=\"Select a table\",\n value=tables[0]\n )\n\n if 'date' in columns:\n x_value = 'date'\n elif 'time' in columns:\n x_value = 'time'\n else:\n x_value = columns[0]\n\n self.x_axis = Select(\n options=columns,\n title=\"Select a x axis\",\n value=x_value\n )\n\n if len(columns) >= 2:\n self.y_axis = Select(\n options=columns,\n title=\"Select a y axis\",\n value=columns[1]\n )\n else:\n self.y_axis = Select(\n options=columns,\n title=\"Select a y axis\",\n value=columns[0]\n )\n\n self.y_axis2 = Select(\n options=columns,\n title=\"Select a second y axis\",\n value='No Selection'\n )\n\n def get_tables(self, data):\n return list(data.keys())\n\n def render_components(self):\n\n layout = column(self.table_select,\n self.x_axis,\n row(self.y_axis, self.y_axis2),\n self.data_filter,\n self,\n row(self.plot,\n self.table_plot))\n\n layout_script, div = components(layout)\n\n script = ''\n\n # Work around to use custom model\n for x in INLINE.js_raw:\n if \"XYPlotJSLayout\" in x:\n script += ('\\n\\n' % x)\n\n script += layout_script\n\n return script, div\n\n\nclass XYPlotPythonLayout(XYPlotJSLayout):\n \"\"\"\n Testlayout using Python-callbacks and bokeh-server\n \"\"\"\n\n __implementation__ = 'coffeescripts/xyplotpythonlayout.coffee'\n\n file_select = Instance(Select)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n \"\"\"\n super(XYPlotPythonLayout, self).__init__(*args, **kwargs)\n\n self.change_data_source_ignore_range(None, None, None)\n\n self.plot.y_range.on_change('start', self.change_data_source_in_range)\n self.plot.y_range.on_change('end', self.change_data_source_in_range)\n self.plot.x_range.on_change('start', self.change_data_source_in_range)\n self.plot.x_range.on_change('end', self.change_data_source_in_range)\n\n self.x_axis.on_change('value', self.change_data_source_ignore_range)\n self.y_axis.on_change('value', self.change_data_source_ignore_range)\n\n self.table_select.on_change('value', self.change_columns)\n\n self.file_select.on_change('value', self.change_tables)\n\n def initialize_select_boxes(self):\n \"\"\"\n Initial selectboxes\n :return:\n \"\"\"\n files = get_files_by_path(UPLOAD_PATH)\n if not files:\n files = ['No file found']\n\n self.file_select = Select(\n options=files,\n title=\"Select a file\",\n value=files[0]\n )\n\n self.data = ColumnDataSource()\n\n self.data.data = get_diagram_data(UPLOAD_PATH, files[0])\n\n super(XYPlotPythonLayout, self).initialize_select_boxes()\n\n def change_tables(self, attrname, old, new):\n \"\"\"\n a different file is selected and other tables are available\n \"\"\"\n self.data.data = get_diagram_data(UPLOAD_PATH, self.file_select.value)\n\n tables = self.get_tables(self.data.data)\n if not tables:\n tables = ['No table found']\n\n self.table_select.options = tables\n self.table_select.value = tables[0]\n\n def change_columns(self, attrname, old, new):\n \"\"\"\n a different table is selected and other columns are available\n \"\"\"\n data = dict(self.data.data)\n table = self.table_select.value\n\n columns = self.get_colums(data, table)\n\n if len(columns) == 0:\n columns = ['No column found']\n\n self.x_axis.options = columns\n self.y_axis.options = columns\n\n\n self.x_axis.value = columns[0]\n if len(columns) >= 2:\n self.y_axis.value = columns[1]\n else:\n self.y_axis.value = columns[0]\n\n # @Debounce(period=DEBOUNCE_CALLBACK_PERIOD)\n def change_data_source_in_range(self, attrname, old, new):\n \"\"\"\n deals with data generation after zooming\n \"\"\"\n data = self.get_plot_data(ignore_ranges=False)\n\n self.source.data = dict(\n x=data['source.data.x'],\n y=data['source.data.y'],\n index=data['source.data.index'],\n y_above=data['source.data.y_above'],\n y_below=data['source.data.y_below']\n )\n\n # @Debounce(period=DEBOUNCE_CALLBACK_PERIOD)\n def change_data_source_ignore_range(self, attrname, old, new):\n \"\"\"\n deals with data generation after selecting different columns\n \"\"\"\n data = self.get_plot_data(ignore_ranges=True)\n\n self.plot.x_range.start = data['plot.x_range.start']\n self.plot.x_range.end = data['plot.x_range.end']\n self.plot.y_range.start = data['plot.y_range.start']\n self.plot.y_range.end = data['plot.y_range.end']\n\n self.source.data = dict(\n x=data['source.data.x'],\n y=data['source.data.y'],\n index=data['source.data.index'],\n y_above=data['source.data.y_above'],\n y_below=data['source.data.y_below']\n )\n\n def get_plot_data(self, ignore_ranges=True):\n \"\"\"\n Calculating data\n :param ignore_ranges: True if complete data should be used, false if min and max depends on current ranges\n :return: dictionary with data for redraw\n 'source.data.x': numpy array, floats\n 'source.data.y': numpy array, floats\n 'source.data.index': numpy array, floats\n 'source.data.y_above': numpy array, floats\n 'source.data.y_below': numpy array, floats\n 'plot.x_range.start': float\n 'plot.x_range.end': float\n 'plot.y_range.start': float\n 'plot.y_range.end': float\n\n \"\"\"\n upload_path = UPLOAD_PATH\n filename = self.file_select.value\n tablepath = self.table_select.value\n x_col = self.x_axis.value\n y_col = self.y_axis.value\n\n if ignore_ranges:\n xmin = None\n xmax = None\n ymin = None\n ymax = None\n else:\n xmin = self.plot.x_range.start\n xmax = self.plot.x_range.end\n ymin = self.plot.y_range.start\n ymax = self.plot.y_range.end\n\n data = get_plot_data_python(\n upload_path,\n filename,\n tablepath,\n x_col=x_col,\n y_col=y_col,\n xmin=xmin,\n xmax=xmax,\n ymin=ymin,\n ymax=ymax,\n plotwidth=self.plot.width,\n plotheight=self.plot.height)\n\n return data\n\n def render_components(self):\n\n layout = column(self.file_select,\n self.table_select,\n self.x_axis,\n self.y_axis,\n row(self.plot,\n self.table_plot))\n\n return layout\n\n\nclass ContourPlotLayout(LayoutDOM):\n\n __implementation__ = 'coffeescripts/jslayout.coffee'\n __javascript__ = \"http://underscorejs.org/underscore-min.js\"\n\n plot = Instance(Plot)\n data = Instance(ColumnDataSource)\n\n x_min = Float()\n x_max = Float()\n y_min = Float()\n y_max = Float()\n\n color_mapper = Instance(LinearColorMapper)\n\n color_bar = Instance(ColorBar)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create all parts of the layout in their initialised way with default data.\n Register handler for python callbacks\n \"\"\"\n super(ContourPlotLayout, self).__init__(*args, **kwargs)\n\n data = self.data.data\n\n image_data = data['image'][0]\n\n flat_image_data = image_data.flatten()\n\n min_data = get_min_value(flat_image_data)\n max_data = get_max_value(flat_image_data)\n\n self.color_mapper = LinearColorMapper(\n palette=RdYlGn11, low=min_data, high=max_data)\n\n if 'x_min' in data:\n x_min = data['x_min']\n else:\n x_min = self.x_min\n if 'x_max' in data:\n x_max = data['x_max']\n else:\n x_max = self.x_max\n if 'y_min' in data:\n y_min = data['y_min']\n else:\n y_min = self.y_min\n if 'y_max' in data:\n y_max = data['y_max']\n else:\n y_max = self.y_max\n\n# self.data.X0 = x_min\n# self.data.Y0 = y_min\n# self.data.DX= x_max-x_min\n# self.data.DY= y_max-y_min\n\n self.plot = figure(plot_width=600,\n plot_height=400,\n x_range= [x_min, x_max],\n y_range = [y_min, y_max],\n# x_range=[X_MIN_CONTOUR, X_MAX_CONTOUR],\n# y_range=[Y_MIN_CONTOUR, Y_MAX_CONTOUR],\n min_border_right=10)\n\n# self.plot.image(image='image',\n# x='X0',\n# y='Y0',\n# dw='DX',\n# dh='DY',\n# color_mapper=self.color_mapper,\n# source=self.data)\n\n self.plot.image(image='image',\n x=x_min,\n y=y_min,\n dw=x_max-x_min,\n dh=y_max-y_min,\n color_mapper=self.color_mapper,\n source=self.data)\n\n self.color_bar = ColorBar(\n color_mapper=self.color_mapper,\n ticker=BasicTicker(desired_num_ticks=10),\n label_standoff=12,\n border_line_color=None,\n location=(0, 0))\n\n self.plot.add_layout(self.color_bar, 'left')\n\n def render_components(self):\n\n layout = column(self.plot, self)\n\n layout_script, div = components(layout)\n\n script = ''\n\n # Work around to use custom model\n for x in INLINE.js_raw:\n if \"ContourPlotLayout\" in x:\n script += ('\\n\\n' % x)\n\n script += layout_script\n\n return script, div\n","repo_name":"Inqbus/inqbus.graphdemo","sub_path":"src/inqbus/graphdemo/bokeh_extension/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":15747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"46120714749","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom os.path import abspath, dirname, join, normpath\nimport sys\n\nimport dj_twiml\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nversion = dj_twiml.__version__\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %s -m 'version %s'\" % (version, version))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='dj-twiml',\n version=version,\n description=\"\"\"Create Twilio TwiML views in Django\"\"\",\n long_description=open(\n normpath(join(dirname(abspath(__file__)), 'README.rst'))\n ).read(),\n author='Paul Hallett',\n author_email='paul@twilio.com',\n url='https://github.com/phalt/dj-twiml',\n packages=[\n 'dj_twiml',\n ],\n include_package_data=True,\n install_requires=[\n 'django_twilio',\n ],\n license=\"BSD\",\n zip_safe=False,\n keywords='django, twilio, twiml, telephony',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n ],\n)\n","repo_name":"phalt/dj-twiml","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"25570868716","text":"import sys\nimport cv2\nimport torch\nimport numpy as np\nfrom base_tracker import BaseTracker\nimport path_config\n\nsys.path.append(\"external/DaSiamRPN/code\")\nfrom net import SiamRPNotb\nfrom run_SiamRPN import SiamRPN_init, SiamRPN_track\nfrom utils import cxy_wh_2_rect\n\n\nclass DaSiamRPN(BaseTracker):\n def __init__(self):\n super(DaSiamRPN, self).__init__(name=\"DaSiamRPN\")\n self.net_file = path_config.DASIAMRPN_MODEL\n\n def initialize(self, image_file, box):\n self.net = SiamRPNotb()\n self.net.load_state_dict(torch.load(self.net_file))\n self.net.eval().cuda()\n\n image = cv2.imread(image_file)\n box = box - np.array([1, 1, 0, 0])\n self.state = SiamRPN_init(\n image, box[:2] + box[2:] / 2.0, box[2:], self.net\n ) # init tracker\n\n def track(self, image_file):\n image = cv2.imread(image_file)\n self.state = SiamRPN_track(self.state, image) # track\n center = self.state[\"target_pos\"] + 1\n target_sz = self.state[\"target_sz\"]\n box = cxy_wh_2_rect(center, target_sz)\n return box\n","repo_name":"songheony/A3T","sub_path":"experts/dasiamrpn.py","file_name":"dasiamrpn.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"69848493177","text":"import argparse\nimport tensorflow.keras as K\nimport tensorflow.python.keras.callbacks as callbacks_module\n\n\ndef add_arguments(parser: argparse.ArgumentParser):\n parser.add_argument_group(\"Callback Arguments\")\n parser.add_argument(\"-c\", \"--ckpt\", type=str, default=\"final\",\n help=\"File name of the real-time checkpoint. Set empty string to disable it.\")\n parser.add_argument(\"-bc\", \"--best_ckpt\", type=str, default=\"best\",\n help=\"File name of the best checkpoint. Set empty string to disable it.\")\n parser.add_argument(\"-tb\", \"--tensorboard\", type=str, default=\"logs\",\n help=\"Directory name of the tensorboard logs. Set empty string to disable it.\")\n parser.add_argument(\"-v\", \"--verbose\", type=int, default=1, choices=[0, 1, 2],\n help=\"0 = silent, 1 = progress bar, 2 = one line per epoch.\"\n \"Note that the progress bar is not particularly useful when\"\n \"logged to a file, so verbose=2 is recommended when not running\"\n \"interactively (eg, in a production environment).\")\n\n\ndef get(base_dir, cfg, model, train_steps, **params):\n callbacks = [\n K.callbacks.TerminateOnNaN(),\n ]\n if cfg.ckpt:\n callbacks.append(K.callbacks.ModelCheckpoint(\n base_dir + f\"/{cfg.tag}/ckpt/{cfg.ckpt}\", save_weights_only=True, verbose=1))\n if cfg.best_ckpt:\n callbacks.append(K.callbacks.ModelCheckpoint(\n base_dir + f\"/{cfg.tag}/ckpt/{cfg.best_ckpt}\", save_best_only=True, save_weights_only=True, verbose=1))\n if cfg.tensorboard:\n callbacks.append(K.callbacks.TensorBoard(base_dir + f\"/{cfg.tag}/{cfg.tensorboard}\", write_graph=False))\n if cfg.lrp:\n from . import optimizer\n callbacks.append(optimizer.lr_callback(cfg))\n\n final_params = {\n \"verbose\": cfg.verbose,\n \"epochs\": cfg.total_epochs,\n \"steps\": train_steps\n }\n return callbacks_module.CallbackList(callbacks,\n add_history=True,\n add_progbar=cfg.verbose != 0,\n model=model,\n **params)\n\n\nclass CallBacks(object):\n def __init__(self, *args):\n self.call_backs = list(args)\n\n def on_batch_begin(self, batch, logs=None):\n for cb in self.call_backs:\n cb.on_batch_begin(batch, logs)\n\n def on_batch_end(self, batch, logs=None):\n for cb in self.call_backs:\n cb.on_batch_end(batch, logs)\n\n def on_epoch_begin(self, epoch, logs=None):\n for cb in self.call_backs:\n cb.on_epoch_begin(epoch, logs)\n\n def on_epoch_end(self, epoch, logs=None):\n for cb in self.call_backs:\n cb.on_epoch_end(epoch, logs)\n\n def on_train_batch_begin(self, batch, logs=None):\n for cb in self.call_backs:\n cb.on_train_batch_begin(batch, logs)\n\n def on_train_batch_end(self, batch, logs=None):\n for cb in self.call_backs:\n cb.on_train_batch_end(batch, logs)\n\n def on_train_begin(self, logs=None):\n for cb in self.call_backs:\n cb.on_train_begin(logs)\n\n def on_train_end(self, logs=None):\n for cb in self.call_backs:\n cb.on_train_end(logs)\n\n def on_test_begin(self, logs=None):\n for cb in self.call_backs:\n cb.on_test_begin(logs)\n\n def on_test_end(self, logs=None):\n for cb in self.call_backs:\n cb.on_test_end(logs)\n\n def set_model(self, model):\n for cb in self.call_backs:\n cb.set_model(model)\n\n def set_params(self, params):\n for cb in self.call_backs:\n cb.set_model(params)\n","repo_name":"Jarvis73/DeepCNN-TF2","sub_path":"ingredients/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"36179774023","text":"def criarArquivoPessoa():\n try:\n arquivo = open(\"pessoas.txt\", \"+r\")\n arquivo.close()\n return True\n except Exception as e:\n print(\"Arquivo já foi criado!\")\n return False\n\n\n\ndef addPessoas(nome, rg, ano_nasc):\n pessoa = f'{nome},{rg},{ano_nasc}'\n try:\n arquivo = open(\"pessoas.txt\", \"a\")\n arquivo.write(pessoa)\n arquivo.close()\n except Exception as e:\n print(\"Não foi possivel add essa pessoa, Erro:\", e)\n\n\ndef listarPessoas():\n pessoas = list()\n try:\n arquivo = open(\"pessoas.txt\", \"r\")\n\n for linha in arquivo.readlines():\n lista = linha.split(\",\")\n\n pessoa = {\"nome\": lista[0], \"rg\": lista[1], \"ano_nasc\": lista[2].replace(\"\\n\", \"\")}\n pessoas.append(pessoa)\n\n return pessoas\n except Exception as e:\n print(\"Não foi possivel ler o arquivo pessoas, Erro:\", e)\n finally:\n arquivo.close()\n# Criando o meu Sistema\n\n\nvalor = criarArquivoPessoa()\n\n\nwhile True:\n if valor:\n print(\"MENU\".center(40, \"#\"))\n print(\"\"\"\n Opção 1 -> Cadastrar\n Opção 2 -> Listar\n Opção 3 -> Buscar\n Opção 4 -> Sair do sistema\n \"\"\")\n\n pergunta = input(\"Qual Opção Deseja: \")\n\n if pergunta == \"1\":\n print(\"Cadastrar\".center(40, \"#\"))\n nome = input(\"Nome: \")\n rg = input(\"RG: \")\n try:\n ano = int(input(\"Ano de nascimento: \"))\n addPessoas(nome=nome, rg=rg, ano_nasc=ano)\n except Exception as e:\n print(\"Erro na digitação do ano de nascimento \\nErro: \", e)\n\n\n elif pergunta == \"2\":\n for linha in listarPessoas():\n print(f\"Nome: {linha['nome']}\")\n print(f\"RG: {linha['rg']}\")\n print(f\"Ano: {linha['ano_nasc']}\")\n print(\"#\"*20)\n\n\n elif pergunta == \"3\":\n buscarPessoa = input(\"Quem você deseja buscar: \")\n for linha in listarPessoas():\n if linha['nome'].upper() == buscarPessoa.upper():\n print(\"Busca realizada\")\n print(f\"Nome: {linha['nome']}\")\n print(f\"RG: {linha['rg']}\")\n print(f\"Ano: {linha['ano_nasc']}\")\n break\n else:\n print(\"A pessoa pesquisada não esta no sistema!\")\n\n\n elif pergunta == \"4\":\n break\n else:\n print(\"Valor digitado não é valido !\")\n else:\n break\n","repo_name":"clbruna/AulasPython_Info","sub_path":"aula13_prof.py","file_name":"aula13_prof.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21407012331","text":"from unittest import TextTestRunner, TestSuite, TestLoader\r\nfrom tests.test_contact import TestContact\r\nfrom tests.test_invoice import TestInvoice\r\n\r\n\r\ntest_cases = [\r\n TestContact,\r\n # TestInvoice,\r\n]\r\n\r\n\r\ndef load_tests() -> TestSuite:\r\n loader = TestLoader()\r\n suite = TestSuite()\r\n\r\n for _test_case in test_cases:\r\n suite.addTests(loader.loadTestsFromTestCase(_test_case))\r\n\r\n return suite\r\n\r\n\r\nif __name__ == '__main__':\r\n runner = TextTestRunner()\r\n runner.run(load_tests())\r\n","repo_name":"aryan-arabshahi/invoice","sub_path":"tests/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73466709495","text":"def twoStrings(s1, s2):\n out = ''\n for c in s1:\n if c in s2 and not c in out:\n out += c\n if len(out) > 0:\n return 'YES'\n else:\n return 'NO'\n\n\nprint(twoStrings('hello', 'world'))\n","repo_name":"amalshehu/algorithms-hackerrank","sub_path":"Strings/two-strings/two-strings.py","file_name":"two-strings.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2497926331","text":"# Escenario\r\n# Como probablemente sabes, Sudoku es un rompecabezas de colocación de números jugado en un tablero de 9x9.\r\n# El jugador tiene que llenar el tablero de una manera muy específica:\r\n# Cada fila del tablero debe contener todos los dígitos del 0 al 9 (el orden no importa).\r\n# Cada columna del tablero debe contener todos los dígitos del 0 al 9 (nuevamente, el orden no importa).\r\n# Cada subcuadro de 3x3 de la tabla debe contener todos los dígitos del 0 al 9.\r\n# Si necesitas más detalles, puedes encontrarlos aquí.\r\n# Tu tarea es escribir un programa que:\r\n# Lea las 9 filas del Sudoku, cada una con 9 dígitos (verifica cuidadosamente si los datos ingresados son válidos).\r\n# Da como salida Si si el Sudoku es válido y No de lo contrario.\r\n# Prueba tu código utilizando los datos que te proporcionamos.\r\n# Datos de Prueba\r\n# Entrada de Muestra:\r\n# 295743861\r\n# 431865927\r\n# 876192543\r\n# 387459216\r\n# 612387495\r\n# 549216738\r\n# 763524189\r\n# 928671354\r\n# 154938672\r\n# Salida de la Muestra:\r\n# Yes\r\n# Entrada de Muestra:\r\n# 195743862\r\n# 431865927\r\n# 876192543\r\n# 387459216\r\n# 612387495\r\n# 549216738\r\n# 763524189\r\n# 928671354\r\n# 254938671\r\n# Salida de la Muestra\r\n# No\r\n# --------------------------------------------------------------------------------------------------------------------------\r\n# while True:\r\n# try:\r\n# num = int(input(\"Ingrese un valor: \"))\r\n# assert validarNum\r\n# break\r\n# except ValueError:\r\n# print(\"Vuelve a Ingresar un valor numerico\")\r\n\r\n\r\n# tablero = [[num for fila in range(9)] for columna in range(9)]\r\n# tablero=[[0,0,0],[0,0,0],[0,0,0]]\r\n\r\ndef extraeSubMatriz(tablero,x):\r\n # for i\r\n return ''\r\ndef validaMatriz(tablero):\r\n return True\r\ndef extraeColumna(tablero,i):\r\n lista = []\r\n for j in range(9):\r\n lista.append(tablero[j][i])\r\n return lista\r\ndef validoColumNum(columna):\r\n return True if (columna.count(1)==1 and columna.count(2)==1 and columna.count(3)==1 and columna.count(4)==1 and columna.count(5)==1 and columna.count(6)==1 and columna.count(7)==1 and columna.count(8)==1 and columna.count(9)==1) else False\r\ndef convertirListaToInt(cadena):\r\n lista = []\r\n for char in cadena:\r\n lista.append(int(char,10))\r\n return lista\r\ndef validarCadena(cadena):\r\n return True if (cadena.count('1')==1 and cadena.count('2')==1 and cadena.count('3')==1 and cadena.count('4')==1 and cadena.count('5')==1 and cadena.count('6')==1 and cadena.count('7')==1 and cadena.count('8')==1 and cadena.count('9')==1) else False\r\nfila ,columna,tablero=[],[],[]\r\nx,i,j=0,0,0\r\nwhile j<9:\r\n cadena = input(\"ingrese texto de 9 caracteres (del 0 al 9) : \")\r\n if ( validarCadena(cadena)):\r\n fila = convertirListaToInt(cadena)\r\n tablero.append(fila)\r\n j+=1\r\n if j>8:\r\n # print(\"YES\")\r\n # print(tablero)\r\n while i<9:\r\n # comparo columnas (tablero) de 9x9 que esten del 0 al 9 y no se repitan\r\n columna = extraeColumna(tablero,i) #// extraer la columna de la lista\r\n if validoColumNum(columna): #validoListaNum(columna) //validar lista numerica que no se repita del 0 a 9\r\n i+=1\r\n if i>8:\r\n print(\"YES\")\r\n print(tablero)\r\n #comparar columnas y filas (tablero) de 3*3 que esten del 0 al 9 y no se repitan\r\n # 1mera vuelta ( )\r\n auxMatriz3_3 = extraeSubMatriz(tablero,x)\r\n while x<9 and validaMatriz(auxMatriz3_3):\r\n x+=1\r\n auxMatriz3_3 = extraeSubMatriz(tablero,x)\r\n if x>8:\r\n print(\"YES\")\r\n break\r\n else:\r\n print(\"NO\")\r\n break\r\n else:\r\n print(\"NO\")\r\n break\r\n else:\r\n print(\"NO\")\r\n break\r\n","repo_name":"grimapatroy/Python_NETACAD","sub_path":"Modulo5/resumen5.1.9.11/miParte5.1.9.11_5.1.11.11/5.1.11.11_LABORATORIO_Sudoku.py","file_name":"5.1.11.11_LABORATORIO_Sudoku.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34780952580","text":"from argparse import ArgumentParser\nimport os, time, glob\n\nfrom utils.CondorJobSubmitter import CondorJobSubmitter\nfrom utils.LocalJobSubmitter import LocalJobSubmitter\nfrom base.Configs import TrainingConfig\n\ndef run_and_wait(func, **args):\n jobs_before = TrainingConfig.submitter.get_running_cluster_IDs()\n func(**args)\n jobs_after = TrainingConfig.submitter.get_running_cluster_IDs()\n\n need_to_finish = jobs_after.difference(jobs_before)\n\n while True:\n currently_running = TrainingConfig.submitter.get_running_cluster_IDs()\n wait_to_finish = len(currently_running.intersection(need_to_finish))\n print(\"waiting for {} jobs to finish\".format(wait_to_finish))\n\n if wait_to_finish > 0:\n time.sleep(5)\n else:\n break\n\ndef CampaignPilot(master_confpath, nrep, use_test):\n\n # get the things that need to be done in the right order\n from RunTrainingCampaign import RunTrainingCampaign\n from RunPrepareHistFitterCampaign import RunPrepareHistFitterCampaign\n from RunHistFitterCampaign import RunHistFitterCampaign\n from MakeGlobalAsimovPlots import MakeGlobalAsimovPlots\n from MakeGlobalAnalysisPlots import MakeAllGlobalAnalysisPlots\n\n # run the training\n run_and_wait(RunTrainingCampaign, master_confpath = master_confpath, nrep = nrep)\n\n # these are the directories for the individual runs\n workdir = os.path.dirname(master_confpath)\n run_dir_pattern = os.path.splitext(master_confpath)[0] + \"_slice*\"\n run_dirs = glob.glob(run_dir_pattern)\n\n print(\"After training, found the following model directories:\")\n print('\\n'.join(run_dirs))\n\n # export the histograms\n run_and_wait(RunPrepareHistFitterCampaign, model_dirs = run_dirs)\n\n # run HistFitter\n run_and_wait(RunHistFitterCampaign, model_dirs = run_dirs)\n\n # and make the plots\n MakeGlobalAsimovPlots(model_dirs = run_dirs, plot_dir = workdir)\n MakeAllGlobalAnalysisPlots({\"model_dirs\": run_dirs, \"plotdir\": workdir})\n\nif __name__ == \"__main__\":\n if not os.environ[\"ROOTDIR\"]:\n raise Exception(\"Error: 'ROOTDIR' not defined. Please do 'source setup_env.sh'.\")\n\n parser = ArgumentParser()\n parser.add_argument(\"--confpath\", action = \"store\", dest = \"master_confpath\")\n parser.add_argument(\"--nrep\", action = \"store\", dest = \"nrep\", type = int)\n parser.add_argument(\"--use_test\", action = \"store_const\", const = True, default = False)\n args = vars(parser.parse_args())\n\n CampaignPilot(**args)\n","repo_name":"philippwindischhofer/HiggsPivoting","sub_path":"CampaignPilot.py","file_name":"CampaignPilot.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"35875675075","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import List, Optional\n\nfrom bson import ObjectId\n\nfrom extutils.dt import localtime\nfrom extutils.emailutils import MailSender\nfrom models import ChannelModel\nfrom mongodb.factory import ChannelManager, ProfileManager, MessageRecordStatisticsManager\nfrom mongodb.helper import IdentitySearcher\n\n\n@dataclass\nclass CollatedChannelData:\n channel_name: str\n channel_data: ChannelModel\n\n\n@dataclass\nclass MemberInfoEntry:\n user_oid: ObjectId\n user_name: str\n first_joined: datetime\n last_message_at: Optional[datetime] = None\n\n @property\n def first_joined_str(self):\n return self.first_joined.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n @property\n def last_message_at_str(self):\n if self.last_message_at:\n return self.last_message_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return \"N/A\"\n\n\nclass InfoProcessor:\n @staticmethod\n def collate_child_channel_data(root_oid: ObjectId, child_channel_oids: List[ObjectId]) \\\n -> List[CollatedChannelData]:\n accessible: List[CollatedChannelData] = []\n inaccessible: List[CollatedChannelData] = []\n\n missing_oids = []\n\n for ccoid in child_channel_oids:\n cdata = ChannelManager.get_channel_oid(ccoid)\n\n if cdata:\n ccd = CollatedChannelData(channel_name=cdata.get_channel_name(root_oid), channel_data=cdata)\n\n if cdata.bot_accessible:\n accessible.append(ccd)\n else:\n inaccessible.append(ccd)\n else:\n missing_oids.append(ccoid)\n\n if missing_oids:\n MailSender.send_email_async(f\"No associated channel data found of the channel IDs below:
\"\n f\"
{' / '.join([str(oid) for oid in missing_oids])}
\")\n\n accessible = sorted(accessible, key=lambda data: data.channel_data.id, reverse=True)\n inaccessible = sorted(inaccessible, key=lambda data: data.channel_data.id, reverse=True)\n\n return accessible + inaccessible\n\n @staticmethod\n def get_member_info(channel_model: ChannelModel) -> List[MemberInfoEntry]:\n ret = []\n\n prof_conns = ProfileManager.get_channel_prof_conn(channel_model.id, available_only=True)\n user_oids = [mdl.user_oid for mdl in prof_conns]\n\n user_name_dict = IdentitySearcher.get_batch_user_name(user_oids, channel_model)\n last_message_oids = MessageRecordStatisticsManager.get_user_last_message_ts(channel_model.id, user_oids)\n\n for prof_conn in prof_conns:\n user_oid = prof_conn.user_oid\n user_name = user_name_dict.get(user_oid) or str(user_oid)\n first_joined = localtime(prof_conn.id.generation_time)\n last_message_at = last_message_oids.get(user_oid)\n if last_message_at:\n last_message_at = localtime(last_message_at)\n\n ret.append(MemberInfoEntry(\n user_oid=user_oid, user_name=user_name, first_joined=first_joined, last_message_at=last_message_at))\n\n return ret\n","repo_name":"RxJellyBot/Jelly-Bot","sub_path":"mongodb/helper/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"26572048970","text":"import matplotlib.pyplot as plt\nimport math\nimport numpy as np\nfrom scipy.interpolate import CubicSpline\nfrom scipy.integrate import solve_ivp\nfrom prettytable import PrettyTable\n\nksi0 = 0\neta0 = 0\nalf0 = 1\nbeta0 = 1\nw0 = 0.5\nmu = 0.5\nk = 0.5\ndelt = 0.01\nkappa = 10\nEps = 12\n\n# Xsn - X simple, not optimal, ind = 0\nXsn = [ksi0, alf0, eta0, beta0, w0]\n# Xso - X simple, optimal, ind = 1\nXso = [ksi0, alf0, eta0, beta0, w0]\n# Xn - X with forse,not optimal, ind = 2\nXn = [ksi0, alf0, eta0, beta0, w0]\n# Xo - X with forse, more optimal, ind = 2\nXo = [ksi0, alf0, eta0, beta0, w0]\n\n\nS = [[], [], [], []]\nY = [[], [], [], []]\n\neps = [mu * (1 + k) * beta0 / (1 - k), mu * (1 + k) * beta0 / (1 - k)]\nQ = [eps[0] - 200 * delt, eps[1] + 200 * delt, 0]\nts = [0, 0, 0]\n\npsi3 = 0\nu = []\nttiks = np.linspace(0, 1, 1001)\n\ndef getXYsn(t, X): return [X[1], 0, X[3], -1, Q[0]] # without forse, not optimal\n\n\ndef getXYso(t, X): return [X[1], 0, X[3], -1, Q[1]] # without forse, optimal\n\n\ndef mysqrt(X): return (X[1] ** 2 + X[3] ** 2) ** 0.5\n\n\ndef getAlf(X): return -(Eps * X[1] * mysqrt(X) + X[3] * X[4])\n\n\ndef getBeta(X): return -(Eps * X[3] * mysqrt(X) + 1 - X[1] * X[4])\n\n\ndef getXYn(t, X): return [X[1], getAlf(X), X[3], getBeta(X),\n Q[2] - kappa * np.abs(X[4]) * X[4]] # with forse, not optimal\n\n\ndef getXYo(t, X): return [X[1] * T, T * (-Eps * X[1] * mysqrt(X) - X[4] * X[3]), X[3] * T,\n T * (-Eps * X[3] * mysqrt(X) + X[4] * X[1] - 1),\n (Ufun(1 - t) - kappa * np.sign(X[4]) * X[4] ** 2) * T]\n\n\n# X = [X0=psi2, X1=psi4, X2=psi5]\n# Сопряженная система\ndef getPsi2(t, X): return 1 + X[0] * Eps * (\n np.sqrt(VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2) + (VXfun(1 - t) ** 2) / np.sqrt(\n VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2)) + X[1] * (\n Eps * VXfun(1 - t) * VYfun(1 - t) / np.sqrt(\n VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2) - Wfun(1 - t))\n\n\ndef getPsi4(t, X): return X[0] * (\n Eps * VXfun(1 - t) * VYfun(1 - t) / np.sqrt(VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2) + Wfun(1 - t)) - psi3 + X[\n 1] * Eps * (\n np.sqrt(VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2) + VYfun(1 - t) ** 2 / np.sqrt(\n VXfun(1 - t) ** 2 + VYfun(1 - t) ** 2))\n\n\ndef getPsi5(t, X): return X[0] * VYfun(1 - t) - X[1] * VXfun(1 - t) + X[2] * 2 * kappa * np.abs(Wfun(1 - t))\n\n\ndef oneStep(t, X): return [-getPsi2(t, X) * T, -getPsi4(t, X) * T, -getPsi5(t, X) * T]\n\n\n# Функция задания новых начальных условий\ndef getNewX(solv, ind, sind, q):\n eps = solv.y[1][sind] - solv.y[4][sind]\n return [solv.y[0][ind],\n solv.y[1][sind] - mu * (1 + k) * solv.y[3][sind]*np.sign(eps),\n solv.y[2][ind],\n -k * solv.y[3][sind],\n solv.y[4][sind] - mu * (1 + k) * solv.y[3][sind]*np.sign(eps) + q]\n\n\ndef eevent(t, X): return X[2]\n\n\neevent.terminal = True\neevent.direction = -1\n\nfor i in range(0, 10):\n step = 0.0001\n solv0 = solve_ivp(getXYsn, [ts[0], float(\"inf\")], y0=[Xsn[0], Xsn[1], Xsn[2], Xsn[3], Xsn[4]], events=eevent,\n max_step=step)\n solv1 = solve_ivp(getXYso, [ts[1], float(\"inf\")], y0=[Xso[0], Xso[1], Xso[2], Xso[3], Xso[4]], events=eevent,\n max_step=step)\n solv2 = solve_ivp(getXYn, [ts[2], float(\"inf\")], y0=[Xn[0], Xn[1], Xn[2], Xn[3], Xn[4]], events=eevent,\n max_step=step)\n\n u.clear()\n\n for i in range(0, 1001):\n u.append(1)\n Ufun = CubicSpline(ttiks, u)\n psi3 = 0\n for ind1 in range(0, 4):\n\n Ta = 0.1\n Tb = 99.9\n T = 0.5 * (Ta + Tb)\n for ind2 in range(20):\n if abs(Tb - Ta) < 10 ** (-3):\n break\n solv31 = solve_ivp(getXYo, [0, 1], y0=[Xo[0], Xo[1], Xo[2], Xo[3], Xo[4]], events=eevent, max_step=0.001, t_eval=ttiks)\n if len(solv31.t_events[0]) == 0:\n Ta = T\n else:\n Tb = T\n T = 0.5 * (Ta + Tb)\n psi3 = solv31.y[1][len(solv31.t) - 2] / solv31.y[3][len(solv31.t) - 2]\n Xfun = CubicSpline(solv31.t, solv31.y[0])\n VXfun = CubicSpline(solv31.t, solv31.y[1])\n Yfun = CubicSpline(solv31.t, solv31.y[2])\n VYfun = CubicSpline(solv31.t, solv31.y[3])\n Wfun = CubicSpline(solv31.t, solv31.y[4])\n solv32 = solve_ivp(oneStep, [0, 1], y0=[0, 0, 0], max_step=0.001)\n u.clear()\n for f in range(0, len(solv32.t)):\n Psi5 = solv32.y[2][f]\n u.append(np.sign(Psi5))\n Ufun = CubicSpline(solv32.t, u)\n\n for j in range(1, len(solv0.t) - 1):\n S[0].append(solv0.y[0][j])\n Y[0].append(solv0.y[2][j])\n for j in range(1, len(solv1.t) - 1):\n S[1].append(solv1.y[0][j])\n Y[1].append(solv1.y[2][j])\n for j in range(1, len(solv2.t) - 1):\n S[2].append(solv2.y[0][j])\n Y[2].append(solv2.y[2][j])\n for j in range(1, len(solv31.t) - 1):\n S[3].append(solv31.y[0][j])\n Y[3].append(solv31.y[2][j])\n\n last_ind = [len(solv0.t) - 2, len(solv1.t) - 2, len(solv2.t) - 2, len(solv31.t) - 2]\n ts = [solv0.t[last_ind[0]], solv1.t[last_ind[1]], solv2.t[last_ind[2]], solv31.t[last_ind[3]]]\n Q = [solv0.y[1][last_ind[0]] - solv0.y[4][last_ind[0]] - 200 * delt,\n solv1.y[1][last_ind[1]] - solv1.y[4][last_ind[1]] + 200 * delt, 0, Ufun(solv32.t[len(solv32.t) -1 ])]\n Xsn = getNewX(solv0, last_ind[0], 0, Q[0])\n Xso = getNewX(solv1, last_ind[1], 0, Q[1])\n Xn = getNewX(solv2, last_ind[2], 0, Q[2])\n Xo = getNewX(solv31, last_ind[3], last_ind[3], Q[3])\n\nax = plt.subplot()\nplt.plot(S[0], Y[0], 'o', ms =1.2, label='Without F, not opt')\nplt.plot(S[1], Y[1], label='Without F, opt')\nplt.plot(S[2], Y[2], label='With F, not opt')\nplt.plot(S[3], Y[3], label='With F, more opt')\nplt.xlabel('X')\nplt.ylabel('Y')\nleg = plt.legend(loc='upper right', ncol=1, bbox_to_anchor=(0.65, 0.9), mode=\"expand\", shadow=True, fancybox=True)\nleg.get_frame().set_alpha(0.5)\nplt.show()\n","repo_name":"MochalovaAn/robot_math","sub_path":"MyVersion1.py","file_name":"MyVersion1.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20362369581","text":"from copy import deepcopy\nimport getpass\nimport os.path\nimport pygame\nimport sys\nimport time\nfrom collections import deque\nfrom math import sin, cos, atan2, pi, sqrt\nfrom pygame.locals import *\nfrom random import random, randint, choice, shuffle, setstate, getstate, seed\ntau = 2 * pi\n\nclass Game(object):\n def __init__(self):\n self.fillcolor = (0, 0, 0)\n self.fullscreen = '--fullscreen' in sys.argv\n self.w = 800\n self.h = 600\n self.font_name = None\n self.font_size = 16, 24\n self.maxfps = 10\n self.grid_y = 18\n self.grid_x = 8\n self.logged = deque(maxlen=30)\n\n # initialized in init_pygame()\n self.game_time = 0\n self.clock = None\n self.font = None\n self.font_small = None\n self.screen = None\n self.pause = False\n self.brickwid = 4\n self.colors = [[0, 0, 0], [0x7f, 0x30, 0x30], [0x30, 0x7f, 0x30], [0x30, 0x30, 0x7f], [0x7f, 0x7f, 0x30], [0x30, 0x7f, 0x7f], [0x7f, 0x30, 0x7f]]\n self.all_bricks = [\n [[0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0]],\n [[0, 2, 0, 0],\n [0, 2, 2, 0],\n [0, 2, 0, 0],\n [0, 0, 0, 0]],\n [[0, 3, 0, 0],\n [0, 3, 3, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 4, 4, 0],\n [0, 4, 4, 0],\n [0, 0, 0, 0]],\n [[0, 0, 5, 0],\n [0, 5, 5, 0],\n [0, 5, 0, 0],\n [0, 0, 0, 0]],\n [[0, 6, 6, 0],\n [0, 0, 6, 0],\n [0, 0, 6, 0],\n [0, 0, 0, 0]],\n ]\n\n def reset_game(self):\n self.score = 0\n self.grid = []\n for i in range(self.grid_y):\n self.grid.append([0] * self.grid_x)\n self.shit_brick()\n\n def shit_brick(self):\n self.brick1_type = randint(0, len(self.all_bricks) - 1)\n self.brick2_type = randint(0, len(self.all_bricks) - 1)\n self.brick = []\n self.brick.append(deepcopy(self.all_bricks[self.brick1_type]))\n self.brick.append(deepcopy(self.all_bricks[self.brick2_type]))\n for brick in self.brick:\n for i in range(randint(0, 3)):\n oldbrick = deepcopy(brick)\n for x in range(self.brickwid):\n for y in range(self.brickwid):\n brick[x][y] = oldbrick[self.brickwid - y - 1][x]\n self.brick_x = int(self.grid_x / 2 - self.brickwid/2)\n self.brick_y = -3\n self.true_brick = choice(range(len(self.brick)))\n if self.would_a_move_collide(0, 2):\n self.game_over()\n\n def game_over(self):\n self.log(\"Game Over! Final Score: %d\" % self.score)\n self.reset_game()\n\n def log(self, *things):\n self.logged.extend([str(obj) for obj in things])\n\n def start(self):\n self.init_pygame()\n self.loop()\n\n def init_pygame(self):\n pygame.init()\n pygame.font.init()\n pygame.key.set_repeat(180, 80)\n pygame.mouse.set_visible(False)\n flags = DOUBLEBUF | (self.fullscreen and FULLSCREEN)\n self.screen = pygame.display.set_mode((self.w, self.h), flags, 32)\n self.font_small = pygame.font.Font(self.font_name, self.font_size[0])\n self.font = pygame.font.Font(self.font_name, self.font_size[1])\n self.clock = pygame.time.Clock()\n\n def keypress(self, key):\n if key == K_ESCAPE:\n raise SystemExit()\n elif key == K_F11:\n pygame.display.toggle_fullscreen()\n elif key in (K_h, K_a, K_LEFT):\n if not self.would_a_move_collide(-1, 0):\n self.brick_x -= 1\n elif key in (K_l, K_d, K_RIGHT):\n if not self.would_a_move_collide(1, 0):\n self.brick_x += 1\n elif key in (K_k, K_UP):\n self.brick_rotate()\n elif key == K_SPACE:\n self.brick_drop()\n elif key in (K_p, K_RETURN):\n self.pause ^= True\n elif key == K_F1:\n self.reset_game()\n\n\n def keyhold(self, pressed):\n if (pressed[K_j] or pressed[K_s] or pressed[K_DOWN]):\n self.speed = 15\n else:\n self.speed = min(int(self.score / 1000 + 1), 15)\n\n def loop(self):\n next_log_refresh = 0\n previous_tick = 0\n while True:\n time_before = time.time()\n self.clock.tick(self.maxfps)\n if next_log_refresh <= time.time():\n next_log_refresh = time.time() + 1\n self.log(\"\")\n\n for event in pygame.event.get():\n if event.type == QUIT:\n return\n elif event.type == KEYDOWN:\n self.keypress(event.key)\n self.keyhold(pygame.key.get_pressed())\n\n if not self.pause:\n pass\n\n self.draw_game()\n\n if not self.pause:\n self.dt = time.time() - time_before\n self.game_time += self.dt\n if self.game_time > previous_tick + (0.5 / self.speed):\n previous_tick = self.game_time\n self.brick_move_down()\n\n def brick_move_down(self):\n if self.would_a_move_collide(0, 1):\n self.drop()\n return False\n else:\n self.brick_y += 1\n return True\n\n def brick_drop(self):\n while self.brick_move_down():\n pass\n\n def brick_rotate(self):\n old_state = deepcopy(self.brick)\n for brick in self.brick:\n oldbrick = deepcopy(brick)\n for x in range(self.brickwid):\n for y in range(self.brickwid):\n brick[x][y] = oldbrick[self.brickwid - y - 1][x]\n if self.would_a_move_collide(0, 0):\n self.brick = old_state\n\n def drop(self):\n for x in range(self.brickwid):\n for y in range(self.brickwid):\n newx = self.brick_x + x\n newy = self.brick_y + y\n if newx >= 0 or newy >= 0 or newx < self.grid_x - 1 or newy < self.grid_y - 1:\n newcolor = self.brick[self.true_brick][x][y]\n if newcolor:\n self.grid[newy][newx] = newcolor\n self.clear_lines()\n self.shit_brick()\n\n def clear_lines(self):\n i = 0\n while i < len(self.grid):\n if all(self.grid[i]):\n del self.grid[i]\n self.score += 100\n else:\n i += 1\n while len(self.grid) < self.grid_y:\n self.grid.insert(0, [0] * self.grid_x)\n\n def would_a_move_collide(self, dx, dy):\n for x in range(self.brickwid):\n for y in range(self.brickwid):\n if dy:\n if not self.brick[self.true_brick][x][y]:\n continue\n else:\n if not any(brick[x][y] for brick in self.brick):\n continue\n newx = self.brick_x + x + dx\n newy = self.brick_y + y + dy\n if newx < 0 or newx > self.grid_x - 1 or newy > self.grid_y - 1:\n return True\n if newy > 0 and self.grid[newy][newx]:\n return True\n return False\n\n def draw_game(self):\n self.screen.fill(self.fillcolor)\n\n self.draw_hud()\n self.draw_brick()\n self.draw_field()\n self.draw_log()\n text = self.font.render(\"Score: %d\" % self.score, 1, (255, 255, 255))\n self.screen.blit(text, (550, 20))\n pygame.display.flip()\n\n def draw_log(self):\n x, y = 20, 0\n for line in self.logged:\n if not line:\n continue\n text = self.font.render(line, 1, (255, 255, 255))\n self.screen.blit(text, (x, y))\n y += text.get_rect().height + 2\n\n def draw_brick(self):\n s = 30\n w = game.w/2\n h = game.h/2\n x = (s+2)*(self.grid_x/2)\n y = (s+2)*(self.grid_y/2)\n for i in range(4):\n for j in range(4):\n clr1 = self.colors[self.brick[0][i][j]]\n clr2 = self.colors[self.brick[1][i][j]]\n color = [clr1[0] + clr2[0], clr1[1] + clr2[1], clr1[2] + clr2[2]]\n if any(color):\n self.screen.fill((color[0], color[1], color[2]), \\\n Rect(w-x + (i+self.brick_x)*(s+2), \\\n h-y + (j+self.brick_y)*(s+2), s, s))\n\n def draw_field(self):\n s = 30\n w = game.w/2\n h = game.h/2\n x = (s+2)*(self.grid_x/2)\n y = (s+2)*(self.grid_y/2)\n for i in range(self.grid_x):\n for j in range(self.grid_y):\n clr = self.grid[j][i]\n color = self.colors[clr]\n if clr:\n self.screen.fill((color[0], color[1], color[2]), \\\n Rect(w-x + (i)*(s+2), \\\n h-y + (j)*(s+2), s, s))\n\n def draw_hud(self):\n s = 30\n nx = self.grid_x\n ny = self.grid_y\n x = (s+2)*(nx/2)\n y = (s+2)*(ny/2)\n w = game.w/2\n h = game.h/2\n pygame.draw.rect(self.screen, (0, 255, 0), Rect(w-x-1, h-y-1, x*2, y*2), 2)\n for i in range(nx):\n for j in range(ny):\n pygame.draw.rect(self.screen, (32, 32, 32), Rect(w-x + i*(s+2), h-y + j*(s+2), s, s), 1)\n\n\nif __name__ == '__main__':\n game = Game()\n game.reset_game()\n\n if '--help' in sys.argv or '-h' in sys.argv:\n print(__doc__)\n elif '--version' in sys.argv:\n print(game.version)\n else:\n game.start()\n","repo_name":"hut/confusetris","sub_path":"confusetris.py","file_name":"confusetris.py","file_ext":"py","file_size_in_byte":9907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"40668698064","text":"\"\"\"\nThis file was created at Smartbuzz Inc.\nFor more information visit http://www.smartbuzzinc.com\n\"\"\"\nfrom django.contrib import admin\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nclass DecadeBornListFilter(admin.SimpleListFilter):\n\t# Human-readable title which will be displayed in the\n\t# right admin sidebar just above the filter options.\n\ttitle = _('Categorized Only')\n\n\t# Parameter for the filter that will be used in the URL query.\n\tparameter_name = 'category'\n\n\tdef lookups(self, request, model_admin):\n\t\t\"\"\"\n\t\tReturns a list of tuples. The first element in each\n\t\ttuple is the coded value for the option that will\n\t\tappear in the URL query. The second element is the\n\t\thuman-readable name for the option that will appear\n\t\tin the right sidebar.\n\t\t\"\"\"\n\t\treturn (\n\t\t\t('80s', _('in the eighties')),\n\t\t\t('90s', _('in the nineties')),\n\t\t)\n\n\tdef queryset(self, request, queryset):\n\t\t\"\"\"\n\t\tReturns the filtered queryset based on the value\n\t\tprovided in the query string and retrievable via\n\t\t`self.value()`.\n\t\t\"\"\"\n\t\t# Compare the requested value (either '80s' or '90s')\n\t\t# to decide how to filter the queryset.\n\n\t\treturn queryset.filter(~Q(category=None))","repo_name":"rodel-daroy/django-autolife","sub_path":"vehicles/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"43060909770","text":"\"\"\"This module contains the general information for StorageScsiLunInstRef ManagedObject.\"\"\"\n\nfrom ...ucscmo import ManagedObject\nfrom ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta\nfrom ...ucscmeta import VersionMeta\n\n\nclass StorageScsiLunInstRefConsts():\n ADMIN_STATE_OFFLINE = \"offline\"\n ADMIN_STATE_ONLINE = \"online\"\n ADMIN_STATE_UNDEPLOYED = \"undeployed\"\n AUTO_AQUIRED_FALSE = \"false\"\n AUTO_AQUIRED_NO = \"no\"\n AUTO_AQUIRED_TRUE = \"true\"\n AUTO_AQUIRED_YES = \"yes\"\n CONFIG_STATE_N_A = \"N/A\"\n CONFIG_STATE_APPLIED = \"applied\"\n CONFIG_STATE_APPLY_FAILED = \"apply-failed\"\n CONFIG_STATE_APPLYING = \"applying\"\n CONFIG_STATE_NOT_APPLIED = \"not-applied\"\n CONFIG_STATE_NOT_IN_USE = \"not-in-use\"\n CONFIG_STATE_ORPHANED = \"orphaned\"\n CONFIG_STATE_UNKNOWN = \"unknown\"\n LUN_MASK_ID_UNASSIGNED = \"unassigned\"\n LUN_STATUS_OFFLINE = \"offline\"\n LUN_STATUS_ONLINE = \"online\"\n LUN_STATUS_UNDEFINED = \"undefined\"\n PREFERRED_LUN_MASK_ID_UNASSIGNED = \"unassigned\"\n SNAPSHOT_ADMIN_STATE_ABORT_REPLICATION = \"abort-replication\"\n SNAPSHOT_ADMIN_STATE_CREATE = \"create\"\n SNAPSHOT_ADMIN_STATE_CREATE_LUN_REPLICA = \"create-lun-replica\"\n SNAPSHOT_ADMIN_STATE_REPLICATION_RESTORE = \"replication-restore\"\n SNAPSHOT_ADMIN_STATE_SET_REPLICATION_OFFLINE = \"set-replication-offline\"\n SNAPSHOT_ADMIN_STATE_SET_REPLICATION_ONLINE = \"set-replication-online\"\n SNAPSHOT_ADMIN_STATE_UNDEFINED = \"undefined\"\n\n\nclass StorageScsiLunInstRef(ManagedObject):\n \"\"\"This is StorageScsiLunInstRef class.\"\"\"\n\n consts = StorageScsiLunInstRefConsts()\n naming_props = set([u'lunItemName'])\n\n mo_meta = MoMeta(\"StorageScsiLunInstRef\", \"storageScsiLunInstRef\", \"lun-inst-ref-[lun_item_name]\", VersionMeta.Version131a, \"InputOutput\", 0xfff, [], [\"admin\", \"ls-compute\", \"ls-config\", \"ls-server\", \"ls-storage\"], [u'lsServer'], [], [None])\n\n prop_meta = {\n \"admin_name\": MoPropertyMeta(\"admin_name\", \"adminName\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,15}\"\"\", [], []), \n \"admin_state\": MoPropertyMeta(\"admin_state\", \"adminState\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [\"offline\", \"online\", \"undeployed\"], []), \n \"auto_aquired\": MoPropertyMeta(\"auto_aquired\", \"autoAquired\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [\"false\", \"no\", \"true\", \"yes\"], []), \n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \n \"config_state\": MoPropertyMeta(\"config_state\", \"configState\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"N/A\", \"applied\", \"apply-failed\", \"applying\", \"not-applied\", \"not-in-use\", \"orphaned\", \"unknown\"], []), \n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []), \n \"lun_dn\": MoPropertyMeta(\"lun_dn\", \"lunDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x20, 0, 256, None, [], []), \n \"lun_item_dn\": MoPropertyMeta(\"lun_item_dn\", \"lunItemDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \n \"lun_item_name\": MoPropertyMeta(\"lun_item_name\", \"lunItemName\", \"string\", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x40, 1, 32, None, [], []), \n \"lun_mask_id\": MoPropertyMeta(\"lun_mask_id\", \"lunMaskId\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"unassigned\"], [\"0-4294967295\"]), \n \"lun_status\": MoPropertyMeta(\"lun_status\", \"lunStatus\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"offline\", \"online\", \"undefined\"], []), \n \"preferred_lun_mask_id\": MoPropertyMeta(\"preferred_lun_mask_id\", \"preferredLunMaskId\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [\"unassigned\"], [\"0-4294967295\"]), \n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []), \n \"size\": MoPropertyMeta(\"size\", \"size\", \"ulong\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, [], [\"1-10240\"]), \n \"snapshot_admin_state\": MoPropertyMeta(\"snapshot_admin_state\", \"snapshotAdminState\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, [\"abort-replication\", \"create\", \"create-lun-replica\", \"replication-restore\", \"set-replication-offline\", \"set-replication-online\", \"undefined\"], []), \n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x800, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [], []), \n \"volume_dn\": MoPropertyMeta(\"volume_dn\", \"volumeDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \n }\n\n prop_map = {\n \"adminName\": \"admin_name\", \n \"adminState\": \"admin_state\", \n \"autoAquired\": \"auto_aquired\", \n \"childAction\": \"child_action\", \n \"configState\": \"config_state\", \n \"dn\": \"dn\", \n \"lunDn\": \"lun_dn\", \n \"lunItemDn\": \"lun_item_dn\", \n \"lunItemName\": \"lun_item_name\", \n \"lunMaskId\": \"lun_mask_id\", \n \"lunStatus\": \"lun_status\", \n \"preferredLunMaskId\": \"preferred_lun_mask_id\", \n \"rn\": \"rn\", \n \"size\": \"size\", \n \"snapshotAdminState\": \"snapshot_admin_state\", \n \"status\": \"status\", \n \"volumeDn\": \"volume_dn\", \n }\n\n def __init__(self, parent_mo_or_dn, lun_item_name, **kwargs):\n self._dirty_mask = 0\n self.lun_item_name = lun_item_name\n self.admin_name = None\n self.admin_state = None\n self.auto_aquired = None\n self.child_action = None\n self.config_state = None\n self.lun_dn = None\n self.lun_item_dn = None\n self.lun_mask_id = None\n self.lun_status = None\n self.preferred_lun_mask_id = None\n self.size = None\n self.snapshot_admin_state = None\n self.status = None\n self.volume_dn = None\n\n ManagedObject.__init__(self, \"StorageScsiLunInstRef\", parent_mo_or_dn, **kwargs)\n\n","repo_name":"CiscoUcs/ucscsdk","sub_path":"ucscsdk/mometa/storage/StorageScsiLunInstRef.py","file_name":"StorageScsiLunInstRef.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"33697799791","text":"#https://www.hackerrank.com/challenges/equal-stacks/problem\n#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the equalStacks function below.\n#\ndef equalStacks(h1, h2, h3):\n s = list(map(sum, [h1, h2, h3]))\n \n s = [[s[0], h1], [s[1], h2], [s[2], h3]]\n s.sort(key=lambda x: x[0], reverse=True)\n \n while s[0][0] != s[1][0] or s[0][0] != s[2][0] or s[1][0] != s[2][0]:\n if s[0][0] > s[1][0]:\n s[0][0] -= s[0][1].pop(0)\n if s[0][0] > s[2][0]:\n s[0][0] -= s[0][1].pop(0)\n if s[1][0] > s[2][0]:\n s[1][0] -= s[1][1].pop(0)\n s.sort(key=lambda x: x[0], reverse=True)\n return s[0][0]\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n1N2N3 = input().split()\n\n n1 = int(n1N2N3[0])\n\n n2 = int(n1N2N3[1])\n\n n3 = int(n1N2N3[2])\n\n h1 = list(map(int, input().rstrip().split()))\n\n h2 = list(map(int, input().rstrip().split()))\n\n h3 = list(map(int, input().rstrip().split()))\n\n result = equalStacks(h1, h2, h3)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"wesleymesquita/Snippets","sub_path":"HR/equal-stacks.py","file_name":"equal-stacks.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18961915404","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 16 14:07:18 2018\r\n\r\n@author: lsprague\r\n\r\nTrigger with:\r\n >python.exe SampleSelect.py \r\nNOTE: sys.argv[1] = \r\n\"\"\"\r\nimport sys\r\n#import random\r\n#import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#Some print triggering values\r\ntesting = False\r\ndetails = True\r\n\r\nif testing == True:\r\n print(\"Testing mode enabled. Verbose printing will occur. \")\r\n \r\n#if/elif statements for checking for CSV filename. If none given, requests\r\n#filename. If given as proper argument, will use it.\r\nif len(sys.argv) == 1:\r\n print(\"Error: too few arguments, only \", len(sys.argv), \"given. \")\r\n csvfile = input(\"Please give a CSV filename: \")\r\nelif len(sys.argv) == 2 or len(sys.argv) == 3:\r\n csvfile = str(sys.argv[1])\r\n\r\n#Reading csv file into data frame variable\r\ncsvcomps = pd.read_csv(csvfile)\r\nif testing == True:\r\n print(\"Header of chosen file: \")\r\n print(\"\\n\",csvcomps.head())\r\n\r\n#acquiring number of compounds/rows in csv file\r\nncs = csvcomps.shape[0] #number of compounds/rows in csv\r\n\r\n#lowest and highest masses in CSV file\r\nlowm = csvcomps['molmass'].min() #lowest mass in molmass\r\nhighm = csvcomps['molmass'].max() #highest mass in molmass\r\nprint(\"\\nLowest mass in CSV file is: \",lowm)\r\nprint(\"Highest mass in CSV file is: \",highm)\r\n\r\n#optional command of 'r' for request all options/details, or 'd' (or nothing)\r\n#for default settings to be run.\r\nif len(sys.argv) == 3:\r\n if sys.argv[2] == 'r': #r for request options\r\n print(\"Requesting options... \\n\")\r\n print(\"NOTE: all lists should be values separated by commas (no spaces)\")\r\n sampsize = float(input(\"How many largely separated samples? \")) #sample size desired from csv\r\n lrangelow = float(input(\"\\nLowest Mass? \")) #lowest mass in range\r\n lrangehigh = float(input(\"\\nHighest Mass? \") ) #largest mass in range\r\n lspace = float(input(\"\\nSeparation? \") ) #min diff in mw for large spacing\r\n #use a list for the desired small range start and end values\r\n ssampsize = float(input(\"\\nHow many tightly separated samples (per range, one value)?\"))\r\n srangelow_in = [input(\"\\nList all starting MWs for tight cluster samples. \")] #range(s) desired for tight spaced samples\r\n srangelow = list(map(float, srangelow_in[0].split(','))) #splits up response with commas, into integer list\r\n srangehi_in = [input(\"\\nList all ending MWs for tight cluster samples. \")] #max range(s)\r\n srangehi = list(map(float, srangehi_in[0].split(','))) #splits with commas to integer vals\r\n sspace = float(input(\"\\nDesired tight cluster spacing? \")) #min diff in molmass for tight spacing\r\n \r\n elif sys.argv[2] == 'd':\r\n print(\"Using default values... \")\r\n #All default spacing/sample size, min/max, etc... values are figured\r\n #out (via reading csv) or denoted here.\r\n sampsize = 600 #sample size desired from csv\r\n lrangelow = 150 #lowest mass in range\r\n lrangehigh = 750 #largest mass in range\r\n lspace = 1 #min diff in mw for large spacing\r\n #use a list for the desired small range start and end values\r\n ssampsize = 100\r\n srangelow = [250,350,450] #range(s) desired for tight spaced samples\r\n srangehi = [255,355,455] #max range(s)\r\n sspace = 0.001 #min diff in molmass for tight spacing\r\n \r\nelse:\r\n print(\"\\nNo alternate settings given. Default values will be used. \\n\")\r\n sampsize = 600 #sample size desired from csv\r\n lrangelow = 150 #lowest mass in range\r\n lrangehigh = 750 #largest mass in range\r\n lspace = 1 #min diff in mw for large spacing\r\n #use a list for the desired small range start and end values\r\n ssampsize = 100\r\n srangelow = [250,350,450] #range(s) desired for tight spaced samples\r\n srangehi = [255,355,455] #max range(s)\r\n sspace = 0.001 #min diff in molmass for tight spacing\r\n\r\nif details == True: \r\n parameters = [ncs,sampsize,lrangelow,lrangehigh,lspace,ssampsize,srangelow,srangehi,sspace]\r\n print(\"Parameters are: \\n\")\r\n print(\"# Comps | L.Samp.Size | L.LowMW | L.HighMW | L.Spacing | S.Samp.Size | S.LowMW | S.HighMW | S.Spacing \")\r\n print(parameters)\r\n print(\"\\n\")\r\n\r\n#Empty dataframe for storage of sample compounds\r\ndfstorage = pd.DataFrame()\r\n#print(storage.head())\r\n\r\n##random starting position from first 100 compounds, for variety of spacing\r\n#randstart = random.randint(0,100)\r\n\r\ndef getindex(bound,varchange):\r\n \"\"\"Obtains the index in given CSV file for the molecular mass that is \r\n greater than or equal to the given bound (i.e., low or high range), so \r\n that the program has an index to begin/end its search for samples.\r\n \"\"\"\r\n for index, row in csvcomps.iterrows():\r\n checkval = (row['molmass'])\r\n if checkval >= bound:\r\n# print(\"Checkval for mass range: \",checkval)\r\n# print(\"Index: \", index)\r\n varchange = (index,checkval) #the index of checkval\r\n break\r\n else:\r\n #print(\"nope\")\r\n continue\r\n# print(\"Varchange: \", varchange)\r\n if varchange == 0:\r\n print(\"No checkval generated. Bound likely out of range. \")\r\n# print(\"Returning tuple of (0,0), and allowing IF statements to fix.\")\r\n varchange = (0,0)\r\n elif len(varchange) == 2:\r\n print(\"Index found. No problems identified.\")\r\n# print(\"Varchange updated (if req'd): \",varchange, \"\\n\")\r\n return varchange\r\n\r\n\r\n\"\"\"\r\nObtaining index of lowest mass in range from CSV, for large (spaced) sample\r\n\"\"\"\r\n#default row start if either lrangelow is out of range of dataset, or equiv.\r\nrowiters = 0\r\n\r\nlowestm = getindex(lrangelow,rowiters)\r\nrowiters = lowestm[0]\r\n\r\nif testing == True:\r\n print(\"Lowest Mass getindex return vals (index,checkval): \",lowestm)\r\n\r\nif rowiters == 0 and lowestm[1] < lrangelow:\r\n print(\"\\n!!!!\\n\")\r\n print(\"Error with given lrangelow. Setting rowiters start val to 0. \\n\")\r\nprint(\"Rowiters value: \",rowiters, \"\\n\")\r\n\r\n\"\"\" Done \"\"\"\r\n\r\n\"\"\"\r\nObtaining index of highest mass in range from CSV, for large (spaced) sample\r\n\"\"\"\r\nlrangehi_index = 0\r\n\r\nhighestm = getindex(lrangehigh, lrangehi_index)\r\n#NOTE: the \"-1\" ensures that the index is within the bound, not one row over it\r\nlrangehi_index = highestm[0] - 1\r\n\r\nif testing == True:\r\n print(\"Highest Mass getindex return vals (index,checkval): \",highestm)\r\n\r\nif lrangehi_index == 0 and highestm[1] < lrangehigh:\r\n print(\"\\n!!!!\\n\")\r\n print(\"Error with given lrangehigh. Value likely larger than mass range of dataset. \")\r\n lrangehi_index = csvcomps.shape[0]\r\n print(\"Continuing with highest index of dataset... --> \",lrangehi_index)\r\nprint(\"Lrangehi_index value: \",lrangehi_index)\r\n\r\n\r\n\"\"\" Done \"\"\"\r\n\r\n#Include the first row in the dataframe\r\nprint(\"\\n\\nIncluding starting row in sampling... \\n\")\r\ndfstorage = dfstorage.append(csvcomps.loc[rowiters,:])\r\nprint(\"Done. \\n\")\r\n\r\n\"\"\" \r\nBeginning the loop for sampling the large (spaced) dataset \r\n\"\"\"\r\n\r\n#storing the differences calculated\r\ndiffsave = []\r\n\r\n#iteration counter, gives number of runs in loop/number of samples gathered\r\n#...starts at 1 because I've already included the first sample based on the \r\n#starting index.\r\nloopiters = 1\r\n\r\n#counter to stay at the row after compared row (rowiters)\r\nnxtrow = rowiters + 1 \r\n\r\nprint(\"Beginning sampling from dataset... \")\r\n\r\n\"\"\"\r\nNOTE: Below while loop could DEFINITELY be a defined function... exactly the\r\nsame below in tightly clustered section with exchanced variables.\r\n\"\"\"\r\n#loops until the sample size is incremented upwards to desired number, and \r\n#checks the number of rows left so that it doesn't look for a higher indexed\r\n#row than actually exists. Also cuts off at highest desired mass range.\r\nwhile loopiters <= (sampsize-1) and nxtrow <= ncs-1 and rowiters <= lrangehi_index:\r\n m1 = csvcomps.loc[rowiters,'molmass']\r\n m2 = csvcomps.loc[nxtrow,'molmass']\r\n diff = m2 - m1\r\n if testing == True:\r\n print(\"Loop # = \",loopiters)\r\n print(\"Mass 1: \",m1)\r\n print(\"Mass 2: \",m2)\r\n print(\"Diff = \",diff)\r\n print(\"Next Row = \",nxtrow)\r\n \r\n if diff >= lspace:\r\n #append to list, set rowiters to nxtrow, and reset nxtrow\r\n #i.e., start again at new value, to find next val that fulfills spacing\r\n if testing == True:\r\n print(\"Accepted\\n\")\r\n diffsave.append(diff)\r\n dfstorage = dfstorage.append(csvcomps.loc[nxtrow,:])\r\n loopiters = loopiters + 1 #says, \"i got another sample\"\r\n rowiters = nxtrow\r\n nxtrow = rowiters + 1\r\n elif diff < lspace:\r\n #increment nxtrow by +1\r\n if testing == True:\r\n print(\"Declined\\n\")\r\n nxtrow = nxtrow + 1\r\n\r\n#NOTE: the sampling still rolls over by 1, but i'm going to leave it...\r\nif loopiters == sampsize:\r\n print(\"\\nSuccessfully gathered all samples from dataset. \")\r\n print(\"Number of samples = \", loopiters, \"\\n\")\r\nelif loopiters != sampsize:\r\n print(\"\\n!!!!\\n\")\r\n print(\"Complete sample size not reached. Check desired range and spacing.\")\r\n print(\"Number of samples = \", loopiters, \"\\n\")\r\n\r\n\"\"\" \r\nEnd of large dataset sampling\r\n\"\"\"\r\n\r\nprint(\"Some details on the large space sampling... \\n\")\r\nprint(\"Max difference calculated: \", max(diffsave))\r\nprint(\"Max difference in samples: \", dfstorage['molmass'].max() - dfstorage['molmass'].min())\r\nprint(\"Min difference calculated AND in samples: \", min(diffsave))\r\n#print(\"\\nDone. Check file \" + ofile, \"\\n\\n \\t~~~~~~~~~~~~~~~~~~\\n\")\r\nprint(\"\\nBeginning small data set inclusion with tighter spacing... \\n\")\r\n\r\n\"\"\"\r\nBeginning small (spaced) sampling process below....\r\n\r\n~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\nIdentify indices for all starting masses in srangelow\r\n\"\"\"\r\nstartlist = []\r\nfor initials in srangelow:\r\n tstart = 0\r\n begin = getindex(initials,tstart)\r\n startlist.append(begin[0])\r\n# print(startlist, \"\\n\")\r\n \r\nprint(\"These are the start indices for the chosen starting masses: \",startlist)\r\nprint(\"\\n\")\r\n\r\n\r\n\"\"\"\r\nIdentify indices for all ending masses in srangehi\r\n\"\"\"\r\nendlist = []\r\nfor ends in srangehi:\r\n endpt = 0\r\n ending = getindex(ends,endpt)\r\n #\"-1\" is again to ensure ending row is within the mass boundary\r\n endlist.append(ending[0] - 1)\r\n# print(endlist, \"\\n\")\r\n \r\nprint(\"These are the end indices for the chosen masses: \",endlist,\"\\n\")\r\n\r\n\r\n#zips start and endpoint together. Each tuple is a (start,end) pair.\r\nrangelist = list(zip(startlist,endlist))\r\n#print((rangelist))\r\n##calling the first tuple's (first range's) starting index in the CSV file.\r\n#print(rangelist[0][0]) #gives 0th tuple's 0th element in zipped list\r\n\r\n\r\n\"\"\"\r\nLoop through each defined range, check if diff is successful, check if \r\nALREADY in dfstorage from large selection (to avoid overlap) and do not \r\ninclude in new dftight if so.\r\n\"\"\"\r\ndiff2save = []\r\ndftightstore = pd.DataFrame()\r\n#for loop goes over all generated tuples, i.e. the start/end pairs\r\nfor tuples in rangelist:\r\n loopiters = 1\r\n nxtrow = loopiters + 1\r\n rowiters = tuples[0]\r\n dftightstore = dftightstore.append(csvcomps.loc[rowiters,:])\r\n srangehi_index = tuples[1]\r\n #rowiters will now use the starting points under the FOR loop, as tuples[0] \r\n while loopiters <= ssampsize and nxtrow <= ncs-1 and rowiters <= srangehi_index:\r\n m1 = csvcomps.loc[rowiters,'molmass']\r\n m2 = csvcomps.loc[nxtrow,'molmass']\r\n diff = m2 - m1\r\n if testing == True:\r\n print(\"Loop # = \",loopiters)\r\n print(\"Mass 1: \",m1)\r\n print(\"Mass 2: \",m2)\r\n print(\"Diff = \",diff)\r\n print(\"Next Row = \",nxtrow)\r\n \r\n if diff >= sspace:\r\n #append to list, set rowiters to nxtrow, and reset nxtrow\r\n #i.e., start again at new value, to find next val that fulfills spacing\r\n if testing == True:\r\n print(\"Accepted\\n\")\r\n diff2save.append(diff)\r\n dftightstore = dftightstore.append(csvcomps.loc[nxtrow,:])\r\n loopiters = loopiters + 1 #says, \"i got another sample\"\r\n rowiters = nxtrow\r\n nxtrow = rowiters + 1\r\n elif diff < sspace:\r\n #increment nxtrow by +1\r\n if testing == True:\r\n print(\"Declined\\n\")\r\n nxtrow = nxtrow + 1\r\n\r\nprint(\"Tighter sampling finished.\")\r\nprint(\"Total new samples (possibly including duplicates): \",len(diff2save), \"\\n\")\r\n\r\nprint(\"Some details on the tighter sampling... \\n\")\r\nprint(\"Max difference calculated: \", max(diff2save))\r\nprint(\"Max difference in samples: \", dftightstore['molmass'].max() - dftightstore['molmass'].min())\r\nprint(\"Min difference calculated AND in samples: \", min(diff2save))\r\n\r\n\r\n\"\"\"\r\nPrint data for newly created dftight, AND concatenated final dataframe????\r\n\"\"\"\r\nprint(\"\\nComparing large and tightly spaced dataframes now...\")\r\nprint(\"Removing duplicates... \")\r\n\r\nframes = [dfstorage,dftightstore]\r\ncumeframe = pd.concat(frames)\r\nfinale = cumeframe.drop_duplicates(subset='molmass',keep='first')\r\n\r\nprint(\"Re-sorting the dataframe by molecular mass...\")\r\n\r\nfinalframe = finale.sort_values('molmass')\r\nprint(\"Size of final dataframe: \",finalframe.shape)\r\n\r\nsplitfile = csvfile.split(sep='.') #cuts off extension\r\nofile = str(splitfile[0]) + '_samples.csv' #replaces with .csv\r\nfinalframe.to_csv(ofile)\r\n\r\nprint(\"\\nDone. Check file \" + ofile, \"\\n\\n \\t~~~~~~~~~~~~~~~~~~\\n\")\r\n\"\"\" \r\nEnd of small dataset sampling\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n","repo_name":"l1sprague/chemdiv-sortsample","sub_path":"SampleSelect.py","file_name":"SampleSelect.py","file_ext":"py","file_size_in_byte":13617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33826459886","text":"\"\"\"empty message\n\nRevision ID: ef1368f9347f\nRevises: 7a4f128284e8\nCreate Date: 2017-12-13 01:29:23.225891\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef1368f9347f'\ndown_revision = '7a4f128284e8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('credentials', sa.JSON(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'credentials')\n # ### end Alembic commands ###\n","repo_name":"probir03/email-campaign-","sub_path":"migrations/versions/ef1368f9347f_.py","file_name":"ef1368f9347f_.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"22476612825","text":"import pygame\nfrom sys import exit\nfrom classes.camera import Camera\nfrom classes.spawn import Spawn\n\ndef run_gui(grid, taxi_group, structures):\n pygame.init()\n screen = pygame.display.set_mode((1200, 700))\n clock = pygame.time.Clock()\n\n # camera initialization\n camera = Camera(30, grid, 1200, 700)\n\n start, end = None, None\n person = None\n target = None\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_r:\n camera.reset()\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_v:\n camera.change()\n\n if event.type == pygame.MOUSEBUTTONUP:\n pos = pygame.mouse.get_pos()\n x = int((pos[0] - camera.offset[0]) // grid.size)\n y = int((pos[1] - camera.offset[1]) // grid.size)\n\n if grid.matrix[x][y] > 0:\n\n if start is None:\n start = (x, y)\n person = Spawn(x, y, 1, 'person', grid.size)\n structures.add(person)\n else:\n if start[0] == x and start[1] == y:\n start = None\n person.kill()\n else:\n end = (x, y)\n target = Spawn(x, y, 1, 'target', grid.size)\n structures.add(target)\n\n if start is not None and end is not None:\n grid.put(start, end, person, target)\n start = None\n end = None\n\n # if event.type == pygame.KEYDOWN and event.key == pygame.K_c:\n # camera.switch_camera_type()\n\n if grid.force_stop is True:\n exit(1)\n screen.fill('black')\n camera.update()\n screen.blit(grid.grid, camera.offset)\n taxi_group.draw(screen)\n taxi_group.update(camera.offset)\n structures.draw(screen)\n structures.update(camera.offset)\n\n pygame.display.flip()\n clock.tick(32)\n","repo_name":"Coder-Jojo/cab_route_visualization","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15264609759","text":"from behave import *\nimport requests\nimport jpath\nimport json\nimport simplejson\nimport nose\nfrom delayedAssert import expect, assert_expectations\nfrom features.steps.utilities import *\n\ndef test_should_pass():\n expect(1 == 1, 'one is one')\n assert_expectations()\n\ndef test_should_fail():\n expect(1 == 2)\n x = 1\n y = 2\n expect(x == y, 'x:%s y:%s' % (x, y))\n expect(1 == 1)\n assert_expectations()\n\n\n#to read a file and return the content\ndef loadfile(path):\n with open(path, 'r') as f:\n data = f.read()\n return data\n\n#Given load test data from \"#DR#.createTemplate_PackingSlip_sample\"\n@Given('load test data from \"{fileName}\"')\ndef step_impl(context,fileName):\n data =loadfile('features/testData/'+fileName[5:])\n #context.zpl_json = json.loads(json.dumps(data))\n #value= loadfile('features/testData/create.json')\n #my_bytes = value.encode(\"utf-8\")\n #decoded_value = my_bytes.decode(\"unicode_escape\")\n context.testData_jsonBody = data\n pass\n\n\n@given('set base url to \"{base_URL}\"')\ndef step_impl(context, base_URL):\n context.base_url= base_URL\n pass\n\n\n@when('we do \"{request_verb}\" request to URL \"{path}\"')\ndef step_impl(context, request_verb, path):\n url =context.base_url+ path\n\n if request_verb.lower() == \"get\":\n context.r = getattr(requests, request_verb.lower())(url, headers=\"\", verify=True)\n elif request_verb.lower() == \"post\":\n #data_json = simplejson.dumps(context.zpl_json)\n #payload = {'json_payload': data_json}\n headers = {\n 'Content-Type': 'application/json',\n }\n context.r = getattr(requests, request_verb.lower())(url, headers=headers, verify=True, data=context.testData_jsonBody)\n #print (context.testData_jsonBody)\n\n\n #print(context.r.status_code)\n\n\n #raise NotImplementedError(u'STEP: When We make a \"POST\" request to URL \"/labels/zpl\"')\n\n# print(r.status_code)\n# print(r.reason)\n# print(r.headers[\"Content-Type\"])\n# print(r.json())\n\n\n\n@then('the response status code should be equal to {expected_http_status_code}')\ndef step_impl(context, expected_http_status_code):\n nose.tools.assert_equal(context.r.status_code, int(expected_http_status_code))\n #raise NotImplementedError(u'STEP: Then the response status code should equal 200')\n\n\n@then('the response status message should equal to \"{expected_http_status_message}\"')\ndef step_impl(context, expected_http_status_message):\n print(context.r.json)\n nose.tools.assert_equal(context.r.reason, expected_http_status_message)\n #raise NotImplementedError(u'STEP: Then the response status message should equal to \"OK\"')\n\n#========================== for different assertions ====================\n#expect(length_var==exp_data_array_len, 'expected value was:%d and actual value was:%d' %(exp_data_array_len, length_var))\n#assert_expectations()\n#assert False, \"message\"\n#nose.tools.ok_(length_var==exp_data_array_len, 'expected value was:%d and actual value was:%d' %(exp_data_array_len, length_var))\n#nose.tools.assert_equal(length_var, exp_data_array_len)\n#=======================================================================\n\n@then('the response should contain \"{tag_to_find}\" array of \"{data_array_Length}\" records')\ndef step_impl(context, tag_to_find, data_array_Length):\n\n response_json=context.r.json()\n item_dict=json.loads(json.dumps(response_json))\n length_var=len(item_dict[tag_to_find])\n exp_data_array_len=int(data_array_Length)\n nose.tools.assert_equal(length_var, exp_data_array_len)\n\n #expect( length_var= int(data_array_Length), \"euqal\" )\n\n #nose.tools.assert_equal(len(item_dict[tag_to_find]),int(data_array_Length))\n #if len(item_dict[tag_to_find]) != int(data_array_Length):\n\n #logging.error(\"This is error message, 22 was not 21\", False )\n #print(\"vaibhavccc\")\n\n\n #response_json = context.r.json()\n #item_dict = jpath.get(tag_to_find, response_json)\n #nose.tools.assert_equal(len(item_dict), int(data_array_Length))\n\n\n #actual_json_value = \"vaibhav-_json_value\"\n #actual_json_value = jpath.get(\".templates\", response_json)\n\n #nose.tools.assert_equal(actual_json_value,\"vaibhav\")\n #print(actual_json_value)\n #raise NotImplementedError(u'STEP: Then the response should contain array of \"20\" or \"less\"')\n\n\n@then('the \"{tag_key}\" should be \"{expected_tag_value}\"')\ndef step_impl(context, tag_key, expected_tag_value):\n\n response_json = context.r.json()\n if expected_tag_value.startswith(\"#DR#.\"):\n expected_tag_value=loadfile('features/testData/'+expected_tag_value[5:])\n\n # encoding and Decoding expected value to unicode for escape character\n my_bytes = expected_tag_value.encode(\"utf-8\")\n decoded_message_expected_tag_value = my_bytes.decode(\"unicode_escape\")\n\n # encoding and Decoding expected value to unicode for escape character\n actual_tag_value = jpath.get(tag_key, response_json)\n my_bytes_actual = actual_tag_value.encode(\"utf-8\")\n decoded_message_actual_tag_value = my_bytes_actual.decode(\"unicode_escape\")\n #print(\"Vaibhav agarwal - expected decoded---- \" + decoded_message_expected_tag_value +\"---VaiibhavEND\")\n #print(\"Vaibhav agarwal actual--decodec ---- \" + decoded_message_actual_tag_value +\"---VaiibhavEND\")\n\n #for removing max limit on comparision\n nose.tools.assert_equal.__self__.maxDiff = None\n nose.tools.assert_equal(decoded_message_actual_tag_value, decoded_message_expected_tag_value)\n\n#all the template results should contain Description, Id, Name, Owner_Id, Owner_Name, Type of Template\n@then('all the \"{array_tag}\" results should contain \"tag\"')\ndef step_impl(context,array_tag):\n response_json = context.r.json()\n item_dict = json.loads(json.dumps(response_json))\n item_templates=item_dict[array_tag]\n\n for x in item_templates:\n verify_tag_existance(x, context.table, \"tag\")\n\n#Done_riya\n#atleast/mimimum, atmax/maximum, exactly\n@then('the output should contain \"{condition}\" \"{expected_record_count}\" record in \"{tag}\"')\ndef step_impl(context, condition, expected_record_count, tag):\n response_json = context.r.json()\n item_dict = json.loads(json.dumps(response_json))\n item_templates = item_dict[tag]\n\n if condition=='atleast':\n if len(item_templates)int(expected_record_count):\n assert False, 'Expected count was:%s, and the actual was:%d' %(expected_record_count, len(item_templates))\n elif condition=='exactly':\n if len(item_templates)!=int(expected_record_count):\n assert False, 'Expected count was:%s, and the actual was:%d' %(expected_record_count, len(item_templates))\n else:\n assert False, \"Condition did not matced with atleast/atmax/exactly\"\n\n\n@then('the output should contain \"{column_Header}\" tag')\ndef step_impl(context,column_Header):\n response_json = context.r.json()\n item_dict = json.loads(json.dumps(response_json))\n if column_Header.startswith(\"#table#.\"):\n verify_tag_existance(item_dict, context.table,column_Header)\n else:\n expect(column_Header in item_dict.keys(), 'Expected value:%s was not found in:%s' % (column_Header, item_dict.keys()))\n assert_expectations()\n\n\n\n#the output should contain \"templates\" array with \"1\" recrod having \"id\" as \"#var#.generated_ID\"\n\n@then('the output should contain \"{array_tag}\" array with \"{result_count}\" recrod having \"{tag_key}\" as \"{value}\"')\ndef step_impl(context,array_tag, result_count, tag_key, value):\n response_json = context.r.json()\n item_dict = json.loads(json.dumps(response_json))\n item_templates = item_dict[array_tag]\n #jpath.get(tag_key, response_json)\n #print(item_templates)\n\n counter=0\n for i in item_templates:\n id_value=i[tag_key]\n if id_value==value:\n counter+=1\n\n nose.tools.assert_equal(int(result_count), counter)\n\n","repo_name":"avnikaushik/QA","sub_path":"becone_shippingDoc_v0.4.3_VA/features/steps/Template_PackingSlip.py","file_name":"Template_PackingSlip.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24259846190","text":"import cv2\nimport numpy as np\n\nim = cv2.imread(\"wire.png\", 0)\n\nans = np.zeros([100,100])\n\nbound = 0\nx = 0\ny = 0\n\n# apparently it is easier to start aligning from the edges\n\nfor i in range(0, 10000):\n ans[x][y] = im[0][i]\n if x == bound:\n if y < 99 - bound:\n y = y + 1\n continue\n if y == 99 - bound:\n if x < 99 - bound:\n x = x + 1\n continue\n if x == 99 - bound:\n if y > bound:\n y = y - 1\n continue\n if y == bound:\n if x > bound + 1:\n x = x - 1\n continue\n if x == bound + 1:\n bound = bound + 1\n y = y + 1\n continue\n\ncv2.imwrite(\"14.png\", np.asarray(ans, dtype=np.float))\n \nprint(im[0])\nprint(ans[50][50])","repo_name":"Darwin-Che/python-challenge","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9440108951","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom datetime import datetime\nfrom django.forms import modelform_factory\n\nfrom .models import Seminar\n\nMeetupForm = modelform_factory(Seminar, exclude = [])\n\n# Create your views here.\ndef meetup_detail(request, id):\n meetup = get_object_or_404(Seminar, pk=id)\n return render(request,\"informatics/meetup.html\",{\"meetup\":meetup})\n\ndef get_cities(request):\n return render(request,\"informatics/cities.html\",{\"cities\":Seminar.objects.all()})\n\ndef new_meetup(request):\n if request.method == \"POST\":\n form = MeetupForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"home\")\n else: \n form = MeetupForm()\n return render(request,\"informatics/new_meetup.html\", {'form':form})\n\n","repo_name":"suyash2796/django_meet_app","sub_path":"meet_up_scheduler/informatics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23032611530","text":"import pandas as pd\n\nread_metric_name = \"gld_throughput\"\nwrite_metric_name = \"gst_throughput\"\n\nmetrics = pd.read_csv(\"./metrics\", skiprows=5)\nprof = pd.read_csv(\"./profile\", skiprows=3)\n\nflops = metrics[metrics['Metric Name'] == \"flop_count_sp\"]\ntotal_flops_sp = sum(flops[\"Invocations\"].astype(\"int64\") * flops[\"Avg\"].astype(\"int64\"))\n\nkernel_to_time = {}\nfor _, row in prof.iterrows():\n if type(row[\"Type\"]) is float:\n time_elm = row[\"Time\"]\n if row[\"Type\"] != \"GPU activities\": continue\n kernel_to_time[row[\"Name\"]] = float(row[\"Time\"]) * {\"us\":1e-6, \"ms\":1e-3}[time_elm]\ntotel_exec_time = sum(kernel_to_time.values())\n\ngld_bytes, gst_bytes = 0, 0\nfor _, row in metrics.iterrows():\n if row['Metric Name'] == read_metric_name:\n throughput = row[\"Avg\"]\n value = float(throughput[:-4]) * {'G':1e9, 'M':1e6, 'K':1e3, '0': 1}[throughput[-4]]\n gld_bytes += kernel_to_time[row[\"Kernel\"]] * value\n elif row['Metric Name'] == write_metric_name:\n throughput = row[\"Avg\"]\n value = float(throughput[:-4]) * {'G':1e9, 'M':1e6, 'K':1e3, '0': 1}[throughput[-4]]\n gst_bytes += kernel_to_time[row[\"Kernel\"]] * value\n\nprint(\"Total GLD: {} MB, GST: {} MB, TOT: {} MB\".format(gld_bytes / 1e6, gst_bytes / 1e6, gld_bytes / 1e6 + gst_bytes / 1e6))\nprint(\"Time: {} ms\".format(totel_exec_time * 1000))\nprint(\"Compute: {} TFlops\".format(total_flops_sp / totel_exec_time * 1e-12))\nprint(\"GLD Thoughput: {} GB/s\".format(gld_bytes / totel_exec_time * 1e-9))\nprint(\"GST Thoughput: {} GB/s\".format(gst_bytes / totel_exec_time * 1e-9))\nprint(\"Total Thoughput: {} GB/s\".format((gld_bytes + gst_bytes) / totel_exec_time * 1e-9))\n","repo_name":"nox-410/Welder_artifacts","sub_path":"artifacts/Figure1/process_metrics.py","file_name":"process_metrics.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"24628330370","text":"\"\"\"\nTest milvus api\nThe milvus server must be running in the appropriate port \n\"\"\"\nimport pytest\nfrom app.api.milvus import get_registered_person_milvus\nfrom tests.conftest import FACE_VECTOR_DIM, TEST_PERSON_FILE_ID\n\n\n@pytest.mark.order(before=\"test_get_person_milvus\")\ndef test_insert_person_milvus(test_milvus_connec):\n \"\"\"Inserts a test person into Milvus.\"\"\"\n emb_vec = [0.0] * FACE_VECTOR_DIM\n data = [[TEST_PERSON_FILE_ID], [emb_vec]]\n assert test_milvus_connec.insert(data).insert_count == 1\n\n\n@pytest.mark.order(before=\"test_delete_person_milvus\")\ndef test_get_person_milvus(test_milvus_connec):\n \"\"\"Queries Milvus and retrieves the test person.\"\"\"\n emb_vec = [0.0] * FACE_VECTOR_DIM\n res = get_registered_person_milvus(\n test_milvus_connec, TEST_PERSON_FILE_ID, output_fields=[\"person_id\", \"embedding\"])\n assert res[\"status\"] == \"success\"\n results = res[\"person_data\"]\n person_id = results[0][\"person_id\"]\n embedding = results[0][\"embedding\"]\n assert person_id == TEST_PERSON_FILE_ID, embedding == emb_vec\n\n\ndef test_delete_person_milvus(test_milvus_connec):\n \"\"\"Deletes the test person from Milvus. Should call milvus_collec_conn.flush() and milvus_collec_conn.compact(collec_name) after deletion\"\"\"\n expr = f'person_id in [{TEST_PERSON_FILE_ID}]'\n assert test_milvus_connec.delete(expr).delete_count == 1\n","repo_name":"SamSamhuns/face_registration_and_recognition_milvus","sub_path":"app_docker_compose/tests/api/test_milvus_api.py","file_name":"test_milvus_api.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"35498682329","text":"__author__ = \"sibirrer\"\n\n# this file contains a class to make a Moffat profile\n\n__all__ = [\"Moffat\"]\n\n\nclass Moffat(object):\n \"\"\"This class contains functions to evaluate a Moffat surface brightness profile.\n\n .. math::\n\n I(r) = I_0 * (1 + (r/\\\\alpha)^2)^{-\\\\beta}\n\n with :math:`I_0 = amp`.\n \"\"\"\n\n def __init__(self):\n self.param_names = [\"amp\", \"alpha\", \"beta\", \"center_x\", \"center_y\"]\n self.lower_limit_default = {\n \"amp\": 0,\n \"alpha\": 0,\n \"beta\": 0,\n \"center_x\": -100,\n \"center_y\": -100,\n }\n self.upper_limit_default = {\n \"amp\": 100,\n \"alpha\": 10,\n \"beta\": 10,\n \"center_x\": 100,\n \"center_y\": 100,\n }\n\n def function(self, x, y, amp, alpha, beta, center_x=0, center_y=0):\n \"\"\"2D Moffat profile.\n\n :param x: x-position (angle)\n :param y: y-position (angle)\n :param amp: normalization\n :param alpha: scale\n :param beta: exponent\n :param center_x: x-center\n :param center_y: y-center\n :return: surface brightness\n \"\"\"\n\n x_shift = x - center_x\n y_shift = y - center_y\n return amp * (1.0 + (x_shift**2 + y_shift**2) / alpha**2) ** (-beta)\n","repo_name":"lenstronomy/lenstronomy","sub_path":"lenstronomy/LightModel/Profiles/moffat.py","file_name":"moffat.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"22"} +{"seq_id":"44388395179","text":"from random import shuffle\nfrom data import pickle_dump, pickle_load\nimport data\nimport itertools\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\ndef get_training_and_validation_generators(data_file, batch_size=4, data_split=0.8, validation_keys_file=\"val_keys.pkl\", training_keys_file=\"train_keys.pkl\", slice_based=True, validation_batch_size=4, skip_blank=False):\n print(\"generating the training/validation split\")\n training_list, validation_list = get_validation_split(data_file, \n training_keys_file=training_keys_file, \n validation_keys_file=validation_keys_file,\n data_split = data_split)\n\n print(\"creating the generators...\")\n training_generator = data_generator(data_file, training_list,\n batch_size=batch_size,\n slice_based = slice_based,\n skip_blank=skip_blank)\n validation_generator = data_generator(data_file, validation_list,\n batch_size=batch_size,\n slice_based=slice_based,\n skip_blank=skip_blank)\n print (\"computing the #training/validation steps...\")\n num_training_steps = get_number_of_steps(get_number_of_instances(data_file, training_list.copy(), slice_based, skip_blank), batch_size)\n num_validation_steps = get_number_of_steps(get_number_of_instances(data_file, validation_list.copy(), slice_based, skip_blank), batch_size)\n return [training_generator, validation_generator, num_training_steps, num_validation_steps] \n\ndef data_generator(data_file, index_list, batch_size, slice_based=True, skip_blank=True):\n orig_index_list = index_list\n while True:\n x_list = list()\n y_list = list()\n if slice_based:\n z = data_file.root.data.shape[-1]\n index_list = create_slice_index_list(orig_index_list, z)\n else:\n index_list = copy.copy(orig_index_list)\n while len(index_list) > 0:\n index = index_list.pop()\n add_data(x_list, y_list, data_file, index, slice_based, skip_blank)\n if len(x_list) == batch_size or (len(index_list) == 0 and len(x_list) > 0):\n yield np.asarray(x_list), np.asarray(y_list)\n x_list = list()\n y_list = list()\n\ndef get_number_of_instances(data_file, index_list, slice_based=True, skip_blank=True):\n if slice_based:\n z = data_file.root.data.shape[-1]\n index_list = create_slice_index_list(index_list,z)\n count = 0\n for index in tqdm(index_list):\n x_list = list()\n y_list = list()\n add_data(x_list, y_list, data_file, index, slice_based, skip_blank)\n if len(x_list) > 0:\n count += 1\n return count\n else:\n return len(index_list)\n\n\n\ndef create_slice_index_list(index_list, z):\n slice_index = list()\n for index in index_list:\n slice_nums = list(range(z))\n slice_index.extend(itertools.product([index], slice_nums))\n return slice_index\n\n\ndef add_data(x_list, y_list, data_file, index, slice_based=None, skip_blank=False):\n data, truth = get_data_from_file(data_file, index, slice_based=slice_based)\n truth = truth[np.newaxis]\n if not skip_blank or np.any(truth != 0):\n x_list.append(data)\n y_list.append(truth)\n\n\n\ndef get_data_from_file(data_file, index, slice_based=None):\n if slice_based:\n index, slice_index = index\n data, truth = get_data_from_file(data_file, index, slice_based=False)\n x= data[:,:,:,slice_index]\n y= truth[:,:,slice_index]\n else:\n x, y = data_file.root.data[index], data_file.root.truth[index, 0]\n return x, y\n\n\n\n\ndef get_validation_split(data_file, training_keys_file, validation_keys_file, data_split=0.8):\n if not os.path.exists(training_keys_file):\n nb_samples = data_file.root.data.shape[0]\n sample_list = list(range(nb_samples))\n training_list, validation_list = split_list(sample_list, split=data_split)\n pickle_dump(training_list, training_keys_file)\n pickle_dump(validation_list, validation_keys_file)\n return training_list, validation_list\n else:\n return pickle_load(training_keys_file), pickle_load(validation_keys_file)\n\n\ndef split_list(input_list, split=0.8):\n shuffle(input_list)\n n_training = int(len(input_list) * split)\n training = input_list[:n_training]\n testing = input_list[n_training:]\n return training, testing\n\n\ndef get_number_of_steps(n_samples, batch_size):\n if n_samples <= batch_size:\n return n_samples\n elif np.remainder(n_samples, batch_size) == 0:\n return n_samples//batch_size\n else:\n return n_samples//batch_size + 1\n\n \n\n\n\n\n","repo_name":"ali2009a/CTStrokeTorch","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22365841228","text":"import os\nimport sys\n\ndef read_lines(filename):\n with open(filename, 'r') as f:\n return f.readlines()\n\n\ndef merge(files, merge_out_file):\n with open(merge_out_file, 'w') as f:\n for file in files:\n lines = read_lines(file)\n for line in lines:\n f.write(line.strip() + '\\n')\n\nfiles = ['data_v4/train_ner_norm_code.out', 'data_v4/dev_ner_norm_code.out']\nmerge(files, 'data_v4/all_merged.out')","repo_name":"xy-always/2020Iberlef","sub_path":"src/data/merge_data.py","file_name":"merge_data.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"35245405209","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"David Lizarazo\"\n__version__ =\"1.0.0\"\n\n\"\"\"Este programa usa los \nGPIO para enceder perifericos necesario\npara el desarrollo del presente proyeto \"\"\"\n\nimport RPi.GPIO as GPIO\nimport time\n\n# GPIO RASP BOARD ( Common PINS with ZERO, PI 2 , PI 4\t)\nPIN_BUZZER= 17\nPIN_GREEN = 27\nPIN_RED = 22\nPIN_BLUE = 23\n# Logica vairables used depends in the electronic USED\nPIN_ON = True\nPIN_OFF = False\n\n#set Ultrasonic GPIO Pins\nGPIO_TRIGGER = 18\nGPIO_ECHO = 7\n\n\ndef GPIO_CONF(LOGIC_ON='HIGH'):\n global PIN_ON\n global PIN_OFF\n if LOGIC_ON == 'LOW' or LOGIC_ON=='low' or LOGIC_ON=='Low':\n PIN_ON = False\n PIN_OFF= True\n #Disable warning pins\n GPIO.setwarnings(False)\n \n\n #GPIO Mode (BOARD / BCM)\n #GPIO.setmode(GPIO.BOARD) # Set the pins as hardware pins are nummerated (Left odds)\n GPIO.setmode(GPIO.BCM) # para Ultrasonido \n\n\n #Enable the pins to be used and set GPIO direction (IN / OUT)\n GPIO.setup(GPIO_TRIGGER, GPIO.OUT)\n GPIO.setup(GPIO_ECHO, GPIO.IN)\n\n GPIO.setup (PIN_GREEN , GPIO.OUT)\n GPIO.setup (PIN_RED , GPIO.OUT)\n GPIO.setup (PIN_BUZZER, GPIO.OUT)\n GPIO.setup (PIN_BLUE , GPIO.OUT)\n\n GPIO.output(PIN_BLUE , PIN_OFF)\n GPIO.output(PIN_RED , PIN_OFF)\n GPIO.output(PIN_GREEN, PIN_OFF)\n GPIO.output(PIN_BUZZER, False)\n\ndef GPIO_INIT ():\n GPIO.output(PIN_BLUE , PIN_OFF )\n GPIO.output(PIN_GREEN ,PIN_ON )\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n time.sleep(0.3)\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n time.sleep(0.3)\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n GPIO.output(PIN_RED ,PIN_ON )\n time.sleep(0.3)\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_RED ,PIN_OFF )\n time.sleep(0.3)\n GPIO.output(PIN_GREEN ,PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_BLUE , PIN_ON )\n\ndef GPIO_PROC_OFF():\n try:\n GPIO.output(PIN_RED, PIN_ON )\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n time.sleep(0.3)\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n time.sleep(0.3)\n GPIO.output(PIN_RED, PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n GPIO.output(PIN_GREEN ,PIN_ON)\n time.sleep(0.3)\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_GREEN ,PIN_OFF )\n GPIO.output(PIN_RED, PIN_ON )\n time.sleep(0.3)\n GPIO.output(PIN_RED ,PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_BLUE ,PIN_OFF)\n GPIO.cleanup()\n except Exception as e:\n print (\"Couldnt Open the GPIOs, probably were closed before\")\n\ndef GPIO_ACCEPTED():\n GPIO.output(PIN_BLUE , PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n GPIO.output(PIN_GREEN ,PIN_ON )\n time.sleep(0.3)\n GPIO.output(PIN_GREEN ,PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_BLUE , PIN_ON )\n\ndef GPIO_REJECT():\n GPIO.output(PIN_BLUE , PIN_OFF )\n GPIO.output(PIN_RED ,PIN_ON )\n GPIO.output(PIN_BUZZER,GPIO.HIGH)\n time.sleep(0.3)\n GPIO.output(PIN_RED ,PIN_OFF )\n GPIO.output(PIN_BUZZER,GPIO.LOW )\n GPIO.output(PIN_BLUE , PIN_ON )\n\ndef get_distance():\n # set Trigger to HIGH\n GPIO.output(GPIO_TRIGGER, True)\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n # Init time\n StartTime = time.time()\n StopTime = time.time()\n\n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0:\n StartTime = time.time()\n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time()\n\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s) and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n if distance < 400 :\n return distance\n return None\n\n\n# --------------------- TESTS -----------------------\ndef ultrasonic_test():\n try:\n while True:\n dist = get_distance()\n print (\"Measured Distance = %.1f cm\" % dist)\n time.sleep(1)\n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n GPIO.cleanup()\n\n\n\ndef main_test():\n print(\"** GPIO TEST **\")\n #GPIO_CONF('LOW')\n GPIO_CONF('HIGH')\n GPIO_INIT()\n GPIO_INIT()\n print(\"** DONE WITH THE PROCESS **\")\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n GPIO_CONF('HIGH')\n \n try:\n while True:\n dist = get_distance()\n print (\"Measured Distance = %.1f cm\" % dist)\n time.sleep(1)\n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n GPIO.cleanup()\n\n main()\n","repo_name":"DavidLiza/Cam_Fruit","sub_path":"Tests/gpioModule.py","file_name":"gpioModule.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39177407797","text":"#用于整理归档csv文件\nimport os,gzip,shutil\nFolderPath=r\"E:\\03_Insight Data Parsing Tool Testing\\Requirements\\IT_Files\\IT_Files\\153\"\nName=FolderPath.split('\\\\')[-1]\ndef DecompressDocAndRank(FolderPath):\n \"\"\"\n 1.遍历文件夹若有压缩文档则解压\n 2.没有后缀名为.csv的文档改名新增后缀名.csv\n 3.同一个日期和工站的csv文档放在一个按工站取名的文件夹\n :param FolderPath:\n :return: NewFoderPath\n \"\"\"\n rootdir = os.path.join(FolderPath)\n AllCSVPathList = [] # 用于保存所有csv的路径\n # 遍历所有文件夹和子文件夹\n #解压\n for (dirpath, dirnames, filenames) in os.walk(rootdir):\n for filename in filenames:\n #分离文件名和后缀\n portion=os.path.splitext(filename)\n if portion[1]=='.gz':\n os.chdir(rootdir) # 切换到当前目录\n # 开始解压\n try:\n un_gz(filename)\n except EOFError:\n continue\n #添加后缀名 全部改成csv格式\n for (dirpath, dirnames, filenames) in os.walk(rootdir):\n for filename in filenames:\n # 分离文件名和后缀\n portion = os.path.splitext(filename)\n if portion[1]=='':\n os.chdir(dirpath)#切换到当前目录\n new_filename=portion[0]+\".csv\"\n try:\n if filename!=new_filename:#捕捉同名异常\n os.rename(filename, new_filename)#变更文件名\n except FileExistsError:\n pass\n #解压改名处理完后,收集所有csv文件路径,保存到列表\n for (dirpath, dirnames, filenames) in os.walk(rootdir):\n for filename in filenames:\n # 分离文件名和后缀\n portion = os.path.splitext(filename)\n if portion[1] == '.csv':\n AllCSVPathList.append(dirpath + '\\\\' + filename)\n #去重\n AllCSVPathList=list(set(AllCSVPathList))\n return AllCSVPathList\n\ndef un_gz(file_name):\n # 获取文件的名称,去掉后缀名\n f_name = file_name.replace(\".gz\", \"\")\n # 开始解压\n g_file = gzip.GzipFile(file_name)\n # 读取解压后的文件,并写入去掉后缀名的同名文件(即得到解压后的文件)\n open(f_name, \"wb+\").write(g_file.read())\n g_file.close()\nif __name__ == '__main__':\n AllCSVPathList=DecompressDocAndRank(FolderPath)\n #取出工站名 保存在列表中\n StationNameList=[]\n for i in range(len(AllCSVPathList)):\n StationNameList.append(AllCSVPathList[i].split('_')[3])\n #去重\n StationNameList=list(set(StationNameList))\n #创建日期文件夹\n os.chdir('E:\\\\')\n print(os.getcwd())\n if os.path.exists(Name)==False:\n os.mkdir(Name)\n print(1)\n else:\n print(\"Folder is exist\")\n os.chdir(Name)#进入Name目录 按工站列表依次创建文件夹\n for j in range(len(StationNameList)):\n if os.path.exists(StationNameList[j]) == False:\n os.mkdir(StationNameList[j])\n print(1)\n else:\n print(\"Folder is exist\")\n #遍历所有csv 如果属于StationNameList工站的就复制到对应工站文件夹中\n for x in range(len(AllCSVPathList)):\n for y in range(len(StationNameList)):\n if AllCSVPathList[x].split('_')[3]==StationNameList[y]:\n print(1)\n #复制文件\n shutil.copy(AllCSVPathList[x],'E:\\\\'+Name+'\\\\'+StationNameList[y])","repo_name":"wuha07456/InsightDataParsingToolTesting","sub_path":"Functions/DecompressDocAndRank.py","file_name":"DecompressDocAndRank.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"36699196964","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom posts.forms import CommentForm, PostForm\nfrom posts.models import Follow, Group, Post, User\nfrom yatube.settings import NUMBER_POSTS_PER_PAGE\n\n\ndef index(request: HttpRequest) -> HttpResponse:\n \"\"\"Home page.\"\"\"\n posts = Post.objects.select_related('group').select_related('author').all()\n context = {\n 'title': 'Последние обновления на сайте',\n 'page_obj': create_paginator(request, posts),\n 'index': True\n }\n template = 'posts/index.html'\n return render(request, template, context)\n\n\ndef group_posts(request: HttpRequest, slug: str) -> HttpResponse:\n \"\"\"Page to display posts of one group.\"\"\"\n group = get_object_or_404(Group, slug=slug)\n posts = group.groups.select_related('author').all()\n context = {\n 'group': group,\n 'page_obj': create_paginator(request, posts),\n }\n template = 'posts/group_list.html'\n return render(request, template, context)\n\n\ndef profile(request: HttpRequest, username: str) -> HttpResponse:\n \"\"\"User information page.\"\"\"\n user = get_object_or_404(User, username=username)\n posts = user.posts.select_related('group').all()\n following = None\n if request.user.is_authenticated:\n sub = Follow.objects.filter(author=user, user=request.user)\n following = True if sub else False\n context = {\n 'posts_count': posts.count(),\n 'page_obj': create_paginator(request, posts),\n 'username': user,\n 'following': following,\n }\n template = 'posts/profile.html'\n return render(request, template, context)\n\n\ndef post_detail(request: HttpRequest, post_id: int) -> HttpResponse:\n \"\"\"Page to display post details.\"\"\"\n post = get_object_or_404(Post, pk=post_id)\n comments = post.comments.all()\n posts_count = post.author.posts.all().count()\n form = CommentForm()\n context = {\n 'post': post,\n 'title': post.text[:29],\n 'posts_count': posts_count,\n 'form': form,\n 'comments': comments,\n }\n template = 'posts/post_detail.html'\n return render(request, template, context)\n\n\n@login_required\ndef post_create(request: HttpRequest) -> HttpResponse:\n \"\"\"Page to create a new post for logged in users.\"\"\"\n if request.method != 'POST':\n form = PostForm()\n return render(request, 'posts/create_post.html', {'form': form})\n form = PostForm(\n request.POST,\n files=request.FILES or None,\n )\n if form.is_valid():\n save_form_to_db(form, request.user)\n return redirect('posts:profile', username=request.user.username)\n return render(request, 'posts/create_post.html', {'form': form})\n\n\n@login_required\ndef post_edit(request: HttpRequest, post_id: int) -> HttpResponse:\n \"\"\"Page to edit a post for logged in user.\"\"\"\n post = get_object_or_404(Post, pk=post_id)\n if request.method != 'POST' and post.author == request.user:\n form = PostForm(instance=post)\n context = {\n 'form': form,\n 'is_edit': True,\n }\n return render(request, 'posts/create_post.html', context)\n if request.method != 'POST' and post.author != request.user:\n return redirect('posts:post_detail', post_id=post_id)\n form = PostForm(\n request.POST,\n instance=post,\n files=request.FILES or None,\n )\n if form.is_valid():\n save_form_to_db(form, request.user)\n return redirect('posts:post_detail', post_id=post_id)\n return render(request, 'posts/create_post.html', {'form': form})\n\n\n@login_required\ndef add_comment(request: HttpRequest, post_id: int) -> HttpResponse:\n \"\"\"Add a comment to a post by an authorized user.\"\"\"\n post = get_object_or_404(Post, pk=post_id)\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.post = post\n comment.save()\n return redirect('posts:post_detail', post_id=post_id)\n\n\n@login_required\ndef follow_index(request):\n \"\"\"\"Subscription page.\"\"\"\n posts = Post.objects.filter(\n author__following__user=request.user\n ).select_related('group').select_related('author')\n context = {\n 'title': 'Подписки',\n 'page_obj': create_paginator(request, posts),\n 'follow': True,\n }\n return render(request, 'posts/follow.html', context)\n\n\n@login_required\ndef profile_follow(request, username):\n \"\"\"Add author to subscriptions.\"\"\"\n user = get_object_or_404(User, username=username)\n if user == request.user:\n return redirect('posts:profile', username=username)\n Follow.objects.get_or_create(\n author=user,\n user=request.user,\n )\n return redirect('posts:profile', username=username)\n\n\n@login_required\ndef profile_unfollow(request, username):\n \"\"\"Remove author from subscriptions.\"\"\"\n user = get_object_or_404(User, username=username)\n sub = Follow.objects.filter(author=user, user=request.user)\n sub.delete()\n return redirect('posts:profile', username=username)\n\n\ndef save_form_to_db(form: PostForm, user: User) -> None:\n \"\"\"\"Save post to DB.\"\"\"\n post = form.save(commit=False)\n post.text = form.cleaned_data['text']\n post.group = form.cleaned_data['group']\n post.author = user\n post.save()\n\n\ndef create_paginator(request: HttpRequest, posts: Post) -> Paginator:\n \"\"\"Create paginator\"\"\"\n paginator = Paginator(posts, NUMBER_POSTS_PER_PAGE)\n page_number = request.GET.get('page')\n return paginator.get_page(page_number)\n","repo_name":"Gollum959/hw05_final","sub_path":"yatube/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3696474580","text":"from hrapp.models.user import User\n\nfrom .base_repository import BaseRepository\n\n\nclass UserRepository(BaseRepository):\n model = User\n \n @classmethod\n def get_department_users(cls, department_id = None, **kwargs):\n users = cls.model.objects\n if department_id:\n users = users.filter(department_id = department_id)\n else:\n \n users = users.prefetch_related('department')\n for key, value in kwargs.items():\n if value:\n users = users.filter(**{key+'__contains': value})\n return users\n \n \n ","repo_name":"MohammedSalah96/we_task","sub_path":"hrapp/repositories/user_repository.py","file_name":"user_repository.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74508371575","text":"from random import randint;\r\nimport time;\r\nimport sys;\r\n\r\ndef displayBoard(board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']) :\r\n print(f'\\n\\n | | | |');\r\n print(f' 1 | 2 | 3 {board[0]} | {board[1]} | {board[2]}');\r\n print(f' | | | |');\r\n print(f' ----------- -----------');\r\n print(f' | | | |');\r\n print(f' 4 | 5 | 6 {board[3]} | {board[4]} | {board[5]}');\r\n print(f' | | | |');\r\n print(f' ----------- -----------');\r\n print(f' | | | |');\r\n print(f' 7 | 8 | 9 {board[6]} | {board[7]} | {board[8]}');\r\n print(f' | | | |\\n\\n');\r\n\r\n\r\ndef chooseFirst() :\r\n return randint(0,1);\r\n \r\n \r\ndef getPieceSelection(first) :\r\n dispStrSlow(f'Player {first}, please select your piece (\\'X\\' or \\'O\\'): ', 0.05);\r\n choice = input().upper();\r\n while choice != 'X' and choice != 'O' :\r\n if choice == '-1' : sys.exit();\r\n dispStrSlow('Please enter either \\'X\\' or \\'O\\': ', 0.05);\r\n choice = input().upper();\r\n \r\n if first == 1 :\r\n if choice == 'X' :\r\n return (choice, 'O');\r\n else :\r\n return (choice, 'X');\r\n else :\r\n if choice == 'X' :\r\n return ('O', choice);\r\n else :\r\n return ('X', choice);\r\n\r\n \r\ndef getMove(player, board) :\r\n validNums = ['1', '2', '3', '4', '5', '6', '7', '8', '9'];\r\n move = input(f'Player {player}, pick the spot for your next move (or -1 to exit): ');\r\n \r\n open = False;\r\n if move.isdigit() :\r\n open = isOpen(int(move)-1, board)\r\n while not open :\r\n if move == '-1' : sys.exit('\\n\\nGoodbye!');\r\n msg = 'Please pick a valid spot (number from 1 to 9 or -1 to exit): ';\r\n if move in validNums : msg = 'That spot is already taken! Choose an open spot (or -1 to exit): ';\r\n move = input(msg);\r\n if move.isdigit() :\r\n open = isOpen(int(move)-1, board)\r\n else : \r\n move = int(move);\r\n \r\n return move;\r\n\r\n\r\ndef isOpen(location, board) :\r\n try:\r\n return board[location].isspace();\r\n except:\r\n return False;\r\n \r\n \r\ndef updateBoard(move, board, piece) :\r\n board[move-1] = piece;\r\n\r\n\r\ndef toggle(turn) :\r\n if turn == 0 :\r\n return 1;\r\n else :\r\n return 0;\r\n \r\n\r\ndef checkWin(board) :\r\n #converts between 2D coordinates, where x increases from left (0) to right(2) \r\n #and y from top (0) to bottom (2), to the indexes in the board list (index = x+3y)\r\n def convertToIndex(coordinate) : return coordinate[0] + 3*coordinate[1];\r\n \r\n def checkStreak(start, xStep, yStep) : \r\n streak = 0;\r\n x = start[0];\r\n y = start[1];\r\n while streak < 3 and x < 3 and y < 3 :\r\n if board[convertToIndex(start)].isspace() : return False;\r\n if board[convertToIndex((x,y))] == board[convertToIndex(start)] :\r\n streak +=1;\r\n else :\r\n return False;\r\n x += xStep;\r\n y += yStep;\r\n return streak == 3;\r\n \r\n #For bigger grids, 3 could be replaced with new size\r\n values = [x for x in range(1, 3)]\r\n zeroes = [0]*3;\r\n points1 = list(zip(values, zeroes));\r\n points2 = list(zip(zeroes, values));\r\n #All possible winning combinations must include the top row and left column\r\n possibleStarts = points1 + points2;\r\n \r\n win = False;\r\n for t in possibleStarts :\r\n if t[0] == 0 :\r\n win = checkStreak(t, 1, 0);\r\n else :\r\n win = checkStreak(t, 0, 1);\r\n if win :\r\n return True;\r\n \r\n #Checks for (0,0) and diagonal from (2,0)\r\n return checkStreak((0,0), 1, 0) or checkStreak((0,0), 0, 1) or checkStreak((0,0), 1, 1) or checkStreak((2,0), -1, 1); \r\n \r\n \r\ndef checkTie(board) :\r\n return ' ' not in board;\r\n \r\n \r\ndef getReplay() :\r\n dispStrSlow('\\n\\nDo you want to play again (Y or N)?', 0.05);\r\n askReplay = input().upper();\r\n while askReplay != 'Y' and askReplay != 'N' :\r\n dispStrSlow('Enter either \\'Y\\' or \\'N\\'?', 0.05);\r\n askReplay = input().upper();\r\n print('\\n');\r\n return askReplay;\r\n \r\n\r\ndef displayInstructions(speed) :\r\n dispStrSlow('\\nInstructions: Y\\'all know how to play tic-tac-toe: get three of your pieces in a row \\n(either along a column, row, or diagonal), and you win.\\n', speed);\r\n dispStrSlow('To enter your moves, enter the number corresponding to the spot you want to play according \\nto the diagram on the left below (which will be displayed every turn so you won\\'t forget)\\n', speed);\r\n time.sleep(0.3);\r\n displayBoard();\r\n time.sleep(0.3);\r\n dispStrSlow('If you ever want to exit the game, type \\'-1\\' to exit.\\n', speed);\r\n dispStrSlow('Got it? Great! Now, decide amongst yourselves who will be Player 1 and Player 2.\\n', speed);\r\n time.sleep(1);\r\n dispStrSlow('Ready? ', speed);\r\n time.sleep(1);\r\n dispStrSlow('Type anything to get started!', speed)\r\n input();\r\n print();\r\n\r\n\r\ndef displayStatistics(players, total) :\r\n p1 = players[0][2];\r\n p2 = players[1][2];\r\n \r\n perc1 = (p1/total)*100;\r\n perc2 = (p2/total)*100;\r\n \r\n print('------------------------------------------------------------------------------------');\r\n dispStrSlow(f'End game stats: \\n', 0.05);\r\n dispStrSlow(f'Number of games played: {total}\\n', 0.05);\r\n dispStrSlow(f'Number of games won: Player 1 -> {p1}, Player 2 -> {p2}\\n', 0.05);\r\n dispStrSlow(f'Winning percentages: Player 1 - {perc1:1.1f}%, Player 2 - {perc2:1.1f}%\\n', 0.05);\r\n print('------------------------------------------------------------------------------------');\r\n \r\n\r\ndef dispStrSlow(phrase, t) :\r\n for i in phrase :\r\n print(i, end='');\r\n sys.stdout.flush();\r\n time.sleep(t);\r\n \r\n \r\ndef main() :\r\n players = [[1, ' ', 0], [2, ' ', 0]];\r\n totalGames = 0;\r\n displayInstructions(0.03);\r\n replay = True;\r\n while replay :\r\n board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '];\r\n\r\n first = chooseFirst();\r\n \r\n dispStrSlow('The person going first will be ', 0.03);\r\n dispStrSlow('...', 0.5); \r\n dispStrSlow(f' Player {players[first][0]}!\\n\\n', 0.03); \r\n time.sleep(0.5);\r\n print();\r\n \r\n players[0][1], players[1][1] = getPieceSelection(players[first][0]);\r\n dispStrSlow(f'Player 1 will be \\'{players[0][1]}\\' and Player 2 will be \\'{players[1][1]}\\'.\\n\\n', 0.03);\r\n dispStrSlow('Let\\'s begin!', 0.03);\r\n time.sleep(1);\r\n displayBoard(board);\r\n \r\n gameOver = False;\r\n tie = False;\r\n currPlayer = players[first][0];\r\n while not gameOver and not tie:\r\n time.sleep(0.2);\r\n move = getMove(currPlayer, board);\r\n time.sleep(0.2);\r\n updateBoard(move, board, players[first][1]);\r\n displayBoard(board);\r\n gameOver = checkWin(board);\r\n tie = checkTie(board);\r\n first = toggle(first);\r\n currPlayer = players[first][0];\r\n \r\n if gameOver :\r\n dispStrSlow(f'Player {players[toggle(first)][0]} wins!', 0.03);\r\n players[toggle(first)][2] += 1;\r\n elif tie :\r\n dispStrSlow('It\\'s a tie!', 0.03);\r\n \r\n totalGames += 1;\r\n replay = getReplay() == 'Y';\r\n \r\n displayStatistics(players, totalGames);\r\n dispStrSlow('\\n\\nGoodbye!\\n\\n', 0.03);\r\n \r\nif __name__ == '__main__' :\r\n main();\r\n","repo_name":"arjunkeerthi/Tic-Tac-Toe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":7853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"281253356","text":"import logging\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom checkword import checkWord\nfrom transliterate import to_cyrillic\nfrom settings.local_settings import TELEGRAM_TOKEN\n\nAPI_TOKEN = TELEGRAM_TOKEN\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO)\n\n# Initialize bot and dispatcher\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n@dp.message_handler(commands=['start', 'help'])\nasync def send_welcome(message: types.Message):\n \"\"\"\n This handler will be called when user sends `/start` or `/help` command\n \"\"\"\n await message.reply(\"Hi!\\nI'm Uz-imlo-bot!\\nYou can write any word to check whether it is true or not\")\n\n@dp.message_handler()\nasync def checkImlo(message: types.Message):\n # old style:\n # await bot.send_message(message.chat.id, message.text)\n msg = message.text\n javob = lambda msg: to_cyrillic(msg) if msg.isascii() else to_cyrillic(msg)\n message.text = javob(msg)\n\n if len(message.text)>1:\n message.text = message.text.split()\n for word in message.text:\n result = checkWord(word)\n if result['available']:\n response = f\"✅{word.capitalize()}\"\n else:\n response = f\"❌{word.capitalize()}\\n\"\n for text in result['matches']:\n response += f\"✅{text.capitalize()}\\n\"\n await message.answer(response)\n else:\n word = message.text\n result = checkWord(word)\n if result['available']:\n response = f\"✅{word.capitalize()}\"\n else:\n response = f\"❌{word.capitalize()}\\n\"\n for text in result['matches']:\n response += f\"✅{text.capitalize()}\\n\"\n await message.answer(response)\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)","repo_name":"Kamol774/Uz-imlo-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69948425978","text":"INPUT_DIR = '/kaggle/input/'\n\n\n\nTEST_SIZE = 0.3\n\nRANDOM_STATE = 128\n\n\n\nBATCH_SIZE = 8\n\nNUM_WORKERS = 0\nimport torch\n\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom albumentations import Normalize, Compose\n\nimport numpy as np\n\nimport pandas as pd\n\nimport cv2\n\nfrom tqdm.notebook import tqdm\n\nfrom sklearn.model_selection import train_test_split\n\nimport os\n\nimport glob\nclass RandomFaceDataset(Dataset):\n\n def __init__(self, img_dirs, labels, preprocess=None):\n\n '''\n\n Parameters:\n\n img_dirs: The directories that contain face images.\n\n Each directory coresponding to a video in the original training data.\n\n labels: Corresponding labels {'FAKE': 1, 'REAL', 0} of videos\n\n \n\n '''\n\n self.img_dirs = img_dirs\n\n self.labels = labels\n\n self.preprocess = preprocess\n\n\n\n def __len__(self):\n\n return len(self.img_dirs)\n\n \n\n def __getitem__(self, idx):\n\n if torch.is_tensor(idx):\n\n idx = idx.tolist()\n\n\n\n img_dir = self.img_dirs[idx]\n\n label = self.labels[idx]\n\n face_paths = glob.glob(f'{img_dir}/*.png')\n\n\n\n sample = face_paths[np.random.choice(len(face_paths))]\n\n \n\n face = cv2.imread(sample, 1)\n\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\n\n\n if self.preprocess is not None:\n\n augmented = self.preprocess(image=face)\n\n face = augmented['image']\n\n \n\n return {'face': face, 'label': np.array([label], dtype=float)}\nall_train_dirs = glob.glob(INPUT_DIR + 'deepfake-detection-faces-*')\n\nfor i, train_dir in enumerate(all_train_dirs):\n\n print('[{:02}]'.format(i), train_dir)\nall_dataframes = []\n\nfor train_dir in all_train_dirs:\n\n df = pd.read_csv(os.path.join(train_dir, 'metadata.csv'))\n\n df['path'] = df['filename'].apply(lambda x: os.path.join(train_dir, x.split('.')[0]))\n\n all_dataframes.append(df)\n\n\n\ntrain_df = pd.concat(all_dataframes, ignore_index=True, sort=False)\ntrain_df\n# Remove videos that don't have any face\n\ntrain_df = train_df[train_df['path'].map(lambda x: os.path.exists(x))]\ntrain_df\ntrain_df['label'].replace({'FAKE': 1, 'REAL': 0}, inplace=True)\ntrain_df\nlabel_count = train_df.groupby('label').count()['filename']\n\nprint(label_count)\nX = train_df['path'].to_numpy()\n\ny = train_df['label'].to_numpy()\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE, random_state=RANDOM_STATE, stratify=y)\npreprocess = Compose([\n\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], p=1)\n\n])\n\n\n\ntrain_dataset = RandomFaceDataset(\n\n img_dirs=X_train,\n\n labels=y_train,\n\n preprocess=preprocess\n\n)\n\nval_dataset = RandomFaceDataset(\n\n img_dirs=X_val,\n\n labels=y_val,\n\n preprocess=preprocess\n\n)\n\n\n\ntrain_dataloader = DataLoader(\n\n train_dataset,\n\n batch_size=BATCH_SIZE,\n\n shuffle=True,\n\n num_workers=NUM_WORKERS\n\n)\n\nval_dataloader = DataLoader(\n\n val_dataset,\n\n batch_size=BATCH_SIZE,\n\n shuffle=False,\n\n num_workers=NUM_WORKERS\n\n)\nfor batch in tqdm(train_dataloader):\n\n face_batch = batch['face']\n\n label_batch = batch['label']\n\n \n\n print(type(face_batch), face_batch.shape)\n\n print(type(label_batch), label_batch.shape)\n\n\n\n break\nfor batch in tqdm(val_dataloader):\n\n face_batch = batch['face']\n\n label_batch = batch['label']\n\n \n\n print(type(face_batch), face_batch.shape)\n\n print(type(label_batch), label_batch.shape)\n\n\n\n break","repo_name":"aorursy/new-nb-5","sub_path":"phunghieu_loading-merging-multiple-kaggle-datasets-demo.py","file_name":"phunghieu_loading-merging-multiple-kaggle-datasets-demo.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40586790263","text":"from mido import MidiFile\n\ndef print_hi(name):\n print(f'Hi, {name}')\n\nimport os\n\ndirectory = \"midi_files\"\n\nfor filename in os.listdir(directory):\n f = os.path.join(directory, filename)\n if filename.endswith(\".mid\"):\n print(os.path.join(directory, filename))\n mid = MidiFile(f)\n\n for track in mid.tracks:\n for i, msg in enumerate(track):\n if msg.is_cc(7):\n print('Volume changed to', 0)\n\n mid.save('updated_velocity_{}'.format(filename))\n\nif __name__ == '__main__':\n print_hi('velocity updated')\n\n","repo_name":"LoicValenti/Velocity_Update","sub_path":"Velocity_Update.py","file_name":"Velocity_Update.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10361751372","text":"from yaml import dump\nfrom functools import wraps\nfrom app_config import config, CONFIG_PATH\nfrom tkinter import Variable, IntVar, BooleanVar, StringVar, DoubleVar\n\ndef prefix_function(function, prefunction):\n @wraps(function)\n def run(*args, **kwargs):\n prefunction(*args, **kwargs)\n return function(*args, **kwargs)\n return run\n\ndef _hook_init(self, *args, **kwargs):\n self._previous_value = kwargs['value'] if 'value' in kwargs else None\n\ndef _hook_set(self, *args, **kwargs):\n try:\n self._previous_value = self.get()\n except:\n self._previous_value = self._default\n\n# this is a terrible idea\nVariable.__init__ = prefix_function(Variable.__init__, _hook_init)\nVariable.set = prefix_function(Variable.set, _hook_set)\n\ndef _create_variable(value):\n\n assert isinstance(value, (bool, int, float, str)), \\\n \"Invalid value type: %s\" + type(value)\n\n # bool needs to be first because isinstance(, int) returns true\n if isinstance(value, bool):\n return BooleanVar(\n value=value\n )\n\n if isinstance(value, int):\n return IntVar(\n value=value\n )\n\n if isinstance(value, float):\n return DoubleVar(\n value=value\n )\n\n if isinstance(value, str):\n return StringVar(\n value=value\n )\n\n raise ValueError(\n \"Invalid value type for variable creation: %s\" % type(value)\n )\n\ndef _dict_to_view_model(src, dst):\n for key, value in src.items():\n dst[key] = _dict_to_view_model(value, {}) \\\n if isinstance(value, dict) else _create_variable(value)\n return dst\n\ndef _view_model_to_dict(src, dst):\n for key, value in src.items():\n dst[key] = _view_model_to_dict(value, {}) \\\n if isinstance(value, dict) else value.get()\n return dst\n\nconfig_view_model = _dict_to_view_model(config, {})\n\ndef register_vm_updates(on_update, *vars_):\n traces = []\n for v in vars_:\n traces.append(\n (v, \"write\", v.trace_add(\"write\", lambda *a: on_update(v)))\n )\n return traces\n\ndef unregister_vm_updates(vm_traces):\n while vm_traces: \n t = vm_traces.pop(0)\n t[0].trace_remove(t[1], t[2])\n\ndef save_data():\n d = _view_model_to_dict(config_view_model, {})\n with open(CONFIG_PATH, 'w') as yaml_file:\n dump(d, yaml_file, sort_keys=False)","repo_name":"bmclare19/py-fisher","sub_path":"view_model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43789291920","text":"import json\nimport httpx\nfrom WatchList import *\nfrom UserInformation import *\n\nclass APIService:\n\n def __init__(self):\n return\n # Flask server does not support http/2 so unable to do async requests\n \"\"\" async def getSampleWatchList(self):\n async with httpx.AsyncClient() as client:\n response = await client.get(\"http://localhost:5000/getSampleWatchlist\")\n return response \"\"\"\n\n def getWatchList(self) -> list:\n jsonResponse = json.loads(httpx.get(\"http://localhost:5000/getWatchlist\").content)\n watchList = list()\n for item in jsonResponse:\n currItem: WatchList = WatchList()\n currItem.ProductGTIN = item['ProductGTIN']\n currItem.ProductModelNumber = item['ProductModelNumber']\n currItem.ProductName = item['ProductName']\n currItem.DesiredPrice = item['DesiredPrice']\n watchList.append(currItem)\n return watchList\n \n def getUserInfo(self) -> UserInformation:\n response = json.loads(httpx.get(\"http://localhost:5000/getUserInfo\").content)\n info: UserInformation = UserInformation()\n info.userEmail = response['userEmail']\n return info\n\n\n","repo_name":"ScienceLuke7/CS-317-Ecommerce-Monitoring","sub_path":"E-Commerce.UI/API_Service.py","file_name":"API_Service.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37731344501","text":"#\n#Written by: Luke Shannon\n#10/09/18\n#350x250\n#This module creates playing cards\n\nfrom graphics import *\nfrom random import *\n\nclass Card:\n\n \"\"\"__init__(self, center, win, rank, suit, real)\n takes numbers 1-4 as suit\n takes numbers 1-13 as rank\n getValue() returns rank\"\"\"\n\n def __init__(self, center, win, rank, suit, real):\n self.rank = rank\n self.suit = suit\n self.win = win\n self.centerX = center.getX()\n self.centerY = center.getY()\n #Getting the outline of the card size to fill later\n cardBack = Rectangle(Point(self.centerX-125/2,self.centerY-175/2),Point(self.centerX+125/2,self.centerY+175/2))\n cardBack.setWidth(2)\n #clubs -- some repetition here, but not problematic. Determines the suit and gives a value to the card\n #So that they can be drawn later\n if self.suit == 1:\n color = 'black'\n if self.rank > 10:\n if self.rank == 11:\n value = 'J'\n elif self.rank == 12:\n value = 'Q'\n elif self.rank == 13:\n value = 'K'\n else:\n value = 'error'\n elif self.rank == 1:\n value = 'A'\n else:\n value = self.rank\n #diamonds -- see comment above\n if self.suit == 2:\n color = 'red'\n if self.rank > 10:\n if self.rank == 11:\n value = 'J'\n elif self.rank == 12:\n value = 'Q'\n elif self.rank == 13:\n value = 'K'\n else:\n self.value = 'errpr'\n elif self.rank == 1:\n value = 'A'\n else:\n value = self.rank\n #hearts -- see comment above\n if self.suit == 3:\n color = 'red'\n if self.rank > 10:\n if self.rank == 11:\n value = 'J'\n elif self.rank == 12:\n value = 'Q'\n elif self.rank == 13:\n value = 'K'\n else:\n value = 'errpr'\n elif self.rank == 1:\n value = 'A'\n else:\n value = self.rank\n #spades -- see comment above\n if self.suit == 4:\n color = 'black'\n if self.rank > 10:\n if self.rank == 11:\n value = 'J'\n elif self.rank == 12:\n value = 'Q'\n elif self.rank == 13:\n value = 'K'\n else:\n value = 'errpr'\n elif self.rank == 1:\n value = 'A'\n else:\n value = self.rank\n\n #Draw a blank card with a question mark for when the card is not yet revealed\n if not real:\n cardBack.setFill('white')\n cardBack.draw(self.win)\n fakeText = Text(Point(self.centerX,self.centerY),\"?\")\n fakeText.setSize(36)\n fakeText.draw(win)\n\n #Drawing the card and with both the rank and suit\n else:\n #card back\n cardBack.setFill('white')\n cardBack.draw(self.win)\n\n #Rank Text with the appropriate color and size\n valueText = Text(Point(self.centerX-25,self.centerY),str(value))\n valueText.setFill(color)\n valueText.setSize(36)\n valueText.draw(self.win)\n\n #Preparing all types of suits to be drawn, all at the same point, so that we can decide later which to choose\n heartCenter = Point(self.centerX+25,self.centerY)\n hCX = heartCenter.getX()\n hCY = heartCenter.getY()\n hearts = Polygon(Point(hCX,hCY-4),Point(hCX-4,hCY-12),Point(hCX-14,hCY-12),Point(hCX-18,hCY),Point(hCX,hCY+20),Point(hCX+18,hCY),Point(hCX+14,hCY-12),Point(hCX+4,hCY-12))\n hearts.setOutline('red')\n hearts.setFill('red')\n \n spadesCenter = Point(self.centerX+25,self.centerY)\n sCX = spadesCenter.getX()\n sCY = spadesCenter.getY()\n spades = Polygon(Point(sCX,sCY+4),Point(sCX+4,sCY+12),Point(sCX+14,sCY+12),Point(sCX+18,sCY),Point(sCX,sCY-20),Point(sCX-18,sCY),Point(sCX-14,sCY+12),Point(sCX-4,sCY+12),Point(sCX,sCY),Point(sCX-2,sCY+22),Point(sCX+2,sCY+22))\n spades.setFill('black')\n \n diamondsCenter = Point(self.centerX+25,self.centerY)\n dCX = diamondsCenter.getX()\n dCY = diamondsCenter.getY()\n diamonds = Polygon(Point(dCX-15,dCY),Point(dCX,dCY+20),Point(dCX+15,dCY),Point(dCX,dCY-20))\n diamonds.setFill('red')\n diamonds.setOutline('red')\n \n clubsCenter = Point(self.centerX+25,self.centerY)\n cCX = clubsCenter.getX()\n cCY = clubsCenter.getY()\n clubs = Polygon(Point(cCX,cCY),Point(cCX-2,cCY+15),Point(cCX+2,cCY+15),Point(cCX,cCY),Point(cCX-12,cCY+6),Point(cCX-18,cCY),Point(cCX-12,cCY-6),Point(cCX,cCY),Point(cCX-6,cCY-12),Point(cCX,cCY-18),Point(cCX+6,cCY-12),Point(cCX,cCY),Point(cCX+12,cCY-6),Point(cCX+18,cCY),Point(cCX+12,cCY+6),Point(cCX,cCY))\n clubs.setFill('black')\n\n #Depending on the randomly generated number 1-4, draw the appropriate suit (alphabetically)\n if self.suit == 1:\n clubs.draw(win)\n elif self.suit == 2:\n diamonds.draw(win)\n elif self.suit == 3:\n hearts.draw(win)\n elif self.suit == 4:\n spades.draw(win)\n else:\n print(\"sad error\")\n\n #Get Value function necessary for after drawing and randomizing the card, so I can input randrange directly into my constructor, but still access the values later.\n def getValue(self):\n return self.rank\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n","repo_name":"remised77/PycharmProjects","sub_path":"Lab04/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36214536978","text":"import pandas as pd\r\nimport numpy as np\r\nimport datetime as datetime\r\nimport matplotlib.pyplot as plt\r\nfrom pandas_datareader import data as pdr\r\nimport yfinance\r\nimport pandas_ta as ta\r\n\r\n# Volume Weighted Average Price\r\ndef vwap(price, volume, period = 14):\r\n Vwap = []\r\n for i in range(len(price)):\r\n if i < period:\r\n Vwap.append(np.nan)\r\n else:\r\n low_range = i - period\r\n pvol = price[low_range:(i+1)]*volume[low_range:(i+1)]\r\n ind = np.sum(pvol)/np.sum(volume[low_range:(i+1)])\r\n Vwap.append(ind)\r\n return Vwap\r\n \r\n# Function to classify up or down movement of price data, up = True, down = False\r\n\r\ndef up_down(price):\r\n up_down = [True]\r\n for i in range(1,len(price)):\r\n if price[i] > price[i-1]:\r\n up_down.append(True)\r\n else:\r\n up_down.append(False)\r\n return up_down\r\n\r\n# Relative Strenght index\r\n\r\ndef rsi(price, period):\r\n delta = price.diff()\r\n up = delta.clip(lower=0)\r\n down = -1*delta.clip(upper=0)\r\n ema_up = up.ewm(com=period, adjust=False).mean()\r\n ema_down = down.ewm(com=period, adjust=False).mean()\r\n rs = 100 - (100/(1 + (ema_up/ema_down)))\r\n return rs\r\n\r\n# Moving average\r\n\r\ndef MA(price, period):\r\n MA = []\r\n for i in range(len(price)):\r\n if i < period:\r\n MA.append(np.nan)\r\n else:\r\n mean = np.mean(price[(i-period):(i+1)])\r\n MA.append(mean)\r\n return MA\r\n\r\ndef ROI(df,n):\r\n m = len(df)\r\n arr = []\r\n for i in range(0,n):\r\n arr.append('N')\r\n for j in range(n,m):\r\n roi= (df.Close[j] - df.Close[j-n])/df.Close[j-n] #Equation for ROI\r\n arr.append(roi)\r\n return arr\r\n\r\n# Stocastic Oscillator\r\n\r\ndef Stochastic_Oscillator(price, period = 14):\r\n K = []\r\n for i in range(len(price)):\r\n if i < period:\r\n K.append(np.nan)\r\n else:\r\n low_range = i - period\r\n L = np.min(price[low_range:(i+1)])\r\n H = np.max(price[low_range:(i+1)])\r\n C = price[i]\r\n ind = ((C-L)/(H-L))*100\r\n K.append(ind)\r\n return K \r\n\r\n#Exponential Moving Average \r\ndef exp_moving_average(price, period):\r\n ema= price.ewm(span=period, adjust=False).mean()\r\n return ema \r\n\r\n#MACD\r\ndef macd(df, price):\r\n res= df.ta.macd(close=price, fast=12, slow=26, signal=9, append=True)\r\n df.append(res)\r\n return res\r\n\r\n#Bollinger Bands\r\ndef Bollinger(price, ma_period = 14, n_std = 2):\r\n UB = []\r\n LB = []\r\n for i in range(len(price)):\r\n if i < ma_period:\r\n UB.append(np.nan)\r\n LB.append(np.nan)\r\n else:\r\n lr = i - ma_period\r\n hr = i +1\r\n ub = MA(price, ma_period)[-1] + n_std*np.std(price[lr:hr])\r\n lb = MA(price, ma_period)[-1] - n_std*np.std(price[lr:hr])\r\n UB.append(ub)\r\n LB.append(lb)\r\n return UB,LB \r\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\r\n n_vars = 1 if type(data) is list else data.shape[1]\r\n df = pd.DataFrame(data)\r\n cols, names = list(), list()\r\n # input sequence (t-n, ... t-1)\r\n for i in range(n_in, 0, -1):\r\n cols.append(df.shift(i))\r\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\r\n # forecast sequence (t, t+1, ... t+n)\r\n for i in range(0, n_out):\r\n cols.append(df.shift(-i))\r\n if i == 0:\r\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\r\n else:\r\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\r\n # put it all together\r\n agg = pd.concat(cols, axis=1)\r\n agg.columns = names\r\n # drop rows with NaN values\r\n if dropnan:\r\n agg.dropna(inplace=True)\r\n return agg\r\n\r\ndef ud_pred(pred, actual):\r\n up_down = [1]\r\n for i in range(1,len(pred)):\r\n if pred[i] > actual[i-1]:\r\n up_down.append(1)\r\n else:\r\n up_down.append(0)\r\n return up_down\r\n\r\ndef ud(predict):\r\n up_down = [1]\r\n for i in range(1,len(predict)):\r\n if predict[i] > predict[i-1]:\r\n up_down.append(1)\r\n else:\r\n up_down.append(0)\r\n return up_down\r\n\r\n# Scale back predictions\r\ndef scale_back(pred, close):\r\n predict = [close[0]]\r\n for i in range(1,len(pred)):\r\n predict.append(pred[i]+list(close)[i-1])\r\n return predict\r\n\r\ndef scale_back_pct(pred, close):\r\n close = list(close)\r\n predict = [close[0]]\r\n for i in range(1,len(pred)):\r\n predict.append(pred[i]*list(close)[i-1]+list(close)[i-1])\r\n return predict\r\n\r\ndef long_equity(prediction, close, commission = 0.00):\r\n long = [close[0]]\r\n for i in range(1,len(prediction)-1):\r\n if prediction[i] == 1:\r\n long.append(long[i-1] + close[i] - close[i-1] - close[i-1]*commission)\r\n else:\r\n long.append(long[i-1] + 0)\r\n return long\r\n\r\ndef short_equity(prediction, close, commission = 0.00):\r\n short = [close[0]]\r\n for i in range(1,len(prediction)-1):\r\n if prediction[i] == 0:\r\n short.append(short[i-1] + close[i-1] - close[i] - close[i-1]*commission)\r\n else:\r\n short.append(short[i-1] + 0)\r\n return short \r\n\r\ndef total_equity(prediction, close, commission = 0.00):\r\n total = [close[0]]\r\n for i in range(1,len(prediction)-1):\r\n if prediction[i] == 1:\r\n total.append(total[i-1] + close[i] - close[i-1] - close[i-1]*commission)\r\n else:\r\n total.append(total[i-1] + close[i-1] - close[i] - close[i-1]*commission)\r\n return total\r\n\r\ndef percentage_returns(price):\r\n perc = [0]\r\n for i in range(1,len(price)):\r\n perc.append(perc[i-1] + ((price[i]-price[i-1])/price[0])*100)\r\n return perc\r\n\r\ndef percentage_returns_for_dd(price):\r\n perc = [0]\r\n for i in range(1,len(price)):\r\n perc.append(perc[i-1] + ((price[i]-price[i-1])/price[i-1])*100)\r\n return perc\r\n\r\ndef max_drawdown(equity):\r\n dd = 0\r\n drawdown = [0]\r\n for i in range(1,len(equity)):\r\n if equity[i] < equity[i-1]:\r\n dd += equity[i-1] - equity[i]\r\n drawdown.append(dd)\r\n else:\r\n drawdown.append(dd)\r\n dd = 0\r\n for i in range(1, len(drawdown)):\r\n if drawdown[i] != 0:\r\n drawdown[i] = drawdown[i] + drawdown[i-1]\r\n else:\r\n drawdown[i] = 0\r\n Drawdown = [i for i in drawdown if i != 0]\r\n return [max(Drawdown), np.mean(Drawdown), min(Drawdown)]\r\n\r\ndef number_of_trades(equity, updown_pred):\r\n count = 0\r\n for i in range(1,len(equity)-1):\r\n if (equity[i] != equity[i-1] and updown_pred[i] != updown_pred[i-1]):\r\n count += 1\r\n total_profit = float(equity[len(equity)-1] - equity[0])\r\n average_profit = total_profit/count\r\n return count \r\n\r\ndef number_of_winning_long_trades(equity, updown_pred):\r\n long_results = []\r\n profit_loss = 0\r\n for i in range(1,len(equity)-1):\r\n if (equity[i] != equity[i-1] and updown_pred[i] == updown_pred[i-1] and updown_pred[i] == 1):\r\n profit_loss += equity[i] - equity[i-1]\r\n if (equity[i] != equity[i-1] and updown_pred[i] != updown_pred[i-1]):\r\n long_results.append(profit_loss)\r\n profit_loss = 0\r\n wins = [i for i in long_results if i > 0]\r\n losses = [i for i in long_results if i < 0]\r\n average_profit = np.mean(wins)\r\n best_profit = np.max(wins)\r\n worst_loss = np.min(losses)\r\n average_loss = np.mean(losses)\r\n profit_loss_ratio = np.abs(average_profit/average_loss)\r\n return [len(wins), average_profit, average_loss, profit_loss_ratio, best_profit, worst_loss]\r\n\r\ndef number_of_winning_short_trades(equity, updown_pred):\r\n long_results = []\r\n profit_loss = 0\r\n for i in range(1,len(equity)-1):\r\n if (equity[i] != equity[i-1] and updown_pred[i] == updown_pred[i-1] and updown_pred[i] == 0):\r\n profit_loss += equity[i] - equity[i-1]\r\n if (equity[i] != equity[i-1] and updown_pred[i] != updown_pred[i-1]):\r\n long_results.append(profit_loss)\r\n profit_loss = 0\r\n wins = [i for i in long_results if i > 0]\r\n losses = [i for i in long_results if i < 0]\r\n average_profit = np.mean(wins)\r\n best_profit = np.max(wins)\r\n worst_loss = np.min(losses)\r\n average_loss = np.mean(losses)\r\n profit_loss_ratio = np.abs(average_profit/average_loss)\r\n return [len(wins), average_profit, average_loss, profit_loss_ratio, best_profit, worst_loss] \r\n\r\ndef long_short_market_time(updown_pred):\r\n long_days = 0\r\n short_days = 0\r\n for i in updown_pred:\r\n if i > 0:\r\n long_days += 1\r\n else:\r\n short_days += 1\r\n return [long_days, short_days]\r\n ","repo_name":"valerio591/LSTM-Automated-Trading-Bot","sub_path":"Trading Bot/Models/package/euklid_regressor.py","file_name":"euklid_regressor.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"28515393826","text":"import numpy as np\r\nfrom atuo_radiomics.auto_run import Radiomics\r\nfrom atuo_radiomics.Classifier import LR, SVM\r\nfrom atuo_radiomics.FeatureSelector import FeatureSelectByRFE, FeatureSelectByANOVA, FeatureSelectByKruskalWallis, FeatureSelectByRelief\r\nimport os\r\njoin = os.path.join\r\nfrom pathlib import Path\r\nimport shutil\r\nimport pandas as pd\r\n\r\nLVSI_dilation = {\"DWI\": 5, \"T1CE\": 6, \"T2\": 3}\r\nLNM_dilation = {\"DWI\": 9, \"T1CE\": 9, \"T2\": 7}\r\n\r\n\r\ndef run_dilation(root, modal, tasks=1, min_i=1, max_i=11):\r\n for i in range(min_i, max_i):\r\n path = os.path.join(root, modal, f\"dilation_{i}\")\r\n a = Radiomics([FeatureSelectByRFE, FeatureSelectByANOVA, FeatureSelectByKruskalWallis, FeatureSelectByRelief],\r\n [LR, SVM], path, max_feature_num=10, task_num = tasks, has_shape=False)\r\n a.load_csv(os.path.join(path, \"train_numeric_feature.csv\"), os.path.join(path, \"test_numeric_feature.csv\"))\r\n a.run()\r\n\r\n\r\ndef merge_liuzhou():\r\n best_dilation = {\"DWI\": 5, \"T1CE\": 6, \"T2\": 3}\r\n # best_dilation = {\"DWI\": 9, \"T1CE\": 9, \"T2\": 7}\r\n for category in [\"train\", \"test\"]:\r\n for modal in [\"DWI\", \"T2\", \"T1CE\"]:\r\n cancer_df = pd.read_csv(\r\n f\"/homes/syli/dataset/EC_all/model/{modal}/original+log-sigma/best_model/selected_{category}_data.csv\")\r\n dilation_df = pd.read_csv(\r\n f\"/homes/syli/dataset/EC_all/model/dilation_split/{modal}/dilation_{best_dilation[modal]}/original+log-sigma/best_model/selected_{category}_data.csv\")\r\n dilation_features = [i.replace(\"resampled.nii\", f\"dilation_{best_dilation[modal]}\") for i in list(dilation_df)]\r\n dilation_df.columns = dilation_features\r\n store_path = f\"/homes/syli/dataset/EC_all/model/merge_all/merge_features/{modal}\"\r\n os.makedirs(store_path, exist_ok=True)\r\n df = pd.merge(cancer_df, dilation_df, on=[\"CaseName\", \"label\"])\r\n df.to_csv(os.path.join(store_path, f\"{category}_numeric_feature.csv\"), index=False)\r\n\r\n\r\n\r\ndef copy_new_dilation(source):\r\n for i in Path(source).iterdir():\r\n if not i.is_dir():\r\n continue\r\n for j in i.iterdir():\r\n tar_path = str(j).replace(\"liuzhou\", \"liuzhou_split\")\r\n os.makedirs(tar_path, exist_ok=True)\r\n shutil.copy(str(j)+\"/train_numeric_feature.csv\", tar_path+\"/train_numeric_feature.csv\")\r\n shutil.copy(str(j) + \"/test_numeric_feature.csv\", tar_path + \"/test_numeric_feature.csv\")\r\n\r\n\r\ndef merge_label_feature(clinical_path, feature_path, modals, key, store_path):\r\n clinical_df = pd.read_excel(clinical_path)\r\n for modal in modals:\r\n df = pd.read_csv(os.path.join(feature_path, f\"{modal}_features.csv\"))\r\n new_df = pd.merge(clinical_df[[\"CaseName\", key]], df)\r\n new_df.rename(columns={key: \"label\"}, inplace=True)\r\n os.makedirs(os.path.join(store_path, modal), exist_ok=True)\r\n print(os.path.join(store_path, modal))\r\n new_df.to_csv(os.path.join(store_path, modal, f\"{key}_test.csv\"), index=False)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root = \"/homes/syli/dataset/LVSI_LNM/multi_task/liuzhou\"\r\n #\r\n # merge_label_feature(root+\"/shenzhen.xlsx\", root+\"/shenzhen ROI seg/dataframe\", [\"DWI\", \"T1CE\", \"T2\"], \r\n # \"LNM\", root+\"/shenzhen ROI seg/LNM/liunei\")\r\n for modal in [\"T2\"]:\r\n run_dilation(root, modal, 2, 2, 11)\r\n # a = Radiomics([FeatureSelectByRFE, FeatureSelectByANOVA, FeatureSelectByKruskalWallis, FeatureSelectByRelief],\r\n # [SVM, LR], savepath=root, task_num=1, max_feature_num=10)\r\n # a.load_csv(join(root, \"train_numeric_feature.csv\"), join(root, \"test_numeric_feature.csv\"))\r\n # #a.predict_save(a.train_data, a.test_data, a.savepath, True)\r\n # a.run()\r\n # combine_prediction(root, [\"T1\", \"T2\", \"T1CE\", \"ADC\", \"b1000\"], root)\r\n # external_test()\r\n\r\n\r\n","repo_name":"shenchuxiaofugui/endometrial","sub_path":"run_radiomics.py","file_name":"run_radiomics.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13938256840","text":"import hermipy.equations as eq\nimport hermipy.quad as quad\nimport hermipy.series as series\nimport hermipy.function as func\n\nimport numpy as np\nimport numpy.linalg as la\nimport sympy as sym\n\n# Declare variables, operators and parameters {{{\nsym.init_printing()\nequation = eq.McKean_Vlasov_harmonic_noise\nx, y, z, f = equation.x, equation.y, equation.z, equation.f\nparams = equation.params()\nparams['γ'], params['θ'] = 0, 0\nforward, params = equation.equation(params), equation.params()\nβ, Vp, θ, m, ε = (params[k] for k in ('β', 'Vp', 'θ', 'm', 'ε'))\n\n# Map the forward operator to a \"backward operator\"\nfactor = sym.exp(- sym.Rational(1, 2) * (y*y + z*z))\noperator = eq.map_operator(forward, f, factor).expand()\n\nepsilon = params['ε']\nL0 = (operator*epsilon**2).expand().subs(epsilon, 0)\nL1 = (operator*epsilon - L0/epsilon).expand().subs(epsilon, 0)\nL2 = (operator - L1/epsilon - L0/epsilon**2).expand()\n\n# Quadrature used to solve cell problems\ndegree, nquad, σy, σz = 10, 20, 1, 1\nquad_num = quad.Quad.gauss_hermite(nquad, dirs=[1, 2], mean=[0, 0],\n cov=[[σy, 0], [0, σz]])\n\n# Discretization in Hermite space\nfyz = sym.Function('fyz')(y, z)\n\nop = quad_num.discretize_op(L0.subs(f, fyz), degree,\n sparse=False, index_set=\"triangle\")\n# }}}\n# Expansion of the solution {{{\nnterms = 7\nzeros = [sym.Integer(0)] * nterms\nu, centered, unk = zeros, zeros.copy(), zeros.copy()\n\n\ndef iL0(term):\n t_rhs = quad_num.transform(term, degree)\n solution = la.solve(op.matrix[1:, 1:], t_rhs.coeffs[1:])\n solution = np.array([0, *solution])\n sol_series = series.Series(solution, t_rhs.position, significant=14)\n symbolic = sol_series.to_function().as_xyz()\n symbolic = func.Function.sanitize(symbolic, max_denom=1e8)\n\n print(\"--> Solving cell problem with rhs: \" + str(term))\n print(\"----> Solution: \" + str(symbolic))\n diff = (L0.subs(f, symbolic).doit() - term)\n if len(diff.expand().free_symbols) > 0:\n print(\"----> Error: does not match!\")\n sym.pprint(diff)\n import ipdb\n ipdb.set_trace()\n return symbolic\n\n\nfor i in range(nterms):\n print(\"Solving for power \" + str(i))\n\n unk[i] = sym.Function('u{}'.format(i))(x)\n u[i] = unk[i]\n\n rhs = sym.Integer(0)\n if i > 0:\n rhs += - L1.subs(f, u[i - 1])\n if i > 1:\n rhs += - L2.subs(f, u[i - 2])\n\n split = func.Function(rhs.doit().expand(), dim=3).split()\n\n for term in split:\n x_part = term[-1] * term[0].as_xyz()\n yz_part = term[1].as_xyz() * term[2].as_xyz()\n t_rhs = quad_num.transform(yz_part, degree)\n centered[i] += round(t_rhs.coeffs[0], 10) * x_part\n u[i] += x_part * iL0(yz_part)\n u[i] = func.Function.sanitize(u[i])\n centered[i] = func.Function.sanitize(centered[i])\n\n# }}}\n# Some manual calculations {{{\n# Operator\nfx = sym.Function('fx')(x)\nLFP = ((1/β)*sym.exp(-β*Vp)*(fx*sym.exp(β*Vp)).diff(x)).diff(x)\n\n# Centering condition for u₀\nprint(\"Equation for u₀: \")\nsym.pprint(centered[2])\nZ = sym.symbols('Z', real=True)\nsolution_0 = sym.exp(-β*Vp)/Z\nif not centered[2].subs(unk[0], solution_0).doit().cancel() == 0:\n print(\"Error!\")\n exit(0)\n\n# Centering condition for u₁\nfor i in range(3, 6):\n print(\"Equation for u_{}: \".format(i-2))\n sym.pprint(centered[i])\n\n # The solution is 0\n for j in range(nterms):\n u[j] = u[j].subs(unk[i-2], 0).doit().expand()\n centered[j] = centered[j].subs(unk[i-2], 0).doit().expand()\n\n# Centering condition for u₂\nprint(\"Equation for u₄: \")\nsym.pprint(centered[6])\n\nC1, C2 = sym.symbols('C1 C2', real=True)\n\nfokker_planck = - ((1/β)*sym.exp(-β*Vp)*(unk[4]*sym.exp(β*Vp)).diff(x)).diff(x)\nremainder = centered[6] - fokker_planck\n\nbeta3 = (remainder*β**3).expand().subs(β, 0)\nbeta2 = ((remainder - beta3/β**3) * β**2).expand().subs(β, 0)\nbeta1 = ((remainder - beta3/β**3 - beta2/β**2) * β).expand().subs(β, 0)\nbeta0 = remainder - beta3/β**3 - beta2/β**2 - beta1/β\n\nassert (unk[0].diff(x)*Vp.diff(x)).diff(x, x, x, x) \\\n + (unk[0].diff(x, x, x)*Vp.diff(x)).diff(x, x) == beta2\n\nassert (Vp.diff(x)*Vp.diff(x)*unk[0].diff(x)).diff(x, x, x) \\\n - (Vp.diff(x)*Vp.diff(x, x)*unk[0].diff(x)).diff(x, x) - beta1 == 0\n\nint_fokker_planck = - (1/β)*sym.exp(-β*Vp)*(unk[4]*sym.exp(β*Vp)).diff(x)\n\nint_beta1 = (Vp.diff(x)*Vp.diff(x)*unk[0].diff(x)).diff(x, x) \\\n - (Vp.diff(x)*Vp.diff(x, x)*unk[0].diff(x)).diff(x)\n\nint_beta2 = (unk[0].diff(x)*Vp.diff(x)).diff(x, x, x) \\\n + (unk[0].diff(x, x, x)*Vp.diff(x)).diff(x)\n\nint_beta3 = unk[0].diff(x, x, x, x, x)\n\nintegral1 = int_fokker_planck + \\\n int_beta1/β + int_beta2/β**2 + int_beta3/β**3 + C1\n\nassert (integral1.diff(x) - centered[6]).expand() == 0\n\n# C1 = 0\nintegral1 = integral1.subs(C1, 0)\n\nintegrand = ((integral1.subs(unk[0], solution_0).doit().expand()\n .subs(unk[4], solution_0*unk[4]).doit().expand()\n / solution_0).expand())\n\nintegral2 = 3*(Vp.diff(x, x))**2/(2*β) - Vp.diff(x, x, x, x)/β**2 \\\n - Vp.diff(x, x)*Vp.diff(x)**2 - unk[4]/β \\\n + 2/β*Vp.diff(x)*Vp.diff(x, x, x) - Vp.diff(x, x)**2/β \\\n + (Vp.diff(x)*Vp.diff(x, x)**2).integrate(x) + C2\n\nassert (integrand - integral2.diff(x)).expand() == 0\n\nsolution_4 = sym.solve(integral2, unk[4])[0] * solution_0\n\nassert centered[6].subs(unk[0], solution_0)\\\n .subs(unk[4], solution_4).doit().expand() == 0\n# }}}\n# Projection on x - z {{{\nquady = quad.Quad.gauss_hermite(nquad, dirs=[1], mean=[0], cov=[[σy]])\nsolution, proj_xz = u[0] + ε*u[1] + ε**2*u[2] + ε**3*u[3] + ε**4*u[4], 0\nsplit = func.Function(solution.expand(), dim=3).split()\nn_proj = 5\nproj_xz = [0]*n_proj\nfor i in range(n_proj):\n split = func.Function(u[i].expand(), dim=3).split()\n for term in split:\n xz_part = term[-1] \\\n * term[0].as_xyz() \\\n * term[2].as_xyz()\n integ = quady.integrate(term[1].as_xyz())\n proj_xz[i] += integ * xz_part\n proj_xz[i] = func.Function.sanitize(proj_xz[i])\n# }}}\n# Check solution {{{\n\nprint(\"Solution: \")\nsym.pprint(func.Function.sanitize(solution).factor())\n\nsolution_x = 0\nsplit = func.Function(solution.expand(), dim=3).split()\n\nfor term in split:\n x_part = term[-1] * term[0].as_xyz()\n yz_part = term[1].as_xyz() * term[2].as_xyz()\n solution_x += x_part * quad_num.integrate(yz_part)\n\nsolution_x = func.Function.sanitize(solution_x)\\\n .subs(unk[0], solution_0)\\\n .subs(unk[4], solution_4)\n\nprint(\"x-projection of the solution: \")\nsym.pprint(func.Function.sanitize(solution_x).factor())\n\nhandle = func.Function.sanitize(solution_x).factor()\n\n# Checks\nsolution = solution.subs(unk[0], solution_0).subs(unk[4], solution_4)\n\nfor i in range(nterms):\n u[i] = u[i].subs(unk[0], solution_0)\\\n .subs(unk[4], solution_4)\n\nassert (operator/epsilon**4).expand()\\\n .subs(f, solution + ε**5*u[5] + ε**6*u[6])\\\n .doit().expand().subs(epsilon, 0) == 0\n\n\n# }}}\n# Plot {{{\nn_points = 100\npotential = (x**4/4 - x**2/2) + θ/2*(x**2 - 2*x*m)\nquad_gauss = quad.Quad.gauss_hermite(n_points, dirs=[0, 2], cov=[[1, 0], [0, 1]])\nquadx = quad.Quad.gauss_hermite(100, dirs=[0], cov=[[1]])\n\nr = sym.Rational\nθn, mn, βn, εn = r(0), r(0, 5), r(1), r(1, 2)\npotential_n = potential.subs(((θ, θn), (m, mn), (β, βn), (ε, εn)))\nZ_n = quadx.integrate(sym.exp(-βn*potential_n), flat=True)\nsolution_0_n = sym.exp(-βn*potential_n)/Z_n\nsolution_4_n = solution_4.subs(Vp, potential).doit()\nsolution_4_n = solution_4_n.factor().subs(((θ, θn), (m, mn), (β, βn), (ε, εn), (Z, Z_n)))\nC2_n = quadx.integrate(sym.solve(solution_4_n, C2)[0] * solution_0_n, flat=True)\nsolution_4_n = solution_4_n.subs(C2, C2_n)\nassert abs(quadx.integrate(solution_0_n, flat=True) - 1) < 1e-8\nassert abs(quadx.integrate(solution_4_n, flat=True) - 0) < 1e-8\n\nrho_z = 1/sym.sqrt(2*sym.pi) * sym.exp(-z*z/2)\n\nproj_xz_n = [0]*n_proj\nfor i, p in enumerate(proj_xz):\n proj_xz_n[i] = (proj_xz[i]*rho_z).subs(unk[0], solution_0)\\\n .subs(unk[4], solution_4).doit()\n proj_xz_n[i] = proj_xz_n[i].subs(Vp, potential).doit()\n proj_xz_n[i] = proj_xz_n[i].factor().subs(((θ, θn), (m, mn), (β, βn),\n (ε, εn), (Z, Z_n), (C2, C2_n)))\n assert abs(quad_gauss.integrate(proj_xz_n[i], flat=True) - i == 0) < 1e-8\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rc('font', size=22)\nmatplotlib.rc('font', family='serif')\nmatplotlib.rc('text', usetex=True)\n\nquad_visu = quad.Quad.newton_cotes([n_points, n_points], [2, 2], dirs=[0, 2])\n\nfor i in range(5):\n fig, ax = plt.subplots()\n exponent = '^' + str(i) if i is not 1 else ''\n cont = quad_visu.plot(proj_xz_n[i], ax=ax, title='$\\\\varepsilon{}$'.format(exponent))\n ax.set_xlabel('$x$')\n ax.set_ylabel('$\\\\eta$')\n plt.colorbar(cont, ax=ax, pad=.01)\n for c in cont.collections:\n c.set_edgecolor(\"face\")\n plt.tight_layout()\n plt.savefig(\"asymptotic-harmonic-{}.eps\".format(i), bbox_inches='tight')\nplt.close('all')\n\nfig, axes = plt.subplots(2, 2)\nfor i in (0, 1):\n for j in (0, 1):\n n = 1 + 2*i + j\n cont = quad_visu.plot(proj_xz_n[n], ax=axes[i][j])\n plt.colorbar(cont, ax=axes[i][j], pad=.01)\n for c in cont.collections:\n c.set_edgecolor(\"face\")\n# plt.tight_layout()\n# plt.savefig(\"test.eps\", bbox_inches='tight')\nplt.show()\n# }}}\n","repo_name":"urbainvaes/hermipy","sub_path":"examples/asymptotic_harmonic.py","file_name":"asymptotic_harmonic.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"14453219851","text":"from __future__ import unicode_literals, print_function\n\n# set to 1 to get debug messages\ndebug_lvl = 0\n\n# number of parallel processes (for fst-mor)\nnum_processes = 4\n\n# fst executable and model\nfst_string = \"fst-mor sles/sles.a\"\n","repo_name":"rsennrich/zmorge","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"18917914986","text":"#This program hides a null cypher within a give list.\n#Uses the program load_dictionary to do so.\n\n#Required\nfrom random import randint\nimport string\nimport load_dictionary\n\n# Message should use no punctuation or numbers\nuser_message = input(\"Input a message to cipher. The message should not contain punctuation or numbers\\n\")\nuser_message.translate(string.punctuation)\nmessage = ''\nfor char in user_message:\n if char in string.ascii_letters:\n message += char\nprint(message, \"\\n\")\nmessage = \"\".join(message.split())\n\n# open dictionary file\nword_list = load_dictionary.loadFile('dictionary.txt')\ni = 0\n# build vocabulary word list with hidden message\nvocab_list = []\nfor letter in message:\n size = randint(6, 10)\n for word in word_list:\n if len(word) == size and word[i].lower() == letter.lower()\\\n and word not in vocab_list:\n vocab_list.append(word)\n i+=1\n if i > 4:\n i = 0\n break\n \nif len(vocab_list) < len(message):\n print(\"Word List is too small. Try larger dictionary or shorter message!\")\nelse: \n print(\"Vocabulary words for Unit 1: \\n\", *vocab_list, sep=\"\\n\") \n \n\n","repo_name":"stonespheres/pypher","sub_path":"null.py","file_name":"null.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40790504040","text":"from flask_jwt_extended import current_user\nfrom flask_restful import Resource\nfrom marshmallow import fields\nfrom sqlalchemy import and_, func\n\nfrom scuevals_api.models import Permission, Quarter, Section, Professor\nfrom scuevals_api.auth import auth_required\nfrom scuevals_api.utils import use_args\n\n\nclass QuartersResource(Resource):\n\n @auth_required(Permission.ReadEvaluations, Permission.WriteEvaluations)\n @use_args({'course_id': fields.Int(), 'professor_id': fields.Int()})\n def get(self, args):\n quarters = Quarter.query.filter(\n func.upper(Quarter.period) <= Quarter.current().with_entities(func.lower(Quarter.period)),\n Quarter.university_id == current_user.university_id\n )\n\n quarter_filters = []\n\n if 'course_id' in args:\n quarter_filters.append(Section.course_id == args['course_id'])\n\n if 'professor_id' in args:\n quarter_filters.append(Section.professors.any(Professor.id == args['professor_id']))\n\n if quarter_filters:\n expr = True\n for fil in quarter_filters:\n expr = and_(expr, fil)\n\n quarters = quarters.filter(Quarter.sections.any(expr))\n\n return [quarter.to_dict() for quarter in quarters.all()]\n","repo_name":"SCUEvals/scuevals-api","sub_path":"scuevals_api/resources/quarters.py","file_name":"quarters.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42246945368","text":"import re\ninput = 'day6input.txt'\n\ndef read_message():\n f = open(input)\n\n content = f.read()\n\n return content\n\nmessage = read_message()\n\nfor idx, char in enumerate(message):\n group = set(message[idx:idx + 14])\n if len(group) == 14:\n print(idx + 14)\n break","repo_name":"J-Coke/advent-of-code-2022","sub_path":"days1-9/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"893518962","text":"import re\n\nimport pytest\n\nfrom scrapli_community.cumulus.vtysh.cumulus_vtysh import DEFAULT_PRIVILEGE_LEVELS\n\n\n@pytest.mark.parametrize(\n \"priv_pattern\",\n [\n (\"linux\", \"user@scrapli1-r01-test-location1:mgmt-vrf:~$\"),\n (\"linux\", \"root@scrapli1-r01-test-location1:mgmt-vrf:~#\"),\n (\"exec\", \"SCRAPLI-R1#\"),\n (\"configuration\", \"scrapli_r1(config)#\"),\n ],\n ids=[\n \"linux\",\n \"linux\",\n \"exec\",\n \"configuration\",\n ],\n)\ndef test_default_prompt_patterns(priv_pattern):\n priv_level_name = priv_pattern[0]\n prompt = priv_pattern[1]\n\n prompt_pattern = DEFAULT_PRIVILEGE_LEVELS.get(priv_level_name).pattern\n match = re.search(pattern=prompt_pattern, string=prompt, flags=re.M | re.I)\n\n assert match\n","repo_name":"scrapli/scrapli_community","sub_path":"tests/unit/cumulus/vtysh/test_cumulus_vtysh.py","file_name":"test_cumulus_vtysh.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"22"} +{"seq_id":"35817035764","text":"import numpy as np\n\ntunnels = ['RIO450', 'DPT', 'DMAT', 'RCLT' , 'YBT']\nways = ['EntranceExit', 'ExitEntrance']\n\nfor tunnel in tunnels:\n\n\tfor way in ways:\n\t\tif(way == \"ExitEntrance\" and tunnel == \"RIO450\"):\n\t\t\tcontinue\n\t\tpath = tunnel+way+'.txt'\n\t\t\n\t\tlistLon, listLat = np.loadtxt(path, delimiter='\\t', unpack = True, usecols=(1,2), skiprows=1)\n\t\tfile = open('bare_outages/'+tunnel+way+'.txt','w')\n\t\tfile.write('if(outagesFile==\"'+tunnel+way+'\"){\\n'+ 'double listLonLat['+str(len(listLon))+']['+str(len(listLon))+'] = {\\n')\n\t\tfor i in range(0, len(listLon)):\n\t\t\tif(i == (len(listLon)-1)):\n\t\t\t\tfile.write('\\t{'+str(listLon[i])+', '+str(listLat[i])+'}\\n')\n\t\t\telse:\n\t\t\t\tfile.write('\\t{'+str(listLon[i])+', '+str(listLat[i])+'},\\n')\n\t\tfile.write('};\\n}')","repo_name":"pedroliborio/CooperativePositioning","sub_path":"localization/GPS/outagesxy/generate-bare-files.py","file_name":"generate-bare-files.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23497172008","text":"import time\nimport requests\n\nfrom myth.sink import Sink\n\ndef is_number(n):\n is_number = True\n try:\n num = float(n)\n # check for \"nan\" floats\n is_number = num == num # or use `math.isnan(num)`\n except ValueError:\n is_number = False\n return is_number\n\nclass ClickhouseSink(Sink):\n def __init__(self, config, fields, worker_id):\n Sink.__init__(self, config, fields, worker_id)\n self.url = self.config[\"url\"]\n \n self.schema_config = self.config[\"schema\"]\n self.db = self.schema_config[\"database_name\"]\n self.table = self.schema_config[\"table_name\"]\n \n if self.schema_config[\"create_table\"] == \"auto\":\n self.create_table = True\n self.create_table_sql = self.generate_schema()\n elif self.schema_config[\"create_table\"] == \"yes\" :\n self.create_table = True\n self.create_table_sql = self.schema_config[\"create_table_sql\"]\n else:\n self.create_table = False\n \n self.name = f'clickhouse:{self.config[\"url\"]}'\n \n self.init()\n \n def send(self, data):\n load_sql = f'INSERT INTO {self.db}.{self.table} VALUES '\n rows=data.strip().split('\\n')\n for row in rows:\n fields = row.split('|')\n processed_fields = self.process_fields(fields)\n load_sql = load_sql + '(' + ','.join(processed_fields) + ') '\n \n #print(load_sql)\n ts = time.time() \n r = self.query(load_sql)\n te = time.time() \n return te-ts\n \n def process_fields(self, fields):\n processed_fields = []\n for t,v in zip(self.fields_types, fields):\n if t == 'Float32':\n processed_fields.append(v)\n elif t.startswith('DateTime') and is_number(v):\n processed_fields.append(v)\n else:\n # adding ' for no number\n processed_fields.append(f\"'{v}'\")\n return processed_fields \n \n def init(self):\n sql_create_db = f'CREATE DATABASE IF NOT EXISTS {self.db}'\n r = self.query(sql_create_db)\n \n if self.create_table:\n #print(self.create_table_sql)\n r = self.query(self.create_table_sql)\n #print(r.text)\n \n def generate_schema(self):\n sql_create_table = f'CREATE TABLE IF NOT EXISTS {self.db}.{self.table} ( '\n fields = [ f'{field[\"name\"]} {self.get_db_type(field[\"type\"])}' for field in self.fields]\n self.fields_types = [ self.get_db_type(field[\"type\"]) for field in self.fields]\n sql_create_table = sql_create_table + ','.join(fields)\n # TODO: support differnt engine type\n sql_create_table = sql_create_table + ') ENGINE = MergeTree() PARTITION BY toYYYYMMDD(time) ORDER BY time'\n return sql_create_table\n \n def get_db_type(self, field): \n if field == \"date\" :\n return \"Date\"\n \n if field == \"datetime\" :\n return \"DateTime('UTC')\"\n \n if field == \"timestamp\" :\n return \"DateTime('UTC')\"\n \n if field == \"number\" :\n return \"Nullable(Float64)\"\n \n if field == \"string\" :\n return \"String\"\n \n return \"String\"\n \n def query(self, sql):\n return requests.post(self.url, data = sql)\n \n def clean(self): \n sql_drop_db = f'DROP DATABASE IF EXISTS {self.table}'\n r = self.query(sql_drop_db)\n print(r.text)\n \n def count(self):\n sql_count_table = f'SELECT COUNT(*) FROM {self.db}.{self.table}'\n r = self.query(sql_count_table)\n result = r.text\n print(result)\n return int(result)","repo_name":"gangtao/myth","sub_path":"src/myth/clickhouse.py","file_name":"clickhouse.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8614972974","text":"import numpy as np \nimport pandas as pd\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Input, Dense, Dropout, Conv2D, MaxPool2D, Flatten, Reshape, BatchNormalization\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom tensorflow.keras.optimizers import SGD, Adam\nfrom functools import partial\nimport time\n\n\n\nclass ann:\n def __init__(self, nb_of_outputs, weights, X_train, X_test, y_train, y_test, entropy, nb_of_epochs):\n self.nb_of_outputs = nb_of_outputs\n self.weights = weights\n self.X_train = X_train \n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.entropy = entropy\n self.nb_of_epochs = nb_of_epochs\n \n def modelTrain(self, nb_of_outputs, weights, X_train, X_test, y_train, y_test, entropy, nb_of_epochs, save_name):\n RegularizedDense = partial(keras.layers.Dense, activation=\"elu\", kernel_initializer=\"he_normal\", kernel_regularizer=keras.regularizers.l2(0.01))\n model = Sequential()\n model.add(Reshape(input_shape=(1,200,200),target_shape=(200,200,1)))\n\n model.add(Conv2D(128, 1, activation='sigmoid'))\n model.add(MaxPool2D(pool_size=(1,1)))\n model.add(BatchNormalization())\n\n model.add(Conv2D(64, 1, strides=(2,2), activation='sigmoid'))\n model.add(MaxPool2D(pool_size=(1,1)))\n model.add(BatchNormalization())\n\n model.add(Conv2D(32, 1, strides=(2,2), activation='sigmoid'))\n model.add(MaxPool2D(pool_size=(1,1)))\n model.add(BatchNormalization())\n\n model.add(Conv2D(16, 1, strides=(2,2), activation='sigmoid'))\n model.add(MaxPool2D(pool_size=(1,1)))\n model.add(BatchNormalization())\n\n model.add(Flatten())\n\n model.add(RegularizedDense(48, activation='sigmoid'))\n model.add(Dropout(0.3))\n model.add(BatchNormalization())\n\n model.add(RegularizedDense(64, activation='sigmoid'))\n model.add(Dropout(0.4))\n model.add(BatchNormalization())\n\n model.add(RegularizedDense(24, activation='sigmoid'))\n model.add(Dropout(0.3))\n model.add(BatchNormalization())\n\n model.add(Dense(nb_of_outputs, activation='softmax'))\n\n lr_schedule = keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=1e-2,decay_steps=10000,decay_rate=0.9)\n optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)\n\n model.compile(optimizer=optimizer, loss=entropy, metrics=['accuracy'])\n print(model.summary())\n tb = TensorBoard('./logs')\n checkpt = ModelCheckpoint(save_name,'val_accuracy',1,True)\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3, )\n\n time_to_train = time.time()\n hist = model.fit(X_train,y_train,epochs=nb_of_epochs,validation_data=(X_test, y_test), callbacks=[tb, checkpt, es], class_weight=weights)\n\n model = load_model(save_name)\n final_predicts = model.predict(X_test)\n time_diff = time.time()\n time_train = -time_to_train + time_diff\n\n exact_acc = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 0) / len(y_test)\n one_off = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 1) / len(y_test)\n two_off = np.count_nonzero(np.absolute((y_test.argmax(axis=-1)-final_predicts.argmax(axis=-1))) <= 2) / len(y_test)\n print(\"Training time {}\".format(time_train))\n print(\"Exact_acc = {}\".format(exact_acc))\n print(\"One off acc = {}\".format(one_off))\n print(\"Two off acc = {}\".format(two_off))\n return exact_acc, one_off, two_off, time_train, hist\n\n","repo_name":"plsakr/CVAgePredictionTool","sub_path":"MLBackend/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41865355331","text":"import unittest\nimport numpy as np\nimport pandas as pd\nfrom datetime import date\nfrom datetime import datetime\nfrom pandas_import import join_df\nfrom pandas_import import round_df\nfrom pandas_import import set_time_index\nfrom pandas_import import rename_columns\nfrom pandas_import import group_df\n\n\nclass TestTimeSeries(unittest.TestCase):\n\n def _test_join(self):\n left_df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6],\n [7, 8, 9]]),\n columns=[\"a\", \"b\", \"c\"])\n\n right_df = pd.DataFrame(np.array([[\"a\", \"b\", \"c\"], [\"d\", \"e\", \"f\"],\n [None, None, None]]),\n columns=[1, 2, 3])\n\n joined = pd.DataFrame(np.array([[1, 2, 3, \"a\", \"b\", \"c\"],\n [4, 5, 6, \"d\", \"e\", \"f\"],\n [7, 8, 9, 0, 0, 0]]),\n columns=[\"a\", \"b\", \"c\", 1, 2, 3])\n\n pd.testing.assert_frame_equal(join_df(left_df, right_df), joined)\n\n def test_round(self):\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n columns=[\"a\", \"b\", \"c\"],\n index=[datetime(2019, 1, 1, 16, 30),\n datetime(2019, 1, 2, 16, 32),\n datetime(2019, 1, 3, 16, 34)])\n\n trans = pd.DataFrame(np.array([[1, 2], [4, 5], [7, 8]]),\n columns=[\"a\", \"b\"],\n index=[datetime(2019, 1, 1, 16, 30),\n datetime(2019, 1, 2, 16, 30),\n datetime(2019, 1, 3, 16, 35)])\n\n pd.testing.assert_frame_equal(round_df(df, \"5min\", [\"a\", \"b\"]),\n trans)\n\n def _test_set_time(self):\n df = pd.DataFrame(np.array([[1, \"1/1/19\"],\n [2, \"1/2/19\"],\n [3, \"1/3/19\"]]),\n columns=[\"value\", \"time\"])\n\n trans = pd.DataFrame(np.array([[1], [2], [3]]),\n columns=[\"value\"],\n index=[date(2019, 1, 1),\n date(2019, 1, 2),\n date(2019, 1, 3)])\n\n pd.testing.assert_frame_equal(set_time_index(df, \"time\"), trans)\n\n def test_rename_cols(self):\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n columns=[\"a\", \"b\", \"c\"])\n file = \"test/test_.csv\"\n trans = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n columns=[\"test\", \"b\", \"c\"])\n pd.testing.assert_frame_equal(rename_columns(df, file, \"a\"), trans)\n\n def test_group(self):\n df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n columns=[\"a\", \"b\", \"c\"], index=[1, 1, 2])\n\n group = pd.DataFrame(np.array([[5, 7, 4.5], [7, 8, 9]]),\n columns=[\"a\", \"b\", \"c\"], index=[1, 2])\n\n pd.testing.assert_frame_equal(group_df(df, [\"a\", \"b\"], [\"c\"]),\n group, check_dtype=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cu-swe4s-fall-2019/time-series-basics-mchifala","sub_path":"test_time_series_pandas.py","file_name":"test_time_series_pandas.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15304327611","text":"from mpi4py import MPI\nimport openmdao.api as om\n\nfrom mphys import Multipoint\nfrom vlm_solver.mphys_vlm import VlmBuilder\nfrom mphys.scenario_aerodynamic import ScenarioAerodynamic\n\nclass Top(Multipoint):\n def setup(self):\n # VLM options\n mesh_file = 'wing_VLM.dat'\n\n mach0 = 0.85,\n mach1 = 0.80,\n aoa0 = 0.0\n aoa1 = 1.0\n q_inf = 3000.\n vel = 178.\n nu = 3.5E-5\n\n dvs = self.add_subsystem('dvs', om.IndepVarComp(), promotes=['*'])\n dvs.add_output('aoa0', val=aoa0, units='deg')\n dvs.add_output('aoa1', val=aoa1, units='deg')\n dvs.add_output('mach0', mach0)\n dvs.add_output('mach1', mach1)\n dvs.add_output('q_inf', q_inf)\n dvs.add_output('vel', vel)\n dvs.add_output('nu', nu)\n\n aero_builder = VlmBuilder(mesh_file)\n aero_builder.initialize(self.comm)\n\n self.add_subsystem('mesh',aero_builder.get_mesh_coordinate_subsystem())\n self.mphys_add_scenario('cruise',ScenarioAerodynamic(aero_builder=aero_builder))\n self.mphys_add_scenario('cruise_higher_aoa',ScenarioAerodynamic(aero_builder=aero_builder))\n\n for dv in ['q_inf', 'vel', 'nu']:\n self.connect(dv, f'cruise.{dv}')\n self.connect(dv, f'cruise_higher_aoa.{dv}')\n for dv in ['aoa', 'mach']:\n self.connect(f'{dv}0', f'cruise.{dv}')\n self.connect(f'{dv}1', f'cruise_higher_aoa.{dv}')\n\n self.connect('mesh.x_aero0',['cruise.x_aero','cruise_higher_aoa.x_aero'])\n\nprob = om.Problem()\nprob.model = Top()\nprob.setup()\n\nom.n2(prob, show_browser=False, outfile='vlm_aero_2cruises.html')\n\nprob.run_model()\nif MPI.COMM_WORLD.rank == 0:\n for scenario in ['cruise','cruise_higher_aoa']:\n print('%s: C_L = %f, C_D = %f' % (scenario, prob['%s.C_L'%scenario], prob['%s.C_D'%scenario]))\n","repo_name":"OpenMDAO/mphys","sub_path":"examples/aero_only/mach_tutorial_wing/vlm/run_vlm_2scenarios.py","file_name":"run_vlm_2scenarios.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"22"} +{"seq_id":"22020439849","text":"\n\nfrom flask import Flask, jsonify, request\nimport json\n\nfrom PIL import Image\n\n#declared an empty variable for reassignment\nresponse = ''\n\n#creating the instance of our flask application\napp = Flask(__name__)\n\n#route to entertain our post and get request from flutter app\n@app.route('/', methods = ['GET', 'POST'])\ndef nameRoute():\n\n #fetching the global response variable to manipulate inside the function\n global response\n\n #checking the request type we get from the app\n if(request.method == 'POST'):\n # file = request.files #getting the response data\n file = request.files\n print(file)\n \n image = Image.open(file[\"image\"]).resize((320, 320))\n print(image.size)\n image.show()\n\n # image = Image.open(file).resize((32, 32))\n # print(request_data['name'])\n # request_data = json.loads(request_data.decode('utf-8')) #converting it from json to key value pair\n # print(request_data)\n # name = request_data['name'] #assigning it to name\n # print(name)\n # response = f'Hi {name}! this is Python' #re-assigning response with the name we got from the user\n return \" \" #to avoid a type error \n else:\n print(response)\n return jsonify({'name' : response}) #sending data back to your frontend app\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)\n","repo_name":"Tarekbouamer/visloc_localization","sub_path":"loc/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71408317176","text":"from copy import copy\n\nimport discord\nfrom discord.ext import commands\n\n__version__ = \"1.0.0\"\n\nclass SudoCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(\n hidden = True\n )\n @commands.is_owner()\n async def sudo(self, ctx, victim: discord.Member, *, command):\n \"\"\"Take control.\"\"\"\n new_message = copy(ctx.message)\n new_message.author = victim\n new_message.content = ctx.prefix + command\n await self.bot.process_commands(new_message)\n\n\ndef setup(bot):\n bot.add_cog(SudoCog(bot))","repo_name":"DigiDuncan/DigiSudo","sub_path":"sudo.py","file_name":"sudo.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24485959353","text":"import os\nfrom typing import NamedTuple\n\nimport cv2\n\nfrom ..data_types.query_result import QueryResult\nfrom ..video_processor.stream.strongsort import TrackingResult\nfrom .get_object_list import MovableObject, get_object_list\n\nTEXT_PADDING = 5\n\n\ndef save_video_util(\n objects: \"dict[str, list[QueryResult]]\",\n trackings: \"dict[str, list[list[TrackingResult]]]\",\n outputDir: \"str\",\n addBoundingBoxes: \"bool\" = False,\n) -> \"list[tuple[str, int]]\":\n objList = get_object_list(objects=objects, trackings=trackings)\n camera_to_video, video_to_camera = _get_video_names(objects=objects)\n bboxes = _get_bboxes(objList=objList, cameraVideoNames=camera_to_video)\n\n result: \"list[tuple[str, int]]\" = []\n\n if not os.path.exists(outputDir):\n os.makedirs(outputDir)\n\n for videoname, frame_tracking in bboxes.items():\n cameraId = video_to_camera[videoname]\n output_file = os.path.join(outputDir, cameraId + \"-result.mp4\")\n\n cap = cv2.VideoCapture(videoname)\n assert cap.isOpened(), f\"Cannot read video file: {videoname}\"\n\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(\n output_file, cv2.VideoWriter_fourcc(*\"mp4v\"), 1, (width, height)\n )\n\n frame_cnt = 0\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n\n if frame_cnt in frame_tracking:\n if addBoundingBoxes:\n for bbox in frame_tracking.get(frame_cnt, []):\n object_id, object_type, bbox_left, bbox_top, bbox_w, bbox_h = bbox\n x1, y1 = bbox_left, bbox_top\n x2, y2 = bbox_left + bbox_w, bbox_top + bbox_h\n x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))\n\n bboxColor = 255, 255, 0\n\n # Place Bounding Box\n frame = cv2.rectangle(frame, (x1, y1), (x2, y2), bboxColor, 2)\n\n # Place Label Background\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 1\n fontThickness = 2\n label = f\"{object_type}:{object_id}\"\n labelSize, _ = cv2.getTextSize(label, font, fontScale, fontThickness)\n labelW, labelH = labelSize\n\n frame = cv2.rectangle(\n frame,\n (x1, y1 - labelH - 2 * TEXT_PADDING),\n (x1 + labelW + 2 * TEXT_PADDING, y1),\n bboxColor,\n cv2.FILLED,\n )\n\n # Place Label\n frame = cv2.putText(\n frame,\n label,\n (x1 + TEXT_PADDING, y1 - TEXT_PADDING),\n font,\n fontScale,\n (255, 255, 255),\n fontThickness,\n cv2.LINE_AA,\n )\n vid_writer.write(frame)\n result.append((videoname, frame_cnt))\n\n frame_cnt += 1\n\n vid_writer.release()\n\n return result\n\n\nclass BboxWithIdAndType(NamedTuple):\n id: \"int\"\n type: \"str\"\n left: \"float\"\n top: \"float\"\n width: \"float\"\n height: \"float\"\n\n\ndef _get_bboxes(objList: \"list[MovableObject]\", cameraVideoNames: \"dict[str, str]\"):\n \"\"\"\n Indexes objects based on frame ID\n \"\"\"\n result: \"dict[str, dict[int, list[BboxWithIdAndType]]]\" = {}\n for obj in objList:\n videoName = cameraVideoNames[obj.camera_id]\n for frameId, bbox in zip(obj.frame_ids, obj.bboxes):\n if videoName not in result:\n result[videoName] = {}\n if frameId not in result[videoName]:\n result[videoName][frameId] = []\n result[videoName][frameId].append(BboxWithIdAndType(obj.id, obj.type, *bbox))\n\n return result\n\n\ndef _get_video_names(objects: \"dict[str, list[QueryResult]]\"):\n \"\"\"\n Returns mappings from videoName to cameraId and vice versa\n \"\"\"\n camera_to_video: \"dict[str, str]\" = {}\n video_to_camera: \"dict[str, str]\" = {}\n for video, obj in filter(lambda x: len(x[1]) > 0, objects.items()):\n _, cameraId, _, _ = obj[0]\n camera_to_video[cameraId] = video\n video_to_camera[video] = cameraId\n return camera_to_video, video_to_camera\n","repo_name":"apperception-db/spatialyze","sub_path":"spatialyze/utils/save_video_util.py","file_name":"save_video_util.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"30723402316","text":"from controller import Supervisor, Emitter\n\n# create the Robot instance.\nsupervisor = Supervisor()\n\n# get the time step of the current world.\ntimestep = int(supervisor.getBasicTimeStep())\nprint(\"Starting Supervisor...\")\n\nepisode_step = 0\n\nrobot = supervisor.getFromDef('NAO')\ntranslation_field = robot.getField('translation')\n# rotation_field = robot.getField('rotation')\ncustom_data = robot.getField('customData')\n\ninitial_translation = [0, 0.35, 0]\ninitial_rotation = [-1, 0, 0, 1.57]\n\nreset_robot = False\n\n# Main loop:\n# - perform simulation steps until Webots is stopping the controller\nwhile supervisor.step(timestep) != -1: \n episode_step += 1\n\n robot_position = translation_field.getSFVec3f()\n\n y_position = robot_position[1]\n\n if reset_robot:\n supervisor.step(timestep)\n supervisor.simulationReset()\n #translation_field.setSFVec3f(initial_translation)\n #rotation_field.setSFRotation(initial_rotation)\n \n if y_position < 0.29 or episode_step > 512:\n custom_data.setSFString('reset')\n reset_robot = True\n","repo_name":"paddepadde/nao_walking_ppo","sub_path":"controllers/ppo_supervisor/ppo_supervisor.py","file_name":"ppo_supervisor.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"33528466980","text":"from django.core.management.base import BaseCommand\n\nfrom recipe_db.models import RecipeHop\n\n\nclass Command(BaseCommand):\n help = \"Unset mapping for hop\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"hop\", help=\"Id of the hop to remove\")\n\n def handle(self, *args, **options):\n hop_id = options[\"hop\"]\n query = RecipeHop.objects.filter(kind_id=hop_id)\n self.stdout.write(\"Unsetting %s hops\" % query.count())\n query.update(kind_id=None)\n self.stdout.write(\"Done\")\n","repo_name":"scheb/beer-analytics","sub_path":"recipe_db/management/commands/unset_hop_mapping.py","file_name":"unset_hop_mapping.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"22"} +{"seq_id":"41429282207","text":"\"\"\"\nDelete duplicate-value nodes from a sorted linked list\n\nSample Input\n\n1\n5\n1\n2\n2\n3\n4\nSample Output\n\n1 2 3 4\nExplanation\n\nThe initial linked list is: 1 -> 2 -> 2 -> 3 -> 4 -> NULL\n\nThe final linked list is: 1 -> 2 -> 3 -> 4 -> NULL\n\"\"\"\n\n\n# Complete the removeDuplicates function below.\n\n#\n# For your reference:\n#\n# SinglyLinkedListNode:\n# int data\n# SinglyLinkedListNode next\n#\n#\ndef removeDuplicates(head):\n temp = head\n if temp is None:\n return\n while temp.next:\n if temp.data == temp.next.data:\n temp.next = temp.next.next\n\n else:\n temp = temp.next\n return head\n\n\n\n\n","repo_name":"anurag2050doit/DSA","sub_path":"Data Structures/Link List/Duplicate_nodes_sorted_ll.py","file_name":"Duplicate_nodes_sorted_ll.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42526829103","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\nimport h5py\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmode = sys.argv[1]\ninputfile = sys.argv[2]\noutputfile = sys.argv[3]\n\nname = [\"isoE\", \"QPI\"][int(mode)]\n# 按输入参数选取名称。\n\nwith h5py.File(inputfile, \"r\") as ipt:\n\tdos = np.array(ipt[name][()])\n\nfig, ax = plt.subplots(figsize=(10,10))\nim = ax.imshow(dos)\nax.set_xlabel('x label')\nax.set_ylabel('y label')\nax.set_title(name)\nfig.colorbar(im)\nplt.savefig(outputfile)\n# 作图并储存。\n","repo_name":"htw18/7b7nb75tbt677rt","sub_path":"gimage.py","file_name":"gimage.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23682558572","text":"#!/usr/bin/env python3\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\nimport zipfile\nfrom pathlib import Path\n\nlogger = logging.getLogger('create-zipfile')\n\nDISALLOWED_FILENAME_PATTERNS = list(map(re.compile, [\n\tr'^\\.git(hub|ignore|attributes|keep)$',\n\tr'^\\.(appveyor|travis)\\.yml$',\n\tr'^\\.editorconfig$',\n\tr'(?i)^changelog',\n\tr'(?i)^contributing',\n\tr'(?i)^upgrading',\n\tr'(?i)^copying',\n\tr'(?i)^index',\n\tr'(?i)^readme',\n\tr'(?i)^licen[cs]e',\n\tr'(?i)^version',\n\tr'^phpunit',\n\tr'^l?gpl\\.txt$',\n\tr'^composer\\.(json|lock)$',\n\tr'^package(-lock)?\\.json$',\n\tr'^yarn\\.lock$',\n\tr'^Makefile$',\n\tr'^build\\.xml$',\n\tr'^phpcs-ruleset\\.xml$',\n\tr'^\\.php_cs$',\n\tr'^phpmd\\.xml$',\n]))\n\nDISALLOWED_DEST_PATTERNS = list(map(re.compile, [\n\tr'^vendor/composer/installed\\.json$',\n\tr'^vendor/moneyphp/money/resources/logo.png$',\n\tr'(?i)^vendor/[^/]+/[^/]+/\\.?(test|doc|example|spec)s?',\n\tr'^vendor/[^/]+/[^/]+/\\.git((hub)?/|$)',\n]))\n\ndef is_not_unimportant(dest: Path) -> bool:\n\tfilename = dest.name\n\n\tfilename_disallowed = any([expr.match(filename) for expr in DISALLOWED_FILENAME_PATTERNS])\n\n\tdest_disallowed = any([expr.match(str(dest)) for expr in DISALLOWED_DEST_PATTERNS])\n\n\tallowed = not (filename_disallowed or dest_disallowed)\n\n\treturn allowed\n\nclass ZipFile(zipfile.ZipFile):\n\tdef directory(self, name, allowed=None):\n\t\tif allowed is None:\n\t\t\tallowed = lambda item: True\n\n\t\tfor root, dirs, files in os.walk(name):\n\t\t\troot = Path(root)\n\n\t\t\tfor directory in dirs:\n\t\t\t\tdirectory = Path(directory)\n\t\t\t\tpath = root / directory\n\n\t\t\t\tif allowed(path):\n\t\t\t\t\t# Directories are empty files whose path ends with a slash.\n\t\t\t\t\t# https://mail.python.org/pipermail/python-list/2003-June/205859.html\n\t\t\t\t\tself.writestr(str(self.prefix / path) + '/', '')\n\n\t\t\tfor file in files:\n\t\t\t\tpath = root / file\n\n\t\t\t\tif allowed(path):\n\t\t\t\t\tself.write(path, self.prefix / path)\n\tdef file(self, name):\n\t\tself.write(name, self.prefix / name)\n\ndef main():\n\tsource_dir = Path.cwd()\n\twith tempfile.TemporaryDirectory(prefix='entries-dist-') as temp_dir:\n\t\tdirty = subprocess.run(['git','-C', source_dir, 'diff-index', '--quiet', 'HEAD']).returncode == 1\n\t\tif dirty:\n\t\t\tlogger.warning('Repository contains uncommitted changes that will not be included in the dist archive.')\n\n\t\tlogger.info('Cloning the repository into a temporary directory…')\n\t\tsubprocess.check_call(['git', 'clone', '--shared', source_dir, temp_dir])\n\n\t\tos.chdir(temp_dir)\n\n\t\tif len(sys.argv) >= 2:\n\t\t\tfilename = sys.argv[1]\n\t\telse:\n\t\t\tshort_commit = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], encoding='utf-8').strip()\n\t\t\tfilename = f'entries-{short_commit}.zip'\n\n\t\tlogger.info('Installing frontend dependencies…')\n\t\tsubprocess.check_call(['npm', 'install'])\n\n\t\tlogger.info('Building asset bundles…')\n\t\tsubprocess.check_call(['npm', 'run', 'build'])\n\n\t\tlogger.info('Installing and optimizing backend dependencies…')\n\t\tsubprocess.check_call(['composer', 'install', '--no-dev', '--optimize-autoloader'])\n\n\t\t# fill archive with data\n\t\twith ZipFile(source_dir / filename, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\t\tarchive.prefix = Path('entries')\n\n\t\t\t# Assets are copied by Webpack to www/dist/.\n\t\t\tarchive.directory('www/', lambda file: not file.match('www/assets'))\n\n\t\t\tarchive.directory('app/')\n\t\t\tarchive.directory('log/')\n\t\t\tarchive.directory('temp/')\n\t\t\tarchive.directory('vendor/', is_not_unimportant)\n\n\t\t\tarchive.file('.htaccess')\n\t\t\tarchive.file('install.sql')\n\t\t\tarchive.file('README.md')\n\n\t\t\tlogger.info('Zipball ‘{}’ has been successfully generated.'.format(filename))\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"jtojnar/entries","sub_path":".github/workflows/create-zipball.py","file_name":"create-zipball.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"41335285619","text":"'''\nnumbers = {\n \"0\":[(0,0), (1,0), (1,-2), (0,-2), (0,0)],\n \"1\":[(1,0), (1,-2)],\n \"2\":[(0,0), (1,0), (1,-1), (0,-1), (0,-2), (1,-2)],\n \"3\":[(0,0), (1,0), (1,-1), (0,-1), (1,-1), (1,-2), (0,-2)]\n}\n'''\n\nimport os\nfrom shutil import copyfile\n\ndef loadChar(char):\n with open(f\"digits/{i}.txt\",\"r\") as f:\n return list(map(lambda i:list(map(float,i.split())),f.read().split(\"\\n\")))\n\nnumbers = {}\nfor i in [0,1,2,3,4,5,6,7,8,9,\"+\",\"-\",\"=\",\"x\",\"y\",\"^\"]:\n numbers[str(i)] = loadChar(i)\n\ncharWidth = 10\nletterSpacing = 0.5\n\ndef convGcode(items):\n pass\n\ndef writeChar(path, x, y):\n res = []\n\n # move to first point\n res.append(f\"G0 X{path[0][0]*charWidth+x} Y{path[0][1]*-1*charWidth+y}\")\n\n # move z-axis downward\n res.append(\"G0 Z0\")\n\n # draw path\n for point in path:\n res.append(f\"G1 X{point[0]*charWidth+x} Y{point[1]*-1*charWidth+y}\")\n \n # move z-axis upward\n res.append(\"G0 Z15\")\n\n # return final\n return \"\\n\".join(res)\n\ndef writeChars(s, x, y):\n res = \"\"\n \n # absolute positioning\n # res += \"G90\\n\"\n\n # write each character\n for i, char in enumerate(s):\n res += writeChar(numbers[char], x+i*(charWidth+letterSpacing), y) + \"\\n\"\n return res\n\ndef main(chars):\n open(\"res.gcode\",\"w\").write(writeChars(chars, 0, 0))\n os.system(\".\\\\gpx.exe -m r2x res.gcode\")\n\n # Write to SD\n copyfile(\"res.x3g\", \"D:\\\\res.x3g\")","repo_name":"knosmos/tlds","sub_path":"convGcode.py","file_name":"convGcode.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73932982776","text":"import requests\r\nimport datetime\r\n\r\nfrom aiogram.dispatcher import Dispatcher\r\nfrom aiogram import Bot, types\r\nfrom aiogram.utils import executor\r\nimport telebot\r\n\r\n\r\napiKey = \"0c9aa0cd1afbf05191a808e0dbfd7f21\"\r\ntg_bot_token = \"6282754646:AAEjcO8bEdYOeQwjH2JiQUnc7H3k7TEIdVM\"\r\n\r\n\r\nbot = Bot(token=tg_bot_token)\r\ndp = Dispatcher(bot)\r\n@dp.message_handler(commands=[\"start\"])\r\nasync def start_command(message: types.Message):\r\n await message.reply(\"Привіт! Напиши назву міста і отримаєш прогноз погоди\")\r\n\r\n\r\n\r\n@dp.message_handler()\r\nasync def get_weather(message: types.Message):\r\n code_to_smile = {\r\n \"Clear\": \"Ясно \\U00002600\",\r\n \"Clouds\": \"Хмарно \\U00002601\",\r\n \"Rain\": \"Дощ \\U00002614\",\r\n \"Drizzle\": \"Дощ \\U00002600\",\r\n \"Thunderstorn\": \"Гроза \\U000026A1\",\r\n \"Show\": \"Сніг \\U0001F328\",\r\n \"Mist\": \"Туман \\U0001F328\"\r\n }\r\n\r\n try:\r\n r = requests.get(f\"https://api.openweathermap.org/data/2.5/weather?q={message.text}&appid={apiKey}&units=metric\")\r\n data = r.json()\r\n # pprint(data)\r\n city = data[\"name\"]\r\n cur_weather = data[\"main\"][\"temp\"]\r\n weather_description = data[\"weather\"][0][\"main\"]\r\n if weather_description in code_to_smile:\r\n wd = code_to_smile[weather_description]\r\n else:\r\n wd = \"Вигляни в вікно!\"\r\n\r\n cur_humidity = data[\"main\"][\"humidity\"]\r\n pressure = data[\"main\"][\"pressure\"]\r\n wind = data[\"wind\"][\"speed\"]\r\n sunrice_timestamp = datetime.datetime.fromtimestamp(data[\"sys\"][\"sunrise\"])\r\n sunset_timestamp = datetime.datetime.fromtimestamp(data[\"sys\"][\"sunset\"])\r\n length_of_the_day = datetime.datetime.fromtimestamp(data[\"sys\"][\"sunset\"]) - datetime.datetime.fromtimestamp(\r\n data[\"sys\"][\"sunrise\"])\r\n\r\n await message.reply(f\"***{datetime.datetime.now().strftime('%Y -%m-%d %H:%M')}***\\n\"\r\n f\"Погода в місті: {city}\\nТемпература: {cur_weather}C°{wd}\\n\"\r\n f\"Вологість: {cur_humidity}%\\nТиск:{pressure}\\nВітер:{wind}м/с\\n\"\r\n f\"Схід сонця:{sunrice_timestamp}\\nЗахід сонця:{sunset_timestamp}\\nТривалість дня:{length_of_the_day}\\n\")\r\n\r\n except:\r\n await message.reply(\"\\U00002620 Перевірте назву міста \\U00002620\")\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp)\r\n","repo_name":"oksanaSuriak/weather_bot_Telegram","sub_path":"main_tg_bot.py","file_name":"main_tg_bot.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11696497003","text":"import os\nfrom random import shuffle\nfrom tkinter import Button, Label, Toplevel\nfrom win_lose import WinWindow, LoseWindow\n\n\nclass CellField:\n def __init__(self, window, width, height, nr_bombs, top_w):\n self.top_wind = top_w\n # list under cells\n self.list_uc = []\n self.width = width\n self.height = height\n self.nr_bombs = nr_bombs\n global nr_cells_need_to_open\n nr_cells_need_to_open = width * height - int(width * height / 6)\n\n # Create under field\n map_bombs = self.mapbombs()\n map_bombs = self.put_borders(map_bombs)\n map_bombs = self.put_numbers(map_bombs)\n self.list_uc = self.create_under_cell_field(window, map_bombs)\n self.erase_border(self.list_uc)\n\n # Create cover for field\n self.list_oc = []\n self.cover_field(window)\n self.erase_border(self.list_oc)\n\n # all over cells will now about the list of all cells in the field and width of field\n for i in range(len(self.list_oc)):\n self.list_oc[i].get_info(list_ocf=self.list_oc, list_ucf=self.list_uc, width_f=self.width,\n top_w=self.top_wind)\n\n def mapbombs(self):\n # create a my_list of values like empty() or bomb(*)\n map1 = [\"*\"] * self.nr_bombs\n map2 = [' '] * int(self.width * self.height - self.nr_bombs)\n m = map1 + map2\n # shake the list map\n shuffle(m)\n\n return m\n\n def put_borders(self, my_map):\n m = my_map.copy()\n new_m = []\n for i in range(self.height):\n new_m.append(m[0:self.width])\n del m[0:self.width]\n\n for i in new_m:\n i.append('-')\n i.insert(0, '-')\n\n border = ['-'] * (self.width + 2)\n new_m.append(border)\n new_m.insert(0, border)\n\n for i in new_m:\n for j in i:\n m.append(j)\n\n for i in new_m:\n print(i)\n\n return m\n\n def put_numbers(self, my_map):\n m = my_map.copy()\n for i in range(len(m)):\n m[i] = self.count_bombs(m, i)\n\n return m\n\n def count_bombs(self, m, i):\n if m[i] == '-':\n return '-'\n if m[i] == '*':\n return '*'\n\n nr_bombs = 0\n\n if m[i - self.width - 3] == '*':\n nr_bombs += 1\n if m[i - self.width - 2] == '*':\n nr_bombs += 1\n if m[i - self.width - 1] == '*':\n nr_bombs += 1\n\n if m[i + 1] == '*':\n nr_bombs += 1\n if m[i - 1] == '*':\n nr_bombs += 1\n\n if m[i + self.width + 1] == '*':\n nr_bombs += 1\n if m[i + self.width + 2] == '*':\n nr_bombs += 1\n if m[i + self.width + 3] == '*':\n nr_bombs += 1\n if nr_bombs != 0:\n return str(nr_bombs)\n else:\n return ''\n\n def create_under_cell_field(self, window, map_bombs):\n give_color = {\n \"*\": 'black',\n '1': 'blue',\n '2': 'green',\n '3': 'red',\n '4': 'brown',\n '5': 'brown',\n '6': 'brown',\n '7': 'brown',\n '8': 'brown',\n '-': 'brown',\n '': 'brown'\n }\n ucf = [] # ucf under_cell_field the list of under_cells\n for i in range((self.width + 2) * (self.height + 2)):\n ucf.append(UnderCell(window, text=map_bombs[i], order=i, ucell_color=give_color[map_bombs[i]],\n ucell_bg_color='grey85'))\n if ucf[i].text == '':\n ucf[i].l1.config(bg='white')\n\n # Arrange the under_cells in the field\n ucf_copy = ucf.copy()\n for i in range(self.height + 2):\n for j in range(self.width + 2):\n ucf_copy[0].mygrid(i + 1, j)\n ucf_copy.pop(0)\n\n return ucf\n\n def erase_border(self, l_under_cells):\n for i in range(0, (self.width + 2) * (self.height + 2), self.width + 2):\n l_under_cells[i].erase()\n for i in range(self.width + 1, (self.width + 1) * (self.height + 3), self.width + 2):\n l_under_cells[i].erase()\n for i in range(0, self.width + 1):\n l_under_cells[i].erase()\n l_under_cells.reverse()\n for i in range(0, self.width + 1):\n l_under_cells[i].erase()\n l_under_cells.reverse()\n\n def cover_field(self, window):\n for i in range((self.width + 2) * (self.height + 2)):\n btn = OverCell(window, text=i, order=i, ocell_bg_color='white')\n self.list_oc.append(btn)\n\n ocf_copy = self.list_oc.copy()\n for i in range(self.height + 2):\n for j in range(self.width + 2):\n ocf_copy[0].mygrid(i + 1, j)\n ocf_copy.pop(0)\n\n\nclass OverCell:\n def __init__(self, window, text=0, order=0, ocell_color='grey99', ocell_bg_color='grey95',\n command=open):\n self.w = window\n self.top_wind = None\n self.width_field = None\n self.height_field = None\n self.list_uc = None\n self.list_oc = None\n self.is_open = False\n self.is_mark = False\n self.order = order\n self.text = text\n self.b1 = Button(window, text=text, bg=ocell_bg_color, fg=ocell_color, width=3, command=self.open)\n self.b1.bind(\"\", lambda e, i=self.order: self.mark_cell(e, i))\n\n def get_info(self, list_ocf, list_ucf, width_f, top_w):\n self.list_uc = list_ucf\n self.list_oc = list_ocf\n self.width_field = width_f\n self.top_wind = top_w\n\n def erase(self):\n self.b1.grid_forget()\n\n def mygrid(self, row, column):\n self.b1.grid(row=row, column=column)\n\n def get_order(self):\n return self.order\n\n def open(self):\n if not self.top_wind.is_started:\n self.top_wind.is_started = True\n self.top_wind.start()\n\n self.is_open = True\n global nr_cells_need_to_open\n nr_cells_need_to_open -= 1\n self.b1.grid_forget()\n if nr_cells_need_to_open == 0:\n print('you win')\n top_w = Toplevel(self.w)\n win_window = WinWindow(top_w, self.restart_game, self.exit)\n win_window.window.eval('tk::PlaceWindow . right')\n\n if self.list_uc[self.order].text == '':\n self.open_around(self.order)\n if self.list_uc[self.order].text == '*':\n self.explode()\n print('you lose')\n top_w = Toplevel(self.w)\n lose_window = LoseWindow(top_w, self.restart_game, self.exit)\n lose_window.window.eval('tk::PlaceWindow . right')\n\n def exit(self):\n print('exit')\n self.w.destroy()\n\n def restart_game(self, window):\n print('restart game')\n self.w.destroy()\n os.startfile(\"main.pyw\")\n\n def open_around(self, order):\n if self.check_cell(order - self.width_field - 3):\n self.list_oc[order - self.width_field - 3].open()\n if self.check_cell(order - self.width_field - 2):\n self.list_oc[order - self.width_field - 2].open()\n if self.check_cell(order - self.width_field - 1):\n self.list_oc[order - self.width_field - 1].open()\n\n if self.check_cell(order - 1):\n self.list_oc[order - 1].open()\n if self.check_cell(order + 1):\n self.list_oc[order + 1].open()\n\n if self.check_cell(order + self.width_field + 3):\n self.list_oc[order + self.width_field + 3].open()\n if self.check_cell(order + self.width_field + 2):\n self.list_oc[order + self.width_field + 2].open()\n if self.check_cell(order + self.width_field + 1):\n self.list_oc[order + self.width_field + 1].open()\n\n def check_cell(self, order):\n if self.list_uc[order].text not in ('*', '-') and not self.list_oc[order].is_open:\n return True\n return False\n\n def explode(self):\n self.top_wind.start()\n for i in self.list_uc:\n if i.text == '*':\n self.list_oc[i.order].b1.grid_forget()\n self.list_uc[i.order].l1.config(bg='red')\n\n def mark_cell(self, event, order):\n if not self.list_oc[order].is_mark:\n self.list_oc[order].b1.config(text=\"X\", fg='black')\n self.is_mark = True\n self.top_wind.minus_bomb()\n else:\n self.list_oc[order].b1.config(text=order, fg='grey99')\n self.is_mark = False\n self.top_wind.plus_bomb()\n\n\nclass UnderCell:\n def __init__(self, window, text='', order=0, ucell_color='grey95', ucell_bg_color='grey95'):\n self.order = order\n self.text = text\n\n self.l1 = Label(window, text=text, bg=ucell_bg_color, fg=ucell_color, width=3)\n\n def mygrid(self, row, column):\n self.l1.grid(row=row, column=column)\n\n def erase(self):\n self.l1.grid_forget()\n\n def get_order(self):\n return self.order\n","repo_name":"feudal/sapior02","sub_path":"cell_field.py","file_name":"cell_field.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"17771256753","text":"import numpy as np\nimport pandas as pd\nimport copy\nimport json\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom category_encoders import CatBoostEncoder\nfrom scipy.spatial.distance import jensenshannon\nfrom scipy.stats import gaussian_kde, ks_2samp, chisquare, wasserstein_distance\nfrom scipy.special import rel_entr\nfrom sklearn.model_selection import RandomizedSearchCV, train_test_split\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.metrics import (r2_score, mean_absolute_error, precision_score,\n recall_score, accuracy_score, f1_score,\n roc_auc_score)\nfrom sklearn.utils import shuffle\n\nimport logging\nlogger = logging.getLogger()\n\n\nclass DataDriftDetector:\n \"\"\"Compare differences between 2 datasets\n DataDriftDetector creates useful methods to compare 2 datasets,\n created to allow ease of measuring the fidelity between 2 datasets.\n \n Methods\n ----\n calculate_drift:\n Calculates the distribution distance for each column between the\n datasets\n plot_numeric_to_numeric:\n Creates a pairplot between the 2 datasets\n plot_categorical_to_numeric:\n Creates a pairgrid violin plot between the 2 datasets\n plot_categorical:\n Creates a proportion histogram between the 2 datasets for categorical\n columns\n compare_ml_efficacy:\n Compares the ML efficacy of a model built between the 2 datasets\n Args\n ----\n df_prior: \n Pandas dataframe of the prior dataset. In practice, this would be the\n dataset used to train a live model\n df_post: \n Pandas dataframe of the post dataset. In practice, this would be the\n current dataset that's flowing into a live model\n categorical_columns: \n A list of categorical columns in the dataset, will be determined by\n column types if not provided\n numeric_columns: \n A list of numeric columns in the dataset, will be determined by\n column types if not provided\n \"\"\"\n def __init__(self,\n df_prior,\n df_post,\n categorical_columns=None,\n numeric_columns=None):\n assert isinstance(df_prior, pd.DataFrame),\\\n \"df_prior should be a pandas dataframe\"\n assert isinstance(df_post, pd.DataFrame),\\\n \"df_post should be a pandas dataframe\"\n assert sorted(df_prior.columns) == sorted(df_post.columns),\\\n \"df_prior and df_post should have the same column names\"\n assert all(df_prior.dtypes.sort_index() == df_post.dtypes.sort_index()),\\\n \"df_prior and df_post should have the same column types\"\n assert isinstance(categorical_columns, (list, type(None))),\\\n \"categorical_columns should be of type list\"\n assert isinstance(numeric_columns, (list, type(None))),\\\n \"numeric_columns should be of type list\"\n\n df_prior_ = df_prior.copy()\n df_post_ = df_post.copy()\n\n if categorical_columns is None:\n categorical_columns = (\n [c for c in df_prior_.columns if\n df_prior_.dtypes[c] == 'object']\n )\n logger.info(\n \"Identified categorical column(s): {}\".format(\n \", \".join(categorical_columns))\n )\n\n df_prior_[categorical_columns] = (\n df_prior_[categorical_columns].astype(str)\n )\n df_post_[categorical_columns] = (\n df_post_[categorical_columns].astype(str)\n )\n\n if numeric_columns is None:\n num_types = ['float64','float32','int32','int64','uint8']\n numeric_columns = (\n [c for c in df_prior_.columns if\n df_prior_.dtypes[c] in num_types]\n )\n logger.info(\"Identified numeric column(s): {}\".format(\n \", \".join(numeric_columns))\n )\n\n df_prior_[numeric_columns] = df_prior_[numeric_columns].astype(float)\n df_post_[numeric_columns] = df_post_[numeric_columns].astype(float)\n\n self.categorical_columns = categorical_columns\n self.numeric_columns = numeric_columns\n\n self.df_prior = df_prior_\n self.df_post = df_post_[df_prior_.columns]\n\n\n def calculate_drift(self, steps=100):\n \"\"\"Calculates metrics and test of similarity between the 2 datasets\n For categorical columns, the probability of each category will be\n computed separately for `df_prior` and `df_post`, and the distance \n between the 2 probability arrays will be computed. For\n numeric columns, the values will first be fitted into a gaussian KDE\n separately for `df_prior` and `df_post`, and a probability array\n will be sampled from them\n \n Args\n ----\n steps: int\n Number of steps to take to sample for the fitted KDE for numeric\n columns\n\n Returns\n ----\n Dictionary of results\n \"\"\"\n cat_res = {}\n num_res = {}\n\n for col in self.categorical_columns:\n # to ensure similar order, concat before computing probability\n col_prior = self.df_prior[col].to_frame()\n col_post = self.df_post[col].to_frame()\n col_prior['_source'] = 'prior'\n col_post['_source'] = 'post'\n\n col_ = pd.concat([col_prior, col_post], ignore_index=True)\n\n # aggregate and convert to probability array\n arr = (col_.groupby([col, '_source'])\n .size()\n .to_frame()\n .reset_index()\n .pivot(index=col, columns='_source')\n .droplevel(0, axis=1)\n .pipe(lambda df: df.loc[df.sum(axis=1).sort_values(ascending=False).index, :])\n )\n\n arr_ = arr.div(arr.sum(axis=0),axis=1)\n arr_.fillna(0, inplace=True)\n\n # calculate statistical distances\n kl_post_prior = sum(\n rel_entr(arr_['post'].to_numpy(), arr_['prior'].to_numpy())\n )\n kl_prior_post = sum(\n rel_entr(arr_['prior'].to_numpy(), arr_['post'].to_numpy())\n )\n\n jsd = jensenshannon(arr_['prior'].to_numpy(),\n arr_['post'].to_numpy())\n wd = wasserstein_distance(arr_['prior'].to_numpy(),\n arr_['post'].to_numpy())\n\n arr['post'] = arr['post'] / arr['post'].sum() * arr['prior'].sum()\n arr.fillna(0, inplace=True)\n \n # calculate test of similarity\n cs_test = chisquare(arr['post'].to_numpy(),\n arr['prior'].to_numpy())\n\n cat_res.update({\n col: {\n 'chi_square_test_statistic': cs_test[0],\n 'chi_square_test_p_value': cs_test[1],\n 'kl_divergence_post_given_prior': kl_post_prior,\n 'kl_divergence_prior_given_post': kl_prior_post,\n 'jensen_shannon_distance': jsd,\n 'wasserstein_distance': wd\n \n }\n })\n\n for col in self.numeric_columns:\n # fit gaussian_kde\n col_prior = self.df_prior[col].dropna()\n col_post = self.df_post[col].dropna()\n kde_prior = gaussian_kde(col_prior)\n kde_post = gaussian_kde(col_post)\n\n # get range of values\n min_ = min(col_prior.min(), col_post.min())\n max_ = max(col_prior.max(), col_post.max())\n range_ = np.linspace(start=min_, stop=max_, num=steps)\n\n # sample range from KDE\n arr_prior_ = kde_prior.evaluate(range_)\n arr_post_ = kde_post.evaluate(range_)\n\n arr_prior = arr_prior_ / np.sum(arr_prior_)\n arr_post = arr_post_ / np.sum(arr_post_)\n\n # calculate statistical distances\n jsd = jensenshannon(arr_prior, arr_post)\n wd = wasserstein_distance(arr_prior, arr_post)\n \n # calculate test of similarity\n ks_test = ks_2samp(arr_prior, arr_post)\n\n num_res.update({\n col: {\n 'ks_2sample_test_statistic': ks_test[0],\n 'ks_2sample_test_p_value': ks_test[1],\n 'jensen_shannon_distance': jsd,\n 'wasserstein_distance': wd\n \n }\n })\n\n return {'categorical': dict(cat_res),\n 'numerical': dict(num_res)}\n\n\n def plot_categorical_to_numeric(self,\n plot_categorical_columns=None,\n plot_numeric_columns=None,\n categorical_on_y_axis=True,\n grid_kws={'height':5},\n plot_kws={}):\n \"\"\"Plots charts to compare categorical to numeric columns pairwise.\n Plots a pairgrid violin plot of categorical columns to numeric\n columns, split and colored by the source of datasets\n \n Args\n ----\n plot_categorical_columns: \n List of categorical columns to plot, uses all if no specified\n plot_numeric_columns: \n List of numeric columns to plot, uses all if not specified\n categorical_on_y_axis: \n Determines layout of resulting image - if True, categorical\n columns will be on the y axis\n grid_kws: \n arguments to pass into the pair grid plot\n plot_kws: \n Arguments to pass into the violin plot\n Returns\n ----\n Resulting plot\n \"\"\"\n assert isinstance(plot_categorical_columns, (list, type(None))),\\\n \"plot_categorical_columns should be of type list\"\n assert isinstance(plot_numeric_columns, (list, type(None))),\\\n \"plot_numeric_columns should be of type list\"\n assert isinstance(categorical_on_y_axis, bool),\\\n \"categorical_on_y_axis should be a boolean value\"\n\n df_prior = self.df_prior.copy()\n df_post = self.df_post.copy()\n\n col_nunique = df_prior.nunique()\n\n if plot_categorical_columns is None:\n plot_categorical_columns = (\n [col for col in col_nunique.index if\n (col_nunique[col] <= 20) & (col in self.categorical_columns)]\n )\n\n if plot_numeric_columns is None:\n plot_numeric_columns = self.numeric_columns\n\n df_prior[\"_source\"] = \"Prior\"\n df_post[\"_source\"] = \"Post\"\n\n plot_df = pd.concat([df_prior, df_post])\n \n msg = (\n \"Plotting the following categorical column(s): \" +\n \", \".join(plot_categorical_columns) +\n \"\\nAgainst the following numeric column(s):\" +\n \", \".join(plot_numeric_columns) +\n \"\\nCategorical columns with high cardinality (>20 unique values)\" +\n \" are not plotted.\"\n )\n\n logger.info(msg)\n\n # violinplot does not treat numeric string cols as string - error\n # sln: added a whitespace to ensure it is read as a string\n plot_df[plot_categorical_columns] = (\n plot_df[plot_categorical_columns].astype(str) + \" \"\n )\n\n if categorical_on_y_axis:\n y_cols = plot_categorical_columns\n x_cols = plot_numeric_columns\n else:\n y_cols = plot_numeric_columns\n x_cols = plot_categorical_columns\n\n g = sns.PairGrid(data=plot_df,\n x_vars=x_cols,\n y_vars=y_cols,\n hue='_source',\n hue_kws={'split':True},\n **grid_kws)\n\n g.map(sns.violinplot,\n hue=plot_df['_source'],\n split=True,\n **plot_kws)\n\n g.add_legend()\n\n return g\n\n\n def plot_numeric_to_numeric(self,\n kind='scatter',\n diag_kind='kde',\n plot_kws=None,\n grid_kws=None,\n diag_kws={'common_norm':False},\n plot_numeric_columns=None,\n **kwargs):\n \"\"\"Plots charts to compare numeric columns pairwise.\n Plots a pairplot (from seaborn) of numeric columns, with a distribution\n plot on the diagonal and a scatter plot for all other charts\n Args\n ----\n plot_numeric_columns: \n List of numeric columns to plot, uses all if not specified\n kind: \n Plot kind for the pair plot\n diag_kind: \n Plot kind for the diagonal plots\n plot_kws: \n Arguments for both the diagonal and grid plots\n grid_kws: \n Arguments for the grid plots\n diag_kws: \n Arguments for the diagonal plots\n Returns\n ----\n Resulting plot\n \"\"\"\n assert isinstance(plot_numeric_columns, (list, type(None))),\\\n \"plot_numeric_columns should be of type list\"\n\n if plot_numeric_columns is None:\n plot_numeric_columns = self.numeric_columns\n\n df_prior = self.df_prior[plot_numeric_columns].copy()\n df_post = self.df_post[plot_numeric_columns].copy()\n\n df_prior['_source'] = \"Prior\"\n df_post['_source'] = \"Post\"\n\n plot_df = pd.concat([df_prior, df_post])\n plot_df.reset_index(drop=True, inplace=True)\n\n logger.info(\n \"Plotting the following numeric column(s): {}\".format(\n \", \".join(plot_numeric_columns))\n )\n\n g = sns.pairplot(data=plot_df,\n kind=kind,\n diag_kind=diag_kind,\n hue='_source',\n plot_kws=plot_kws,\n diag_kws=diag_kws,\n grid_kws=grid_kws,\n **kwargs)\n\n return g\n\n\n def plot_categorical(self, plot_categorical_columns=None, **kwargs):\n \"\"\"Plot histograms to compare categorical columns\n Args\n ----\n plot_categorical_columns: \n List of categorical columns to plot, uses all if no specified\n Returns\n ----\n Resulting plot\n \"\"\"\n assert isinstance(plot_categorical_columns, (list, type(None))),\\\n \"plot_categorical_columns should be of type list\"\n\n col_nunique = self.df_prior.nunique()\n if plot_categorical_columns is None:\n plot_categorical_columns = (\n [col for col in col_nunique.index if\n (col_nunique[col] <= 20) & (col in self.categorical_columns)]\n )\n\n logger.info(\n \"Plotting the following categorical column(s): {}\".format(\n \", \".join(plot_categorical_columns))\n )\n\n fig, ax = plt.subplots(len(plot_categorical_columns), 1,\n figsize=(10, 5*len(plot_categorical_columns)))\n \n for i, col in enumerate(plot_categorical_columns):\n if len(plot_categorical_columns) == 1:\n _ax = ax\n elif len(plot_categorical_columns) > 1:\n _ax = ax[i]\n\n _p1 = (self.df_prior[col].value_counts(normalize=True)\n .rename(\"Proportion\")\n .sort_index()\n .reset_index())\n _p2 = (self.df_post[col].value_counts(normalize=True)\n .rename(\"Proportion\")\n .sort_index()\n .reset_index())\n _p1['_source'] = 'Prior'\n _p2['_source'] = 'Post'\n _p = pd.concat([_p1, _p2])\n\n sns.barplot(x=\"index\",\n y=\"Proportion\",\n hue=\"_source\",\n data=_p,\n ax=_ax,\n **kwargs)\n _ax.legend(loc='upper right', title='_source')\n _ax.set_xlabel(col)\n _ax.tick_params(axis='x', labelrotation=90)\n\n plt.tight_layout()\n plt.close(fig)\n\n return fig\n\n\n def _rmse(self, targets, predictions):\n return np.sqrt(np.mean((predictions-targets)**2))\n\n\n def compare_ml_efficacy(self,\n target_column,\n test_data=None,\n OHE_columns=None,\n high_cardinality_columns=None,\n OHE_columns_cutoff=5,\n random_state=None,\n train_size=0.7,\n cv=3,\n n_iter=5,\n param_grid={'n_estimators': [100, 200],\n 'max_samples': [0.6, 0.8, 1],\n 'max_depth': [3, 4, 5]}):\n \"\"\"Compares the ML efficacy of the prior data to the post data\n For a given `target_column`, this builds a ML model separately with\n `df_prior` and `df_post`, and compares the performance\n between the 2 models on a test dataset. Test data will be drawn\n from `df_post` if it is not provided.\n Args\n ----\n target_column: \n Target column to be used for ML\n test_data: \n Pandas dataframe of test data, to do a train test split on the\n df_post if not provided\n OHE_columns: \n List of columns to be one hot encoded, will be determined\n if not provided\n high_cardinality_columns: \n List of columns to be cat boost encoded, will be\n determined if not provided\n OHE_columns_cutoff: \n Number of unique labels in a column to determine OHE_columns &\n high_cardinality_columns if not provided. \n random_state: \n Random state for the RandomizedSearchCV & the model fitting\n train_size: \n Proportion to split the df_post by, if test_data is not provided\n cv: \n Number of cross validation folds to be used in the\n RandomizedSearchCV\n n_iter: \n Number of iterations for the RandomizedSearchCV\n param_grid: \n Dictionary of hyperparameter values to be iterated by\n the RandomizedSearchCV\n Returns\n ----\n Returns a report of ML metrics between the prior model and the\n post model\n \"\"\"\n assert isinstance(target_column, str),\\\n \"target_column should be of type string\"\n assert target_column in self.df_prior.columns,\\\n \"target_column does not exist in df_prior\"\n assert isinstance(test_data, (pd.DataFrame, type(None))),\\\n \"test_data should be a pandas dataframe\"\n assert isinstance(OHE_columns, (list, type(None))),\\\n \"OHE_columns should be of type list\"\n assert isinstance(high_cardinality_columns, (list, type(None))),\\\n \"high_cardinality_columns should be of type list\"\n\n\n # TODO: - Allow choice of model?\n # - Allow choice of encoding for high cardinality cols?\n\n self.target_column = target_column\n self.train_size = train_size\n self.random_state = random_state\n self.cv = cv\n self.n_iter = n_iter\n self.param_grid = param_grid\n\n col_nunique = self.df_prior.nunique()\n\n if OHE_columns is None:\n OHE_columns = [col for col in col_nunique.index if\n (col_nunique[col] <= OHE_columns_cutoff) &\n (col in self.categorical_columns)]\n\n if high_cardinality_columns is None:\n high_cardinality_columns = [col for col in col_nunique.index if\n (col_nunique[col] > OHE_columns_cutoff) &\n (col in self.categorical_columns)]\n\n self.OHE_columns = OHE_columns\n self.high_cardinality_columns = high_cardinality_columns\n\n test_data_ = copy.deepcopy(test_data)\n\n if test_data_ is not None:\n test_data_[self.numeric_columns] = (\n test_data_[self.numeric_columns].astype(float)\n )\n test_data_[self.categorical_columns] = (\n test_data_[self.categorical_columns].astype(str)\n )\n\n self.test_data = test_data_\n\n self._ml_data_prep()\n\n if target_column in self.categorical_columns:\n self._build_classifier()\n self._eval_classifier()\n\n elif target_column in self.numeric_columns:\n self._build_regressor()\n self._eval_regressor()\n\n return self.ml_report\n\n\n def _ml_data_prep(self):\n \"\"\"Prepares datasets for ML\n This does one hot encoding, cat boost encoding, and train test\n split (if necessary).\n \"\"\"\n\n df_post = self.df_post.copy()\n train_prior = self.df_prior.copy()\n \n # drop NAs\n cols = self.categorical_columns+self.numeric_columns\n df_post = df_post[cols].dropna(how='any')\n train_prior = train_prior[cols].dropna(how='any')\n\n # create test data if not provided\n if self.test_data is None:\n\n msg = (\n \"No test data was provided. Test data will be created with \" +\n \"a {}-{} shuffle split from the post data set.\".format(\n str(round(self.train_size*100, 0)),\n str(round((1-self.train_size)*100, 0)))\n )\n logger.info(msg)\n\n df_post = shuffle(df_post)\n n_split = int(len(df_post)*self.train_size)\n\n train_post = df_post.iloc[:n_split].copy()\n test = df_post.iloc[n_split:].copy()\n\n else:\n test = self.test_data.copy()\n test = test[cols].dropna(how='any')\n train_post = df_post\n\n # determine columns for OHE & CatBoost\n OHE_columns = [col for col in self.OHE_columns if\n col != self.target_column]\n high_cardinality_columns = [col for col in self.high_cardinality_columns\n if col != self.target_column]\n\n if len(OHE_columns) > 0:\n logger.info(\"One hot encoded columns: {}\".format(\", \".join(OHE_columns)))\n if len(high_cardinality_columns) > 0:\n logger.info(\"Cat boost encoded columns: {}\".format(\", \".join(high_cardinality_columns)))\n\n # concat and then OHE to ensure columns match\n train_prior['source'] = \"Train Prior\"\n test['source'] = \"Test\"\n train_post['source'] = \"Train Post\"\n\n df = pd.concat([train_prior, test, train_post])\n df = pd.get_dummies(data=df, columns=OHE_columns)\n\n train_prior = df[df.source == 'Train Prior'].drop('source', axis=1)\n test = df[df.source == 'Test'].drop('source', axis=1)\n train_post = df[df.source == 'Train Post'].drop('source', axis=1)\n\n # CatBoostEncoder for high cardinality columns\n test_prior = test.copy()\n test_post = test.copy()\n\n tf_prior = CatBoostEncoder(cols=high_cardinality_columns,\n random_state=self.random_state)\n tf_post = CatBoostEncoder(cols=high_cardinality_columns,\n random_state=self.random_state)\n\n train_prior[high_cardinality_columns] = (\n tf_prior.fit_transform(train_prior[high_cardinality_columns],\n train_prior[self.target_column])\n )\n test_prior[high_cardinality_columns] = (\n tf_prior.transform(test_prior[high_cardinality_columns],\n test_prior[self.target_column])\n )\n train_post[high_cardinality_columns] = (\n tf_post.fit_transform(train_post[high_cardinality_columns],\n train_post[self.target_column])\n )\n test_post[high_cardinality_columns] = (\n tf_post.transform(test_post[high_cardinality_columns],\n test_post[self.target_column])\n )\n\n X_train_prior = train_prior.drop(self.target_column, axis=1).astype(float)\n y_train_prior = train_prior[self.target_column].astype(float)\n X_test_prior = test_prior.drop(self.target_column, axis=1).astype(float)\n y_test = test[self.target_column].astype(float)\n\n X_train_post = train_post.drop(self.target_column, axis=1).astype(float)\n y_train_post = train_post[self.target_column].astype(float)\n X_test_post = test_post.drop(self.target_column, axis=1).astype(float)\n\n self.X_train_prior = X_train_prior\n self.y_train_prior = y_train_prior\n self.X_test_prior = X_test_prior\n self.y_test = y_test\n self.X_train_post = X_train_post\n self.y_train_post = y_train_post\n self.X_test_post = X_test_post\n\n\n def _build_regressor(self):\n \"\"\"\n Builds a random forest regressor with a RandomizedSearchCV\n \"\"\"\n\n model_prior_ = RandomForestRegressor(random_state=self.random_state)\n model_post_ = RandomForestRegressor(random_state=self.random_state)\n\n model_prior = RandomizedSearchCV(model_prior_,\n self.param_grid,\n n_iter=self.n_iter,\n cv=self.cv,\n random_state=self.random_state)\n model_post = RandomizedSearchCV(model_post_,\n self.param_grid,\n n_iter=self.n_iter,\n cv=self.cv,\n random_state=self.random_state)\n\n model_prior.fit(self.X_train_prior, self.y_train_prior)\n model_post.fit(self.X_train_post, self.y_train_post)\n \n msg = (\n \"A RandomForestRegressor with a RandomizedSearchCV was trained.\" +\n \"\\nThe final model (trained with prior data) parameters are: \" +\n json.dumps(model_prior.best_params_) +\n \"\\nThe final model (trained with post data) parameters are: \" +\n json.dumps(model_post.best_params_)\n )\n\n logger.info(msg)\n\n self.model_prior = model_prior\n self.model_post = model_post\n\n\n def _build_classifier(self):\n \"\"\"\n Build a random forest classifier with a RandomizedSearchCV\n \"\"\"\n\n model_prior_ = RandomForestClassifier(random_state=self.random_state)\n model_post_ = RandomForestClassifier(random_state=self.random_state)\n\n model_prior = RandomizedSearchCV(model_prior_,\n self.param_grid,\n n_iter=self.n_iter,\n cv=self.cv,\n random_state=self.random_state)\n model_post = RandomizedSearchCV(model_post_,\n self.param_grid,\n n_iter=self.n_iter,\n cv=self.cv,\n random_state=self.random_state)\n\n model_prior.fit(self.X_train_prior, self.y_train_prior)\n model_post.fit(self.X_train_post, self.y_train_post)\n \n msg = (\n \"A RandomForestClassifier with a RandomizedSearchCV was trained.\" +\n \"\\nThe final model (trained with prior data) parameters are: \" +\n json.dumps(model_prior.best_params_) +\n \"\\nThe final model (trained with post data) parameters are: \" +\n json.dumps(model_post.best_params_)\n )\n\n logger.info(msg)\n\n self.model_prior = model_prior\n self.model_post = model_post\n\n\n def _eval_regressor(self):\n \"\"\"\n Calculates the RMSE, MAE & R2 score of the prior and post model.\n Returns a pandas dataframe of the results.\n \"\"\"\n\n y_pred_prior = self.model_prior.predict(self.X_test_prior)\n y_pred_post = self.model_post.predict(self.X_test_post)\n\n rmse_prior = self._rmse(self.y_test, y_pred_prior)\n mae_prior = mean_absolute_error(self.y_test, y_pred_prior)\n r2_prior = r2_score(self.y_test, y_pred_prior)\n\n rmse_post = self._rmse(self.y_test, y_pred_post)\n mae_post = mean_absolute_error(self.y_test, y_pred_post)\n r2_post = r2_score(self.y_test, y_pred_post)\n\n res = pd.DataFrame({\n 'RMSE': [rmse_prior, rmse_post],\n 'MAE': [mae_prior, mae_post],\n 'R2': [r2_prior, r2_post]\n },\n index=['Prior', 'Post']\n )\n\n self.ml_report = res\n\n\n def _eval_classifier(self):\n \"\"\"\n Calculates the accuracy, precision, recall, F1 score & AUC of the\n prior and post model.\n Returns a pandas dataframe of the result.\n \"\"\"\n\n y_pred_prior = self.model_prior.predict(self.X_test_prior)\n y_pred_post = self.model_post.predict(self.X_test_post)\n\n y_test_ = pd.DataFrame(self.y_test)\n y_pred_prior = pd.DataFrame(y_pred_prior, columns=y_test.columns)\n y_pred_post = pd.DataFrame(y_pred_post, columns=y_test.columns)\n\n y_pred_prior['source'] = \"prior\"\n y_pred_post['source'] = \"post\"\n y_test['source'] = \"test\"\n\n y_ = pd.concat([y_pred_prior, y_pred_post, y_test])\n cols = [col for col in y_.columns if col != \"source\"]\n y_ = pd.get_dummies(y_, columns=cols)\n\n y_pred_prior = y_[y_.source == 'prior'].drop('source', axis=1).values\n y_pred_post = y_[y_.source == 'post'].drop('source', axis=1).values\n y_test = y_[y_.source == 'test'].drop('source', axis=1).values\n\n y_.drop('source', axis=1, inplace=True)\n class_labels = y_.columns\n\n res = pd.DataFrame([])\n\n if (len(y_test[0]) == 2):\n # for binary classification\n # only take position 1, assuming position 1 is the true label\n iters = [1]\n else:\n # for multiclass classification\n iters = range(len(y_test[0]))\n\n for i in iters:\n\n precision_prior = precision_score(y_test[:,i], y_pred_prior[:,i])\n recall_prior = recall_score(y_test[:,i], y_pred_prior[:,i])\n acc_prior = accuracy_score(y_test[:,i], y_pred_prior[:,i])\n f1_prior = f1_score(y_test[:,i], y_pred_prior[:,i])\n try:\n auc_prior = roc_auc_score(y_test[:,i], y_pred_prior[:,i])\n except ValueError:\n auc_prior = \"NA\"\n\n precision_post = precision_score(y_test[:,i], y_pred_post[:,i])\n recall_post = recall_score(y_test[:,i], y_pred_post[:,i])\n acc_post = accuracy_score(y_test[:,i], y_pred_post[:,i])\n f1_post = f1_score(y_test[:,i], y_pred_post[:,i])\n try:\n auc_post = roc_auc_score(y_test[:,i], y_pred_post[:,i])\n except ValueError:\n auc_post = \"NA\"\n\n multiindex = [(str(class_labels[i]), 'Prior'),\n (str(class_labels[i]), 'Post')]\n\n index = pd.MultiIndex.from_tuples(multiindex,\n names=['Class', 'Data Type'])\n\n score = pd.DataFrame({\n 'Accuracy': [acc_prior, acc_post],\n 'Precision': [precision_prior, precision_post],\n 'Recall': [recall_prior, recall_post],\n 'F1': [f1_prior, f1_post],\n 'AUC': [auc_prior, auc_post]\n },\n index=index\n )\n\n res = pd.concat([res, score])\n\n self.ml_report = res\n","repo_name":"kelvnt/data-drift-detector","sub_path":"data_drift_detector/data_drift_detector.py","file_name":"data_drift_detector.py","file_ext":"py","file_size_in_byte":32473,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"22"} +{"seq_id":"39863199264","text":"from ctypes import (\n byref, c_char, c_char_p, c_int, cast, create_string_buffer, pointer,\n POINTER\n)\nimport sys\nfrom OpenGL import GL as gl\n\n\nclass ShaderError(Exception): pass\nclass CompileError(ShaderError): pass\nclass LinkError(ShaderError): pass\n\n\nshaderErrors = {\n gl.GL_INVALID_VALUE: 'GL_INVALID_VALUE (bad 1st arg)',\n gl.GL_INVALID_OPERATION: 'GL_INVALID_OPERATION '\n '(bad id or immediate mode drawing in progress)',\n gl.GL_INVALID_ENUM: 'GL_INVALID_ENUM (bad 2nd arg)',\n}\n\n\nclass _Shader(object):\n\n type = None\n\n def __init__(self, sources):\n if isinstance(sources, basestring):\n self.sources = [sources]\n else:\n self.sources = sources\n self.id = None\n \n \n def _get(self, paramId):\n outvalue = c_int(0)\n value = gl.glGetShaderiv(self.id, paramId) #, byref(outvalue))\n #value = outvalue.value\n if value in shaderErrors.keys():\n msg = '%s from glGetShader(%s, %s, &value)'\n raise ValueError(msg % (shaderErrors[value], self.id, paramId))\n return value\n\n\n def getCompileStatus(self):\n return bool(self._get(gl.GL_COMPILE_STATUS))\n\n\n def getInfoLogLength(self):\n return self._get(gl.GL_INFO_LOG_LENGTH)\n\n\n def getInfoLog(self):\n return gl.glGetShaderInfoLog(self.id)\n\n def _srcToArray(self):\n num = len(self.sources)\n all_source = (c_char_p * num)(*self.sources)\n return num, cast(pointer(all_source), POINTER(POINTER(c_char)))\n \n\n def compile(self):\n self.id = gl.glCreateShader(self.type)\n\n num, src = self._srcToArray()\n gl.glShaderSource(self.id, self.sources)\n gl.glCompileShader(self.id)\n\n if not self.getCompileStatus():\n raise CompileError(self.getInfoLog())\n\n\n\nclass VertexShader(_Shader):\n type = gl.GL_VERTEX_SHADER\n\n\nclass FragmentShader(_Shader):\n type = gl.GL_FRAGMENT_SHADER\n\n\n\nclass ShaderProgram(object):\n\n def __init__(self, *shaders):\n self.shaders = list(shaders)\n self.id = None\n\n \n def _get(self, paramId):\n outvalue = c_int(0)\n gl.glGetProgramiv(self.id, paramId, byref(outvalue))\n value = outvalue.value\n if value in shaderErrors.keys():\n msg = '%s from glGetProgram(%s, %s, &value)'\n raise ValueError(msg % (shaderErrors[value], self.id, paramId))\n return value\n \n \n def getLinkStatus(self):\n return bool(self._get(gl.GL_LINK_STATUS))\n\n\n def getInfoLogLength(self):\n return self._get(gl.GL_INFO_LOG_LENGTH)\n\n\n def getInfoLog(self):\n return gl.glGetProgramInfoLog(self.id)\n \n\n def _getMessage(self):\n messages = []\n for shader in self.shaders:\n log = shader.getInfoLog()\n if log:\n messages.append(log)\n log = self.getInfoLog()\n if log:\n messages.append(log)\n return '\\n'.join(messages)\n\n \n def use(self):\n try:\n self.id = gl.glCreateProgram()\n except: \n self.id = -1\n message = \"Error - No Compatible video board found ( no GPU with GLSL support )\"\n sys.stderr.write( \"%s\\n\" % message )\n# message = self._getMessage()\n \n if self.id>-1:\n for shader in self.shaders:\n shader.compile()\n gl.glAttachShader(self.id, shader.id)\n\n gl.glLinkProgram(self.id)\n\n message = self._getMessage()\n if not self.getLinkStatus():\n raise LinkError(message)\n\n gl.glUseProgram(self.id)\n\n return message\n\n def bind(self):\n # bind the program\n if self.id > -1:\n gl.glUseProgram(self.id)\n\n def unbind(self):\n # unbind whatever program is currently bound - not necessarily this program,\n # so this should probably be a class method instead\n if self.id > -1:\n gl.glUseProgram(0)\n\n\n # upload a floating point uniform\n # this program must be currently bound\n def uniformf(self, name, *vals):\n# self.bind()\n # check there are 1-4 values\n if self.id > -1:\n if len(vals) in range(1, 5):\n # select the correct function\n { 1 : gl.glUniform1f,\n 2 : gl.glUniform2f,\n 3 : gl.glUniform3f,\n 4 : gl.glUniform4f\n # retrieve the uniform location, and set\n }[len(vals)](gl.glGetUniformLocation(self.id, name), *vals)\n# self.unbind()\n\n # upload an integer uniform\n # this program must be currently bound\n def uniformi(self, name, *vals):\n# self.bind()\n # check there are 1-4 values\n if len(vals) in range(1, 5):\n # select the correct function\n { 1 : gl.glUniform1i,\n 2 : gl.glUniform2i,\n 3 : gl.glUniform3i,\n 4 : gl.glUniform4i\n # retrieve the uniform location, and set\n }[len(vals)](gl.glGetUniformLocation(self.id, name), *vals)\n# self.unbind()\n\n # upload a uniform matrix\n # works with matrices stored as lists,\n # as well as euclid matrices\n def uniform_matrixf(self, name, mat):\n# self.bind()\n # obtian the uniform location\n loc = gl.glGetUniformLocation(self.id, name)\n # uplaod the 4x4 floating point matrix\n gl.glUniformMatrix4fv(loc, 1, False, (c_float * 16)(*mat))\n# self.unbind()\n\n\n","repo_name":"hradec/chemshapes","sub_path":"host/pyglet_shaders/shader.py","file_name":"shader.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"20312768673","text":"from django.urls import path\n\nfrom common.views import index\nfrom rx.views import list_rx, create_rx, edit_rx, user_workflow, on_hold_create, on_hold_delete, prepared_create, \\\n prepared_delete, ready_create, ready_delete, finished_create, finished_delete, delete_rx\n\nurlpatterns = (\n path('', list_rx, name='list rx'),\n path('workflow/', user_workflow, name='current user profile'),\n path('workflow//', user_workflow, name='user profile'),\n\n path('create/', create_rx, name='create rx'),\n path('edit/', edit_rx, name='edit rx'),\n path('delete/', delete_rx, name='delete rx'),\n\n path('onhold/', on_hold_create, name='on hold rx'),\n path('onhold/delete/', on_hold_delete, name='delete on hold'),\n\n path('prepared/', prepared_create, name='prepared rx'),\n path('prepared/delete/', prepared_delete, name='delete prepared'),\n\n path('ready/', ready_create, name='ready rx'),\n path('ready/delete/', ready_delete, name='delete ready'),\n\n path('finished/', finished_create, name='finished rx'),\n path('finished/delete/', finished_delete, name='delete finished'),\n)\n","repo_name":"tswetozar/rubia","sub_path":"rx/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74640163254","text":"import os\nimport time\n\n\ndef gcd(a, b, c, forward_filename, backward_filename, u=None, v=None, u_prev=None, v_prev=None, u_prev_prev=None,\n v_prev_prev=None):\n a, b = max(a, b), min(a, b)\n\n with open(forward_filename, 'a+') as f:\n f.write(f'{a} = {a // b} * {b} + {a % b}\\n')\n\n if a % b == 0:\n d = b\n s = c * u // b\n t = c * v // b\n return b, (u, v), (s, t)\n else:\n m = -(a // b)\n if u is None and v is None:\n u = 1\n v = m\n else:\n if u_prev is None and v_prev is None:\n u_prev = u\n v_prev = v\n u = u * m\n v = v * m + 1\n else:\n u_prev_prev, v_prev_prev = u_prev, v_prev\n u_prev, v_prev = u, v\n u = u * m + u_prev_prev\n v = v * m + v_prev_prev\n\n u_sign, v_sign = '', '-'\n if v > 0:\n v_sign = '+'\n if u < 0:\n u_sign = '-'\n with open(backward_filename, 'a+') as f:\n f.write(f'{a % b} = {a} - {-m} * {b} = {u_sign}{abs(u)}a {v_sign} {abs(v)}b\\n')\n\n return gcd(b, a % b, c, forward_filename, backward_filename, u, v, u_prev, v_prev)\n\n\nif __name__ == '__main__':\n forward_filename = 'forward.txt'\n backward_filename = 'backward.txt'\n try:\n os.remove(forward_filename)\n os.remove(backward_filename)\n except FileNotFoundError:\n pass\n a1, b1, c1 = 288, 88, 40\n a2, b2, c2 = 13273706, 2264466, 14934\n a3, b3, c3 = 8338378103479608, 1939482877349796, 112662\n t = time.time()\n for i in range(100):\n d = gcd(a1, b1, c1, forward_filename, backward_filename)\n # print(d)\n print((time.time() - t) / 100)\n","repo_name":"yhaidai/year-4","sub_path":"semester-2/mathematical-fundamentals-of-data-security/lab1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32802393023","text":"import os\nimport pytest\nimport pandas as pd\n\n\n@pytest.fixture(scope=\"session\")\ndef input_data():\n # For larger datasets, here we would use a testing sub-sample.\n root_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n data = pd.read_csv(os.path.join(root_path, 'data') + \"/census_clean.csv\")\n return data\n\n\n@pytest.fixture()\ndef categorical_features():\n # For larger datasets, here we would use a testing sub-sample.\n cat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n return cat_features\n","repo_name":"blewy/DeployML_devops","sub_path":"starter/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74988647096","text":"# В этой задаче решаем динамическую задачу при наличии точечных управляющих катушек, камеры с магнитной проницаемостью отличной от нуля\n\n\n# %% Imports\nfrom fenics import *\nimport matplotlib.pyplot as matplt\nimport logger\nimport mshr\nimport time\nimport funcs as fu\nimport MEPHIST_data as M\nimport logger\nfrom geometry import Geometry\nfrom boundary_conditions import BoundaryConditions\nimport point_source_data as psd\nimport numpy\nfrom expressions import Expressions\nfrom MEPHIST_dynamics_params import Problem\n\n# %% Pre-programm stuff\nt0 = time.time()\ncurrent_pyfile = \"\\n\\n---------MEPHIST_dynamics.py---------\"\nlogger.log_n_output(\"%s\" % current_pyfile, 'red')\nfu.print_colored(\"Date_Time is: %s\" % fu.Time_name(), 'cyan')\nPATH = 'MEPHIST_dynamics'\n\n# %% Needed objects and contour levels\nboundary_conditions = BoundaryConditions()\ngeometry = Geometry()\np = Problem()\ne = Expressions()\n\n# %% Domain and mesh definition\ndomain = geometry.rectangle_domain(\n area=[p.domain_geometry[0], p.domain_geometry[1], p.domain_geometry[2], p.domain_geometry[3]])\n\ngeometry.register_plot_domain(p.plot_domain)\nmephist_inner_surface = geometry.inner_mephist_vessel()\nmephist_outer_surface = geometry.outer_mephist_vessel()\n\nvacuum_vessel = mephist_outer_surface - mephist_inner_surface\n\ndomain.set_subdomain(1, vacuum_vessel) # vessel\ndomain.set_subdomain(2, mephist_inner_surface) # plasma\n\ngeometry.generate_mesh_in_domain(domain=domain, density=p.mesh_density)\n\nfu.fenics_plot(p, geometry.mesh, PATH)\n\nmarkers = MeshFunction(\"size_t\", geometry.mesh,\n geometry.mesh.topology().dim(), geometry.mesh.domains())\n\nfu.fenics_plot(p, markers, PATH)\n# fu.fenics_plot(p, markers, \"%s_nobar\" % PATH)\n\n# %% Step coefficients classes\n\n\nclass Permeability(UserExpression):\n def __init__(self, mesh, **kwargs):\n super().__init__(**kwargs)\n self.markers = markers\n\n def eval_cell(self, values, x, cell):\n if self.markers[cell.index] == 0:\n values[0] = p.VACUUM_PERMEABILITY # vacuum\n elif self.markers[cell.index] == 1:\n values[0] = p.VESSEL_PERMEABILITY # vessel\n else:\n values[0] = p.PLASMA_PERMEABILITY # plasma\n\n def value_shape(self):\n return ()\n\n\nclass Conductivity(UserExpression):\n def __init__(self, mesh, **kwargs):\n super().__init__(**kwargs)\n self.markers = markers\n\n def eval_cell(self, values, x, cell):\n if self.markers[cell.index] == 0:\n values[0] = p.VACUUM_CONDUCTIVITY # vacuum\n elif self.markers[cell.index] == 1:\n values[0] = p.VESSEL_CONDUCTIVITY # vessel\n else:\n values[0] = p.PLASMA_CONDUCTIVITY # plasma\n\n def value_shape(self):\n return ()\n\n\n# %% Define function space and step coefficients\nV = FunctionSpace(geometry.mesh, 'P', 1) # standard triangular mesh\n\nmu = Permeability(geometry.mesh, degree=0)\nsg = Conductivity(geometry.mesh, degree=0)\n\n# fu.fenics_plot(interpolate(sg, V), PATH, '', 'colorbar')\n# fu.countour_plot_via_mesh(geometry, interpolate(\n# mu, V), levels=p.levels, PATH=PATH, plot_title='Permeability')\n# fu.countour_plot_via_mesh(geometry, interpolate(\n# sg, V), levels=p.levels, PATH=PATH, plot_title='Conductivity')\n\nu = TrialFunction(V) # u must be defined as function before expression def\nv = TestFunction(V)\n\n# %% Boundary conditions\nu_D = boundary_conditions.constant_boundary_condition(\"0\")\nbc = DirichletBC(V, u_D, fu.Dirichlet_boundary)\n\n# %% Solve\n[r_2, r] = geometry.operator_weights(V)\n\npoint_sources = fu.Array_Expression(fu.ArrayOfPointSources(psd.PointSource(1)))\n\ndx = Measure('dx', domain=geometry.mesh, subdomain_data=markers)\n\n# %% Solve stationary\nsource = e.iter_point_source(p, [p.disp_x[0], p.disp_z[0]])\na = dot(grad(u)/r, grad(r_2*v))*dx\nL = sum(point_sources[2:len(point_sources)])*r*v*dx(0) + \\\n source*r*v*dx(2) # !!!\n\nu0 = Function(V)\nsolve(a == L, u0, bc)\np.find_levels(u0, step=p.step)\n\nfu.What_time_is_it(t0, 'Initial problem solved')\nfu.countour_plot_via_mesh(geometry, u0, levels=p.levels,\n PATH=PATH+'_nobar',\n current_disp=[p.disp_x[0], p.disp_z[0]],\n plt_vessel=True,\n do_plasma_centre=True,\n colorbar=False,\n grid=True,\n xticks_array=p.xticks)\nfu.countour_plot_via_mesh(geometry, u0, levels=p.levels,\n PATH=PATH,\n current_disp=[p.disp_x[0], p.disp_z[0]],\n plt_vessel=True,\n do_plasma_centre=True,\n colorbar=True,\n grid=True,\n xticks_array=p.xticks)\n# fu.fenics_plot(p, u0, PATH, colorbar=True)\n\ndt = numpy.diff(p.t)\nfor i in range(len(dt)):\n logger.info(\"i = %d out of %d\" % (i, len(dt)))\n fu.print_colored_n_white(colored_text=\"Time: \",\n color='blue', white_text=str(p.t[i+1]))\n u = Function(V)\n v = TestFunction(V)\n\n source = e.iter_point_source(p, [p.disp_x[i+1], p.disp_z[i+1]])\n\n # F = dot(grad(u)/r, grad(r_2*v))*dx \\\n # + fu.M0*mu*sg / dt[i] * (u - u0)*r*v*dx \\\n # - sum(point_sources[2:len(point_sources)])*r*v*dx(0) \\\n # - source*r*v*dx(2)\n F = dot(grad(u)/r, grad(r_2*v))*dx \\\n - sum(point_sources[2:len(point_sources)])*r*v*dx(0) \\\n - source*r*v*dx(2)\n\n solve(F == 0, u, bc)\n # p.find_levels(u, step=p.step)\n\n fu.What_time_is_it(t0, \"Problem solved for t = %f\" % p.t[i+1])\n # p.find_levels_with_exponent(u, exponent=p.exponent, step=p.step)\n fu.countour_plot_via_mesh(geometry, u, levels=p.levels,\n PATH=PATH+'_nobar',\n current_disp=[p.disp_x[i+1], p.disp_z[i+1]],\n plt_vessel=True,\n do_plasma_centre=True,\n colorbar=False,\n grid=True,\n xticks_array=p.xticks)\n fu.countour_plot_via_mesh(geometry, u, levels=p.levels,\n PATH=PATH,\n current_disp=[p.disp_x[i+1], p.disp_z[i+1]],\n plt_vessel=True,\n do_plasma_centre=True,\n colorbar=True,\n grid=True,\n xticks_array=p.xticks)\n\n u0 = u\n\n fu.print_colored(text=\"Iteration finished. %d/%d = %.2f\" \n % (i, len(dt)-1, 100*i/(len(dt)-1)), color='yellow')\n\n\nfu.What_time_is_it(t0, message='Done')\nlogger.log_n_output_colored_message(\n colored_message=\"'Done'\\n\", color='red', white_message='')\n","repo_name":"GeorgeTim1998/grad_shafranov","sub_path":"MEPHIST_dynamics.py","file_name":"MEPHIST_dynamics.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"30864016683","text":"\"\"\"This is main server file\"\"\"\nimport os\nfrom http.server import CGIHTTPRequestHandler, ThreadingHTTPServer\nimport logging\nfrom sys import argv\nfrom io import BytesIO\nimport threading\n\nLOG_FILENAME = \"webserverlog.txt\"\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n# create console handler and set level to debug\nch = logging.FileHandler(filename=LOG_FILENAME)\n# create formatter\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n# add formatter to ch\nch.setFormatter(formatter)\n# add ch to logger\nlogger.addHandler(ch)\n\n\n# -------------------------------------------------------------------------------\n\nclass ServerException(Exception):\n \"\"\"For internal error reporting.\"\"\"\n pass\n\n# -------------------------------------------------------------------------------\n\n\nclass BaseCase:\n \"\"\"Parent for case handlers.\"\"\"\n\n @staticmethod\n def handle_file(handler, full_path):\n \"\"\"This is static hanle file method with two args as handler and full_path\"\"\"\n\n try:\n with open(full_path, 'rb') as reader:\n content = reader.read()\n handler.send_content(content)\n except IOError as msg:\n msg = \"'{0}' cannot be read: {1}\".format(full_path, msg)\n handler.handle_error(msg)\n\n @staticmethod\n def index_path(handler):\n \"\"\"This is static index path method with one args as handler\"\"\"\n return os.path.join(handler.full_path, 'index.html')\n\n @staticmethod\n def forms_path(handler):\n \"\"\"This is static forms path method with one args as handler\"\"\"\n return os.path.join(handler.full_path, 'forms.html')\n\n @staticmethod\n def form_get_path(handler):\n \"\"\"This is static form get path method with one args as handler\"\"\"\n return os.path.join(handler.full_path, 'form_get.html')\n\n def test(self, handler):\n \"\"\"This is test method with one args as handler\"\"\"\n print(\"This will override from child classes\", handler.full_path)\n\n def act(self, handler):\n \"\"\"This is act method with one args as handler\"\"\"\n print(\"This will override from child classes\", handler.full_path)\n\n\n# -------------------------------------------------------------------------------\nclass CaseNoFile(BaseCase):\n \"\"\"File or directory does not exist.\"\"\"\n\n def test(self, handler):\n file_ath = handler.full_path.split(\"?\", 1)\n path = file_ath[0]\n return not os.path.exists(path)\n\n def act(self, handler):\n raise ServerException(\"'{0}' not found\".format(handler.path))\n\n\n# -------------------------------------------------------------------------------\nclass CaseCgiFile(BaseCase):\n \"\"\"cgi file exists\"\"\"\n\n @staticmethod\n def run_cgi(handler):\n \"\"\"This is static run_cgi method with one args as handler\"\"\"\n cmd = \"python \" + handler.full_path\n child_stdin, child_stdout = os.popen(cmd)\n child_stdin.close()\n data = child_stdout.read()\n child_stdout.close()\n handler.send_content(data)\n\n def test(self, handler):\n return handler.is_cgi()\n\n def act(self, handler):\n handler.run_cgi()\n\n\n# -------------------------------------------------------------------------------\n\n\nclass CaseExistingFile(BaseCase):\n \"\"\"File exists.\"\"\"\n\n def test(self, handler):\n return os.path.isfile(handler.full_path)\n\n def act(self, handler):\n self.handle_file(handler, handler.full_path)\n\n# -------------------------------------------------------------------------------\n\n\nclass CaseDirectoryIndexFile(BaseCase):\n \"\"\"Serve index.html page for a directory.\"\"\"\n\n def test(self, handler):\n return os.path.isdir(handler.full_path) and \\\n os.path.isfile(self.index_path(handler))\n\n def act(self, handler):\n self.handle_file(handler, self.index_path(handler))\n\n\n# -------------------------------------------------------------------------------\n\nclass CaseDirectoryNoIndexFile(BaseCase):\n \"\"\"Serve listing for a directory without an index.html page.\"\"\"\n\n # How to display a directory listing.\n Listing_Page = '''\\\n \n \n
    \n {0}\n
\n \n \n '''\n\n def list_dir(self, handler, full_path):\n \"\"\"This is list_dir method with two args as handler and full_path\"\"\"\n try:\n entries = os.listdir(full_path)\n bullets = ['
  • {0}
  • '.format(e) for e in entries if not e.startswith('.')]\n page = self.Listing_Page.format('\\n'.join(bullets))\n handler.send_content(page.encode('utf-8'))\n except OSError as msg:\n msg = \"'{0}' cannot be listed: {1}\".format(self.path, msg)\n handler.handle_error(msg)\n\n def test(self, handler):\n return os.path.isdir(handler.full_path) and \\\n not os.path.isfile(self.index_path(handler))\n\n def act(self, handler):\n self.list_dir(handler, handler.full_path)\n\n\n# ------------------------------------------------------------------------------\n\n\nclass CaseAlwaysFail(BaseCase):\n \"\"\"Base case if nothing else worked.\"\"\"\n\n def test(self, handler):\n return True\n\n def act(self, handler):\n raise ServerException(\"Unknown object '{0}'\".format(handler.path))\n\n\n# -------------------------------------------------------------------------------\n\n\n# noinspection PyAttributeOutsideInit\n\n\nclass RequestHandler(CGIHTTPRequestHandler):\n \"\"\"If the requested path maps to a file, that file is served.\n If anything goes wrong, an error page is constructed.\"\"\"\n logger.info(\"Request handle is processed, in case of failure error page is constructed.\")\n\n protocol_version = 'HTTP/1.1'\n logger.info(\"USing protocol version: %s\", str(protocol_version))\n\n buffer = 1\n log_file = open(LOG_FILENAME, 'w', buffer)\n\n Cases = [CaseNoFile(),\n CaseCgiFile(),\n CaseExistingFile(),\n CaseDirectoryIndexFile(),\n CaseDirectoryNoIndexFile(),\n CaseAlwaysFail()]\n\n # How to display an error.\n Error_Page = \"\"\"\\\n \n \n

    Error accessing {path}

    \n

    {msg}

    \n \n \n \"\"\"\n\n def guess_type(self, path):\n mimetype = CGIHTTPRequestHandler.guess_type(self, path)\n logger.info(\"Type of request handler: %s\", str(mimetype))\n if mimetype == 'application/octet-stream':\n if path.endswith('manifest'):\n mimetype = 'text/cache-manifest'\n return mimetype\n\n # Classify and handle request.\n\n def do_HEAD(self):\n logger.info(\"Thread Count :%s \", threading.active_count())\n logger.info(\"\\n%s\\nPath: %s\\nHeaders:\\n%s\",\n str(self.requestline), str(self.path), str(self.headers))\n self.close_connection = True\n\n def do_GET(self):\n logger.info(\"Thread Count :%s \", threading.active_count())\n logger.info(\"\\n%s\\nPath: %s\\nHeaders:\\n%s\",\n str(self.requestline), str(self.path), str(self.headers))\n try:\n print(\"Get method call\")\n self.full_path = os.getcwd() + self.path\n\n for case in self.Cases:\n if case.test(self):\n logger.info(\"In case if test is successful action is processed at path: %s\",\n str(self.full_path))\n case.act(self)\n break\n\n except Exception as msg:\n logger.info(\"Exception occurred with msg: %s :\", msg)\n self.handle_error(msg)\n\n def do_POST(self):\n logger.info(\"\\n%s\\nPath: %s\\nHeaders:\\n%s\",\n str(self.requestline), str(self.path), str(self.headers))\n print(\"Post method call\")\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n logger.info(str(body))\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n logger.info(\"Response to be posted: %s\", str(response))\n response.write(b'This is POST request. ')\n response.write(b'Received: ')\n response.write(body)\n self.wfile.write(response.getvalue())\n self.close_connection = True\n\n def do_PUT(self):\n \"\"\"This is do_PUT method\"\"\"\n logger.info(\"\\n%s\\nPath: %s\\nHeaders:\\n%s\",\n str(self.requestline), str(self.path), str(self.headers))\n filename = os.path.basename(self.path)\n file_length = int(self.headers['Content-Length'])\n with open(filename, 'wb') as output_file:\n logger.info(\"File %s is opened to write response\", str(filename))\n output_file.write(self.rfile.read(file_length))\n self.send_response(201, 'Created')\n self.end_headers()\n reply_body = 'Saved \"%s\"\\n' % filename\n logger.info(\"Replying with: %d\", str(reply_body))\n self.wfile.write(reply_body.encode('utf-8'))\n self.close_connection = True\n\n # Handle unknown objects.\n def handle_error(self, msg):\n \"\"\"This is handler error method with one args as msg\"\"\"\n logger.info('handle_error msg %s', str(msg))\n content = self.Error_Page.format(path=self.path, msg=msg)\n self.send_content(content, 404)\n\n # Send actual content.\n def send_content(self, content, status=200):\n \"\"\"This is send_content method with two args as content and status\"\"\"\n logger.info('send_content...\\n')\n self.send_response(status)\n ctype = self.guess_type(path=self.full_path)\n if ctype == 'application/octet-stream':\n ctype = 'text/html'\n self.send_header(\"Content-type\", ctype)\n logger.info(\"Header is sent with type: %s\", ctype)\n self.send_header(\"Content-Length\", str(len(content)))\n self.end_headers()\n self.wfile.write(content)\n\n def log_message(self, format, *args):\n self.log_file.write(\"%s - - [%s] %s\\n\" %\n (self.client_address[0],\n self.log_date_time_string(),\n format % args))\n\n# -------------------------------------------------------------------------------\n\n\ndef run(server_class=ThreadingHTTPServer, handler_class=RequestHandler, port=8080):\n \"\"\"This is run method with three args as ThreadingHTTPServer, RequestHandler, and port\"\"\"\n server_address = ('', port)\n server = server_class(server_address, handler_class)\n logger.info('Starting webserver...\\n')\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n server.server_close()\n logger.info('Stopping webserver...\\n')\n\n\nif __name__ == '__main__':\n if len(argv) == 2:\n run(port=int(argv[1]))\n else:\n run()\n","repo_name":"jnitin/webserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20207557392","text":"from math import sqrt\n\ndef gf(n, k):\n hi = 1\n for i in range(1, min(k, int(sqrt(n)))+1):\n if n % i == 0:\n if n // i <= k:\n hi = max(hi, n//i)\n else:\n hi = max(hi, i)\n return hi\n\nfor _ in range(int(input())):\n n, k = map(int, input().split())\n print(n // gf(n, k))","repo_name":"tylanmm/comp_prog","sub_path":"codeforces/solved/buying_shovels.py","file_name":"buying_shovels.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31431905271","text":"from django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import path, include\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('phadmin/', admin.site.urls),\n path('', include('apps.home.urls', namespace='home')),\n path('users/', include('apps.users.urls', namespace='users')),\n path('products/', include('apps.products.urls', namespace='products')),\n path('orders/', include('apps.orders.urls', namespace='orders')),\n path('api/', include('apps.users.api.urls', namespace='users_api')),\n path('api/', include('apps.products.api.urls', namespace='products_api')),\n path('api/', include('apps.home.api.urls', namespace='home_api')),\n path('api/', include('apps.orders.api.urls', namespace='order_api')),\n path('api/', include('apps.auction.api.urls', namespace='auction_api')),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Nimatah/Stoneet","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12134119742","text":"import logging\nfrom diworker.diworker.migrations.base import BaseMigration\nfrom pymongo import UpdateOne\nfrom optscale_client.rest_api_client.client_v2 import Client as RestClient\n\n\"\"\"\nSet 'box_usage' flag to Fargate, Lambda and SageMaker raw expenses\n\"\"\"\n\nLOG = logging.getLogger(__name__)\n\nCHUNK_SIZE = 200\n\n\nclass Migration(BaseMigration):\n\n @property\n def mongo_raw(self):\n return self.db.raw_expenses\n\n @property\n def rest_cl(self):\n if self._rest_cl is None:\n self._rest_cl = RestClient(\n url=self.config_cl.restapi_url(), verify=False)\n self._rest_cl.secret = self.config_cl.cluster_secret()\n return self._rest_cl\n\n @property\n def mongo_temp_table(self):\n # temporary table for storing handled cloud accounts\n return self.db.migration_2023040614450000\n\n def get_cloud_accounts_ids(self):\n cloud_accounts_ids = set()\n _, organizations = self.rest_cl.organization_list()\n for org in organizations['organizations']:\n _, accounts = self.rest_cl.cloud_account_list(\n org['id'], type='aws_cnr')\n cloud_accounts_ids.update(\n account['id'] for account in accounts['cloud_accounts'])\n return cloud_accounts_ids\n\n @staticmethod\n def _is_flavor_usage(expense):\n usage_type = expense.get('lineItem/UsageType', '')\n service_code = expense.get('product/servicecode', '')\n description = expense.get('lineItem/LineItemDescription', '')\n return ((service_code == 'AmazonECS' and 'Fargate' in usage_type) or\n (service_code == 'AmazonSageMaker' and 'ml.' in description) or\n (service_code == 'AWSLambda' and'Lambda-GB-Second' in usage_type) or\n ('BoxUsage' in usage_type))\n\n def upgrade(self):\n cloud_accounts_ids = self.get_cloud_accounts_ids()\n for i, cloud_account_id in enumerate(cloud_accounts_ids):\n LOG.info('Started processing for cloud account: %s (%s/%s)' % (\n cloud_account_id, i+1, len(cloud_accounts_ids)))\n is_processed = self.mongo_temp_table.find({\n 'cloud_account_id': cloud_account_id})\n if is_processed:\n LOG.info('Cloud account %s already processed' % cloud_account_id)\n continue\n update_bulk = []\n raw_expenses = self.mongo_raw.find({\n 'cloud_account_id': cloud_account_id,\n 'box_usage': {'$exists': False},\n 'product/servicecode': {\n '$in': ['AmazonECS', 'AmazonSageMaker', 'AWSLambda']}\n })\n for expense in raw_expenses:\n if self._is_flavor_usage(expense):\n expense['box_usage'] = True\n update_bulk.append(UpdateOne({'_id': expense['_id']},\n {'$set': expense}))\n if len(update_bulk) >= CHUNK_SIZE:\n self.mongo_raw.bulk_write(update_bulk)\n update_bulk = []\n if update_bulk:\n self.mongo_raw.bulk_write(update_bulk)\n self.mongo_temp_table.insert_one(\n {'cloud_account_id': cloud_account_id})\n try:\n self.mongo_temp_table.drop()\n except Exception as exc:\n LOG.warning('Failed to drop temp table: %s' % str(exc))\n\n def downgrade(self):\n cloud_accounts_ids = self.get_cloud_accounts_ids()\n for i, cloud_account_id in enumerate(cloud_accounts_ids):\n LOG.info('Started processing for cloud account: %s (%s/%s)' % (\n cloud_account_id, i+1, len(cloud_accounts_ids)))\n is_processed = self.mongo_temp_table.find({\n 'cloud_account_id': cloud_account_id})\n if is_processed:\n LOG.info('Cloud account %s already processed' % cloud_account_id)\n continue\n update_bulk = []\n raw_expenses = self.mongo_raw.find({\n 'cloud_account_id': cloud_account_id,\n 'box_usage': True,\n 'product/servicecode': {\n '$in': ['AmazonECS', 'AmazonSageMaker', 'AWSLambda']}\n })\n for expense in raw_expenses:\n expense.pop('box_usage', None)\n update_bulk.append(UpdateOne({'_id': expense['_id']},\n {'$set': expense}))\n if len(update_bulk) >= CHUNK_SIZE:\n self.mongo_raw.bulk_write(update_bulk)\n update_bulk = []\n if update_bulk:\n self.mongo_raw.bulk_write(update_bulk)\n self.mongo_temp_table.insert_one(\n {'cloud_account_id': cloud_account_id})\n try:\n self.mongo_temp_table.drop()\n except Exception as exc:\n LOG.warning('Failed to drop temp table: %s' % str(exc))\n","repo_name":"hystax/optscale","sub_path":"diworker/diworker/migrations/2023040614450000_set_box_usage_flag.py","file_name":"2023040614450000_set_box_usage_flag.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","stars":646,"dataset":"github-code","pt":"22"} +{"seq_id":"30051257304","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : LeetCode5508.py\n# Author: WangYu\n# Date : 2020-09-06\n\nfrom typing import List\nfrom _collections import defaultdict\nclass Solution:\n def numTriplets(self, nums1: List[int], nums2: List[int]) -> int:\n sum = 0\n dict1 = defaultdict(int)\n dict2 = defaultdict(int)\n for i in num1:\n dict1[i*i] = dict1[i*i] + 1\n for i in num2:\n dict2[i*i] = dict2[i*i] + 1\n for i in range(len(num1)-1):\n for j in range(i+1,len(num1)):\n sum += dict2[num1[i]*num1[j]]\n for i in range(len(num2)-1):\n for j in range(i+1,len(num2)):\n sum += dict1[num2[i]*num2[j]]\n return sum\n\nnum1, num2 = [1,1],[1,1,1]\ns = Solution()\nprint(s.numTriplets(num1,num2))\n","repo_name":"wangyu33/LeetCode","sub_path":"LeetCode5508.py","file_name":"LeetCode5508.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1675808450","text":"import os\nimport unittest\nimport Browser\nimport time\n\nclass ClikAndHoldOnElement(unittest.TestCase):\n def setUp(self):\n self.driver = Browser.Chrome()\n\n def testClikAndHoldOnElement(self):\n driver = self.driver\n WebSite = os.path.join(os.path.abspath('..'),'Html','10.33.html')\n driver.get(WebSite)\n time.sleep(2) #等待2秒\n\n from selenium.webdriver import ActionChains #引入ActionChains包\n Div = driver.find_element_by_id('div1')\n\n for i in range(2):\n ActionChains(driver).click_and_hold(Div).perform() #使用click_and_hold()方法将模拟鼠标左键停留在元素上\n time.sleep(2)\n\n ActionChains(driver).release(Div).perform() #使用release()方法,去释放按下的鼠标左键\n time.sleep(2)\n \n \n def tearDown(self):\n self.driver.quit()\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Eason0731/MyPython","sub_path":"src/MyItems/Selenium/Selenium WebDriver 3.0 Tutorial/Chapter 10/10.33.py","file_name":"10.33.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"10188049828","text":"# -*- coding: utf-8 -*-\n\nfrom textwrap import dedent\nfrom juicer.operation import Operation\n\n\nclass OptimizatedOperation(Operation):\n \"\"\"OptimizatedOperation.\n\n Merge many tasks in only one\n \"\"\"\n\n def __init__(self, parameters, named_inputs, named_outputs):\n Operation.__init__(self, parameters, named_inputs, named_outputs)\n\n if not 'code_0' in self.parameters and 'code_1' in self.parameters:\n raise ValueError(\n _(\"Parameter {} and {} must be informed for task {}\")\n .format('code_0', 'code_1', self.__class__))\n\n self.order = self.parameters['task']['order']\n self.code_0 = self.parameters.get('code_0', '')\n self.code_1 = self.parameters.get('code_1', '')\n self.has_code = True\n self.has_code_otm = True\n self.number_tasks = self.parameters.get('number_tasks', 0)\n self.fist_id = self.parameters.get('fist_id', 0)\n\n if 'output data' in self.named_outputs:\n self.output = self.named_outputs['output data']\n elif 'output projected data' in self.named_outputs:\n self.output = self.named_outputs['output projected data']\n else:\n self.output = 'output_data_{}'.format(self.order)\n\n self.has_import = \"from pycompss.api.task import task\\n\" \\\n \"from pycompss.api.parameter import *\\n\"\n\n def generate_code(self):\n \"\"\"Generate code.\"\"\"\n self.input_data = self.named_inputs.get('input data', '')\n code = \"\"\n if self.parameters['first_slug'] == 'data-reader':\n self.input_data = 'hdfs_blocks'\n if self.parameters['first_slug'] == 'read-shapefile':\n self.input_data = 'shapefile_data'\n\n if self.parameters['first_slug'] == 'apply-model':\n\n code += \"\"\"\n conf = []\n {code}\n {out} = [[] for _ in range(numFrag)]\n for f in range(numFrag):\n {out}[f] = otm_call_{order}({model}, {input}[f], conf, f)\n \"\"\".format(code=self.code_0, out=self.output,\n order=self.order,\n model=self.named_inputs['model'],\n input=self.input_data)\n else:\n code += \"\"\"\n conf = []\n {code}\n {output} = [[] for _ in range(numFrag)]\n for f in range(numFrag):\n {output}[f] = otm_call_{order}({input}[f], conf, f)\n \"\"\".format(code=self.code_0, output=self.output,\n order=self.order,\n input=self.input_data)\n\n return dedent(code)\n\n def generate_optimization_code(self):\n \"\"\"Generate code.\"\"\"\n code = \"\"\n for idx, c in enumerate(self.code_1):\n c = c.replace('conf_X', 'conf_{}'.format(idx))\n code += \"\"\"conf_{idx} = settings[{idx}]{code}\"\"\"\\\n .format(idx=idx, code=c)\n\n return dedent(code)\n\n","repo_name":"eubr-bigsea/juicer","sub_path":"juicer/compss/optimizated_operation.py","file_name":"optimizated_operation.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"43049189440","text":"import unittest\n\n\nclass TestUCSValidateMethod(unittest.TestCase):\n def test_001_fabricvlan_id_100(self):\n # type: uint\n # range: [\"1-3967\", \"1-4029\", \"4048-4091\", \"4048-4093\"]\n from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan\n\n vlan = FabricVlan(parent_mo_or_dn=\"parent_dn\", name=\"my_vlan\")\n vlan.id = 100\n\n def test_002_fabricvlan_id_4000(self):\n # type: uint\n # range: [\"1-3967\", \"1-4029\", \"4048-4091\", \"4048-4093\"]\n from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan\n\n vlan = FabricVlan(parent_mo_or_dn=\"parent_dn\", name=\"my_vlan\")\n vlan.id = 4000\n\n def test_003_fabricvlan_id_5000(self):\n # type: uint\n # range: [\"1-3967\", \"1-4029\", \"4048-4091\", \"4048-4093\"]\n from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan\n\n with self.assertRaises(Exception):\n vlan = FabricVlan(parent_mo_or_dn=\"parent_dn\", name=\"my_vlan\")\n vlan.id = 5000\n\n def test_004_equipmentPOST_globalid(self):\n import ucsmsdk.ucsxmlcodec as xc\n\n xml_str = '''\n \n '''\n\n xc.from_xml_str(xml_str)\n","repo_name":"CiscoUcs/ucsmsdk","sub_path":"tests/common/test_ucsvalidatemethod.py","file_name":"test_ucsvalidatemethod.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"22"} +{"seq_id":"43043204112","text":"import logging\n\nimport numpy as np\nimport numpy.linalg as npl\nfrom sklearn.linear_model import LinearRegression\n\nlogger = logging.getLogger(__name__)\n\n\ndef fit_least_squares_estimator(x_arr: np.ndarray, y_vec: np.ndarray, lamb: float = 0.0) -> np.ndarray:\n \"\"\"\n Fit least squares estimator\n :param x_arr: The training set features matrix. Each row represents an example.\n :param y_vec: the labels vector.\n :param lamb: regularization term.\n :return: the fitted parameters. A column vector\n \"\"\"\n n, m = x_arr.shape\n phi_t_phi_plus_lamb = x_arr.T @ x_arr + lamb * np.eye(m)\n\n # If invertible, regular least squares\n if npl.cond(phi_t_phi_plus_lamb) < 1 / np.finfo('float').eps:\n inv = npl.inv(phi_t_phi_plus_lamb)\n theta = inv @ x_arr.T @ y_vec\n else: # minimum norm\n # inv = npl.pinv(x_arr @ x_arr.T)\n # theta = x_arr.T @ inv @ y_vec\n reg = LinearRegression(fit_intercept=False).fit(\n x_arr, y_vec) # using scipy is more stable\n theta = reg.coef_\n\n theta = np.expand_dims(theta, 1)\n return theta\n\n\ndef calc_theta_norm(theta: np.ndarray) -> float:\n return npl.norm(theta) ** 2\n\n\ndef calc_square_error(x_data: np.ndarray, y_labels: np.ndarray, theta: np.ndarray) -> np.ndarray:\n y_hat = x_data @ theta\n y_hat = y_hat.squeeze()\n return np.squeeze((y_hat - y_labels) ** 2)\n\n\ndef calc_logloss(x_arr: np.ndarray, y_gt: np.ndarray, theta: np.ndarray, var: float) -> np.ndarray:\n y_hat = x_arr @ theta\n y_hat = y_hat.squeeze()\n\n logloss = 0.5 * np.log(2 * np.pi * var) + (y_gt - y_hat) ** 2 / (2 * var)\n return logloss.squeeze()\n\n\ndef calc_best_var(phi_arr: np.ndarray, y: np.ndarray, theta: np.ndarray) -> float:\n y_hat = phi_arr @ theta\n epsilon_square = (y_hat.squeeze() - y.squeeze()) ** 2\n var = np.mean(epsilon_square)\n return float(var)\n","repo_name":"kobybibas/pnml_linear_regression_simulation","sub_path":"src/learner_utils/learner_helpers.py","file_name":"learner_helpers.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"21135602181","text":"'''\nHuman Activity Recognizer\nCreated by Austin Koenig\nVersion: 0.0.1\n\nThe object of this project is to create a classifier which can differentiate between 14 different human activities from data collected via a triaxial accelerometer mounted to the subjects' wrists.\n\nAccelerometer Specifications\n----------------------------\n Type : tri-axial accelerometer\n Measurement range : [- 1.5g; + 1.5g]\n Sensitivity : 6 bits per axis\n Output data rate : 32 Hz\n\nData Source: https://archive.ics.uci.edu/ml/datasets/Dataset+for+ADL+Recognition+with+Wrist-worn+Accelerometer\n\nNote that files with a `.jl` extension are serialized joblib files.\n\nSee `README.md` for more details.\n'''\n# usual imports\nimport joblib\nimport json\nimport os\nimport shutil\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(12) # seed for reproducibility\n\n# keras imports\nimport keras\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\nfrom keras import regularizers\nfrom keras import callbacks\n\n# sklearn imports\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nfrom sklearn import utils\n\n\nclass TimingCallback(callbacks.Callback):\n '''\n This class comes from solution at: https://github.com/keras-team/keras/issues/5105\n '''\n def __init__(self):\n self.logs = []\n \n def on_epoch_begin(self, epoch, logs = {}):\n self.starttime = time.time()\n \n def on_epoch_end(self, epoch, logs = {}):\n self.logs.append(time.time() - self.starttime)\n\n\nclass ActivityRecognizer(object):\n def __init__(self):\n pass\n \n def preprocess_data(self, input_filepath = './data/', output_filepath = './output/', split = 0.85, sequence_length = 512):\n '''\n Extract and preprocess data from database or other type of storage system and restore for later use.\n '''\n self._reset_log() # reset the log file\n\n metadata = {}\n scalers = {}\n data = {}\n\n metadata['input_filepath'] = input_filepath\n metadata['output_filepath'] = output_filepath\n metadata['train_test_split'] = split\n metadata['sequence_length'] = sequence_length\n metadata['labels'] = os.listdir(metadata['input_filepath'])\n metadata['num_categories'] = len(metadata['labels'])\n metadata['class_samples'] = {}\n metadata['total_samples'] = 0\n metadata['class_frequencies'] = {}\n metadata['shapes'] = None\n\n scalers['x'] = preprocessing.MinMaxScaler(feature_range = (0, 1), copy = False)\n scalers['y'] = preprocessing.MinMaxScaler(feature_range = (0, 1), copy = False)\n scalers['z'] = preprocessing.MinMaxScaler(feature_range = (0, 1), copy = False)\n\n data['all'] = {\n 'x': np.empty((0, metadata['sequence_length'], 3)),\n 'y': np.array([])\n }\n\n data['train'] = {\n 'x': np.empty((0, metadata['sequence_length'], 3)),\n 'y': np.array([])\n }\n\n data['test'] = {\n 'x': np.empty((0, metadata['sequence_length'], 3)),\n 'y': np.array([])\n }\n\n for i in range(len(metadata['labels'])):\n root = os.path.join(metadata['input_filepath'], metadata['labels'][i])\n files = os.listdir(root) # go through data subdirectories\n metadata['class_samples'][metadata['labels'][i]] = 0\n\n for f in files:\n x = np.genfromtxt(os.path.join(root, f), delimiter = ' ') # get sample from text file\n x = keras.preprocessing.sequence.pad_sequences(x.T, maxlen = metadata['sequence_length'], value = 0)\n x = np.reshape(x, (1, x.shape[1], x.shape[0]))\n data['all']['x'] = np.vstack((data['all']['x'], x))\n data['all']['y'] = np.append(data['all']['y'], i)\n metadata['class_samples'][metadata['labels'][i]] += 1\n metadata['total_samples'] += 1\n \n for i in range(len(metadata['labels'])):\n metadata['class_frequencies'][metadata['labels'][i]] = round(metadata['class_samples'][metadata['labels'][i]] / metadata['total_samples'], 6)\n \n data['all']['y'] = keras.utils.to_categorical(data['all']['y'], metadata['num_categories']) # one hot encode outcome\n\n # shuffle data\n new_indices = np.arange(data['all']['x'].shape[0])\n np.random.shuffle(new_indices)\n data['all']['x'] = data['all']['x'][new_indices, :, :]\n data['all']['y'] = data['all']['y'][new_indices, :]\n\n metadata['shapes'] = {\n 'x': data['all']['x'].shape,\n 'y': data['all']['y'].shape\n }\n \n split_index = round(split * data['all']['x'].shape[0])\n\n data['train']['x'] = data['all']['x'][:split_index, :, :]\n data['train']['y'] = data['all']['y'][:split_index, :]\n\n data['test']['x'] = data['all']['x'][split_index:, :, :]\n data['test']['y'] = data['all']['y'][split_index:, :]\n\n print(\"## Preprocessing\\n\")\n\n shapes = {\n 'train_split': split,\n 'test_split': round(1 - split, 4),\n 'shapes': {\n 'x_train': data['train']['x'].shape,\n 'y_train': data['train']['y'].shape,\n 'x_test': data['test']['x'].shape,\n 'y_test': data['test']['y'].shape,\n }\n }\n\n print(\"```\")\n print(json.dumps(shapes, indent = 4))\n print(\"```\")\n\n axis_keys = ['x', 'y', 'z']\n for i in range(len(axis_keys)):\n scalers[axis_keys[i]].fit_transform(data['train']['x'][:, :, i])\n scalers[axis_keys[i]].transform(data['test']['x'][:, :, i])\n \n json.dump(metadata, open('./output/metadata.json', 'w'), indent = 4)\n joblib.dump(scalers, './output/scalers.joblib')\n joblib.dump(data['all'], './output/data.joblib')\n joblib.dump(data['train'], './output/train.joblib')\n joblib.dump(data['test'], './output/test.joblib')\n \n def generate_model(self):\n '''\n Generate model and store for later use.\n '''\n metadata = json.load(open('./output/metadata.json', 'r'))\n model = models.Sequential([\n layers.Conv1D(512, 4, activation = 'relu', input_shape = metadata['shapes']['x'][1:]),\n layers.Conv1D(512, 4, activation = 'relu'),\n layers.Conv1D(512, 4, activation = 'relu'),\n layers.MaxPooling1D(8),\n layers.Dropout(0.5),\n layers.Conv1D(512, 4, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Conv1D(512, 4, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Conv1D(512, 4, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.MaxPooling1D(6),\n layers.Dropout(0.5),\n layers.Conv1D(512, 2, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Conv1D(512, 2, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Conv1D(512, 2, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.MaxPooling1D(4),\n layers.Dropout(0.4),\n layers.Flatten(),\n layers.Dense(1024, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Dense(1024, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Dense(1024, activation = 'relu', kernel_regularizer = regularizers.l2(0.01), bias_regularizer = regularizers.l2(0.01)),\n layers.Dropout(0.4),\n layers.Dense(metadata['num_categories'], activation = 'softmax')\n ])\n model.compile(loss = 'categorical_crossentropy', optimizer = optimizers.SGD(learning_rate = 0.01, nesterov = True), metrics = ['acc'])\n print(\"## Model Hierarchy\")\n print(\"```\")\n model.summary()\n print(\"```\")\n model.save('./output/model.hdf5')\n \n def train_model(self, num_epochs = 256, batch = 4, val_split = 0.2):\n '''\n Train a model from the saved preprocessed data and saved model and restore model for later use.\n '''\n print(\"## Model Training\\n\")\n model = models.load_model('./output/model.hdf5')\n train = joblib.load('./output/train.joblib')\n metadata = json.load(open('./output/metadata.json', 'r'))\n\n metadata['epochs'] = num_epochs\n metadata['batch_size'] = batch\n metadata['train_val_split'] = val_split\n\n savebest = callbacks.ModelCheckpoint('./output/model.hdf5', monitor = 'val_acc', verbose = 1, save_best_only = True, mode = 'max')\n timing = TimingCallback()\n\n print(\"```\")\n history = model.fit(train['x'], train['y'], \n epochs = num_epochs,\n batch_size = batch,\n verbose = 2,\n validation_split = val_split,\n callbacks = [savebest, timing]).history\n print(\"```\")\n\n metadata['train_time'] = f\"{round(sum(timing.logs), 4)} s\"\n metadata['mean_epoch_train_time'] = f\"{round(sum(timing.logs) / metadata['epochs'], 4)} s\"\n\n joblib.dump(history, './output/history.joblib')\n json.dump(metadata, open('./output/metadata.json', 'w'), indent = 4)\n #model.save('./output/model.hdf5')\n \n def evaluate_model(self):\n '''\n Evaluate stored model and generate output files:\n - Log file\n - Confusion Matrix\n - Classification report\n - Accuracy\n - Precision\n - Recall\n - F1\n - Accuracy plot\n - Loss plot\n - Heatmap of confusion matrix\n '''\n model = models.load_model('./output/model.hdf5')\n test = joblib.load('./output/test.joblib')\n history = joblib.load('./output/history.joblib')\n metadata = json.load(open('./output/metadata.json', 'r'))\n\n evaluation = model.evaluate(test['x'], test['y'], verbose = 2)\n\n print(\"## Model Evaluation\\n\")\n\n preds = model.predict(test['x'])\n predy = np.argmax(preds, axis = 1)\n truey = np.argmax(test['y'], axis = 1)\n labs = np.arange(len(metadata['labels']))[utils.multiclass.unique_labels(predy)]\n\n cm = metrics.confusion_matrix(truey, predy, labels = labs)\n cr = metrics.classification_report(truey, predy)\n\n print(\"### Class Dictionary\\n\")\n\n print(\"```\")\n for i in range(metadata['num_categories']):\n print(f\"{i} : {metadata['labels'][i]}\")\n print(\"```\")\n\n results = {\n 'loss': evaluation[0],\n 'accuracy': evaluation[1],\n 'confusion_matrix': cm,\n 'classification_report': cr\n }\n\n print(\"### Results\\n\")\n\n print(\"```\")\n print(f\"Test Loss: {round(results['loss'], 4)}\")\n print(f\"Test Accuracy: {round(results['accuracy'], 4)}\")\n print(f\"\\nConfusion Matrix: \\n{results['confusion_matrix']}\")\n print(f\"\\nClassification Report: \\n{results['classification_report']}\")\n print(f\"\\nTraining Time: {metadata['train_time']} s\")\n print(\"```\")\n\n print('### Metadata\\n')\n print(json.dumps(metadata, indent = 4))\n\n loss_figure = plt.figure(figsize = (20, 15))\n loss_axis = loss_figure.add_subplot(111)\n loss_axis.plot(range(metadata['epochs']), history['loss'], label = 'Train')\n loss_axis.plot(range(metadata['epochs']), history['val_loss'], label = 'Validation')\n loss_axis.plot(np.repeat(results['loss'], metadata['epochs']), label = 'Best Test')\n loss_axis.title.set_text(\"Losses\")\n loss_axis.set_xlabel(\"Epoch\")\n loss_axis.set_ylabel(\"Loss\")\n loss_axis.legend()\n\n acc_figure = plt.figure(figsize = (20, 15))\n acc_axis = acc_figure.add_subplot(111)\n acc_axis.plot(range(metadata['epochs']), history['acc'], label = 'Train')\n acc_axis.plot(range(metadata['epochs']), history['val_acc'], label = 'Validation')\n acc_axis.plot(np.repeat(results['accuracy'], metadata['epochs']), label = 'Best Test')\n acc_axis.title.set_text(\"Losses\")\n acc_axis.set_xlabel(\"Epoch\")\n acc_axis.set_ylabel(\"Loss\")\n acc_axis.legend()\n\n cm_figure = plt.figure(figsize = (20, 15))\n cm_axis = cm_figure.add_subplot(111)\n im = cm_axis.matshow(cm)\n cm_figure.colorbar(im)\n cm_axis.set_xticklabels(labs)\n cm_axis.set_yticklabels(labs)\n cm_axis.set_xlabel(\"Predicted\")\n cm_axis.set_ylabel(\"True\")\n\n joblib.dump(results, './output/results.joblib')\n loss_figure.savefig('./output/loss.pdf')\n acc_figure.savefig('./output/acc.pdf')\n cm_figure.savefig('./output/cm.pdf')\n\n def _reset_log(self):\n self._sanitize_directories()\n sys.stdout = open('./output/log.md', 'w') # log file\n print(\"#
    Activity Recognizer Output Log
    \\n\")\n\n def _sanitize_directories(self):\n if os.path.exists('./output/'): # if the output directory exists\n shutil.rmtree('./output/') # delete it\n os.makedirs('./output/') # make a new output directory","repo_name":"austindkoenig/Human-Activity-Recognition","sub_path":"src/ActivityRecognition.py","file_name":"ActivityRecognition.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"74557784694","text":"class Grade:\n def __init__(self, kr, en, math):\n self.kr = kr\n self.en = en\n self.math = math\n\n def sum(self):\n return self.kr + self.en + self.math\n\n def avg(self):\n return self.sum() / 3\n\n def get_grade(self):\n score = int(self.avg())\n if score >= 90:\n grade = 'A학점'\n elif score >= 80:\n grade = 'B학점'\n elif score >= 70:\n grade = 'C학점'\n elif score >= 60:\n grade = 'D학점'\n elif score >= 50:\n grade = 'E학점'\n else:\n grade = 'F학점'\n\n\n return grade\n\n @staticmethod\n def main():\n g = Grade(int(input('국어점수를 입력하시오:')), int(input('영어점수를 입력하시오:')),int(input('수학점수를 입력하���오:')))\n print(f'총점: {g.sum()}')\n print(f'평균: {g.avg()}')\n print(f'학점: {g.get_grade()}')\n\nGrade.main()\n\n","repo_name":"Hyunwoo29/python-oop","sub_path":"encapsulation/grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37312993005","text":"def create(order):\r\n file = open('./Assignements/sales.txt' , 'w')\r\n file.write(\"Biggies\\nPatia Bhubaneswar- 751023\" + '\\n' + '---------------------------------------'+'\\n\\n')\r\n total = 0\r\n for item, price in order.items():\r\n file.write('- ' + item + ' ' + format(price, '.2f') + '\\n')\r\n total += price\r\n sgst = cgst = (total * 9)/100\r\n total += cgst + sgst\r\n \r\n file. write('\\n S.G.S.T @7.5% = ' + format(sgst,'.2f')+ '\\n')\r\n file. write(' C.G.S.T @8% = ' + format(cgst,'.2f')+ '\\n')\r\n file. write(' Subtotal = ' + format(total,'.2f')+ '\\n\\n\\n')\r\n \r\n file.write('---------------------------------------' + '\\n' + 'Thank you for coming !! ')\r\n file.close()\r\n \r\nmy_dict={'Chicken Burger ': 200 , 'Chessy Fries ': 90 , 'Veggie Rolls ': 65, 'Cappucino ': 85 , 'Grilled Sandwich ': 350 ,'Blue Mohito ': 180}\r\ncreate(my_dict)\r\n","repo_name":"HimaniParida/Python","sub_path":"Assignements/Food_Bills.py","file_name":"Food_Bills.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73274419896","text":"import numpy as np\n\n\ndef forward(A, B, pi, O):\n \"\"\"\n Calculates the probability of an observation sequence O given the model(A, B, pi).\n :param A: state transition probabilities (NxN)\n :param B: observation probabilities (NxM)\n :param pi: initial state probabilities (N)\n :param O: sequence of observations(T) where observations are just indices for the columns of B (0-indexed)\n N is the number of states,\n M is the number of possible observations, and\n T is the sequence length.\n :return: The probability of the observation sequence and the calculated alphas in the Trellis diagram with shape\n (N, T) which should be a numpy array.\n \"\"\"\n\n a = [[None for _ in O] for _ in pi]\n\n for i, pi_item in enumerate(pi):\n a[i][0] = pi_item * B[i][O[0]]\n\n for t, o_item in enumerate(O):\n if t == 0:\n continue\n for j in range(len(pi)):\n sigma = sum(map(lambda x: x[1][j] * a[x[0]][t - 1], enumerate(A)))\n a[j][t] = B[j][o_item] * sigma\n probability = sum(map(lambda x: x[-1], a))\n return probability, np.array(a)\n\n\ndef viterbi(A, B, pi, O):\n \"\"\"\n Calculates the most likely state sequence given model(A, B, pi) and observation sequence.\n :param A: state transition probabilities (NxN)\n :param B: observation probabilities (NxM)\n :param pi: initial state probabilities(N)\n :param O: sequence of observations(T) where observations are just indices for the columns of B (0-indexed)\n N is the number of states,\n M is the number of possible observations, and\n T is the sequence length.\n :return: The most likely state sequence with shape (T,) and the calculated deltas in the Trellis diagram with shape\n (N, T). They should be numpy arrays.\n \"\"\"\n\n d = [[p * B[i][O[0]]] + [0 for _ in range(len(O)-1)] for i, p in enumerate(pi)]\n\n winning_edges = []\n\n for t in range(1, len(O)):\n winning_edges.append([[-1 for _ in A] for _ in B])\n for j, bj in enumerate(B):\n b = bj[O[t]]\n max_val = float('-inf')\n winner = -1\n for i, ai in enumerate(A):\n curr = ai[j] * d[i][t - 1]\n if curr > max_val:\n max_val = curr\n winner = i\n winning_edges[t - 1][j] = winner\n d[j][t] = b * max_val\n\n last_column = list(map(lambda x: x[-1], d))\n max_d = last_column.index(max(last_column))\n\n route = [max_d]\n current = max_d\n walk = len(winning_edges) - 1\n while walk != -1:\n prev = winning_edges[walk][current]\n route.append(prev)\n current = prev\n walk -= 1\n\n return np.array(route[::-1]), np.array(d)\n\n\n\n\n\n","repo_name":"frozsgy/METU","sub_path":"CENG499/HW4/hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"22"} +{"seq_id":"10092573663","text":"from data_structures.binary_tree import BinaryTree\nfrom collections import deque\n\n\ndef breadth_first(tree):\n if tree.root is None:\n return []\n breadth = deque()\n breadth.append(tree.root)\n traversal = []\n\n while len(breadth) > 0:\n front = breadth.popleft()\n traversal.append(front.value)\n\n if front.left is not None:\n breadth.append(front.left)\n\n if front.right is not None:\n breadth.append(front.right)\n\n return traversal\n","repo_name":"LogiDaBear/data-structures-and-algorithms","sub_path":"python/code_challenges/tree_breadth_first.py","file_name":"tree_breadth_first.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23844429314","text":"import json\nimport concurrent.futures\nfrom itertools import chain\nfrom functools import reduce\nfrom marshmallow import Schema, ValidationError\nfrom abc import ABC\nfrom src.lib.jsonapi.client import JSONAPIClient\n\ndef implements(instance, cls):\n return issubclass(type(instance), cls) or type(instance)==cls\n\nclass ListAPI(ABC):\n\n def __init__(self):\n try:\n assert(all(map(lambda args: implements(*args), (\n (self.client, JSONAPIClient), \n (self.query_schema, Schema), \n (self.resource, str),\n )))\n )\n except AssertionError as e:\n raise NotImplementedError()\n\n def _request(self, query_params):\n self._validate_query(query_params)\n return self.client.get(self.resource, query_params)\n\n def _validate_query(self, query_params):\n try:\n self.query_schema.load(query_params)\n except ValidationError as e:\n raise Exception(e)\n\n # Methods\n def all(self, query_params={}):\n queries = [query_params]\n data = []\n while queries:\n next_query = queries=queries.pop()\n response = self._request(next_query)\n data += response[\"data\"]\n if response[\"meta\"][\"has_next\"]:\n queries.append({\n **next_query,\n **{ \"page[number]\": response[\"meta\"][\"page\"]+1}\n })\n return data\n\n def filter_by_id(self, ids=[]):\n queries = self.batch_queries(ids)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n result = executor.map(self._request, queries)\n chained = chain(*[response['data'] for response in result])\n return list(chained)\n\n # Helpers\n @staticmethod\n def batch_queries(ids=[]):\n \"\"\"\n Split list of ids into lists of at most 175 ids \n \"\"\"\n batches = [ids[i:i + 175] for i in range(0, len(ids), 175)]\n return list(map(ListAPI.build_id_query, batches))\n\n @staticmethod\n def build_id_query(ids=[]):\n id_string = ','.join(map(str,ids)) #stringify and join ids\n return {\n \"filter[id][in]\": id_string,\n \"page[size]\": 175\n }","repo_name":"tammysams/prediction-service","sub_path":"src/lib/jsonapi/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26489963795","text":"import os\nimport sys\nfrom urllib.request import urlretrieve\n\nsys.path.append(os.path.abspath(\"exts\"))\nextensions = [\n \"cards\",\n \"myst_parser\",\n \"nbsphinx\",\n \"rawfiles\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinxext.opengraph\",\n \"ablog\",\n \"sphinx_design\",\n \"sphinx_reredirects\",\n \"sphinxcontrib.youtube\",\n]\nmyst_enable_extensions = [\"colon_fence\"]\nmyst_update_mathjax = False\ntemplates_path = [\"_templates\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"sunpy\": (\"https://docs.sunpy.org/en/stable\", None),\n \"astropy\": (\"https://docs.astropy.org/en/stable\", None),\n \"ndcube\": (\"https://docs.sunpy.org/projects/ndcube/en/stable\", None),\n \"drms\": (\"https://docs.sunpy.org/projects/drms/en/stable/\", None),\n \"aiapy\": (\"https://aiapy.readthedocs.io/en/stable/\", None),\n}\nrawfiles = [\"jitsi.html\", \"issues.html\", \"chat.html\", \"community_meeting_agenda.html\"]\nmathjax_path = \"https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js?config=TeX-MML-AM_CHTML\"\nblog_baseurl = \"https://sunpy.org/\"\nblog_feed_fulltext = True\nblog_feed_length = 10\nblog_feed_archives = True\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\nexclude_patterns = [\n \"posts/*/.ipynb_checkpoints/*\",\n \".github/*\",\n \".history/*\",\n \"github_submodule/*\",\n \"LICENSE.md\",\n \"README.md\",\n \"_build/*\",\n \"CITATION.rst\",\n \".tox/*\",\n]\nmaster_doc = \"index\"\nproject = \"SunPy\"\nauthor = \"SunPy Project\"\ncopyright = \"SunPy Project\"\nshow_sphinx = True\nversion = \"\"\nrelease = \"main\"\nlanguage = \"en\"\n\npygments_style = \"sphinx\"\n\n\ndefault_role = \"obj\"\nhtml_theme = \"sunpy\"\nhtml_title = \"sunpy.org\"\nhtml_static_path = [\"_static\"]\nhtml_extra_path = [\"_static/img\"]\nhtml_theme_options = {\"show_prev_next\": False, \"sst_is_root\": True}\n\nhtml_css_files = [\n \"sunpy_org.css\",\n]\n\nblog_sidebars = [\n \"ablog/postcard.html\",\n \"ablog/recentposts.html\",\n \"ablog/tagcloud.html\",\n \"ablog/categories.html\",\n \"ablog/archives.html\",\n]\n\nhtml_sidebars = {\n \"*\": [],\n \"about\": [\"about-sidebar.html\"],\n \"coc\": [\"about-sidebar.html\"],\n \"about/**\": [\"about-sidebar.html\"],\n \"posts/**\": [\"ablog/postcard.html\"],\n \"blog\": blog_sidebars,\n \"blog/**\": blog_sidebars,\n}\n\nredirects = {\n \"project/meetings\": \"about/meetings\",\n \"project/roles\": \"about/roles\",\n \"project\": \"about/project\",\n \"project/affiliated\": \"affiliated\",\n}\n\n# nbsphinx options\nnbsphinx_prolog = r\"\"\"\n{% set docname = env.doc2path(env.docname, base=None) %}\n\n.. only:: html\n\n .. role:: raw-html(raw)\n :format: html\n\n .. nbinfo::\n\n This blog post was written in a `Jupyter notebook`__.\n Click here for an interactive version:\n :raw-html:`\"Binder`\n\n __ https://github.com/sunpy/sunpy.org/blob/{{ env.config.release }}/{{ docname }}\n\"\"\"\n# sphinxext-opengraph\nogp_site_url = \"https://sunpy.org/\"\nogp_image = \"https://raw.githubusercontent.com/sunpy/sunpy-logo/master/generated/sunpy_logo_word.png\"\nogp_description_length = 300\nogp_type = \"website\"\n\nurlretrieve(\n \"https://raw.githubusercontent.com/sunpy/sunpy/main/sunpy/CITATION.rst\",\n filename=\"CITATION.rst\",\n)\n\n# These links have anchors that linkcheck does not like\nlinkcheck_ignore = [\n \"https://app.element.io/#/room/#sunpy:openastronomy.org\",\n]\nlinkcheck_anchors_ignore = [\n \"/projects\\?project=develop_sunkit-image\",\n \"the-executive\",\n \"acceptance-process-for-affiliated-packages\",\n \"detailed-description\",\n \"!forum/sunpy\",\n]\n","repo_name":"sunpy/sunpy.org","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"22"} +{"seq_id":"6580259576","text":"from attr import validate\nfrom django.test import Client, TestCase\nfrom game.models import Game, Player\nfrom game.views import *\nfrom welcome.models import User\n\n\ndef set_login(client):\n session = client.session\n session['login'] = True\n session.save()\n\n\nclass GameTestCase(TestCase):\n\n def setUp(self):\n game = Game.objects.create(lobby_code=1111, player_num=1)\n user = User.objects.create(name=\"Rory\", email=\"rc680@exeter.ac.uk\", password=\"password\")\n Player.objects.create(username=\"Rory\", game=game, user=user, seeker=False, ready=False)\n\n def test_game_creation(self):\n game = Game.objects.filter(lobby_code=1111)\n self.assertEqual(len(game), 1)\n\n def test_game_fields(self):\n game = Game.objects.get(lobby_code=1111)\n self.assertEqual(game.lobby_code, 1111)\n self.assertEqual(game.player_num, 1)\n self.assertEqual(game.players_finished, 0)\n self.assertEqual(game.game_start_time, 0)\n self.assertEqual(game.running, False)\n self.assertEqual(game.winner, 'N')\n self.assertEqual(game.hiding_time, 60)\n self.assertEqual(game.seeking_time, 600)\n self.assertEqual(game.seeker_num, 1)\n self.assertEqual(game.radius, 100)\n self.assertEqual(game.lobby_longitude, 0.0)\n self.assertEqual(game.lobby_latitude, 0.0)\n\n def test_game_update(self):\n game = Game.objects.get(lobby_code=1111)\n game.hiding_time = 70\n game.save()\n new_game = Game.objects.get(lobby_code=1111)\n self.assertEqual(new_game.hiding_time, 70)\n\n def test_all_ready(self):\n game = Game.objects.get(lobby_code=1111)\n self.assertEqual(game.all_ready(), False)\n player = Player.objects.get(username=\"Rory\")\n player.ready = True\n player.save()\n self.assertEqual(game.all_ready(), True)\n\n def test_game_delete(self):\n game = Game.objects.get(lobby_code=1111)\n game.delete()\n self.assertEqual(len(Game.objects.all()), 0)\n\n\nclass PlayerTestCase(TestCase):\n\n def setUp(self):\n game = Game.objects.create(lobby_code=1111, player_num=1)\n user = User.objects.create(name=\"Rory\", email=\"rc680@exeter.ac.uk\", password=\"password\")\n Player.objects.create(username=\"Rory\", game=game, user=user, seeker=False, ready=False)\n\n def test_player_creation(self):\n player = Player.objects.filter(username=\"Rory\")\n self.assertEqual(len(player), 1)\n\n def test_player_fields(self):\n player = Player.objects.get(username=\"Rory\")\n game = Game.objects.get(lobby_code=1111)\n user = User.objects.get(name=\"Rory\")\n self.assertEqual(player.username, \"Rory\")\n self.assertEqual(player.game, game)\n self.assertEqual(player.user, user)\n self.assertEqual(player.seeker, False)\n self.assertEqual(player.ready, False)\n self.assertEqual(player.hider_code, None)\n self.assertEqual(player.found, False)\n \n def test_player_update(self):\n player = Player.objects.get(username=\"Rory\")\n player.ready = True\n player.save()\n new_player = Player.objects.get(username=\"Rory\")\n self.assertEqual(new_player.ready, True)\n\n def test_player_delete(self):\n player = Player.objects.get(username=\"Rory\")\n player.delete()\n self.assertEqual(len(Player.objects.all()), 0)\n\n\nclass CreateTestCase(TestCase):\n\n def setUp(self):\n self.client = Client()\n set_login(self.client)\n\n def test_create_response_code(self):\n response = self.client.get('/game/create/')\n self.assertEqual(response.status_code, 200)\n\n def test_create_redirect(self):\n self.client.session.flush()\n response = self.client.get('/game/create/')\n self.assertEqual(response.status_code, 302)\n set_login(self.client)\n\n def test_create_context(self):\n response = self.client.get('/game/create/')\n self.assertEqual('lobby_code' in response.context, True)\n\n\nclass JoinTestCase(TestCase):\n\n def setUp(self):\n self.client = Client()\n set_login(self.client)\n\n def test_join_response_code(self):\n response = self.client.get('/game/join/')\n self.assertEqual(response.status_code, 200)\n\n def test_join_redirect(self):\n self.client.session.flush()\n response = self.client.get('/game/join/')\n self.assertEqual(response.status_code, 302)\n\n\nclass CodeTestCase(TestCase):\n\n def test_code_is_number(self):\n code = generate_code()\n self.assertEqual(int(code), code)\n\n def test_code_range(self):\n code = generate_code()\n self.assertEqual(1000 <= code < 10000, True)\n\n\nclass InputsTestCase(TestCase):\n\n def test_null_inputs(self):\n post = {\n 'hiding_time': '',\n 'seeking_time': '',\n 'seeker_num': '',\n 'radius': ''\n }\n\n self.assertEqual(validate_inputs(post)[0], True)\n\n def test_letter_inputs(self):\n post = {\n 'hiding_time': 'a',\n 'seeking_time': 'b',\n 'seeker_num': 'c',\n 'radius': 'd'\n }\n\n results = validate_inputs(post)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Settings must be digits!\")\n\n def test_bad_hiding_time(self):\n post = {\n 'hiding_time': '1',\n 'seeking_time': '',\n 'seeker_num': '',\n 'radius': ''\n }\n \n results = validate_inputs(post)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Hiding time must be between 20 and 120 seconds!\")\n\n def test_bad_seeking_time(self):\n post = {\n 'hiding_time': '',\n 'seeking_time': '1',\n 'seeker_num': '',\n 'radius': ''\n }\n \n results = validate_inputs(post)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Seeking time must be between 120 and 1200 seconds!\")\n\n def test_bad_seeker_num(self):\n post = {\n 'hiding_time': '',\n 'seeking_time': '',\n 'seeker_num': '0',\n 'radius': ''\n }\n \n results = validate_inputs(post)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Seekers must be between 1 and 8!\")\n\n def test_bad_radius(self):\n post = {\n 'hiding_time': '',\n 'seeking_time': '',\n 'seeker_num': '',\n 'radius': '1'\n }\n \n results = validate_inputs(post)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Radius must be between 50 and 1000 meters!\")\n\n def test_valid(self):\n post = {\n 'hiding_time': '100',\n 'seeking_time': '600',\n 'seeker_num': '1',\n 'radius': '100'\n }\n \n results = validate_inputs(post)\n\n self.assertEqual(results[0], True)\n\n\nclass CreateGameTestCase(TestCase):\n\n def test_null_inputs(self):\n post = {\n 'hiding_time': '',\n 'seeking_time': '',\n 'seeker_num': '',\n 'radius': '',\n 'lobby_latitude': 0,\n 'lobby_longitude': 0\n }\n\n results = create_game(post, 1111)\n\n self.assertEqual(results[0], True)\n self.assertEqual(len(Game.objects.all()), 1)\n\n def test_bad_inputs(self):\n post = {\n 'hiding_time': 'a',\n 'seeking_time': 'b',\n 'seeker_num': 'c',\n 'radius': 'd'\n }\n\n results = create_game(post, 1111)\n\n self.assertEqual(results[0], False)\n self.assertEqual(results[1], \"Settings must be digits!\")\n self.assertEqual(len(Game.objects.all()), 0)\n\n def test_good_inputs(self):\n post = {\n 'hiding_time': '100',\n 'seeking_time': '600',\n 'seeker_num': '1',\n 'radius': '100',\n 'lobby_latitude': 0,\n 'lobby_longitude': 0\n }\n\n results = create_game(post, 1111)\n\n self.assertEqual(results[0], True)\n self.assertEqual(len(Game.objects.all()), 1)\n\n\nclass CreatePlayerTestCase(TestCase):\n\n def setUp(self):\n game = Game.objects.create(lobby_code=1111, player_num=1)\n user = User.objects.create(name=\"Rory\", email=\"rc680@exeter.ac.uk\", password=\"password\")\n User.objects.create(name=\"Rory2\", email=\"rc6802@exeter.ac.uk\", password=\"password\")\n Player.objects.create(username=\"Rory\", game=game, user=user, seeker=False, ready=False)\n\n def test_duplicate_username(self):\n game = Game.objects.get(lobby_code=1111)\n result = create_player(game, \"Rory\", \"rc6802@exeter.ac.uk\")\n self.assertEqual(result, False)\n self.assertEqual(len(Player.objects.all()), 1)\n\n def test_valid(self):\n game = Game.objects.get(lobby_code=1111)\n result = create_player(game, \"Rory2\", \"rc6802@exeter.ac.uk\")\n self.assertEqual(result, True)\n self.assertEqual(len(Player.objects.all()), 2)\n\n\nclass CheckRejoinTestCase(TestCase):\n\n def setUp(self):\n game = Game.objects.create(lobby_code=1111, player_num=1)\n user = User.objects.create(name=\"Rory\", email=\"rc680@exeter.ac.uk\", password=\"password\")\n Player.objects.create(username=\"Rory\", game=game, user=user, seeker=False, ready=False)\n\n def check_valid(self):\n game = Game.objects.get(lobby_code=1111)\n result = check_rejoin(game, \"rc680@exeter.ac.uk\")\n self.assertEqual(result, True)\n\n def check_invalid(self):\n game = Game.objects.get(lobby_code=1111)\n result = check_rejoin(game, \"test\")\n self.assertEqual(result, False)\n","repo_name":"HaydenGillyon/GroupProject2022","sub_path":"campus_game_project/game/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"74733453494","text":"#! python3\n# mclip.py - A multi-clipboard program \n\ntext = {\n \"agree\": \"yes, I agree, that sounds great to me!\",\n \"busy\": \"sorry but can we do this another time?\",\n \"upsell\": \"would you be willing to make this charitable donation?\"\n}\n\nimport sys\nimport pyperclip\n\nif len(sys.argv) < 2:\n print(\"usage: python mclip.py [keyphrase] - copy phrase text\") \n sys.exit()\n\nkeyphrase = sys.argv[1]\n\nif keyphrase in text:\n pyperclip.copy(text[keyphrase])\n print(\"text for\", keyphrase, \"copied to clipboard\")\nelse:\n print(\"there is no text for\", keyphrase)\n","repo_name":"BraydenBasinger/python_practice","sub_path":"automate_the_boring_stuff/mclip.py","file_name":"mclip.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10593933171","text":"#from pyexpat.errors import messages\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nimport numpy as np\nimport random\nimport imageio\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.morphology import convex_hull_image, erosion\nfrom skimage.morphology import square\nimport skimage\nimport math\nimport os\nimport random\nfrom unidecode import unidecode\nimport sys\n\n\ndef getTerminationBifurcation(img, mask):\n img = img == 255\n (rows, cols) = img.shape\n minutiaeTerm = np.zeros(img.shape)\n minutiaeBif = np.zeros(img.shape)\n \n for i in range(1,rows-1):\n for j in range(1,cols-1):\n if(img[i][j] == 1):\n block = img[i-1:i+2,j-1:j+2]\n block_val = np.sum(block)\n if(block_val == 2):\n minutiaeTerm[i,j] = 1\n elif(block_val == 4):\n minutiaeBif[i,j] = 1\n \n mask = convex_hull_image(mask>0)\n mask = erosion(mask, square(5)) \n minutiaeTerm = np.uint8(mask)*minutiaeTerm\n return(minutiaeTerm, minutiaeBif)\n\n\ndef genID():\n characters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n ID = random.choice(characters)\n ID += random.choice(characters)\n ID += str(random.randint(100,999))\n ID += str(random.randint(10,99))\n return ID\n\nclass App(QWidget):\n def __init__(self):\n super().__init__()\n self.title = 'App'\n \n def initUIOpen(self):\n self.setWindowTitle(self.title)\n filename = self.openFileNameDialog()\n #self.show()\n return filename\n\n def initUISave(self):\n self.setWindowTitle(self.title)\n filename = self.saveFileDialog()\n return filename\n \n def openFileNameDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self,\"OpenFile\", \"\",\"All Files (*);;Text Files (*.txt)\", options=options)\n return fileName\n \n def saveFileDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(self,\"QFileDialog.getSaveFileName()\",\"\",\"All Files (*);;Text Files (*.txt)\", options=options)\n return fileName\n \n\n\n\nclass MinutiaeFeature(object):\n def __init__(self, locX, locY, Orientation, Type):\n self.locX = locX\n self.locY = locY\n self.Orientation = Orientation\n self.Type = Type\n\ndef computeAngle(block, minutiaeType):\n angle = 0\n (blkRows, blkCols) = np.shape(block)\n CenterX, CenterY = (blkRows-1)/2, (blkCols-1)/2\n if(minutiaeType.lower() == 'termination'):\n sumVal = 0\n for i in range(blkRows):\n for j in range(blkCols):\n if((i == 0 or i == blkRows-1 or j == 0 or j == blkCols-1) and block[i][j] != 0):\n angle = -math.degrees(math.atan2(i-CenterY, j-CenterX))\n sumVal += 1\n if(sumVal > 1):\n angle = float('nan')\n return(angle)\n elif(minutiaeType.lower() == 'bifurcation'):\n (blkRows, blkCols) = np.shape(block)\n CenterX, CenterY = (blkRows - 1) / 2, (blkCols - 1) / 2\n angle = []\n sumVal = 0\n for i in range(blkRows):\n for j in range(blkCols):\n if ((i == 0 or i == blkRows - 1 or j == 0 or j == blkCols - 1) and block[i][j] != 0):\n angle.append(-math.degrees(math.atan2(i - CenterY, j - CenterX)))\n sumVal += 1\n if(sumVal != 3):\n angle = float('nan')\n return(angle)\n \n\n\ndef extractMinutiaeFeatures(skel, minutiaeTerm, minutiaeBif):\n FeaturesTerm = []\n\n minutiaeTerm = skimage.measure.label(minutiaeTerm, connectivity=2)\n RP = skimage.measure.regionprops(minutiaeTerm)\n \n WindowSize = 2 \n FeaturesTerm = []\n for i in RP:\n (row, col) = np.int16(np.round(i['Centroid']))\n block = skel[row-WindowSize:row+WindowSize+1, col-WindowSize:col+WindowSize+1]\n angle = computeAngle(block, 'Termination')\n FeaturesTerm.append(MinutiaeFeature(row, col, angle, 'Termination'))\n\n FeaturesBif = []\n minutiaeBif = skimage.measure.label(minutiaeBif, connectivity=2)\n RP = skimage.measure.regionprops(minutiaeBif)\n WindowSize = 1 \n for i in RP:\n (row, col) = np.int16(np.round(i['Centroid']))\n block = skel[row-WindowSize:row+WindowSize+1, col-WindowSize:col+WindowSize+1]\n angle = computeAngle(block, 'Bifurcation')\n FeaturesBif.append(MinutiaeFeature(row, col, angle, 'Bifurcation'))\n return(FeaturesTerm, FeaturesBif)\n\n\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n\n self.PATH = os.getcwd()\n self.PATH +='\\data'\n if not os.path.exists(self.PATH):\n os.mkdir(self.PATH)\n\n\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1299, 855)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(-10, 0, 1271, 821))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.tabWidget.setFont(font)\n self.tabWidget.setObjectName(\"tabWidget\")\n self.tabKiemTraVanTay = QtWidgets.QWidget()\n self.tabKiemTraVanTay.setObjectName(\"tabKiemTraVanTay\")\n self.txtID = QtWidgets.QPlainTextEdit(self.tabKiemTraVanTay)\n self.txtID.setGeometry(QtCore.QRect(580, 90, 451, 79))\n self.txtID.setObjectName(\"txtID\")\n self.label = QtWidgets.QLabel(self.tabKiemTraVanTay)\n self.label.setGeometry(QtCore.QRect(160, 90, 221, 71))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.tabKiemTraVanTay)\n self.label_2.setGeometry(QtCore.QRect(160, 230, 291, 71))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.tabKiemTraVanTay)\n self.label_3.setGeometry(QtCore.QRect(160, 410, 261, 71))\n self.label_3.setObjectName(\"label_3\")\n self.txtName = QtWidgets.QPlainTextEdit(self.tabKiemTraVanTay)\n self.txtName.setGeometry(QtCore.QRect(580, 220, 451, 91))\n self.txtName.setObjectName(\"txtName\")\n self.txtLinkFinger = QtWidgets.QPlainTextEdit(self.tabKiemTraVanTay)\n self.txtLinkFinger.setGeometry(QtCore.QRect(580, 400, 451, 101))\n self.txtLinkFinger.setObjectName(\"txtLinkFinger\")\n self.btnLinkAnh = QtWidgets.QPushButton(self.tabKiemTraVanTay)\n self.btnLinkAnh.setGeometry(QtCore.QRect(1100, 420, 151, 81))\n self.btnLinkAnh.setObjectName(\"btnLinkAnh\")\n self.btnKiemTra = QtWidgets.QPushButton(self.tabKiemTraVanTay)\n self.btnKiemTra.setGeometry(QtCore.QRect(860, 580, 161, 91))\n self.btnKiemTra.setObjectName(\"btnKiemTra\")\n self.tabWidget.addTab(self.tabKiemTraVanTay, \"\")\n self.btnClear = QtWidgets.QPushButton(self.tabKiemTraVanTay)\n self.btnClear.setGeometry(QtCore.QRect(360, 580, 161, 91))\n self.btnClear.setObjectName(\"btnClear\")\n\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n self.label_5 = QtWidgets.QLabel(self.tab_2)\n self.label_5.setGeometry(QtCore.QRect(110, 30, 301, 91))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(110, 220, 201, 61))\n self.label_6.setObjectName(\"label_6\")\n self.txtAddLinkFinger = QtWidgets.QPlainTextEdit(self.tab_2)\n self.txtAddLinkFinger.setGeometry(QtCore.QRect(500, 210, 401, 101))\n self.txtAddLinkFinger.setObjectName(\"txtAddLinkFinger\")\n self.txtAddName = QtWidgets.QPlainTextEdit(self.tab_2)\n self.txtAddName.setGeometry(QtCore.QRect(500, 30, 401, 101))\n self.txtAddName.setObjectName(\"txtAddName\")\n self.btnAddLinkFinger = QtWidgets.QPushButton(self.tab_2)\n self.btnAddLinkFinger.setGeometry(QtCore.QRect(950, 210, 141, 81))\n self.btnAddLinkFinger.setObjectName(\"btnAddLinkFinger\")\n self.btnAddNew = QtWidgets.QPushButton(self.tab_2)\n self.btnAddNew.setGeometry(QtCore.QRect(730, 360, 171, 71))\n self.btnAddNew.setObjectName(\"btnAddNew\")\n self.txtInfor = QtWidgets.QTextBrowser(self.tab_2)\n self.txtInfor.setGeometry(QtCore.QRect(500, 460, 621, 261))\n self.txtInfor.setObjectName(\"txtInfor\")\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(140, 540, 201, 61))\n self.label_7.setObjectName(\"label_7\")\n self.btnAddClear = QtWidgets.QPushButton(self.tab_2)\n self.btnAddClear.setGeometry(QtCore.QRect(1040, 30, 151, 71))\n self.btnAddClear.setObjectName(\"btnAddClear\")\n self.tabWidget.addTab(self.tab_2, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n #something\n self.btnLinkAnh.clicked.connect(self.openLink)\n self.btnAddLinkFinger.clicked.connect(self.openAddLink)\n self.btnAddNew.clicked.connect(self.addNewPerson)\n self.btnClear.clicked.connect(self.checkClear)\n self.btnAddClear.clicked.connect(self.addClear)\n self.btnKiemTra.clicked.connect(self.checkPerson)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"ID:\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Tên người dùng:\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Ảnh vân tay: \"))\n self.btnLinkAnh.setText(_translate(\"MainWindow\", \"Chọn\"))\n self.btnKiemTra.setText(_translate(\"MainWindow\", \"Kiểm tra\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabKiemTraVanTay), _translate(\"MainWindow\", \"Kiểm tra\"))\n self.btnClear.setText(_translate(\"MainWindow\", \"Xóa\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Tên người dùng mới:\"))\n self.label_6.setText(_translate(\"MainWindow\", \"Ảnh vân tay: \"))\n self.btnAddLinkFinger.setText(_translate(\"MainWindow\", \"Chọn\"))\n self.btnAddNew.setText(_translate(\"MainWindow\", \"Thêm mới\"))\n self.label_7.setText(_translate(\"MainWindow\", \"Thông tin\"))\n self.btnAddClear.setText(_translate(\"MainWindow\", \"Xóa\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \"Thêm Mới\"))\n \n def openLink(self):\n linkFile = App().initUIOpen()\n if not linkFile == '':\n self.txtLinkFinger.setPlainText(linkFile)\n\n def openAddLink(self):\n linkFile = App().initUIOpen()\n if not linkFile == '':\n self.txtAddLinkFinger.setPlainText(linkFile)\n\n def checkClear(self):\n self.txtID.setPlainText('')\n self.txtLinkFinger.setPlainText('')\n self.txtName.setPlainText('')\n\n def addClear(self):\n self.txtAddName.setPlainText('')\n self.txtAddLinkFinger.setPlainText('')\n self.txtInfor.setPlainText('')\n\n def messenger(self,x):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Information)\n msg.setInformativeText(\"Không được để trống {}\".format(x))\n msg.setWindowTitle(\"Thông báo\")\n msg.exec_()\n \n def checkMessenger(self,x):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Information)\n if x == 'ID':\n msg.setInformativeText(\"không có ID này\")\n elif x == 'Name':\n msg.setInformativeText(\"Tên người dùng sai\")\n elif x == 'false':\n msg.setInformativeText(\"Vân Tay không chính xác\")\n else :\n msg.setInformativeText(\"Thành công !!!\")\n msg.setWindowTitle(\"Thông báo\")\n msg.exec_()\n\n def addNewPerson(self):\n if self.txtAddName.toPlainText() == '' :\n self.messenger('Tên')\n return\n if self.txtAddLinkFinger.toPlainText() == '':\n self.messenger('link ảnh')\n return\n \n personName = self.txtAddName.toPlainText()\n personName = unidecode(personName).upper()\n personID = genID()\n while personID in os.listdir(self.PATH):\n personID = genID\n\n Pathfinger = self.txtAddLinkFinger.toPlainText()\n PATHID = self.PATH + '\\\\' + personID\n os.mkdir(PATHID)\n f=open(PATHID+'\\\\name.txt','w')\n f.write(personName)\n f.close()\n\n img1 = imageio.imread(Pathfinger)\n THRESHOLD1= img1.mean()\n img = cv2.imread(Pathfinger,0) \n img = np.array(img > THRESHOLD1).astype(int)\n skel = skimage.morphology.skeletonize(img)\n skel = np.uint8(skel)*255\n mask = img*255\n\n (minutiaeTerm, minutiaeBif) = getTerminationBifurcation(skel, mask)\n FeaturesTerm, FeaturesBif = extractMinutiaeFeatures(skel, minutiaeTerm, minutiaeBif)\n \n BifLabel = skimage.measure.label(minutiaeBif, connectivity=1)\n TermLabel = skimage.measure.label(minutiaeTerm, connectivity=1)\n\n #BifLabel = BifLabel.astype(int)\n #TermLabel = TermLabel.astype(int)\n #print(BifLabel)\n np.savetxt(PATHID+'\\\\BifLabel.txt',BifLabel)\n np.savetxt(PATHID+'\\\\TermLabel.txt',TermLabel)\n\n self.txtInfor.setPlainText('ID: '+personID+'\\nHọ và tên: '+personName)\n\n def checkPerson(self):\n if self.txtID.toPlainText() == '':\n self.messenger('ID')\n return\n if self.txtName.toPlainText() == '':\n self.messenger('Họ và Tên')\n return\n if self.txtLinkFinger.toPlainText() == '':\n self.messenger('')\n return\n\n ID = self.txtID.toPlainText()\n name = self.txtName.toPlainText()\n pathFinger = self.txtLinkFinger.toPlainText()\n pathPerson = self.PATH+'\\\\'+ID\n if ID not in os.listdir(self.PATH):\n self.checkMessenger('ID')\n return\n \n name = unidecode(name).upper()\n\n f = open(pathPerson+'\\\\name.txt','r')\n checkName = f.read()\n f.close\n\n if not checkName == name:\n self.checkMessenger('Name')\n return\n \n\n img1 = imageio.imread(pathFinger)\n THRESHOLD1= img1.mean()\n img = cv2.imread(pathFinger,0) \n img = np.array(img > THRESHOLD1).astype(int)\n skel = skimage.morphology.skeletonize(img)\n skel = np.uint8(skel)*255\n mask = img*255\n (minutiaeTerm, minutiaeBif) = getTerminationBifurcation(skel, mask)\n FeaturesTerm, FeaturesBif = extractMinutiaeFeatures(skel, minutiaeTerm, minutiaeBif)\n BifLabel = skimage.measure.label(minutiaeBif, connectivity=1)\n TermLabel = skimage.measure.label(minutiaeTerm, connectivity=1)\n\n matrixBif = np.loadtxt(pathPerson+'\\\\BifLabel.txt')\n matrixTerm = np.loadtxt(pathPerson+'\\\\TermLabel.txt')\n\n\n checkBif = matrixBif == BifLabel\n checkTerm = matrixTerm == TermLabel\n #print(np.linalg.matrix_rank(matrixBif),np.linalg.matrix_rank(BifLabel),np.linalg.matrix_rank(matrixTerm),np.linalg.matrix_rank(TermLabel))\n\n if checkBif.all() and checkTerm.all():\n self.checkMessenger('win')\n else:\n self.checkMessenger('false')\n \nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"2ThuyThuy/app-about-security","sub_path":"appAES/main_final.py","file_name":"main_final.py","file_ext":"py","file_size_in_byte":16078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12999600165","text":"from selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom log.Log import info\nimport time\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nclass Flow_element():\n '''元素数据操作'''\n def __init__(self, driver):\n self.driver: WebDriver = driver\n self.locat_method_dict = {\n \"xpath\":By.XPATH,\n \"id\":By.ID,\n \"css\":By.CSS_SELECTOR,\n \"class\":By.CLASS_NAME,\n \"name\":By.NAME\n }\n\n def location(self, location_method=None, locaton_element=None):\n '''定位操作'''\n # print(location_method,locaton_element\n\n if location_method != None and locaton_element != None:\n WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((self.locat_method_dict[location_method],locaton_element)))\n self.locat = self.driver.find_element(self.locat_method_dict[location_method], locaton_element)\n else:\n info('定位错误:%s %s' % (location_method, locaton_element))\n\n def page_operation(self, send_operation=None, data=None, clear_data=None):\n '''数据操作'''\n # self.driver.execute_script(\"arguments[0].setAttribute('style',arguments[1]);\", self.locat,\n # \"background:green;border:2px solid red;\")\n time.sleep(0.3)\n\n if send_operation == 'send_keys':\n if clear_data != 'N':\n self.locat.send_keys(Keys.CONTROL + 'a')\n self.locat.send_keys(Keys.BACKSPACE)\n # print(data)\n if data != None:\n self.locat.send_keys(data)\n elif send_operation == 'click':\n # print(self.driver.page_source)\n self.locat.click()\n time.sleep(1)\n elif send_operation == \"backspace\": # 清空输入框\n self.locat.send_keys(Keys.CONTROL + 'a')\n self.locat.send_keys(Keys.BACKSPACE)\n elif send_operation == \"actionchains\": # 鼠标悬停\n ActionChains(self.driver).move_to_element(self.locat).perform()\n elif send_operation == \"text\":\n data_text = self.locat.text\n return [data_text]\n elif send_operation == 'value':\n # print(self.driver.page_source)\n return [self.locat.get_attribute('value')]\n elif send_operation == 'is_selected':\n return [str(self.locat.is_selected())]\n\n\n\n def js_operation(self,js_oper):\n self.driver.execute_script(js_oper,self.locat)#\"js操作\",\"元素定位\"\n\n def web_operation(self,send_operation):\n if send_operation == \"refresh\":\n self.driver.refresh()\n time.sleep(1)\n\n def return_driver(self):\n return self.driver\n","repo_name":"zd08/Webdriver_auto","sub_path":"flow_auto_path/local_element_path.py","file_name":"local_element_path.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40377139730","text":"import streamlit as st\nfrom random import randint\n\nrnum_ = randint(0,255)\nyes = {\"y\",\"yes\",\"yeah\",\"ok\",\"sure\"}\nno = {\"n\",\"no\",\"nope\",\"bye\"}\nwelcome = \"Welcome to this random number generator, please pick a number between 0 and 255.\\n\" #Hit q or type quit when you have had enough :)\\n\"\nturns = [0]\nblank = ''\n\ndef clear_text_input():\n ans.empty()\n\n\ndef yournum():\n global turns\n global ans\n ans = st.text_input(\"What is your number?: \", key=\"num_input\")\n \n \n try:\n rnum(int(ans))\n except ValueError:\n st.write(\"Try a number...\")\n\ndef rnum(x):\n global rnum_\n global turns\n #turns = turns + 1\n try:\n if x > rnum_:\n hi = x - rnum_\n st.write(\"Your number is \", hi, \" higher than the random number\")\n elif x < rnum_:\n lo = rnum_ - x\n st.write(\"Your number is \", lo, \" lower than the random number\")\n elif x == rnum_:\n st.write(\"Your number is exactly the random number, awesome!\")\n rnum_ = None\n except AttributeError:\n st.write(\"Pick another one...\")\n\ndef ta():\n ta_ = st.text_input(\"Do you want to try again?: \", key=\"try_again_input\").lower()\n if ta in yes:\n rs()\n elif ta in no:\n quit()\n else:\n yournum()\n\ndef rs():\n global rnum_ \n global turns\n if rnum_ is None:\n rnum_ = randint(0,255)\n #turns = (0)\n st.write(welcome)\n yournum()\n\n#--- MAIN RUN CODE ---#\nst.write(welcome)\nyournum()\n","repo_name":"pickelmikel/musical-dollop","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34046953718","text":"import json\nfrom strenum import StrEnum\n\nimport dateutil.parser\n\n\nclass WebdavPolicy(StrEnum):\n \"\"\"我只知道这一个\"\"\"\n R302 = \"302_redirect\"\n\n\nclass ExtractFolder(StrEnum):\n \"\"\"\n 提取文件夹方法\n \"\"\"\n Front = \"front\"\n End = \"end\"\n\n\nclass Driver(StrEnum):\n \"\"\"\n 其他的还没找出来\n \"\"\"\n Local = \"Local\"\n Aliyundrive = \"Aliyundrive\"\n\n\ndef _get_value(json_dict: dict, key, default=None):\n if key == \"modified\":\n modified_str = json_dict.get(key, \"1970-01-01T00:00:00:000Z\")\n # go time is diff\n v = dateutil.parser.parser().parse(modified_str)\n elif key == \"addition\":\n v = json.loads(json_dict.get(key, \"{}\"))\n else:\n v = json_dict.get(key, default)\n return v\n\n\nclass Info:\n _default = {}\n\n def __init__(self):\n pass\n\n @classmethod\n def creatInfo(cls, info_cls: type, info_json):\n return info_cls(info_json)\n\n @classmethod\n def retInfo(cls, info_cls: type, info_json):\n if isinstance(info_json, dict):\n return info_cls(info_json)\n if isinstance(info_json, list):\n return [info_cls(i) for i in info_json]\n\n def trans(self, info_json: dict):\n for k in self._default:\n v = info_json.get(k, None)\n\n self.__setattr__(k, v)\n\n\nclass FileInfo(Info):\n _default = {\n \"name\": \"name\",\n \"size\": 1,\n \"is_dir\": False,\n \"modified\": \"1970-01-01T00:00:00:000Z\",\n \"sign\": \"xxxx\",\n \"thumb\": \"\",\n \"type\": 5,\n \"raw_url\": \"\",\n \"readme\": \"\",\n \"provider\": \"Aliyundrive\",\n \"related\": None\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.name = None\n self.size = None\n self.is_dir = None\n self.sign = None\n self.thumb = None\n self.type = None\n self.modified = None\n self.raw_url = None\n self.readme = None\n self.provider = None\n self.related = None\n\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[name:{self.name},size:{self.size},is_dir:{self.is_dir},modified:{self.modified}]\"\n\n\nclass SettingInfo(Info):\n _default = {\n \"key\": \"version\",\n \"value\": \"v3.5.1\",\n \"help\": \"\",\n \"type\": \"string\",\n \"options\": \"\",\n \"group\": 0,\n \"flag\": 2\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.key = None\n self.value = None\n self.type = None\n self.options = None\n self.group = None\n self.flag = None\n\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[key:{self.key},value:{self.value}]\"\n\n\nclass UserInfo(Info):\n _default = {\n \"id\": 1,\n \"username\": \"admin\",\n \"password\": \"\",\n \"base_path\": \"/\",\n \"role\": 2,\n \"permission\": 0\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.id = None\n self.username = None\n self.password = None\n self.base_path = None\n self.role = None\n self.permission = None\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[username:{self.username},base_path:{self.base_path}]\"\n\n\nclass StorageInfo(Info):\n _default = {\n \"id\": 1,\n \"mount_path\": \"/阿里云盘\",\n \"order\": 0,\n \"driver\": \"Aliyundrive\",\n \"cache_expiration\": 30,\n \"status\": \"work\",\n \"addition\": \"{\\\"root_folder_id\\\":\\\"xxx\\\",\\\"refresh_token\\\":\\\"xxx\\\",\\\"order_by\\\":\\\"name\\\",\"\n \"\\\"order_direction\\\":\\\"ASC\\\",\\\"rapid_upload\\\":false}\",\n \"remark\": \"\",\n \"modified\": \"2022-11-26T18:55:55.261579727+08:00\",\n \"disabled\": False,\n \"order_by\": \"\",\n \"order_direction\": \"\",\n \"extract_folder\": \"\",\n \"web_proxy\": False,\n \"webdav_policy\": \"302_redirect\",\n \"down_proxy_url\": \"\"\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.id = None\n self.mount_path = None\n self.order = None\n self.driver = None\n self.cache_expiration = None\n self.status = None\n self.addition = None\n self.modified = None\n self.disabled = None\n self.order_by = None\n self.order_direction = None\n self.extract_folder = None\n self.web_proxy = None\n self.webdav_policy = None\n self.down_proxy_url = None\n\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[driver:{self.driver},mount_path:{self.mount_path},modified:{self.modified}]\"\n\n\nclass MetaInfo(Info):\n _default = {\n \"id\": 1,\n \"path\": \"/a\",\n \"password\": \"i\",\n \"p_sub\": False,\n \"write\": False,\n \"w_sub\": False,\n \"hide\": \"\",\n \"h_sub\": False,\n \"readme\": \"\",\n \"r_sub\": False\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.id = None\n self.path = None\n self.password = None\n self.p_sub = None\n self.write = None\n self.w_sub = None\n self.hide = None\n self.h_sub = None\n self.readme = None\n self.r_sub = None\n\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[path:{self.path},password:{self.password}]\"\n\n\nclass TaskInfo(Info):\n _default = {\n \"id\": \"1\",\n \"name\": \"upload 1.png to [/s](/test)\",\n \"state\": \"succeeded\",\n \"status\": \"\",\n \"progress\": 100,\n \"error\": \"\"\n }\n\n def __init__(self, info_json):\n super().__init__()\n # 这里是为了自动补全加的,不然很不好用\n self.id = None\n self.name = None\n self.state = None\n self.status = None\n self.progress = None\n self.error = None\n\n self.trans(info_json)\n\n def __str__(self):\n return f\"{self.__class__}[path:{self.name},password:{self.state}]\"\n","repo_name":"tempwswm/alistclient","sub_path":"alist/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"2620450459","text":"import telebot\nfrom telebot import types\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport dataframe_image as dfi\nfrom csv import reader\nfrom get_schedule import schedule_regular_sorted\nfrom get_kafedra import kaf_df, fak_df\nfrom get_contacts import contacts_df\nimport gc\nbot = telebot.TeleBot('2067658930:AAHk7crhhbvxSZ0AFr0NswT4YYQnhjZ5f-g')\n\npd.set_option(\"display.max_colwidth\", 10000)\n\nstore_fak_clicked = None\nstore_course_clicked = None\nfak_course_url_final = None\nfak_original = None\ncourse_original = None\ngroup_list = None\nstore_group_clicked = None\nschedule_df_final = None\nheader_list = None\n\nlegend_fak = {\n \"fak_FMTP\": \"ФМТП\",\n \"fak_FTM\": \"ФТМ\",\n \"fak_FEMP\": \"ФЕМП\",\n \"fak_FIT\": \"ФІТ\",\n \"fak_FRGTB\": \"ФРГТБ\",\n \"fak_FFO\": \"ФФО\"\n}\nlegend_course = {\n \"course-1\": \"1\",\n \"course-2\": \"2\",\n \"course-3\": \"3\",\n \"course-4\": \"4\",\n \"course-1m\": \"1м\",\n \"course-2m\": \"2м\"\n}\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n keyboardmain = types.InlineKeyboardMarkup(row_width=2)\n mainmenu = types.InlineKeyboardButton(\n text=\"Головне меню\", callback_data=\"mainmenu\")\n keyboardmain.add(mainmenu)\n bot.reply_to(\n message, f'Я інформаційний чатбот. Приємно познайомитися, {message.from_user.first_name}', reply_markup=keyboardmain)\n\n # bot.send_message(message.chat.id, f\"Натисніть, аби перейти:\",\n # reply_markup=keyboardmain)\n\n############################################################################################################################################################################\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef main_menu(message):\n keyboardmain = types.InlineKeyboardMarkup(row_width=2)\n get_schedule = types.InlineKeyboardButton(\n text=\"Графік навчального процесу\", callback_data=\"get-schedule\")\n get_news = types.InlineKeyboardButton(\n text=\"Новини i анонси\", callback_data=\"get-news-announces\")\n get_kafedra = types.InlineKeyboardButton(\n text=\"Факультети і кафедри\", callback_data=\"get-kafedra\")\n get_contacts = types.InlineKeyboardButton(\n text=\"Контакти\", callback_data=\"get-contacts\")\n get_chart = types.InlineKeyboardButton(\n text=\"Карта\", callback_data=\"get_map\")\n keyboardmain.add(get_schedule, get_news, get_kafedra,\n get_contacts, get_chart)\n bot.send_message(message.chat.id, f\"Натисніть, аби перейти:\",\n reply_markup=keyboardmain)\n gc.collect()\n\ndef enter_fakulty_name(message):\n keyboard_fak = types.InlineKeyboardMarkup(row_width=3)\n fak_FMTP = types.InlineKeyboardButton(\n text=\"ФМТП\", callback_data=\"fak_FMTP\")\n fak_FTM = types.InlineKeyboardButton(text=\"ФТМ\", callback_data=\"fak_FTM\")\n fak_FEMP = types.InlineKeyboardButton(\n text=\"ФЕМП\", callback_data=\"fak_FEMP\")\n fak_FIT = types.InlineKeyboardButton(text=\"ФІТ\", callback_data=\"fak_FIT\")\n fak_FRGTB = types.InlineKeyboardButton(\n text=\"ФРГТБ\", callback_data=\"fak_FRGTB\")\n fak_FFO = types.InlineKeyboardButton(text=\"ФФО\", callback_data=\"fak_FFO\")\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"mainmenu\")\n keyboard_fak.add(fak_FMTP, fak_FTM, fak_FEMP, fak_FIT,\n fak_FRGTB, fak_FFO, backbutton)\n bot.send_message(message.chat.id, \"Виберіть факультет\",\n reply_markup=keyboard_fak)\n gc.collect()\n\ndef enter_course_number(message):\n keyboard_course = types.InlineKeyboardMarkup(row_width=3)\n course_1 = types.InlineKeyboardButton(\n text=\"1й курс\", callback_data=\"course-1\")\n course_2 = types.InlineKeyboardButton(\n text=\"2й курс\", callback_data=\"course-2\")\n course_3 = types.InlineKeyboardButton(\n text=\"3й курс\", callback_data=\"course-3\")\n course_4 = types.InlineKeyboardButton(\n text=\"4й курс\", callback_data=\"course-4\")\n course_1m = types.InlineKeyboardButton(\n text=\"1й-м курс\", callback_data=\"course-1m\")\n course_2m = types.InlineKeyboardButton(\n text=\"2й-м курс\", callback_data=\"course-2m\")\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"get-schedule\")\n keyboard_course.add(course_1, course_2, course_3,\n course_4, course_1m, course_2m, backbutton)\n bot.send_message(message.chat.id, \"Виберіть курс:\",\n reply_markup=keyboard_course)\n gc.collect()\n############################################################################################################################################################################\n\n\n@bot.callback_query_handler(func=lambda call: \"fak\" in call.data)\ndef fak_find(call):\n global store_fak_clicked\n global fak_original\n store_fak_clicked = call.data\n # print(store_fak_clicked)\n fak_original = legend_fak.get(store_fak_clicked)\n enter_course_number(call.message)\n # print(fak_original, not(fak_original))\n gc.collect()\n\n@bot.callback_query_handler(func=lambda call: \"course\" in call.data)\ndef course_find(call):\n global store_course_clicked\n global course_original\n store_course_clicked = call.data\n # print(store_course_clicked)\n course_original = legend_course.get(store_course_clicked)\n # print(course_original, not(course_original))\n find_fak_course(call.message)\n gc.collect()\n\n@bot.callback_query_handler(func=lambda call: \"fak\" and \"course\" in call.data)\ndef find_fak_course(call):\n global fak_course_url_final\n fak_course_url = schedule_regular_sorted.loc[(schedule_regular_sorted['Факультет'] == fak_original) & (\n schedule_regular_sorted['Курс'].isin([course_original]))]\n fak_course_url_final = fak_course_url[fak_course_url.columns[2]].to_string(\n index=False)\n # print(fak_course_url_final)\n # print(type(fak_course_url_final))\n call.data = fak_course_url_final\n # bot.send_message(call.chat.id, fak_course_url_final)\n get_group_list(call)\n gc.collect()\n\n@bot.callback_query_handler(func=lambda call: call.data.startswith('http'))\ndef get_group_list(call):\n global group_list\n global schedule_df_final\n global header_list\n schedule_df = requests.get(fak_course_url_final).content\n excel_file = pd.ExcelFile(schedule_df)\n sheets = excel_file.sheet_names\n # Format the list of sheet names\n schedule_sheet_names = [name.casefold() for name in sheets]\n filtered_schedule_list = list(\n filter(lambda el: not 'начитка' in el, schedule_sheet_names))\n\n print(filtered_schedule_list)\n \n # Get the index that matches our sheet to find\n index = schedule_sheet_names.index(filtered_schedule_list[0].lower())\n if filtered_schedule_list[0] in schedule_sheet_names:\n # Feed this index into pandas\n df = pd.read_excel(excel_file, sheet_name=index)\n df = df.replace('\\n', '', regex=True)\n df = df.replace('№тижня', 'Номертижня', regex=True)\n # print(df)\n schedule_df_new = df.loc[(df == 'Номертижня').any(1).idxmax(\n ):].iloc[:, 0:].reset_index(drop=True).T.drop_duplicates().T\n # grab the first row for the header\n new_header = schedule_df_new.iloc[0]\n # take the data less the header row\n schedule_df_new = schedule_df_new[1:]\n schedule_df_new.columns = new_header # set the header row as the df header\n schedule_df_final = schedule_df_new\n schedule_df_final = schedule_df_final[schedule_df_final.iloc[:, 0].ne(\n schedule_df_final.columns[0])]\n header_list = list(schedule_df_final.columns)\n print(header_list)\n group_list = [x for x in header_list if x.endswith('група')]\n print_group_schedule(call)\n gc.collect()\n\ndef print_group_schedule(call):\n keyboard_group = types.ReplyKeyboardMarkup(\n row_width=2, resize_keyboard=True, one_time_keyboard=True)\n for button_content in group_list:\n schedule_btn = types.KeyboardButton(button_content)\n keyboard_group.add(schedule_btn)\n msg = bot.send_message(call.chat.id, 'Оберіть групу',\n reply_markup=keyboard_group)\n bot.register_next_step_handler(msg, schedule_on_selection)\n gc.collect()\n\ndef schedule_on_selection(message):\n group_number = message.text\n # print(group_number)\n schedule_df_group = schedule_df_final[[header_list[0], header_list[1],\n header_list[2], group_number]].dropna(how='all').reset_index(drop=True)\n schedule_df_group[[header_list[0], header_list[1], header_list[2]]] = schedule_df_group[[\n header_list[0], header_list[1], header_list[2]]].fillna(method='ffill')\n schedule_df_group = schedule_df_group.dropna(thresh=1)\n schedule_df_group.dropna(subset=[group_number], inplace=True)\n schedule_df_group1 = schedule_df_group.groupby([header_list[0], header_list[1], header_list[2]], sort=False, dropna=True)[\n group_number].apply(lambda x: ' / '.join(map(str, x))).reset_index()\n schedule_df_group1.dropna(subset=[group_number], inplace=True)\n schedule_df_group_styled = schedule_df_group1.style.background_gradient()\n dfi.export(schedule_df_group_styled, \"mytable.png\")\n schedule_img = open(\"mytable.png\", 'rb')\n if message != None:\n markup = types.ReplyKeyboardRemove(selective=False)\n bot.send_photo(message.chat.id, schedule_img, reply_markup=markup)\n back_main_menu_key = types.InlineKeyboardMarkup(row_width=1)\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"mainmenu\")\n back_main_menu_key.add(backbutton)\n bot.send_message(message.chat.id, \"Повернутися до головного меню\",\n reply_markup=back_main_menu_key)\n gc.collect()\n##################################################################################################################################################################\n\n\n@bot.callback_query_handler(func=lambda call: \"faculty\" in call.data)\ndef fakultets_output(call):\n keyboard_fakultet = types.InlineKeyboardMarkup()\n for x, y in fak_df.items():\n faculty_btn = types.InlineKeyboardButton(text=x, url=y)\n # backbutton = types.InlineKeyboardButton(text=\"Назад\", callback_data=\"mainmenu\")\n keyboard_fakultet.add(faculty_btn)\n bot.send_message(call.message.chat.id, 'Оберіть факультет:',\n reply_markup=keyboard_fakultet)\n \n keyboard_return = types.InlineKeyboardMarkup()\n backbutton = types.InlineKeyboardButton(\n text=\"До головного меню\", callback_data=\"mainmenu\")\n keyboard_return.add(backbutton)\n bot.send_message(call.message.chat.id, \"Натисніть\",\n reply_markup=keyboard_return)\n gc.collect()\n\n@bot.callback_query_handler(func=lambda call: \"kafedras\" in call.data)\ndef fakultets_output(call):\n keyboard_kafedra = types.InlineKeyboardMarkup()\n for x, y in kaf_df.items():\n kafedra_btn = types.InlineKeyboardButton(text=x, url=y)\n # backbutton = types.InlineKeyboardButton(text=\"Назад\", callback_data=\"mainmenu\")\n keyboard_kafedra.add(kafedra_btn)\n bot.send_message(call.message.chat.id, 'Оберіть кафедру:',\n reply_markup=keyboard_kafedra)\n\n keyboard_return = types.InlineKeyboardMarkup()\n backbutton = types.InlineKeyboardButton(\n text=\"До головного меню\", callback_data=\"mainmenu\")\n keyboard_return.add(backbutton)\n bot.send_message(call.message.chat.id, \"Натисніть\",\n reply_markup=keyboard_return)\n gc.collect()\n##################################################################################################################################################################\n\n\n@bot.callback_query_handler(func=lambda call: \"novyny\" in call.data)\ndef print_news(call):\n with open('news.csv', 'r', newline='', encoding='utf-8') as csvfile:\n csv_reader = reader(csvfile)\n header = next(csv_reader)\n if header != None:\n for i in range(3):\n # print(csvfile.readline().encode('utf-8'))\n bot.send_message(call.message.chat.id,\n csvfile.readline()[1:].encode('utf-8'))\n keyboard_news = types.InlineKeyboardMarkup()\n news = types.InlineKeyboardButton(\n text=\"Читати більше\", url=\"https://knute.edu.ua/b/read-news/?uk\")\n backbutton = types.InlineKeyboardButton(\n text=\"До меню новин\", callback_data=\"get-news-announces\")\n keyboard_news.add(news, backbutton)\n bot.send_message(call.message.chat.id, text=\"Виберіть дію:\",\n reply_markup=keyboard_news)\n gc.collect()\n\n@bot.callback_query_handler(func=lambda call: \"anonsy\" in call.data)\ndef print_news(call):\n with open('announces.csv', 'r', newline='', encoding='utf-8') as csvfile:\n csv_reader = reader(csvfile)\n header = next(csv_reader)\n if header != None:\n for i in range(3):\n # print(csvfile.readline().encode('utf-8'))\n bot.send_message(call.message.chat.id,\n csvfile.readline()[1:].encode('utf-8'))\n keyboard_announces = types.InlineKeyboardMarkup()\n announces = types.InlineKeyboardButton(\n text=\"Читати більше\", url=\"https://knute.edu.ua/b/read-allnnoun/?uk\")\n backbutton = types.InlineKeyboardButton(\n text=\"До меню новин\", callback_data=\"get-news-announces\")\n keyboard_announces.add(announces, backbutton)\n bot.send_message(call.message.chat.id, text=\"Виберіть дію:\",\n reply_markup=keyboard_announces)\n gc.collect()\n###################################################################################################################################################################\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n if call.data == \"mainmenu\":\n keyboardmain = types.InlineKeyboardMarkup(row_width=2)\n get_schedule = types.InlineKeyboardButton(\n text=\"Розклад навчального процесу\", callback_data=\"get-schedule\")\n get_news = types.InlineKeyboardButton(\n text=\"Новини і анонси\", callback_data=\"get-news-announces\")\n get_kafedra = types.InlineKeyboardButton(\n text=\"Факультети і кафедри\", callback_data=\"get-kafedra\")\n get_contacts = types.InlineKeyboardButton(\n text=\"Контакти\", callback_data=\"get-contacts\")\n get_chart = types.InlineKeyboardButton(\n text=\"Карта\", callback_data=\"get_map\")\n keyboardmain.add(get_schedule, get_news, get_kafedra,\n get_contacts, get_chart)\n bot.send_message(call.message.chat.id,\n text=\"Виберіть необхідне:\", reply_markup=keyboardmain)\n\n###################################################################################################################################################################\n elif call.data == \"get-schedule\":\n global request_button\n keyboard = types.InlineKeyboardMarkup()\n schedule_regular = types.InlineKeyboardButton(\n text=\"звичайний\", callback_data=\"schedule-regular\")\n # schedule_exam = types.InlineKeyboardButton(\n # text=\"екзаменаційний\", callback_data=\"schedule-exam\")\n timetable = types.InlineKeyboardButton(\n text=\"розклад дзвінків\", callback_data=\"timetable\")\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"mainmenu\")\n keyboard.add(schedule_regular, timetable, backbutton)\n bot.send_message(call.message.chat.id,\n text=\"Виберіть необхідне:\", reply_markup=keyboard)\n\n elif call.data == \"schedule-regular\":\n enter_fakulty_name(call.message)\n\n elif call.data == \"timetable\":\n keyboard_timetable = types.InlineKeyboardMarkup()\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"get-schedule\")\n keyboard_timetable.add(backbutton)\n timetable_img = open('timetable.PNG', 'rb')\n bot.send_photo(call.message.chat.id, timetable_img,\n reply_markup=keyboard_timetable)\n\n###################################################################################################################################################################\n elif call.data == \"get-news-announces\":\n keyboard_news_announces = types.InlineKeyboardMarkup()\n news = types.InlineKeyboardButton(\n text=\"Новини\", callback_data=\"novyny\")\n announces = types.InlineKeyboardButton(\n text=\"Анонси\", callback_data=\"anonsy\")\n backbutton = types.InlineKeyboardButton(\n text=\"До головного меню\", callback_data=\"mainmenu\")\n keyboard_news_announces.add(news, announces, backbutton)\n bot.send_message(call.message.chat.id, text=\"Виберіть дію:\",\n reply_markup=keyboard_news_announces)\n\n###################################################################################################################################################################\n elif call.data == \"get-kafedra\":\n keyboard_kafedra = types.InlineKeyboardMarkup()\n fakultets = types.InlineKeyboardButton(\n text=\"Факультети\", callback_data=\"faculty\")\n kafedras = types.InlineKeyboardButton(\n text=\"Кафедри\", callback_data=\"kafedras\")\n backbutton = types.InlineKeyboardButton(\n text=\"Назад\", callback_data=\"mainmenu\")\n keyboard_kafedra.add(fakultets, kafedras, backbutton)\n bot.send_message(\n call.message.chat.id, text=\"Виберіть необхідне:\", reply_markup=keyboard_kafedra)\n\n###################################################################################################################################################################\n elif call.data == \"get-contacts\":\n keyboard_contacts = types.InlineKeyboardMarkup()\n # with open('contacts.csv', 'r', newline='', encoding='utf-8') as csvfile:\n # for i in range(3):\n # bot.send_message(call.message.chat.id, csvfile.readlines()[1:].encode('utf-8'))\n backbutton = types.InlineKeyboardButton(\n text=\"До головного меню\", callback_data=\"mainmenu\")\n keyboard_contacts.add(backbutton)\n bot.send_message(call.message.chat.id, text=\"Повернутись\",\n reply_markup=keyboard_contacts)\n for index, row in contacts_df.iterrows():\n bot.send_message(\n call.message.chat.id, row['Відділ'] + \" \" + row['Телефон'] + \" \" + row['email'])\n bot.send_message(call.message.chat.id, text=\"Повернутись\",\n reply_markup=keyboard_contacts)\n\n###################################################################################################################################################################\n elif call.data == \"get_map\":\n keyboard_map = types.InlineKeyboardMarkup()\n backbutton = types.InlineKeyboardButton(\n text=\"До головного меню\", callback_data=\"mainmenu\")\n keyboard_map.add(backbutton)\n map_img = open('map.PNG', 'rb')\n bot.send_photo(call.message.chat.id, map_img)\n bot.send_message(call.message.chat.id, text='''А - вул. Кіото 19 (головний корпус)\nБ - вул. Кіото 19 (бібліотечний корпус)\nВ - вул. Кіото 19 (Конгрес-центр)\nГ - вул. Мілютенка 8\nД - вул. Кіото 21\nЕ - вул. Мілютенка 4\nЛ - вул. Кіото 23\nМ - вул. Чигоріна 57\nН - вул. Чигоріна 57а\nР - вул. Раєвського 36''', reply_markup=keyboard_map)\n gc.collect()\n\n##################################################################################################################################################################\nif __name__ == \"__main__\":\n bot.polling(none_stop=True)\n","repo_name":"Nick-Alskling/ns_magistr_diploma","sub_path":"telegrambot.py","file_name":"telegrambot.py","file_ext":"py","file_size_in_byte":20800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43708815101","text":"import django\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense\nfrom sklearn.preprocessing import MinMaxScaler\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'T_AI.settings')\ndjango.setup()\n\nfrom cabinet.models import BtcPrice\n\n\ndef calculate_percentage(part, whole):\n return 100 * float(part) / float(whole)\n\n\n# Получение упорядоченного списка цен Bitcoin prices = [13000, 14000, 15000... N]\nprices = [float(obj.price) for obj in BtcPrice.objects.order_by('time')]\n\n# Создаем MinMaxScaler, который будет нормализовывать наши данные в формат от 0 до 1\nscaler = MinMaxScaler(feature_range=(0, 1))\n\n# Нормализация данных prices стал списком списков prices = [[0.161009], [0.158240], ... [n]]\nprices = scaler.fit_transform(np.array(prices).reshape(-1, 1))\n\n# подготовка данных для LSTM\nn_steps = 168\n# x это список списков входных данных [[0.161009, 0.158240 ..], [0.158240, 0.1693259], ... [n]]\n# y это список результатов [0.181009, 0.198240, ... [n]]\nx = np.array([prices[i:i + n_steps, 0] for i in range(len(prices) - n_steps - 2)])\ny = np.array(prices[n_steps + 2:, 0])\n\n# разделение данных на обучающую и тестовую выборку\ntrain_size = int(0.9 * len(x))\n# x_train и y_train это списки списков как x [[0.161009, 0.158240 ..], [0.158240, 0.1693259], ... [n]]\n# y_train и y_test это списки как y [0.181009, 0.198240, ... [n]]\nx_train, x_test = x[:train_size], x[train_size:]\ny_train, y_test = y[:train_size], y[train_size:]\n\n# преобразование данных в формат, который можно подать на вход LSTM\n# Каждая еденица изменила значение на список - было 0.198240, стало [0.198240]\nx_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))\nx_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))\n\n# создание модели\nmodel = Sequential()\nmodel.add(LSTM(50, activation='relu', input_shape=(n_steps, 1)))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')\n\n# обучение модели\nmodel.fit(x_train, y_train, epochs=1, verbose=1)\n\n# прогнозирование\ny_pred = model.predict(x_test)\n\n# Переводим прогнозированные значения обратно из нормализованного формата\ny_pred = scaler.inverse_transform(y_pred)\ny_test = scaler.inverse_transform(y_test.reshape(-1, 1))\n\n\n# Вернем x_test к трехмерной форме\nx_test_3d = x_test.reshape(-1, n_steps, 1)\n\n# Переводим все элементы обратно из нормализованного формата\nx_test_inv = scaler.inverse_transform(x_test_3d.reshape(-1, 1)).reshape(-1, n_steps)\n\ncount_of_correct_prediction = 0\n\n# визуализация результатов\nfor i in range(len(y_pred)):\n\n print(f'Курс был: {x_test_inv[i][-1]}')\n print(f'Курс стал через 24 часа: {y_test[i][0]}')\n print(f'Предсказано: {y_pred[i][0]}')\n print(\"Курс упал\" if y_test[i][0] < x_test_inv[i][-1] else \"Курс поднялся\")\n print(f'Предсказано что курс {\"упадет\" if y_pred[i][0] < x_test_inv[i][-1] else \"поднимется\"}')\n if (y_test[i][0] > x_test_inv[i][-1] and y_pred[i][0] > x_test_inv[i][-1]) or \\\n (y_test[i][0] < x_test_inv[i][-1] and y_pred[i][0] < x_test_inv[i][-1]):\n count_of_correct_prediction += 1\n print(\"Предсказание верное!\")\n else:\n print(\"Предсказание неверное - отстой\")\n\nprint(f'Всего предсказаний: {len(y_pred)}')\nprint(f'Верных предсказаний: {count_of_correct_prediction}')\nprint(f'На {(count_of_correct_prediction / len(y_pred)) * 100}% предсказано верно')","repo_name":"karik741/T_AI","sub_path":"educate_model.py","file_name":"educate_model.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33697896631","text":"#https://www.hackerrank.com/challenges/python-sort-sort/\nm, n= input().split()\nn = int(n)\nm = int(m)\nv = []\nfor i in range(m):\n v.append([])\nfor j in range(n):\n vals = input().split()\n vals = [int(a) for a in vals]\n for j in range(m):\n v[j].append(vals[j])\ncol = int(input())\nto_sort_col = []\nfor i in range(n):\n to_sort_col.append((v[col][i], i))\nto_sort_col = sorted(to_sort_col, key=lambda x : x[0])\nfor elem in to_sort_col:\n val,idx = elem\n for i in range(m):\n print(str(v[i][idx]) + \" \",end=\"\")\n print(\"\")\n","repo_name":"wesleymesquita/Snippets","sub_path":"HR/sort_data.py","file_name":"sort_data.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20870935772","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\n\nimg = cv.imread(\"../../data/blox.jpg\", 0)\n\n# Initiate FAST object with default values\nfast = cv.FastFeatureDetector_create()\n\n# find and draw the keypoints\nkp = fast.detect(img, None)\nimg2 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))\n\n# Print all default params\nprint(\"Threshold: {}\".format(fast.getThreshold()), '\\n'\n \"nonmaxSuppression: {}\".format(fast.getNonmaxSuppression()), '\\n'\n \"neighborhood: {}\".format(fast.getType()), '\\n'\n \"Total Keypoints with nonmaxSuppression: {}\".format(len(kp)))\n\ncv.imwrite(\"output-files/fast_true.png\", img2)\ncv.imshow(\"With Suppression\", img2)\n\n# Disable nonmaxSuppression\nfast.setNonmaxSuppression(False)\nkp = fast.detect(img, None)\nprint(\"Total Keypoints without nonmaxSuppression: {}\".format(len(kp)))\n\nimg3 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))\ncv.imwrite(\"output-files/fast_false.png\", img3)\ncv.imshow(\"Without Suppression\", img3)\nk = cv.waitKey(0)\nwhile True:\n if k == 27:\n cv.destroyAllWindows()\n break\n","repo_name":"codearchive/opencv-python-tutorial","sub_path":"feature-detection-description/FAST/FAST.py","file_name":"FAST.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38367548502","text":"import os\nimport random\nimport string\nfrom datetime import datetime\n\nimport sqlite3\nfrom sqlalchemy import *\n\nfrom loguru import logger\n\ndef randomdata(n):\n for id in range(n):\n numval = random.randint(1,1000)\n textval = ''.join(random.choices(string.ascii_letters, k=50))\n yield id, numval, textval\n\n\nif __name__ == \"__main__\":\n from pathlib import Path\n logger.debug(\"Delete old DB\")\n Path(\"test.db\").unlink(missing_ok=True)\n\n logger.debug(\"Create engine\")\n engine = create_engine(\"sqlite:///test.db\")\n metadata = MetaData()\n test = Table('test', metadata,\n Column('id', Integer, primary_key=True),\n Column('numval', Integer),\n Column('textval', String),\n )\n logger.debug(\"Create tables\")\n metadata.create_all(engine)\n stmt = insert(test)\n logger.debug(\"Connect\")\n with engine.begin() as connection:\n logger.debug(\"Build data\")\n data = [dict(id=id, numval=numval, textval=textval) for id, numval, textval in randomdata(1_000_000)]\n logger.debug(\"Start insert\")\n start = datetime.now()\n connection.execute(stmt, data)\n logger.debug(\"Time taken: {dur}\", dur=datetime.now() - start)\n #with sqlite3.connect(\"test.db\") as conn:\n # conn.execute(\"BEGIN\")\n # start = datetime.now()\n # conn.executemany(\"INSERT INTO test VALUES (?,?,?)\", randomdata(1_000_000))\n #print(\"Time taken:\", datetime.now() - start)\n","repo_name":"pfmoore/temp","sub_path":"dbtest.py","file_name":"dbtest.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"29171385384","text":"# Needed for inport of common.py\n##############################################################\nimport os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir) \nfrom common import common\n##############################################################\n\none_dig_seperators=[',',';','(',')','[',']','+','-','/','*',':','.']\ntwo_dig_seperators=[\":=\",\"<=\",\"<=\",\"==\",\"!=\",\"//\",\"/*\",\"*/\"]\n\ntokens = []\n\n\nfilename = \"\"\n\nsource_file = None\n\n\n# Bad hack needed to allow for opening different files\ndef openFile(filename):\n global source_file\n #filename = \"../test0.src\"\n\n source_file = open(filename, 'r')\n\n\n\n\n# Enum used for types to prevent passing around strings.\n# reserved_word = [\"PROGRAM\",\"IS\",\"VARIABLE\",\"BEGIN\",\"END\",\".\",\"//\"]\n# reserved_types =[\"INTEGER\",\"BOOL\",\"FLOAT\",\"STRING\",\"CHAR\",\"ID\"]\n# control_words = [\"IF\",\"THEN\",\"ELSE\",\"FOR\",\"WHILE\",\"SWITCH\",\"CASE\"]\n# mult_op = [\"*\",\"/\",\"DIV\",\"MOD\",\"AND\"]\n# add_op = [\"+\",\"-\",\"OR\"]\n# assign_op = [\":=\"]\n# re_op=[\"=\",\"!=\",\"<\",\"<=\",\">=\",\">\"]\n# id = starts with letter than letters and or digitis\n# digit = 0-9\n# letter = az and A-Z\n\n# num is digits,optional_fraction,optional_exponent\n# digits = digit and any number of other digits\n# opetional_fraction = .plus digits\n\ndef identify_token(token_text_lower):\n token_text = token_text_lower.upper()\n\n # Start with a token of Invalid type and hopefully identify before sending\n # Only certain tokens such as number, and id will need a value\n return_token = common.token(common.token_types.t_INVALID,None);\n\n if token_text==\"PROGRAM\":\n return_token.type = common.token_types.t_PROGRAM\n elif token_text==\"IS\":\n return_token.type = common.token_types.t_IS\n elif token_text==\"VARIABLE\":\n return_token.type = common.token_types.t_VARIABLE\n elif token_text==\"BEGIN\":\n return_token.type = common.token_types.t_BEGIN\n elif token_text==\"END\":\n return_token.type = common.token_types.t_END\n elif token_text==\".\":\n return_token.type = common.token_types.t_DOT \n elif token_text==\"//\":\n return_token.type = common.token_types.t_LINE_COMMENT\n elif token_text==\"INTEGER\":\n return_token.type = common.token_types.t_INTEGER\n elif token_text==\"BOOL\":\n return_token.type = common.token_types.t_BOOL\n elif token_text==\"FLOAT\":\n return_token.type = common.token_types.t_FLOAT\n elif token_text==\"STRING\":\n return_token.type = common.token_types.t_STRING\n elif token_text==\"CHAR\":\n return_token.type = common.token_types.t_CHAR\n elif token_text==\"IF\":\n return_token.type = common.token_types.t_IF\n elif token_text==\"THEN\":\n return_token.type = common.token_types.t_THEN\n elif token_text==\"ELSE\":\n return_token.type = common.token_types.t_ELSE\n elif token_text==\"FOR\":\n return_token.type = common.token_types.t_FOR\n elif token_text==\"WHILE\":\n return_token.type = common.token_types.t_WHILE\n elif token_text==\"PROCEDURE\":\n return_token.type = common.token_types.t_PROCEDURE\n elif token_text==\"SWITCH\":\n return_token.type = common.token_types.t_SWITCH\n elif token_text==\"RETURN\":\n return_token.type = common.token_types.t_RETURN\n elif token_text==\"CASE\":\n return_token.type = common.token_types.t_CASE\n elif token_text==\"GLOBAL\":\n return_token.type = common.token_types.t_GLOBAL\n elif token_text==\"*\":\n return_token.type = common.token_types.t_MULT_OP\n elif token_text==\"/\":\n return_token.type = common.token_types.t_DIVIDE_OP\n elif token_text==\"AND\":\n return_token.type = common.token_types.t_AND\n elif token_text==\"+\":\n return_token.type = common.token_types.t_ADD_OP\n elif token_text==\"-\":\n return_token.type = common.token_types.t_SUBTRACT_OP\n elif token_text==\"OR\":\n return_token.type = common.token_types.t_OR\n elif token_text==\"NOT\":\n return_token.type = common.token_types.t_NOT\n elif token_text==\":=\":\n return_token.type = common.token_types.t_ASSIGN\n elif token_text==\"=\":\n return_token.type = common.token_types.t_EQUALS\n elif token_text==\"==\":\n return_token.type = common.token_types.t_DOUBLE_EQUALS\n elif token_text==\"<\":\n return_token.type = common.token_types.t_LESS_THAN\n elif token_text==\"<=\":\n return_token.type = common.token_types.t_LESS_THAN_OR_EQUAL\n elif token_text==\">\":\n return_token.type = common.token_types.t_GREATER_THAN\n elif token_text==\">=\":\n return_token.type = common.token_types.t_GREATER_THAN_OR_EQUAL\n elif token_text==\":\":\n return_token.type = common.token_types.t_COLON\n elif token_text==\";\":\n return_token.type = common.token_types.t_SEMI_COLON\n elif token_text==\"(\":\n return_token.type = common.token_types.t_LEFT_PAREN\n elif token_text==\")\":\n return_token.type = common.token_types.t_RIGHT_PAREN\n elif token_text==\"[\":\n return_token.type = common.token_types.t_LEFT_BRACKET\n elif token_text==\"]\":\n return_token.type = common.token_types.t_RIGHT_BRACKET\n elif token_text==\"TRUE\":\n return_token.type = common.token_types.t_TRUE\n elif token_text==\"FALSE\":\n return_token.type = common.token_types.t_FALSE\n\n elif token_text.isnumeric():\n return_token.type = common.token_types.t_NUMBER\n return_token.value = token_text\n elif token_text[0].isalpha():\n return_token.type = common.token_types.t_ID\n return_token.value = token_text\n\n print(\"** Token Type: \"+str(return_token.type)+\" : \"+str(return_token.value))\n if return_token.type==common.token_types.t_INVALID:\n print(token_text)\n print(return_token)\n exit()\n return return_token\n\ndef getNextToken():\n word = \"\"\n textMode=False\n commentMode=False\n multiLineCount = 0\n while 1:\n # Read two characters. Peek does not advance the iterator\n cur_char = getNextChar()\n next_char = peekNextChar()\n\n # If not valid char, most likely end of file\n if not cur_char:\n return None\n\n # Add the the current \"word\", only if special case modes are not active and it is not a comment\n if textMode==False and commentMode==False and cur_char+next_char!=\"//\" and cur_char+next_char!=\"/*\":\n\n # First check if cur_char is a one or two digit pre-defined token\n if cur_char+next_char in two_dig_seperators:\n # Make a pointless read to advance the iterator that was peeked\n temp = getNextChar()\n return identify_token(cur_char+next_char)\n elif cur_char in one_dig_seperators:\n return identify_token(cur_char)\n\n # Do not add spaces, tabs newlines etc to word\n if not isWhitespace(cur_char):\n word = word + cur_char\n\n # If the next chatacter is whitespace or a seperator, our current word is over\n if next_char in one_dig_seperators or isWhitespace(next_char):\n if word!=\"\":\n return identify_token(word)\n # textMode allows for quotations that will not get chopped by the lexer\n if cur_char=='\"':\n textMode=True\n else:\n # Start ignoring all characters for comments\n if cur_char+next_char==\"//\":\n commentMode=True\n temp = getNextChar()\n if cur_char+next_char==\"/*\":\n print(\"Start Comment\",cur_char,next_char)\n commentMode=True\n multiLineCount = multiLineCount + 1\n temp = getNextChar()\n # Comments end at the end of the line, add multi-line comment support later\n if commentMode==True:\n if cur_char=='\\n' and multiLineCount==0:\n commentMode=False\n if cur_char+next_char==\"*/\":\n print(\"End Comment\")\n multiLineCount=multiLineCount-1\n if multiLineCount==0:\n print(\"Done\")\n commentMode=False\n temp = getNextChar()\n\n if textMode==True:\n if cur_char!='\"':\n word=word+cur_char\n\n if cur_char=='\"':\n textMode=False\n return_token = common.token(common.token_types.t_INVALID,None);\n return_token.type = common.token_types.t_STRING_VALUE\n return return_token\n\n\n\ndef getNextChar():\n char = source_file.read(1)\n return char\n\n# Chapter 3.2 Dragon book\n# Used two-digit tokens\ndef peekNextChar():\n pos = source_file.tell()\n char = source_file.read(1)\n source_file.seek(pos)\n return char\n\ndef printTokens():\n tempIndex=0\n for token in tokens:\n print(str(tempIndex)+\" \"+token)\n tempIndex=tempIndex+1\n\ndef isWhitespace(inChar):\n if inChar==' ' or inChar=='\\t' or inChar=='\\n' or inChar=='\\v' or inChar=='\\f' or inChar=='\\r':\n return True\n return False\n\n\ndef main():\n pass\n\n\n# i=0\n# while True:\n# result = getNextToken()\n# if result==None:\n# break\n# print(str(i)+\": \"+str(result.type)+\" \"+str(result.value))\n# if result.type==common.token_types.t_INVALID:\n# print(\"Error\")\n# exit()\n# i=i+1\n# source_file.close()\n\n","repo_name":"boxerbomb/dragon_compiler","sub_path":"lexer/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18674749661","text":"#https://leetcode.com/problems/single-number-ii/\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n n=len(nums)\n if(n==1):\n return nums[0]\n nums=sorted(nums)\n print(nums)\n if(nums[0]!=nums[1]):\n return nums[0]\n if(nums[n-1]!=nums[n-2]):\n return nums[n-1]\n for i in range(1,n-1):\n if((nums[i]!=nums[i-1] and nums[i]!=nums[i+1])):\n return nums[i]\n ","repo_name":"thecodearrow/LeetCode-Python-Solutions","sub_path":"June 2020 LeetCode Challenge/Single Number 2.py","file_name":"Single Number 2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"10566694311","text":"from testutil import prepare_asset_options, asset_exists, wait_blocks\nfrom cli_wallet import CLI_WALLET\nfrom base_asset import Asset\n\n\nclass UserAsset(Asset):\n def __init__(self, registrar, precision=1, name=None):\n super(UserAsset, self).__init__(registrar, precision=precision,\n name=name)\n\n def create_asset(self, options):\n response = CLI_WALLET.send_request(\n \"create_asset\", [self.registrar, self.name, self.precision,\n options, None, True])\n return response\n\n def try_create_asset(self, options):\n response = CLI_WALLET.try_send_request(\n \"create_asset\", [self.registrar, self.name, self.precision,\n options, None, True])\n return response\n\n\ndef create_user_asset(account_name, balance, precision, asset_name=None,\n options=None, permissions_int=0):\n options = prepare_asset_options(permissions_int, 0) if options is None \\\n else options\n\n user_asset = UserAsset(account_name, precision, asset_name)\n\n if asset_exists(user_asset.name):\n return user_asset\n\n user_asset.create_asset(options)\n user_asset.issue_asset(balance)\n wait_blocks(1)\n\n return user_asset\n","repo_name":"openledger/bitshares-tests","sub_path":"utils/user_asset.py","file_name":"user_asset.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"18825836280","text":"from flask import Flask, request, jsonify\nfrom api.ssh import SSH\nfrom api.runtime.runtime_execution import RuntimeExecution\nfrom api.kubernetes import Kubernetes\nfrom api.docker import Docker\nfrom api.runtime.constants import TOMCAT\nfrom xml.dom import minidom\nimport json\nimport subprocess\nimport os\n\nHOST='0.0.0.0'\nPORT='8090'\nDEBUG=True\n\napp = Flask(__name__)\n\n# docker run -p 8090:8090 -v /var/run/docker.sock:/var/run/docker.sock intaayushsinhaml/env\n\n'''\nEndpoints:\n\n/docker_registry_cred \n/kube_config_cred \n\n/vm_cred\n/vm_cred//\n\n/login_vm//\n\n/get_os_info\n/discover_process\n\n/start_containerization// - saves docker file\n/save_code\n/save_container_info\n/build_container\n/push_container_docker_registry\n\n/initialize_kubernetes\n/kubernetes/add_container\n/kubernetes/add_service\n/kubernetes/add_volume\n/kubernetes/transfer_data_to_volume\n/kubernetes/save\n/kubernetes/apply\n\n/get_docker_file//\n/get_kubernetes_file\n\n/logout_vm\n\n/kubernetes/get_services\n\n/terminate -- clears all variable\n'''\n\n# os.system(\"rm -rf user_files/\")\n\nDOCKER_USERNAME=None\nDOCKER_PASSWORD=None\nDOCKER_REGISTRY=None\n\nVM_CRED=[]\nCURRENT_VM=None\nVM_PROCESS = dict()\n\nssh=None\ndocker_client=None\nkubernetes_object=None\nRUNTIME=None\nPROCESS_LIST=[]\n_env=None\n\ndef build_response(msg, code=200, success=True):\n msg[\"success\"]=success\n msg[\"code\"]=code\n resp=jsonify(msg)\n resp.status_code=code\n return resp\n\ndef check_vm_cred_exists(username, hostname):\n global VM_CRED\n for cred in VM_CRED:\n if cred[\"username\"]==username and cred[\"hostname\"]==hostname:\n return True\n return False\n\ndef get_vm_cred(username, hostname):\n global VM_CRED\n for cred in VM_CRED:\n if cred[\"username\"]==username and cred[\"hostname\"]==hostname:\n return cred\n return {}\n\ndef set_current_vm(username, hostname):\n global VM_CRED, CURRENT_VM\n for cred in VM_CRED:\n if cred[\"username\"]==username and cred[\"hostname\"]==hostname:\n CURRENT_VM=cred\n\ndef get_vm_list_excluded():\n global VM_CRED, CURRENT_VM\n _excluded_vm=[]\n for cred in VM_CRED:\n if cred != CURRENT_VM:\n _excluded_vm.append(cred)\n return _excluded_vm\n\ndef is_process_in_process_list(process_port, process_id, process_name):\n global PROCESS_LIST\n for i in PROCESS_LIST:\n if i[\"process_port\"]==process_port and \\\n i[\"process_id\"]==process_id and \\\n i[\"process_name\"]==process_name:\n return True\n return False\n\ndef find_tomcat_port(process_id):\n global ssh\n tomcat_ports = []\n\n process_port_info = ssh.get_activate_process_on_port()\n\n #find tomcat ports\n for process_tuple in process_port_info:\n if process_tuple[1] == process_id:\n tomcat_ports.append(process_tuple[0])\n\n #code for finding http port\n _cmd_for_env_var = 'ps -ef | grep catalina | sed -n \\'1p\\''\n _, output, _ = ssh.exec_command(_cmd_for_env_var)\n\n words = output.split()\n\n result = [i for i in words if i.startswith('-Dcatalina.home')]\n _CATALINA_HOME = result[0].split('=')[1]\n\n result = [i for i in words if i.startswith('-Dcatalina.base')]\n _CATALINA_BASE = result[0].split('=')[1]\n\n _, output, _error = ssh.exec_command('cat ' + _CATALINA_HOME + '/conf/server.xml')\n\n xmldoc = minidom.parseString(output)\n connector_list = xmldoc.getElementsByTagName('Connector')\n #print(len(connector_list))\n\n ret_port = '8080'\n for c in connector_list:\n _port = c.attributes['port'].value\n _protocol = c.attributes['protocol'].value\n if(_protocol == 'HTTP/1.1' and _port in tomcat_ports):\n ret_port = _port\n \n return ret_port\n\n@app.route(\"/\")\ndef home():\n return build_response({\"message\": \"system is working\"})\n\n@app.route(\"/docker_registry_cred\", methods=['POST'])\ndef docker_registry_cred():\n try:\n _json=request.json\n\n _req =[\"username\", \"password\", \"registry\"]\n for r in _req:\n if r not in _json:\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n _username=_json[\"username\"]\n _password=_json[\"password\"]\n _registry=_json[\"registry\"]\n\n global DOCKER_USERNAME, DOCKER_PASSWORD, DOCKER_REGISTRY\n\n DOCKER_USERNAME=_username\n DOCKER_PASSWORD=_password\n DOCKER_REGISTRY=_registry\n\n return build_response({\"message\": \"data added successfully\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kube_config_cred\", methods=[\"POST\"])\ndef kube_config_cred():\n try:\n _json=request.json\n\n if \"config\" not in _json:\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n _config=_json[\"config\"]\n\n if not os.path.exists('user_files/'):\n os.mkdir('user_files/')\n\n _path=''\n if os.path.exists(_path+\"kube_config_file\"):\n _kube_file = open(_path+\"kube_config_file\", 'w')\n else:\n _kube_file = open(_path+\"kube_config_file\", 'x')\n _kube_file.write(_config)\n _kube_file.close() \n\n return build_response({\"message\": \"kube_config file stored\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/vm_cred\", methods=[\"GET\", \"POST\"])\ndef vm_cred():\n global VM_CRED\n if request.method == \"GET\":\n return build_response({\"vm\": VM_CRED})\n\n try:\n _json=request.json\n\n _req =[\"hostname\", \"username\", \"password\", \"port\", \"pkey\", \"passphrase\"]\n for r in _req:\n if r not in _json:\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n VM_CRED.append(_json)\n\n return build_response({\"message\": \"added successfully\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/vm_cred//\", methods=[\"GET\", \"PUT\"])\ndef vm_cred_spec(username, hostname):\n global VM_CRED\n if request.method == 'GET':\n\n for cred in VM_CRED:\n if cred[\"username\"]==username and cred[\"hostname\"]==hostname:\n return build_response({\"data\": cred})\n \n return build_response({\"message\": \"no such vm\"}, code=200, success=False)\n\n try:\n _json=request.json\n _req =[\"hostname\", \"username\", \"password\", \"port\", \"private_key\", \"passphrase\"]\n for r in _req:\n if r not in _json:\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n VM_CRED.append(_json)\n\n return build_response({\"message\": \"updated successfully\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/login_vm//\")\ndef login_vm(username, hostname):\n global ssh, DOCKER_USERNAME, DOCKER_PASSWORD, DOCKER_REGISTRY, docker_client\n if not check_vm_cred_exists(username, hostname):\n return build_response({\"message\": \"no such vm\"}, code=400, success=False)\n\n try:\n \n vm_cred=get_vm_cred(username, hostname)\n ssh = SSH(hostname=vm_cred[\"hostname\"], \n port=vm_cred[\"port\"], \n username=vm_cred[\"username\"], \n password=vm_cred[\"password\"], \n pkey=vm_cred[\"pkey\"], \n passphrase=vm_cred[\"passphrase\"])\n\n # login to docker also\n docker_client = Docker(DOCKER_REGISTRY, DOCKER_USERNAME, DOCKER_PASSWORD, ssh, dev=True)\n docker_client.login()\n\n set_current_vm(username, hostname)\n\n return build_response({\"message\": \"login successful\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/get_os_info\")\ndef get_os_info():\n global ssh\n if ssh is None:\n return build_response({\"message\": \"login into vm first\"}, code=400, success=False)\n try:\n return build_response({\"data\": ssh.get_operating_system()})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n \n@app.route(\"/discover_process\")\ndef discover_process():\n global PROCESS_LIST, ssh, VM_PROCESS\n if ssh is None:\n return build_response({\"message\": \"login into vm first\"}, code=400, success=False)\n PROCESS_LIST=[]\n process_port_info = ssh.get_activate_process_on_port()\n for process_tuple in process_port_info:\n vm_data=get_vm_list_excluded()\n runtime_exec = RuntimeExecution(process_tuple[0], process_tuple[1], process_tuple[2], ssh, docker_client, vm_data)\n if runtime_exec.is_supported():\n PROCESS_LIST.append({\"process_port\": process_tuple[0], \n \"process_id\": process_tuple[1], \n \"process_name\": process_tuple[2]})\n try:\n _, output, _ = ssh.exec_command('ps -ef | grep -c catalina')\n if int(output) > 1:\n _, output, _=ssh.exec_command('ps -ef | grep catalina | sed -n \\'1p\\' | awk \\'{print $2}\\'')\n \n process_id = str(output.split('\\n')[0])\n process_port = find_tomcat_port(process_id)\n PROCESS_LIST.append({\n \"process_port\": process_port,\n \"process_id\": process_id,\n \"process_name\": TOMCAT\n })\n except Exception as e:\n print(e)\n VM_PROCESS[CURRENT_VM[\"hostname\"]] = PROCESS_LIST\n return build_response({\"data\": PROCESS_LIST})\n\n@app.route(\"/start_containerization///\")\ndef start_containerization(process_port, process_id, process_name):\n global ssh, docker_client, RUNTIME, PROCESS_LIST, VM_PROCESS\n\n if ssh is None:\n return build_response({\"message\": \"login into vm first\"}, code=400, success=False)\n\n if not is_process_in_process_list(process_port, process_id, process_name):\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n try:\n runtime_exec = RuntimeExecution(process_port, \n process_id,\n process_name, \n ssh, \n docker_client,\n VM_PROCESS)\n if runtime_exec.is_supported():\n\n RUNTIME = runtime_exec.get_runtime()\n return build_response({\"message\": \"started\"})\n else:\n return build_response({\"message\": \"process is not supported\"}, code=400, success=False)\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/start_containerization/mysql/////\")\ndef start_containerization_mysql(process_port, process_id, process_name, mysql_username, mysql_password):\n global ssh, docker_client, RUNTIME, PROCESS_LIST, VM_PROCESS\n\n if ssh is None:\n return build_response({\"message\": \"login into vm first\"}, code=400, success=False)\n\n if not is_process_in_process_list(process_port, process_id, process_name):\n return build_response({\"message\": \"invalid data\"}, code=400, success=False)\n\n try:\n runtime_exec = RuntimeExecution(process_port, \n process_id,\n process_name, \n ssh, \n docker_client,\n VM_PROCESS)\n if runtime_exec.is_supported():\n \n RUNTIME = runtime_exec.get_runtime(mysql_db_username=mysql_username,\n mysql_db_password=mysql_password)\n return build_response({\"message\": \"started\"})\n else:\n return build_response({\"message\": \"process is not supported\"}, code=400, success=False)\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n\n@app.route(\"/save_code\")\ndef save_code():\n global RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n\n try:\n RUNTIME.save_code()\n return build_response({\"message\": \"code saved successfully\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/save_container_info\")\ndef save_container_info():\n global RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n\n try:\n RUNTIME.save_container_info()\n return build_response({\"message\": \"container information saved successfully\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/build_container\")\ndef build_container():\n global RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n\n try:\n RUNTIME.build_container()\n return build_response({\"message\": \"container build was successfully\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/push_container_docker_registry\")\ndef push_container_docker_registry():\n global RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n\n try:\n RUNTIME.push_container_docker_registry()\n return build_response({\"message\": \"container was successfully pushed to docker registry\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/initialize_kubernetes//\")\ndef initialize_kubernetes_file(name, no_replica):\n try:\n global kubernetes_object, ssh\n if ssh is None:\n return build_response({\"message\": \"login into vm first\"}, code=400, success=False)\n kubernetes_object=Kubernetes(name, ssh, no_replica)\n return build_response({\"message\": \"\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kubernetes/add_container\")\ndef kubernetes_add_container():\n try:\n global kubernetes_object, RUNTIME, _env\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n _json=request.json\n _env=_json\n if \"name\" in _json and \"value\" in _json:\n kubernetes_object.add_container(RUNTIME.get_name(), RUNTIME.get_port(), RUNTIME.get_image(), _json, mount_path='/var/lib/mysql')\n else:\n kubernetes_object.add_container(RUNTIME.get_name(), RUNTIME.get_port(), RUNTIME.get_image())\n kubernetes_object.add_service(RUNTIME.get_name(), RUNTIME.get_port())\n return build_response({\"message\": \"container added\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kubernetes/add_service\")\ndef kubernetes_add_service():\n try:\n global kubernetes_object, RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n \n kubernetes_object.add_service(RUNTIME.get_name(), RUNTIME.get_port())\n return build_response({\"message\": \"service added\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kubernetes/add_volume\")\ndef add_volume():\n try:\n global kubernetes_object, RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n kubernetes_object.add_volume(RUNTIME.get_name())\n kubernetes_object.add_persistent_volume(RUNTIME.get_name())\n return build_response({\"message\": \"volume is added\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kubernetes/transfer_data_to_volume\")\ndef transfer_data_to_volume():\n try:\n global kubernetes_object, RUNTIME, _env, ssh\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n \n _pod_name = kubernetes_object.get_pod_name()\n _pod_name = _pod_name.strip()\n\n _source=ssh.get_user_data_path(partial=True) + RUNTIME.get_process_path() + '/' + 'db_dump.sql'\n _destination='/tmp/db_dump.sql'\n _db_password=_env['value']\n kubernetes_object.transfer_file_to_pod(_source, _destination, _pod_name, _db_password)\n kubernetes_object.kubectl_restart_pod(_pod_name)\n\n return build_response({\"message\": \"data transfered successfully\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n\n@app.route(\"/kubernetes/save\")\ndef save_kubernetes_file():\n try:\n global kubernetes_object, RUNTIME\n if RUNTIME is None:\n return build_response({\"message\": \"containerization has not been started\"}, code=400, success=False)\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n kubernetes_object.save_file()\n return build_response({\"message\": \"saved\"})\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/kubernetes/apply\")\ndef apply_kubernetes_file():\n try:\n global kubernetes_object\n if kubernetes_object is None:\n return build_response({\"message\": \"initailize kubernetes first\"}, code=400, success=False)\n kubernetes_object.kubectl_apply()\n return build_response({\"message\": \"applied\"})\n \n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route(\"/logout_vm//\")\ndef logout_vm(username, hostname):\n global ssh\n if not check_vm_cred_exists(username, hostname):\n return build_response({\"message\": \"no such vm\"}, code=400, success=False)\n if ssh is None:\n return build_response({\"message\": \"not logged in\"}, code=400, success=False)\n try:\n \n ssh.close()\n clear()\n return build_response({\"message\": \"logout successful\"})\n\n except Exception as e:\n return build_response({\"message\": str(e)}, code=500, success=False)\n\n@app.route('/kubernetes/get_services')\ndef kubernetes_get_services():\n _cmd='kubectl --kubeconfig kube_config_file get services | awk \\'{print $1, $2, $3, $4, $5, $6}\\''\n p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n output, error = p.communicate()\n error = str(error.decode())\n output = str(output.decode())\n if error != \"\":\n return build_response({\"message\": error}, code=400, success=False)\n return build_response({\"message\": output})\n\ndef clear():\n global RUNTIME, ssh, docker_client, PROCESS_LIST, kubernetes_object, _env\n ssh=None\n docker_client=None\n RUNTIME=None\n PROCESS_LIST=[]\n kubernetes_object=None\n _env=None\n\nif __name__ == '__main__':\n\n app.run(HOST, PORT, DEBUG)\n","repo_name":"aayushsinha44/a2c","sub_path":"a2c/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":20274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69999773496","text":"# 2021-3-4 python mqrun.py essay_version_to_snts http://cikuu.werror.com/app/mq/callback.py \nimport pika,json, sys, time, fire,requests,os\nnow\t= lambda: time.strftime('%Y.%m.%d %H:%M:%S ',time.localtime(time.time()))\n\ndef consume(queue_name='test_queue', callback_url='http://cikuu.werror.com/app/mq/hello.py', host='192.168.1.214', port=5672, user='pigai', pwd='NdyX3KuCq', func_name='callback'): \n\n\tqueue_name = os.getenv('queue_name', 'test_queue')\n\tcallback_url = os.getenv('callback_url', 'http://cikuu.werror.com/app/mq/hello.py')\n\thost = os.getenv('host', '192.168.1.214')\n\tport = os.getenv('port', 5672)\n\tuser = os.getenv('user', 'pigai')\n\tpwd = os.getenv('pwd', 'NdyX3KuCq')\n\tfunc_name = os.getenv('func_name', 'callback')\n\n\tprint(f\"queue is : {queue_name}, callback_url = {callback_url}, host:{host}, port:{port}, {user}, {pwd}\", flush=True)\n\tcredentials = pika.PlainCredentials(user, pwd) \n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(host = host,port = port,virtual_host = '/',credentials = credentials))\n\tchannel=connection.channel()\n\n\ttry:\n\t\tresult = channel.queue_declare(queue = queue_name, durable=True) #'snts_by_gec'\n\t\n\t\tcode = requests.get(callback_url).text\n\t\tscope = {}\n\t\texec(code, scope)\n\n\t\t#x = __import__(func_path, fromlist=['callback'])\n\t\tchannel.basic_consume(queue_name, scope[func_name])\n\t\tchannel.start_consuming()\n\t\t#connection.close()\n\n\texcept Exception as e: \n\t\tprint (\"mqrun consume ex:\", e) \n\t\tchannel.close()\n\t\tconnection.close()\n\nif __name__ == '__main__': \n\tfire.Fire(consume)","repo_name":"corpusly/docker","sub_path":"mqrun/mqrun.py","file_name":"mqrun.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24593661573","text":"import requests\r\n\r\nmain_link = 'https://oauth.vk.com/authorize'\r\n\r\nclient_id = 'client_id=7399273'\r\nscore = 'scope=photos,audio,video,docs,notes,pages,status,offers,questions,wall,groups,email,notifications,stats,ads,offline,docs,pages,stats,notifications'\r\nresponse_type = 'response_type=token'\r\n\r\nfirst_link = f'{main_link}?{client_id}&{score}&{response_type}'\r\n\r\ntest = requests.get(first_link)\r\nprint(test)\r\n\r\nmethods = ['account.getInfo', 'users.get']\r\ntoken = 'здесь будет токен))'\r\n\r\nlast_link = f'https://api.vk.com/method/{methods[1]}'\r\nparams = {\r\n 'access_token': f'{token}',\r\n 'v': '5.103'\r\n}\r\n\r\nresponse = requests.get(last_link, params=params).json()\r\nresult = response['response'][0]\r\n\r\nprint(f\"{result['first_name']} {result['last_name']}\")\r\n","repo_name":"Yegor9151/Methods_of_collecting_and_processing_data_from_the_Internet","sub_path":"Parser_API_vk.py","file_name":"Parser_API_vk.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2935395740","text":"class Directory:\n\n def __init__(self):\n self.directory = {}\n\n def add(self, name):\n \"\"\"\n \"\"\"\n name_parts = name.split(' ')\n if len(name_parts) < 2:\n return False\n\t\t\n last_name = name_parts[len(name_parts) - 1].lower()\n\t\t\n # First occurrance of family name\n if last_name not in self.directory:\n self.directory[last_name] = []\n\n first_name = name_parts[0].lower()\n # Add first name to family\n self.directory[last_name].append(first_name)\n\t\t\n return True\n\n def adds(self, name_list):\n \"\"\"\n \"\"\"\n for name in name_list:\n result = self.add(name)\n \n if(result == False):\n return False\n\n def contains(self, name):\n \"\"\"\n \"\"\"\n name_parts = name.split(' ')\n\t\t\n # Only given the family name\n if len(name_parts) == 1:\n return name_parts[0] in self.directory\n # Given first and last name\n elif len(name_parts) >= 2:\n first_name = name_parts[0].lower()\n last_name = name_parts[len(name_parts) - 1].lower()\n\n # Find the name within the directory\n if last_name in self.directory:\n return first_name in self.directory[last_name]\n\n return False\n\n def get_directory(self):\n return self.directory\n\t\t","repo_name":"FenrirUnbound/Mitsuo","sub_path":"lib/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"12340590607","text":"import md5\nimport sys\n\ndef verbose_usage_and_exit():\n \"\"\" Prints usage and exits. \"\"\"\n sys.stderr.write('Usage:\\n')\n sys.stderr.write('\\tpython \\n'.format(sys.argv[0]))\n exit(0)\n\n\nbad = ['ab', 'cd', 'pq', 'xy']\n\ndef isGood(line):\n isGood1 = len([1 for c in line if c in 'aeiou']) >= 3\n isGood2 = any(a == b for a, b in zip(line, line[1:]))\n isGood3 = not any(part in line for part in bad)\n\n if isGood1 and isGood2 and isGood3:\n return 1\n else:\n return 0\n\n\n\n\ndef main(path):\n cntr = 0\n\n with open(path, 'r') as fin:\n for line in fin:\n cntr += isGood(line)\n\n print(cntr)\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n verbose_usage_and_exit()\n main(sys.argv[1])\n","repo_name":"mratkovic/advent_of_code","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31687057157","text":"# @Date: 2020-05-02T17:04:11+05:30\n# @Last modified time: 2020-05-02T17:08:49+05:30\n\n# First Bad Version\n# https://leetcode.com/problems/first-bad-version/\n\n# Concept Binary search\ndef firstBadVersion(self, n):\n\n l = 1\n r = n\n\n while l' % self.id\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n f_borrower = request.form['borrower']\n f_loan_number = request.form['loan_number']\n f_loan_type = request.form['loan_type']\n f_contact = request.form['contact']\n f_contact_email = request.form['contact_email']\n f_phone_number = request.form['phone_number']\n f_residents = request.form['residents']\n f_street_address = request.form['street_address']\n f_city_state_zip = request.form['city_state_zip']\n new_file = Payoff(borrower=f_borrower, loan_number=f_loan_number,\n loan_type=f_loan_type, contact=f_contact, contact_email=f_contact_email, \n phone_number=f_phone_number, residents=f_residents,\n street_address=f_street_address, city_state_zip=f_city_state_zip)\n\n try:\n db.session.add(new_file)\n db.session.commit()\n return redirect('/')\n except:\n return 'There was an issue adding your payoff'\n\n else:\n payoffs = Payoff.query.order_by(Payoff.date_created).all()\n return render_template('index.html', payoffs=payoffs)\n\n\n# Delete\n@app.route('/delete/')\ndef delete(id):\n payoff_to_delete = Payoff.query.get_or_404(id)\n\n try:\n db.session.delete(payoff_to_delete)\n db.session.commit()\n return redirect('/')\n except:\n return 'There was a problem deleting that payoff'\n\n\n# Update\n@app.route('/update/', methods=['GET', 'POST'])\ndef update(id):\n payoff = Payoff.query.get_or_404(id)\n\n if request.method == 'POST':\n payoff.borrower = request.form['borrower']\n payoff.loan_number = request.form['loan_number']\n payoff.heloc_number = request.form['heloc_number']\n payoff.contact = request.form['contact']\n payoff.contact_email = request.form['contact_email']\n payoff.phone_number = request.form['phone_number']\n payoff.residents = request.form['residents']\n payoff.street_address = request.form['street_address']\n payoff.city_state_zip = request.form['city_state_zip']\n payoff.loan_type = request.form['loan_type']\n payoff.closing_date = request.form['closing_date']\n payoff.funding_date = request.form['funding_date']\n payoff.cancel_date = request.form['cancel_date']\n payoff.fedex_no = request.form['fedex_no']\n payoff.file_comment = request.form['comment']\n if request.form['docs_received'] == 'on':\n payoff.documents_received = datetime.today()\n if request.form['new_lender_info'] == 'on':\n payoff.new_lender_info == True\n try:\n print(request.form['docs_received'])\n except:\n print('could not print')\n try:\n print(request.form['new_lender_info'])\n except:\n print('could not print')\n\n\n\n try:\n db.session.commit()\n return redirect('/')\n except:\n return 'There was an issue updating your payoff'\n\n else:\n return render_template('update.html', payoff=payoff)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Cheyer129/macapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28586842824","text":"import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom numpy.testing import assert_array_equal\n\nfrom sklearn.compose import make_column_transformer\nfrom sklearn.datasets import make_classification\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC, SVR\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.metrics import ConfusionMatrixDisplay\n\n\n# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved\npytestmark = pytest.mark.filterwarnings(\n \"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:\"\n \"matplotlib.*\")\n\n\n@pytest.fixture(scope=\"module\")\ndef n_classes():\n return 5\n\n\n@pytest.fixture(scope=\"module\")\ndef data(n_classes):\n X, y = make_classification(n_samples=100, n_informative=5,\n n_classes=n_classes, random_state=0)\n return X, y\n\n\n@pytest.fixture(scope=\"module\")\ndef fitted_clf(data):\n return SVC(kernel='linear', C=0.01).fit(*data)\n\n\n@pytest.fixture(scope=\"module\")\ndef y_pred(data, fitted_clf):\n X, _ = data\n return fitted_clf.predict(X)\n\n\ndef test_error_on_regressor(pyplot, data):\n X, y = data\n est = SVR().fit(X, y)\n\n msg = \"plot_confusion_matrix only supports classifiers\"\n with pytest.raises(ValueError, match=msg):\n plot_confusion_matrix(est, X, y)\n\n\ndef test_error_on_invalid_option(pyplot, fitted_clf, data):\n X, y = data\n msg = (r\"normalize must be one of \\{'true', 'pred', 'all', \"\n r\"None\\}\")\n\n with pytest.raises(ValueError, match=msg):\n plot_confusion_matrix(fitted_clf, X, y, normalize='invalid')\n\n\n@pytest.mark.parametrize(\"with_labels\", [True, False])\n@pytest.mark.parametrize(\"with_display_labels\", [True, False])\ndef test_plot_confusion_matrix_custom_labels(pyplot, data, y_pred, fitted_clf,\n n_classes, with_labels,\n with_display_labels):\n X, y = data\n ax = pyplot.gca()\n labels = [2, 1, 0, 3, 4] if with_labels else None\n display_labels = ['b', 'd', 'a', 'e', 'f'] if with_display_labels else None\n\n cm = confusion_matrix(y, y_pred, labels=labels)\n disp = plot_confusion_matrix(fitted_clf, X, y,\n ax=ax, display_labels=display_labels,\n labels=labels)\n\n assert_allclose(disp.confusion_matrix, cm)\n\n if with_display_labels:\n expected_display_labels = display_labels\n elif with_labels:\n expected_display_labels = labels\n else:\n expected_display_labels = list(range(n_classes))\n\n expected_display_labels_str = [str(name)\n for name in expected_display_labels]\n\n x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]\n y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]\n\n assert_array_equal(disp.display_labels, expected_display_labels)\n assert_array_equal(x_ticks, expected_display_labels_str)\n assert_array_equal(y_ticks, expected_display_labels_str)\n\n\n@pytest.mark.parametrize(\"normalize\", ['true', 'pred', 'all', None])\n@pytest.mark.parametrize(\"include_values\", [True, False])\ndef test_plot_confusion_matrix(pyplot, data, y_pred, n_classes, fitted_clf,\n normalize, include_values):\n X, y = data\n ax = pyplot.gca()\n cmap = 'plasma'\n cm = confusion_matrix(y, y_pred)\n disp = plot_confusion_matrix(fitted_clf, X, y,\n normalize=normalize,\n cmap=cmap, ax=ax,\n include_values=include_values)\n\n assert disp.ax_ == ax\n\n if normalize == 'true':\n cm = cm / cm.sum(axis=1, keepdims=True)\n elif normalize == 'pred':\n cm = cm / cm.sum(axis=0, keepdims=True)\n elif normalize == 'all':\n cm = cm / cm.sum()\n\n assert_allclose(disp.confusion_matrix, cm)\n import matplotlib as mpl\n assert isinstance(disp.im_, mpl.image.AxesImage)\n assert disp.im_.get_cmap().name == cmap\n assert isinstance(disp.ax_, pyplot.Axes)\n assert isinstance(disp.figure_, pyplot.Figure)\n\n assert disp.ax_.get_ylabel() == \"True label\"\n assert disp.ax_.get_xlabel() == \"Predicted label\"\n\n x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]\n y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]\n\n expected_display_labels = list(range(n_classes))\n\n expected_display_labels_str = [str(name)\n for name in expected_display_labels]\n\n assert_array_equal(disp.display_labels, expected_display_labels)\n assert_array_equal(x_ticks, expected_display_labels_str)\n assert_array_equal(y_ticks, expected_display_labels_str)\n\n image_data = disp.im_.get_array().data\n assert_allclose(image_data, cm)\n\n if include_values:\n assert disp.text_.shape == (n_classes, n_classes)\n fmt = '.2g'\n expected_text = np.array([format(v, fmt) for v in cm.ravel(order=\"C\")])\n text_text = np.array([\n t.get_text() for t in disp.text_.ravel(order=\"C\")])\n assert_array_equal(expected_text, text_text)\n else:\n assert disp.text_ is None\n\n\ndef test_confusion_matrix_display(pyplot, data, fitted_clf, y_pred, n_classes):\n X, y = data\n\n cm = confusion_matrix(y, y_pred)\n disp = plot_confusion_matrix(fitted_clf, X, y, normalize=None,\n include_values=True, cmap='viridis',\n xticks_rotation=45.0)\n\n assert_allclose(disp.confusion_matrix, cm)\n assert disp.text_.shape == (n_classes, n_classes)\n\n rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]\n assert_allclose(rotations, 45.0)\n\n image_data = disp.im_.get_array().data\n assert_allclose(image_data, cm)\n\n disp.plot(cmap='plasma')\n assert disp.im_.get_cmap().name == 'plasma'\n\n disp.plot(include_values=False)\n assert disp.text_ is None\n\n disp.plot(xticks_rotation=90.0)\n rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]\n assert_allclose(rotations, 90.0)\n\n disp.plot(values_format='e')\n expected_text = np.array([format(v, 'e') for v in cm.ravel(order=\"C\")])\n text_text = np.array([\n t.get_text() for t in disp.text_.ravel(order=\"C\")])\n assert_array_equal(expected_text, text_text)\n\n\ndef test_confusion_matrix_contrast(pyplot):\n # make sure text color is appropriate depending on background\n\n cm = np.eye(2) / 2\n disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])\n\n disp.plot(cmap=pyplot.cm.gray)\n # diagonal text is black\n assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0])\n assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0])\n\n # off-diagonal text is white\n assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0])\n assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])\n\n disp.plot(cmap=pyplot.cm.gray_r)\n # diagonal text is white\n assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0])\n assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0])\n\n # off-diagonal text is black\n assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0])\n assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0])\n\n # Regression test for #15920\n cm = np.array([[19, 34], [32, 58]])\n disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])\n\n disp.plot(cmap=pyplot.cm.Blues)\n min_color = pyplot.cm.Blues(0)\n max_color = pyplot.cm.Blues(255)\n assert_allclose(disp.text_[0, 0].get_color(), max_color)\n assert_allclose(disp.text_[0, 1].get_color(), max_color)\n assert_allclose(disp.text_[1, 0].get_color(), max_color)\n assert_allclose(disp.text_[1, 1].get_color(), min_color)\n\n\n@pytest.mark.parametrize(\n \"clf\", [LogisticRegression(),\n make_pipeline(StandardScaler(), LogisticRegression()),\n make_pipeline(make_column_transformer((StandardScaler(), [0, 1])),\n LogisticRegression())])\ndef test_confusion_matrix_pipeline(pyplot, clf, data, n_classes):\n X, y = data\n with pytest.raises(NotFittedError):\n plot_confusion_matrix(clf, X, y)\n clf.fit(X, y)\n y_pred = clf.predict(X)\n\n disp = plot_confusion_matrix(clf, X, y)\n cm = confusion_matrix(y, y_pred)\n\n assert_allclose(disp.confusion_matrix, cm)\n assert disp.text_.shape == (n_classes, n_classes)\n\n\n@pytest.mark.parametrize(\"colorbar\", [True, False])\ndef test_plot_confusion_matrix_colorbar(pyplot, data, fitted_clf, colorbar):\n X, y = data\n\n def _check_colorbar(disp, has_colorbar):\n if has_colorbar:\n assert disp.im_.colorbar is not None\n assert disp.im_.colorbar.__class__.__name__ == \"Colorbar\"\n else:\n assert disp.im_.colorbar is None\n disp = plot_confusion_matrix(fitted_clf, X, y, colorbar=colorbar)\n _check_colorbar(disp, colorbar)\n # attempt a plot with the opposite effect of colorbar\n disp.plot(colorbar=not colorbar)\n _check_colorbar(disp, not colorbar)\n\n\n@pytest.mark.parametrize(\"values_format\", ['e', 'n'])\ndef test_confusion_matrix_text_format(pyplot, data, y_pred, n_classes,\n fitted_clf, values_format):\n # Make sure plot text is formatted with 'values_format'.\n X, y = data\n cm = confusion_matrix(y, y_pred)\n disp = plot_confusion_matrix(fitted_clf, X, y,\n include_values=True,\n values_format=values_format)\n\n assert disp.text_.shape == (n_classes, n_classes)\n\n expected_text = np.array([format(v, values_format)\n for v in cm.ravel()])\n text_text = np.array([\n t.get_text() for t in disp.text_.ravel()])\n assert_array_equal(expected_text, text_text)\n\n\ndef test_confusion_matrix_standard_format(pyplot):\n cm = np.array([[10000000, 0], [123456, 12345678]])\n plotted_text = ConfusionMatrixDisplay(\n cm, display_labels=[False, True]).plot().text_\n # Values should be shown as whole numbers 'd',\n # except the first number which should be shown as 1e+07 (longer length)\n # and the last number will be shown as 1.2e+07 (longer length)\n test = [t.get_text() for t in plotted_text.ravel()]\n assert test == ['1e+07', '0', '123456', '1.2e+07']\n\n cm = np.array([[0.1, 10], [100, 0.525]])\n plotted_text = ConfusionMatrixDisplay(\n cm, display_labels=[False, True]).plot().text_\n # Values should now formatted as '.2g', since there's a float in\n # Values are have two dec places max, (e.g 100 becomes 1e+02)\n test = [t.get_text() for t in plotted_text.ravel()]\n assert test == ['0.1', '10', '1e+02', '0.53']\n\n\n@pytest.mark.parametrize(\"display_labels, expected_labels\", [\n (None, [\"0\", \"1\"]),\n ([\"cat\", \"dog\"], [\"cat\", \"dog\"]),\n])\ndef test_default_labels(pyplot, display_labels, expected_labels):\n cm = np.array([[10, 0], [12, 120]])\n disp = ConfusionMatrixDisplay(cm, display_labels=display_labels).plot()\n\n x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]\n y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]\n\n assert_array_equal(x_ticks, expected_labels)\n assert_array_equal(y_ticks, expected_labels)\n\n\ndef test_error_on_a_dataset_with_unseen_labels(\n pyplot, fitted_clf, data, n_classes\n):\n \"\"\"Check that when labels=None, the unique values in `y_pred` and `y_true`\n will be used.\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/pull/18405\n \"\"\"\n X, y = data\n\n # create unseen labels in `y_true` not seen during fitting and not present\n # in 'fitted_clf.classes_'\n y = y + 1\n disp = plot_confusion_matrix(fitted_clf, X, y)\n\n display_labels = [tick.get_text() for tick in disp.ax_.get_xticklabels()]\n expected_labels = [str(i) for i in range(n_classes + 1)]\n assert_array_equal(expected_labels, display_labels)\n","repo_name":"ryfeus/lambda-packs","sub_path":"Sklearn_arm/source/sklearn/metrics/_plot/tests/test_plot_confusion_matrix.py","file_name":"test_plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":12210,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"72253537977","text":"import sys\r\nimport random\r\n\r\n#~2n comparisons\r\ndef minMax1(inp):\r\n if(inp[0] < inp[1]):\r\n min = inp[0]\r\n max = inp[1]\r\n else:\r\n min = inp[1]\r\n max = inp[0] \r\n \r\n for i in range(2, len(inp)):\r\n if(inp[i] < min):\r\n min = inp[i]\r\n elif(inp[i] > max):\r\n max = inp[i]\r\n print(min)\r\n print(max)\r\n\r\n#~1.5n comparisons\r\ndef minMax2(inp):\r\n if(inp[0] < inp[1]):\r\n min = inp[0]\r\n max = inp[1]\r\n else:\r\n min = inp[1]\r\n max = inp[0] \r\n \r\n for i in range(2, len(inp)-1, 2):\r\n if(inp[i] < inp[i+1]):\r\n pmin = inp[i]\r\n pmax = inp[i+1]\r\n else:\r\n pmin = inp[i+1]\r\n pmax = inp[i] \r\n \r\n if(pmin < min):\r\n min = pmin\r\n if(pmax > max):\r\n max = pmax\r\n if(i < len(inp)):\r\n if(inp[i] < min):\r\n min = inp[i]\r\n elif(inp[i] > max):\r\n max = inp[i] \r\n print(min)\r\n print(max)\r\n \r\ndef main():\r\n n = int(sys.argv[1])\r\n inp = [0]*n\r\n for i in range(n):\r\n inp[i] = random.randint(1, 2*n)\r\n print(inp)\r\n minMax1(inp)\r\n minMax2(inp)\r\n \r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"algorithmica-repository/SCIS-TOP20ADV-PYTHON","sub_path":"adhoc thinking - I/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"13592581076","text":"'''\r\nYou are given a binary string S and an integer K. In one operation, you can pick any bit and flip it, i.e turn 0 to 1 or 1 to 0. \r\nCan you make the string S a palindrome using exactly K operations?\r\n\r\nPrint YES if this is possible, and NO if it is not.\r\n\r\nSample Input :\r\n2\r\n3 0\r\n110\r\n6 1\r\n101100\r\nSample Output :\r\nNO\r\nYES\r\n'''\r\nt=int(input())\r\nfor i in range(t):\r\n n,k=map(int,input().split()) \r\n s=list(input()) \r\n if k==0:\r\n if s==s[::-1]:\r\n print('YES')\r\n else:\r\n print('NO') \r\n continue\r\n i=0 \r\n j=len(s)-1\r\n while(i= 0:\n subobjPred = getattr(SCHEMA, key[:dot])\n subobjNode = self.graph.value(subject=subject,\n predicate=subobjPred)\n if not subobjNode:\n subobjNode = rdflib.BNode()\n self.graph.add((subject, subobjPred, subobjNode))\n for dimensionProperty in dimensionProperties:\n if dimensionProperty['propertyID'] == str(key[:dot]):\n if dimensionProperty['propertyType']:\n if (None, SCHEMA.dimension, dimensionProperty['propertyType']) not in self.graph:\n self.graph.add((subobjNode, rdflib.RDF.type,\n dimensionProperty['propertyType']))\n break\n self._AddDimensionValueTriple(subobjNode, key[dot + 1:], val,\n dimensionProperties, tableMappings)\n return\n lang = None\n at = key.find('@')\n if at >= 0:\n key = key[:at]\n obj = rdflib.Literal(val, language=key[at + 1:])\n else:\n obj = rdflib.Literal(val)\n predicate = getattr(SCHEMA, key)\n for tableMapping in tableMappings:\n if tableMapping['columnIdentifier'] == key:\n if tableMapping['sourceEntity']:\n predicate = tableMapping['sourceEntity']\n break\n self.graph.add((subject, predicate, obj))\n\n def _ExpandDimensionValue(self, dim, equivalentTypes, row_id, row,\n dimensionProperties, tableMappings):\n subject = rdflib.URIRef(row_id)\n self.graph.add((dim, SCHEMA.codeList, row_id))\n self.graph.add((subject, rdflib.RDF.type,\n SCHEMA.DimensionValue))\n self.graph.add(\n (subject, SCHEMA.dimension, dim))\n for type_id in equivalentTypes:\n self.graph.add((subject, rdflib.RDF.type,\n rdflib.URIRef(type_id)))\n for key, val in row.items():\n self._AddDimensionValueTriple(subject, key, val, dimensionProperties,\n tableMappings)\n\n def _ExpandCodeList(self, dim):\n # Set up the DimensionValue's triples.\n # Start with types\n if urlparse(dim).fragment:\n id_prefix = str(dim)\n else:\n id_prefix = str(dim) + '#' + str(self.graph.triples(\n subject=dim,\n predicate=SCHEMA.name)[0])\n id_prefix += '='\n equivalentTypes = self.graph.objects(\n subject=dim,\n predicate=SCHEMA.equivalentType)\n dimensionProperties = self._GetDimensionProperties(dim)\n tableMappings = self._GetTableMappings(dim)\n for codeList in self.graph.objects(\n subject=dim,\n predicate=SCHEMA.codeList):\n if codeList not in self.subjects:\n self.graph.remove((dim, SCHEMA.codeList, codeList))\n with self.getter.Fetch(str(codeList)) as f:\n reader = DictReader(f)\n for row in reader:\n self._ExpandDimensionValue(\n dim, equivalentTypes,\n rdflib.URIRef(id_prefix + row['codeValue']), row,\n dimensionProperties, tableMappings)\n\n def _ExpandFootnotes(self):\n for result in self.graph.query(\n MakeSparqlSelectQuery(\n ('?ds', 'a', 'schema:StatisticalDataset'),\n ('?ds', 'schema:footnote', '?fn'),\n ns_manager=self.graph.namespace_manager)):\n if result['fn'] not in self.subjects:\n self.graph.remove((result['ds'], SCHEMA.footnote, result['fn']))\n id_prefix = urldefrag(str(result['ds'])).url\n with self.getter.Fetch(str(result['fn'])) as f:\n reader = DictReader(f)\n for row in reader:\n row_id = rdflib.URIRef(id_prefix + '#footnote=' + row['codeValue'])\n self.graph.add((result['ds'], SCHEMA.footnote, row_id))\n self.graph.add((row_id, rdflib.RDF.type,\n SCHEMA.StatisticalAnnotation))\n for key, val in row.items():\n fields = key.split('@')\n if len(fields) > 1:\n # A language code is specified\n self.graph.add((row_id, getattr(SCHEMA, fields[0]),\n rdflib.Literal(val, language=fields[1])))\n else:\n self.graph.add((row_id, getattr(SCHEMA, key),\n rdflib.Literal(val)))\n\n def _GetDimensionDataForSlice(self, slice_id, tableMappings):\n ret = {}\n dims = sorted(\n self.graph.objects(\n subject=slice_id,\n predicate=SCHEMA.dimension))\n for dim_id in dims:\n dim_type = list(self.graph.objects(\n subject=dim_id,\n predicate=rdflib.RDF.type))\n dim_equiv_types = list(self.graph.objects(\n subject=dim_id,\n predicate=SCHEMA.equivalentType))\n csv_id = urldefrag(dim_id).fragment\n for tableMapping in tableMappings:\n if tableMapping['sourceEntity'] == dim_id:\n csv_id = str(tableMapping['columnIdentifier'])\n break\n if not csv_id:\n print(\"Unable to determine CSV ID for dimension\", dim_id,\n file=sys.stderr)\n exit(1)\n ret[csv_id] = {\n 'id': dim_id,\n 'type': dim_type,\n 'types': dim_equiv_types\n }\n return ret\n\n def _GetMeasureDataForSlice(self, slice_id, tableMappings):\n ret = {}\n measures = sorted(\n self.graph.objects(\n subject=slice_id,\n predicate=SCHEMA.measure))\n for measure_id in measures:\n unit_codes = list(self.graph.objects(\n subject=measure_id,\n predicate=SCHEMA.unitCode))\n unit_texts = list(self.graph.objects(\n subject=measure_id,\n predicate=SCHEMA.unitText))\n csv_id = urldefrag(measure_id).fragment\n for tableMapping in tableMappings:\n if tableMapping['sourceEntity'] == measure_id:\n csv_id = str(tableMapping['columnIdentifier'])\n break\n if not csv_id:\n print(\"Unable to determine CSV ID for metric\", measure_id,\n file=sys.stderr)\n exit(1)\n ret[csv_id] = {\n 'id': measure_id,\n 'unit_code': unit_codes,\n 'unit_text': unit_texts,\n }\n return ret\n\n def _MakeSliceDataRowId(self, slice_id, dims, measures, row, tableMappings):\n ret = str(slice_id)\n if not urldefrag(slice_id).fragment:\n ret += '#'\n else:\n ret += '/'\n for dim in dims:\n dim_key = dim\n for tableMapping in tableMappings:\n if tableMapping['sourceEntity'] == dim:\n if tableMapping['columnIdentifier']:\n dim_key = str(tableMapping['columnIdentifier'])\n break\n ret += dim + '=' + row[dim_key]\n ret += '/'\n for measure in measures:\n ret += measure\n ret += '/'\n return ret\n\n def _ExpandObservationDimensionValue(self, dim, data, row_id, row):\n node_id = rdflib.BNode()\n self.graph.add((row_id, SCHEMA.dimensionValue, node_id))\n self.graph.add((node_id, rdflib.RDF.type, SCHEMA.DimensionValue))\n self.graph.add((node_id, SCHEMA.dimension, data['id']))\n for dim_type in data['type']:\n if dim_type.endswith('CategoricalDimension'):\n for type_id in data['types']:\n self.graph.add((node_id, rdflib.RDF.type, type_id))\n self.graph.add((node_id, SCHEMA.codeValue, rdflib.Literal(row[dim])))\n else:\n if data['types']:\n self.graph.add(\n (node_id, SCHEMA.value,\n rdflib.Literal(row[dim],\n datatype=rdflib.URIRef(data['types'][0]))))\n else:\n self.graph.add((node_id, SCHEMA.value, rdflib.Literal(row[dim])))\n\n def _ExpandObservationMeasureValue(self, measure, data, row_id, row):\n node_id = rdflib.BNode()\n self.graph.add((row_id, SCHEMA.measureValue, node_id))\n self.graph.add((node_id, rdflib.RDF.type, SCHEMA.MeasureValue))\n for unit_code in data['unit_code']:\n self.graph.add((node_id, SCHEMA.unitCode, rdflib.Literal(unit_code)))\n for unit_text in data['unit_text']:\n self.graph.add((node_id, SCHEMA.unitCode, rdflib.Literal(unit_text)))\n self.graph.add((node_id, SCHEMA.value, rdflib.Literal(row[measure])))\n for footnote in row.get(measure + '*', '').split(';'):\n footnote_id = rdflib.BNode()\n self.graph.add((node_id, SCHEMA.footnote, footnote_id))\n self.graph.add((footnote_id, rdflib.RDF.type,\n SCHEMA.StatisticalAnnotation))\n self.graph.add((footnote_id, SCHEMA.codeValue, rdflib.Literal(footnote)))\n\n def _ExpandSliceData(self, slice_id):\n tableMappings = self._GetTableMappings(slice_id)\n dim_data = self._GetDimensionDataForSlice(slice_id, tableMappings)\n measure_data = self._GetMeasureDataForSlice(slice_id, tableMappings)\n for data_id in self.graph.objects(\n subject=slice_id,\n predicate=SCHEMA.data):\n if data_id not in self.subjects:\n with self.getter.Fetch(data_id) as f:\n reader = DictReader(f)\n try:\n for row in reader:\n row_id = rdflib.URIRef(self._MakeSliceDataRowId(\n slice_id, dim_data, measure_data, row, tableMappings))\n self.graph.add((slice_id, SCHEMA.data, row_id))\n self.graph.add((row_id, rdflib.RDF.type, SCHEMA.Observation))\n self.graph.add((row_id, SCHEMA.slice, slice_id))\n for dim, data in dim_data.items():\n self._ExpandObservationDimensionValue(dim, data, row_id, row)\n for measure, data in measure_data.items():\n self._ExpandObservationMeasureValue(measure, data, row_id, row)\n except Exception as e:\n raise RuntimeError(f\"Error processing {data_id} at line {reader.line_num}\") from e\n\n def Expand(self):\n for dim in set(self.graph.subjects(\n predicate=rdflib.RDF.type,\n object=SCHEMA.CategoricalDimension)):\n self._ExpandCodeList(dim)\n self._ExpandFootnotes()\n for slice_id in set(self.graph.subjects(\n predicate=rdflib.RDF.type,\n object=SCHEMA.DataSlice)):\n self._ExpandSliceData(slice_id)\n return self.graph\n\n\nclass Dspl2JsonLdExpander(object):\n \"\"\"Expand CSV files in an DSPL2 directly as JSON-LD\"\"\"\n def __init__(self, getter):\n self.getter = getter\n\n def _ExpandCodeList(self, dim):\n \"\"\"Load a code list from CSV and return a list of JSON-LD objects.\"\"\"\n codeList = []\n dimProps = []\n tableMappings = {}\n for dimProp in AsList(GetSchemaProp(dim, 'dimensionProperty')):\n dimProps.append(dimProp)\n for tableMapping in AsList(GetSchemaProp(dim, 'tableMapping')):\n tableMappings[GetUrl(tableMapping['sourceEntity'])] = tableMapping\n\n with self.getter.Fetch(GetSchemaProp(dim, 'codeList')) as f:\n reader = DictReader(f)\n for row in reader:\n entry = {k: v for k, v in row.items()}\n if GetSchemaProp(dim, 'equivalentType'):\n entry['@type'] = ['DimensionValue']\n entry['@type'] += AsList(GetSchemaProp(\n dim, 'equivalentType'))\n else:\n entry['@type'] = 'DimensionValue'\n entry['@id'] = GetSchemaId(dim) + '='\n entry['@id'] += row['codeValue']\n entry['dimension'] = GetSchemaId(dim)\n for dimProp in dimProps:\n propId = GetSchemaProp(dimProp, 'propertyID')\n value = dimProp.get('value')\n if propId:\n if value:\n entry[dimProp['propertyID']] = value\n continue\n columnId = propId\n dimPropId = GetSchemaId(dimProp)\n if dimPropId:\n tableMapping = tableMappings.get(dimPropId)\n if tableMapping and 'columnIdentifier' in tableMapping:\n columnId = tableMapping.get('columnIdentifier')\n else:\n columnId = propId\n for field in row:\n if field == columnId:\n if columnId != propId:\n entry[propId] = entry[columnId]\n del entry[columnId]\n elif field.startswith(columnId + '.'):\n entry[columnId] = entry.get(columnId, {\n '@type': dimProp['propertyType']\n })\n if isinstance(entry[columnId], str):\n entry[columnId] = {\n '@type': dimProp['propertyType'],\n 'name': row['columnId']\n }\n entry[columnId][field[len(columnId) + 1:]] = entry[field]\n del entry[field]\n codeList.append(entry)\n return codeList\n\n def _ExpandFootnotes(self, filename, json_val):\n \"\"\"Load footnotes from CSV and return a list of JSON-LD objects.\"\"\"\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes\n\n def _ExpandSliceData(self, slice, dim_defs_by_id, meas_defs_by_id):\n data = []\n tableMappings = {}\n for tableMapping in AsList(GetSchemaProp(slice, 'tableMapping')):\n tableMappings[GetUrl(tableMapping['sourceEntity'])] = tableMapping\n\n with self.getter.Fetch(GetSchemaProp(slice, 'data')) as f:\n reader = DictReader(f)\n for row in reader:\n val = {}\n val['@type'] = 'Observation'\n val['slice'] = GetSchemaId(slice)\n val['dimensionValue'] = []\n val['measureValue'] = []\n for dim in AsList(GetSchemaProp(slice, 'dimension')):\n dim = GetUrl(dim)\n dim_def = dim_defs_by_id.get(dim)\n if dim_def is None:\n raise RuntimeError(\"Unable to find definition for dimension \" + dim)\n tableMapping = tableMappings.get(dim)\n if tableMapping:\n col_id = tableMapping['columnIdentifier']\n else:\n col_id = urlparse(dim).fragment\n dim_val = {\n '@type': 'DimensionValue',\n 'dimension': dim,\n }\n if dim_def:\n if GetSchemaProp(dim_def, '@type') == 'CategoricalDimension':\n dim_val['codeValue'] = row[col_id]\n elif GetSchemaProp(dim_def, '@type') == 'TimeDimension':\n if GetSchemaProp(dim_def, 'equivalentType'):\n dim_val['value'] = {\n '@type': GetSchemaProp(dim_def, 'equivalentType'),\n '@value': row[col_id]\n }\n else:\n dim_val['value'] = row[col_id]\n val['dimensionValue'].append(dim_val)\n\n for measure in AsList(GetSchemaProp(slice, 'measure')):\n measure = GetUrl(measure)\n meas_def = meas_defs_by_id.get(measure)\n tableMapping = tableMappings.get(measure)\n if tableMapping:\n col_id = tableMapping['columnIdentifier']\n else:\n col_id = urlparse(measure).fragment\n val['measureValue'].append({\n '@type': 'MeasureValue',\n 'measure': measure,\n 'value': row[col_id]\n })\n if row.get(col_id + '*'):\n val['measureValue'][-1]['footnote'] = [\n {\n '@type': 'StatisticalAnnotation',\n 'codeValue': footnote\n }\n for footnote in row[col_id + '*'].split(';')\n ]\n data.append(val)\n return data\n\n def Expand(self, *, expandDimensions=True, expandSlices=True):\n json_val = FrameGraph(self.getter.graph, frame=_DataFileFrame)\n if expandDimensions:\n for dim in AsList(GetSchemaProp(json_val, 'dimension')):\n if isinstance(dim.get('codeList'), str):\n dim['codeList'] = self._ExpandCodeList(dim)\n if isinstance(GetSchemaProp(json_val, 'footnote'), str):\n json_val['footnote'] = self._ExpandFootnotes(\n GetSchemaProp(json_val, 'footnote'), json_val)\n if expandSlices:\n dim_defs_by_id = MakeIdKeyedDict(\n AsList(GetSchemaProp(json_val, 'dimension')))\n meas_defs_by_id = MakeIdKeyedDict(\n AsList(GetSchemaProp(json_val, 'measure')))\n for slice in AsList(GetSchemaProp(json_val, 'slice')):\n if isinstance(GetSchemaProp(slice, 'data'), str):\n slice['data'] = self._ExpandSliceData(slice, dim_defs_by_id,\n meas_defs_by_id)\n return json_val\n","repo_name":"google/dspl","sub_path":"tools/dspl2/dspl2/expander.py","file_name":"expander.py","file_ext":"py","file_size_in_byte":18148,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"22"} +{"seq_id":"26984488009","text":"from __future__ import division\nimport tensorflow as tf\nimport numpy as np\n\neps = 1e-5\n\n\ndef embedding_nearest_neighbors(embeddings, batch=None):\n normalized_embeddings = tf.contrib.layers.unit_norm(embeddings, 1, .00001)\n selected_embeddings = tf.nn.embedding_lookup(normalized_embeddings, batch) \\\n if batch is not None else normalized_embeddings\n similarity = tf.matmul(selected_embeddings, normalized_embeddings, transpose_b=True)\n return similarity\n\n\ndef gather_nd(params, indices, shape=None, name=None):\n if shape is None:\n shape = params.get_shape().as_list()\n rank = len(shape)\n flat_params = tf.reshape(params, [-1])\n multipliers = [reduce(lambda x, y: x * y, shape[i + 1:], 1) for i in range(0, rank)]\n indices_unpacked = tf.unstack(tf.cast(tf.transpose(indices, [rank - 1] + range(0, rank - 1), name), 'int32'))\n flat_indices = sum([a * b for a, b in zip(multipliers, indices_unpacked)])\n return tf.gather(flat_params, flat_indices, name=name)\n\n\ndef repeat(tensor, reps):\n flat_tensor = tf.reshape(tensor, [-1, 1]) # Convert to a len(yp) x 1 matrix.\n repeated = tf.tile(flat_tensor, [1, reps]) # Create multiple columns.\n repeated_flat = tf.reshape(repeated, [-1]) # Convert back to a vector.\n return repeated_flat\n\n\ndef last_relevant(output, length):\n batch_size = tf.shape(output)[0]\n max_length = tf.shape(output)[1]\n out_size = int(output.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (length - 1)\n flat = tf.reshape(output, [-1, out_size])\n relevant = tf.gather(flat, index)\n return relevant\n\n\ndef word_dropout(token_batch, keep_prob, pad_id=0, unk_id=1):\n \"\"\" apply word dropout\"\"\"\n # create word dropout mask\n word_probs = np.random.random(token_batch.shape)\n drop_indices = np.where((word_probs > keep_prob) & (token_batch != pad_id))\n token_batch[drop_indices[0], drop_indices[1]] = unk_id\n return token_batch\n\n\ndef attention(keys, values, query, filter=None, message=None, scaled=True):\n attention_expanded = tf.expand_dims(query, [2])\n scale = tf.sqrt(tf.cast(tf.shape(query)[-1], tf.float32)) if scaled else 1.0\n attention_scores = tf.matmul(keys, attention_expanded) / scale\n if filter is not None:\n attention_scores = tf.add(attention_scores, filter)\n attention_weights = tf.nn.softmax(attention_scores, dim=1)\n # if message is not None:\n # attention_weights = tf.Print(attention_weights, [tf.shape(attention_weights),\n # tf.reduce_min(attention_weights[0]),\n # tf.reduce_max(attention_weights[0]),\n # tf.reduce_mean(attention_weights[0])],\n # message=message)\n weighted_tokens = tf.multiply(values, attention_weights)\n return weighted_tokens, attention_weights\n\n\ndef apply_nonlinearity(parameters, nonlinearity_type):\n if nonlinearity_type == \"relu\":\n return tf.nn.relu(parameters, name=\"relu\")\n elif nonlinearity_type == \"tanh\":\n return tf.nn.tanh(parameters, name=\"tanh\")\n elif nonlinearity_type == \"sigmoid\":\n return tf.nn.sigmoid(parameters, name=\"sigmoid\")\n\n\ndef initialize_weights(shape, name, init_type, gain=\"1.0\", divisor=1.0):\n if init_type == \"random\":\n return tf.get_variable(name, initializer=tf.truncated_normal(shape, stddev=0.1))\n if init_type == \"xavier\":\n # shape_is_tensor = issubclass(type(shape), tf.Tensor)\n # rank = len(shape.get_shape()) if shape_is_tensor else len(shape)\n # if rank == 4:\n # return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer_conv2d())\n return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer())\n if init_type == \"identity\":\n middle = int(shape[1] / 2)\n if shape[2] == shape[3]:\n array = np.zeros(shape, dtype='float32')\n identity = np.eye(shape[2], shape[3])\n array[0, middle] = identity\n else:\n m1 = divisor / shape[2]\n m2 = divisor / shape[3]\n sigma = eps*m2\n array = np.random.normal(loc=0, scale=sigma, size=shape).astype('float32')\n for i in range(shape[2]):\n for j in range(shape[3]):\n if int(i*m1) == int(j*m2):\n array[0, middle, i, j] = m2\n return tf.get_variable(name, initializer=array)\n if init_type == \"varscale\":\n return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.variance_scaling_initializer())\n if init_type == \"orthogonal\":\n gain = np.sqrt(2) if gain == \"relu\" else 1.0\n array = np.zeros(shape, dtype='float32')\n random = np.random.normal(0.0, 1.0, (shape[2], shape[3])).astype('float32')\n u, _, v_t = np.linalg.svd(random, full_matrices=False)\n middle = int(shape[1] / 2)\n array[0, middle] = gain * v_t\n return tf.get_variable(name, initializer=array)\n\n\ndef residual_layer(input, w, b, filter_width, dilation, nonlinearity, batch_norm,\n name, batch_size, max_sequence_len, activation, training):\n # if activation == \"pre\" (2): BN -> relu -> weight -> BN -> relu -> weight -> addition\n conv_in_bn = tf.contrib.layers.batch_norm(input, decay=0.995, scale=False, is_training=training, trainable=True) \\\n if batch_norm and activation == 2 else input\n conv_in = apply_nonlinearity(conv_in_bn, nonlinearity) if activation == 2 else conv_in_bn\n\n conv = tf.nn.atrous_conv2d(\n conv_in,\n w,\n rate=dilation,\n padding=\"SAME\",\n name=name) \\\n if dilation > 1 else \\\n tf.nn.conv2d(conv_in, w, strides=[1, filter_width, 1, 1], padding=\"SAME\", name=name)\n\n conv_b = tf.nn.bias_add(conv, b)\n # return conv_b\n\n # if activation == \"post\" (1): weight -> BN -> relu -> weight -> BN -> addition -> relu\n conv_out_bn = tf.contrib.layers.batch_norm(conv_b, decay=0.995, scale=False, is_training=training, trainable=True) \\\n if batch_norm and activation != 2 else conv_b\n conv_out = apply_nonlinearity(conv_out_bn, nonlinearity) if activation != 2 else conv_out_bn\n # if activation == \"none\" (0): weight -> BN -> relu\n conv_shape = w.get_shape()\n if conv_shape[-1] != conv_shape[-2] and activation != 0:\n # if len(input_shape) != 2:\n input = tf.reshape(input, [-1, tf.to_int32(conv_shape[-2])])\n w_r = initialize_weights([conv_shape[-2], conv_shape[-1]], \"w_o_\" + name, init_type=\"xavier\")\n b_r = tf.get_variable(\"b_r_\" + name, initializer=tf.constant(0.01, shape=[conv_shape[-1]]))\n input_projected = tf.nn.xw_plus_b(input, w_r, b_r, name=\"proj_r_\" + name)\n # if len(output_shape) != 2:\n input_projected = tf.reshape(input_projected, tf.stack([batch_size, 1, max_sequence_len, tf.to_int32(conv_shape[-1])]))\n return tf.add(input_projected, conv_out)\n else:\n return conv_out\n\n\ndef orthonormal_initializer(input_size, output_size):\n \"\"\"\"\"\"\n print(tf.get_variable_scope().name)\n I = np.eye(output_size)\n lr = .1\n eps = .05/(output_size + input_size)\n success = False\n tries = 0\n while not success and tries < 10:\n Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)\n for i in xrange(100):\n QTQmI = Q.T.dot(Q) - I\n loss = np.sum(QTQmI**2 / 2)\n Q2 = Q**2\n Q -= lr*Q.dot(QTQmI) / (np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)\n if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):\n tries += 1\n lr /= 2\n break\n success = True\n if success:\n print('Orthogonal pretrainer loss: %.2e' % loss)\n else:\n print('Orthogonal pretrainer failed, using non-orthogonal random matrix')\n Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)\n return Q.astype(np.float32)\n\n\ndef calc_f_score(precision, recall, beta=1):\n if precision + recall <= 0:\n return 0.0\n return (1+(beta**2)) * ((precision * recall) /\n (((beta**2)*precision) + recall))\n\n\ndef load_pretrained_embeddings(str_id_map, embedding_file, dim, vocab_size):\n # load embeddings, if given; initialize in range [-.01, .01]\n preloaded_embeddings = {}\n embeddings_used = 0\n var = 0\n if embedding_file != '':\n print('Loading embeddings from %s ' % embedding_file)\n with open(embedding_file, 'r') as f:\n for line in f.readlines():\n key, value_str = line.strip().split(' ', 1)\n if key not in str_id_map and key+'@' in str_id_map:\n key += '@'\n if key in str_id_map:\n preloaded_vector = [float(v) for v in value_str.split(' ')]\n if len(preloaded_vector) == dim:\n embeddings_used += 1\n v = np.array(preloaded_vector)\n if v.shape[0] == dim:\n var += np.var(v)\n preloaded_embeddings[key] = v\n print(\"Loaded %d/%d embeddings (%2.2f%% coverage)\" % (\n embeddings_used, vocab_size, embeddings_used / float(vocab_size) * 100))\n alpha = (var / len(preloaded_embeddings)) if var != 0 else .1\n normalizer = 1000.0\n print('alpha: %2.3f' % alpha)\n embedding_matrix = np.array([preloaded_embeddings[t] / normalizer if t in preloaded_embeddings\n else (np.sqrt(6.0 / (np.sum(dim))))\n * np.random.uniform(low=-alpha, high=alpha, size=dim)\n for t in str_id_map.iterkeys()])\n return embedding_matrix\n","repo_name":"patverga/bran","sub_path":"src/tf_utils.py","file_name":"tf_utils.py","file_ext":"py","file_size_in_byte":9939,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"22"} +{"seq_id":"2034802483","text":"import bpy\nfrom io_mesh_atomic.xyz_import import ELEMENTS_DEFAULT\n\n\nclass AtomsExport(object):\n __slots__ = ('element', 'location')\n def __init__(self, element, location):\n self.element = element\n self.location = location\n\n\ndef export_xyz(obj_type, filepath_xyz):\n\n list_atoms = []\n counter = 0\n for obj in bpy.context.selected_objects:\n\n if \"STICK\" in obj.name.upper():\n continue\n\n if obj.type not in {'MESH', 'SURFACE', 'META'}:\n continue\n\n name = \"\"\n for element in ELEMENTS_DEFAULT:\n if element[1] in obj.name:\n if element[2] == \"Vac\":\n name = \"X\"\n else:\n name = element[2]\n\n if name == \"\":\n if obj_type == \"0\":\n name = \"?\"\n else:\n continue\n\n if len(obj.children) != 0:\n for vertex in obj.data.vertices:\n location = obj.matrix_world @ vertex.co\n list_atoms.append(AtomsExport(name, location))\n counter += 1\n else:\n if not obj.parent:\n location = obj.location\n list_atoms.append(AtomsExport(name, location))\n counter += 1\n\n xyz_file_p = open(filepath_xyz, \"w\")\n xyz_file_p.write(\"%d\\n\" % counter)\n xyz_file_p.write(\"This XYZ file has been created with Blender \"\n \"and the addon Atomic Blender - XYZ. \"\n \"For more details see the wiki pages of Blender.\\n\")\n\n for i, atom in enumerate(list_atoms):\n string = \"%3s%15.5f%15.5f%15.5f\\n\" % (\n atom.element,\n atom.location[0],\n atom.location[1],\n atom.location[2])\n xyz_file_p.write(string)\n\n xyz_file_p.close()\n\n return True\n","repo_name":"Bforartists/Bforartists","sub_path":"scripts/addons/io_mesh_atomic/xyz_export.py","file_name":"xyz_export.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":492,"dataset":"github-code","pt":"22"} +{"seq_id":"20428468255","text":"class simulatedGame(object):\n '''\n A simplified version of game class for simulations in tree search\n '''\n\n def __init__(self, slots, gameEnded, winner, redTurn):\n self.slots = slots\n self.gameEnded = gameEnded\n self.winner = winner # False(0) for red(1) won, True(1) for yellow(-1) won, -1 for draw\n self.redTurn = redTurn\n\n # Return True if successfully played the move, input is assumed to be valid\n def placeMove(self, x):\n # Find y of the move at x\n y = 0\n while y < 6 and (self.slots[x][y] == 0):\n y += 1\n y -= 1\n # Modify the board\n self.__changeBoard(x, y)\n self.redTurn = not self.redTurn # Change sides\n # Check the game status\n if self.__checkPlayerWon(1):\n self.gameEnded = True\n self.winner = 0\n elif self.__checkPlayerWon(-1):\n self.gameEnded = True\n self.winner = 1\n elif self.__checkDraw():\n self.gameEnded = True\n self.winner = -1\n \n def __checkPlayerWon(self, color):\n # Check Horizontal\n for y in range(6):\n for x in range(0, 4):\n if self.slots[x][y]==color and self.slots[x+1][y]==color \\\n and self.slots[x+2][y]==color and self.slots[x+3][y]==color:\n return True\n # Check Vertical\n for x in range(7):\n for y in range(0, 3):\n if self.slots[x][y]==color and self.slots[x][y+1]==color \\\n and self.slots[x][y+2]==color and self.slots[x][y+3]==color: \n return True\n # Check Diagonal\n for x in range(0,4):\n for y in range(3,6):\n if self.slots[x][y]==color and self.slots[x+1][y-1]==color \\\n and self.slots[x+2][y-2]==color and self.slots[x+3][y-3]==color:\n return True\n if self.slots[x][y-3]==color and self.slots[x+1][y-2]==color \\\n and self.slots[x+2][y-1]==color and self.slots[x+3][y]==color:\n return True\n\n # Place the move by editing slots list\n def __changeBoard(self, x, y):\n if self.redTurn:\n c = 1\n else:\n c = -1\n self.slots[x][y] = c\n\n def __checkDraw(self):\n for x in range(7):\n # False if any slot on the top row is empty\n if self.slots[x][0] == 0:\n return False\n return True\n\n # Return a list of valid moves (0-6)\n def getValidMoves(self, returnBinary=False):\n cord = []\n binaryCord = []\n for x in range(7):\n lowest_y = -1\n for y in range(6):\n if self.slots[x][y] == 0:\n lowest_y += 1\n if lowest_y != -1:\n cord.append(x)\n binaryCord.append(1)\n else:\n binaryCord.append(0)\n if returnBinary:\n return binaryCord, cord\n else:\n return cord","repo_name":"Tailen/Alpha_Connect","sub_path":"simulateGame.py","file_name":"simulateGame.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"21353263528","text":"\"\"\"Write a function that receives as a parameters a list of musical notes (strings), a list of moves\n (integers) and a start position (integer). The function will return the song composed by going though\n the musical notes beginning with the start position and following the moves given as parameter.\"\"\"\n\nnotes = [\"do\", \"re\", \"mi\", \"fa\", \"sol\"]\nmoves = [1, -3, 4, 2]\nstart = 2\n\n\ndef compose(notes, moves, start):\n result = []\n result.append(notes[start])\n actual = start\n for index in moves:\n actual = (actual + index) % len(notes)\n result.append(notes[actual])\n return result\n\n\nprint(compose(notes, moves, start))\n","repo_name":"SpantuTheodor/Laboratoare-PY","sub_path":"Laborator2/Problema4.py","file_name":"Problema4.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"536477780","text":"import tensorflow as tf\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom utils import read_raw, plot_history\nimport os\nfrom tensorflow.keras.optimizers import SGD\n\n#config = read_raw('config.cfg')\n\n\nWIDTH = round(400)\nHEIGHT = round(400)\n\nIMG_SHAPE = (WIDTH, HEIGHT, 3)\n\n# Create the base model from the pre-trained model MobileNet V2\nbase_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')\nbase_model.trainable = False\nglobal_average_layer = tf.keras.layers.GlobalAveragePooling2D()\nprediction_layer = tf.keras.layers.Dense(1,activation='sigmoid')\n\nmodel = tf.keras.Sequential([\n base_model,\n global_average_layer,\n tf.keras.layers.Dropout(0.2),\n prediction_layer,\n])\n\n#base_dir = '/Users/giuseppemarotta/Documents/raw-data/project-x'\nSOURCEDIR = '/Users/giuseppemarotta/Documents/raw-data/originals/'\n\nUNHAPPEN_SOURCE_DIR = SOURCEDIR+\"unhappen-rides/\"\nTRAINING_UNHAPPEN_DIR = SOURCEDIR+\"tmp/unhappen-v-happen/training/unhappen/\"\nTESTING_UNHAPPEN_DIR = SOURCEDIR+\"tmp/unhappen-v-happen/testing/unhappen/\"\nHAPPEN_SOURCE_DIR = SOURCEDIR+\"valid-rides/\"\nTRAINING_HAPPEN_DIR = SOURCEDIR+\"tmp/unhappen-v-happen/training/happen/\"\nTESTING_HAPPEN_DIR = SOURCEDIR+\"tmp/unhappen-v-happen/testing/happen/\"\n\n#train_dir = os.path.join(base_dir, 'train')\ntrain_dir = SOURCEDIR + \"tmp/unhappen-v-happen/training/\"\nvalidation_dir = SOURCEDIR + \"tmp/unhappen-v-happen/testing/\"\n\n\n\n#validation_dir = os.path.join(base_dir, 'validation')\n\n# Directory with our training cat pictures\n#train_valid_rides_dir = os.path.join(train_dir, 'valid')\n\n# Directory with our training dog pictures\n#train_invalid_rides_dir = os.path.join(train_dir, 'invalid')\n\n# Directory with our training cat pictures\n#validation_valid_rides_dir = os.path.join(validation_dir, 'valid')\n\n# Directory with our training dog pictures\n#validation_invalid_rides_dir = os.path.join(validation_dir, 'invalid')\n\ntrain_datagen = ImageDataGenerator( rescale =1.0/255.,\n fill_mode='nearest',\n width_shift_range=0.1,\n horizontal_flip=True,\n rotation_range=25,\n zoom_range=0.1\n )\n\n# Flow training images in batches of 128 using train_datagen generator\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # This is the source directory for training images\n target_size=(WIDTH, HEIGHT), # All images will be resized to 150x150\n color_mode='rgb',\n class_mode='binary',\n shuffle=True,\n )\n\nvalidation_datagen = ImageDataGenerator(rescale=1.0/255.)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_dir, # This is the source directory for training images\n target_size=(WIDTH, HEIGHT), # All images will be resized to 150x150\n batch_size=16,\n color_mode='rgb',\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary',\n shuffle=True)\n\n# All images will be rescaled by 1./255\n\n\ncheckpoint_path = \"uh_training_lr001/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\nprint(checkpoint_dir)\n# Create a callback that saves the model's weights\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1,\n period=1)\n\nSTEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size\nlatest = tf.train.latest_checkpoint(checkpoint_dir)\nif latest:\n model.load_weights(latest)\nmodel.compile(loss='binary_crossentropy',\n optimizer=RMSprop(lr=0.001),\n #optimizer=SGD(lr=0.1, momentum=0.9),\n metrics=['acc'])\nprint('Print in ' + str(STEP_SIZE_TRAIN))\nmodel.summary()\nprint('Number of Steps =' +str(STEP_SIZE_TRAIN))\nhistory = model.fit_generator(\n train_generator,\n validation_data=validation_generator,\n epochs=6,\n verbose=1,\n callbacks=[cp_callback]\n)\n\nplot_history(history)","repo_name":"seppemarotta/unhappen-ride-tf-model","sub_path":"unhappen-ride-tf-model.py","file_name":"unhappen-ride-tf-model.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"8554585159","text":"import unittest\nfrom modules.User import *\nfrom modules.Role import *\nfrom main import *\nfrom unittest.mock import patch, Mock\n\n\nclass TestSum(unittest.TestCase):\n @patch('main.getRoles')\n def test_set_user_successful(self, MockRoles):\n \"\"\"\n Test that it set valid set of users\n \"\"\"\n\n # Arrange\n getRoles = MockRoles()\n roles = []\n\n roles.append(Role(1, 'Admin', 0))\n roles.append(Role(2, 'Manager', 1))\n roles.append(Role(3, 'Supervisor', 2))\n\n getRoles.return_value = roles\n\n data = '[{\"Id\":1,\"Name\":\"Adam Admin\",\"Role\":1},{\"Id\":2,\"Name\":\"Emily Employee\",\"Role\":2},{\"Id\":3,\"Name\":\"Sam Supervisor\",\"Role\":3}]'\n\n # Act\n result = setUsers(data, loadDefault=False)\n\n # Assert\n self.assertEqual(len(result), 3)\n\n @patch('main.getRoles')\n def test_set_user_failed(self, MockRoles):\n \"\"\"\n Test that it set valid set of users\n \"\"\"\n\n # Arrange\n getRoles = MockRoles()\n roles = []\n\n roles.append(Role(1, 'Admin', 0))\n roles.append(Role(2, 'Manager', 1))\n roles.append(Role(3, 'Supervisor', 2))\n\n getRoles.return_value = roles\n\n data = '[{\"Id\":1,\"Name\":\"Adam Admin\",\"Role\":1},{\"Id\":2,\"Name\":\"Emily Employee\",\"Role\":2},{\"Id\":3,\"Name\":\"Sam Supervisor\",\"Role\":3}]'\n\n # Act\n result = setUsers(data, loadDefault=False)\n\n # Assert\n self.assertIsNot(len(result), 6)\n\n def test_set_role_successful(self):\n \"\"\"\n Test that it set valid set of roles\n \"\"\"\n\n # Arrange\n data = '[{\"Id\":1,\"Name\":\"System Administrator\",\"Parent\":0},{\"Id\":2,\"Name\":\"Location Manager\",\"Parent\":1},{\"Id\":3,\"Name\":\"Supervisor\",\"Parent\":2},{\"Id\":4,\"Name\":\"Employee\",\"Parent\":3},{\"Id\":5,\"Name\":\"Trainer\",\"Parent\":3}]'\n\n # Act\n result = setRoles(data, loadDefault=False)\n\n # Assert\n self.assertEqual(len(result), 5)\n\n def test_get_sub_ordinates_successful(self):\n \"\"\"\n Test that it set valid set of users\n \"\"\"\n\n # Arrange\n setRoles([], loadDefault=True)\n setUsers([], loadDefault=True)\n\n # Act\n result = getSubOrdinates(4, printResult=False)\n\n # Assert\n self.assertEqual(len(result), 3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"arashzargar/UsersHierarchy","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37674582893","text":"from typing import Any, Dict, Optional\n\nimport pytorch_lightning as pl\nimport torch\nfrom sklearn.model_selection import GroupShuffleSplit\nfrom torch.utils.data import DataLoader, Subset\n\nfrom ..datasets import TropicalCycloneWindEstimation\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass CycloneDataModule(pl.LightningDataModule):\n \"\"\"LightningDataModule implementation for the NASA Cyclone dataset.\n\n Implements 80/20 train/val splits based on hurricane storm ids.\n See :func:`setup` for more details.\n \"\"\"\n\n def __init__(\n self,\n root_dir: str,\n seed: int,\n batch_size: int = 64,\n num_workers: int = 0,\n api_key: Optional[str] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize a LightningDataModule for NASA Cyclone based DataLoaders.\n\n Args:\n root_dir: The ``root`` arugment to pass to the\n TropicalCycloneWindEstimation Datasets classes\n seed: The seed value to use when doing the sklearn based GroupShuffleSplit\n batch_size: The batch size to use in all created DataLoaders\n num_workers: The number of workers to use in all created DataLoaders\n api_key: The RadiantEarth MLHub API key to use if the dataset needs to be\n downloaded\n \"\"\"\n super().__init__() # type: ignore[no-untyped-call]\n self.root_dir = root_dir\n self.seed = seed\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.api_key = api_key\n\n def custom_transform(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Transform a single sample from the Dataset.\n\n Args:\n sample: dictionary containing image and target\n\n Returns:\n preprocessed sample\n \"\"\"\n sample[\"image\"] = sample[\"image\"] / 255.0 # scale to [0,1]\n sample[\"image\"] = (\n sample[\"image\"].unsqueeze(0).repeat(3, 1, 1)\n ) # convert to 3 channel\n sample[\"label\"] = torch.as_tensor(sample[\"label\"]).float()\n\n return sample\n\n def prepare_data(self) -> None:\n \"\"\"Initialize the main ``Dataset`` objects for use in :func:`setup`.\n\n This includes optionally downloading the dataset. This is done once per node,\n while :func:`setup` is done once per GPU.\n \"\"\"\n TropicalCycloneWindEstimation(\n self.root_dir,\n split=\"train\",\n transforms=self.custom_transform,\n download=self.api_key is not None,\n api_key=self.api_key,\n )\n\n def setup(self, stage: Optional[str] = None) -> None:\n \"\"\"Create the train/val/test splits based on the original Dataset objects.\n\n The splits should be done here vs. in :func:`__init__` per the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html#setup.\n\n We split samples between train/val by the ``storm_id`` property. I.e. all\n samples with the same ``storm_id`` value will be either in the train or the val\n split. This is important to test one type of generalizability -- given a new\n storm, can we predict its windspeed. The test set, however, contains *some*\n storms from the training set (specifically, the latter parts of the storms) as\n well as some novel storms.\n\n Args:\n stage: stage to set up\n \"\"\"\n self.all_train_dataset = TropicalCycloneWindEstimation(\n self.root_dir,\n split=\"train\",\n transforms=self.custom_transform,\n download=False,\n )\n\n self.all_test_dataset = TropicalCycloneWindEstimation(\n self.root_dir,\n split=\"test\",\n transforms=self.custom_transform,\n download=False,\n )\n\n storm_ids = []\n for item in self.all_train_dataset.collection:\n storm_id = item[\"href\"].split(\"/\")[0].split(\"_\")[-2]\n storm_ids.append(storm_id)\n\n train_indices, val_indices = next(\n GroupShuffleSplit(test_size=0.2, n_splits=2, random_state=self.seed).split(\n storm_ids, groups=storm_ids\n )\n )\n\n self.train_dataset = Subset(self.all_train_dataset, train_indices)\n self.val_dataset = Subset(self.all_train_dataset, val_indices)\n self.test_dataset = Subset(\n self.all_test_dataset, range(len(self.all_test_dataset))\n )\n\n def train_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for training.\n\n Returns:\n training data loader\n \"\"\"\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=True,\n )\n\n def val_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for validation.\n\n Returns:\n validation data loader\n \"\"\"\n return DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=False,\n )\n\n def test_dataloader(self) -> DataLoader[Any]:\n \"\"\"Return a DataLoader for testing.\n\n Returns:\n testing data loader\n \"\"\"\n return DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=False,\n )\n","repo_name":"Nirj2004/TORCHGEO","sub_path":"torchgeo/datamodules/cyclone.py","file_name":"cyclone.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"2110953610","text":"from django.shortcuts import render\nfrom temba.models import ContactGroup\n\n# Create your views here.\n\n\ndef index(request):\n all_groups = ContactGroup.objects.all()\n groups = []\n for this_group in all_groups:\n groups.append((this_group.group_name, this_group.number_of_contacts))\n context = {'all_groups_list': groups}\n return render(request, 'temba/index.html', context)\n","repo_name":"allandereal/temba_port","sub_path":"temba/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"529435440","text":"#!/usr/bin/env python3\nimport rospy\nimport numpy as np\nimport cv2 as cv\nfrom gaze_tracking.msg import gazedata\nfrom gaze_tracking.msg import Blob_Params\nfrom gaze_tracking.msg import Blobs\n\ngaze_received = False\nblob_received = False\n\n# Get the gaze tracker data from the message\ndef get_gaze(data):\n global POGX\n global POGY\n global POGV\n global POGD\n global gaze_received\n POGX = data.POGX\n POGY = data.POGY\n POGV = data.POGV\n POGD = data.POGD\n gaze_received = True\n\n data_received = True\n\ndef get_blobs(data):\n global blob_list\n global blob_received\n blob_list = data.blob_list\n blob_received = True\n\ndef main():\n rospy.init_node('sel_obj_blobs', anonymous = True)\n # initilize a subscriber to get darknet bounding boxes\n gaze_sub = rospy.Subscriber(\"/gaze_publisher\", gazedata, get_gaze)\n sel_areas = rospy.Subscriber(\"/blob_data\", Blobs, get_blobs)\n sel_pub = rospy.Publisher('/selected_obj', Blob_Params, queue_size = 1)\n rate = rospy.Rate(10)\n\n while not rospy.is_shutdown():\n if gaze_received & blob_received:\n for i in range(len(blob_list)):\n # relative gaze point for cropped image\n point_x = int(900*POGX)\n point_y = int(720*POGY)\n x_coord = blob_list[i].x_coord\n y_coord = blob_list[i].y_coord\n radius = blob_list[i].radius\n # print(x_coord, y_coord, radius)\n # print(point_x, point_y)\n\n # this is actually selecting within a box vs a circle shape\n # should actually provide more robust selectin because bigger area\n if ((x_coord - radius) < point_x < (x_coord + radius)) and ((y_coord - radius) < point_y < (y_coord + radius)):\n if POGD < .6:\n print(\"object\", i, \"will be selected\")\n continue\n elif POGD > .6:\n sel_pub.publish(blob_list[i])\n print(\"object\", i, \"selected\")\n\n\n rate.sleep()\n\nif __name__ == '__main__':\n main()\n","repo_name":"onewordFOURWORDS/gaze-tracking-pick-and-place","sub_path":"src/object_selection/scripts/sel_obj_blobs.py","file_name":"sel_obj_blobs.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"5322555452","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 29 18:27:27 2020\r\n\r\n@author: utob\r\n\"\"\"\r\n# HAME POINT HA BAYAD BOZORG NEVESHTE SHAVAD\r\nclass Point:\r\n def move(self):\r\n print(\"move\")\r\n \r\n def draw(self):\r\n print(\"draw\")\r\n \r\n\r\nPoint1 = Point()\r\nPoint1.x = 10\r\nPoint1.y = 20\r\nprint(Point1.x)\r\nPoint1.draw()\r\n\r\nPoint2 = Point()\r\nPoint2.x = 1 \r\nprint(Point2.x)\r\n","repo_name":"pani-ps1/my-practice-2-python","sub_path":"43.CALSSES1.py","file_name":"43.CALSSES1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42839245617","text":"# function goes here\n\n# checks that input is a float or an integer which is more zero.\n# Has custom error messages for int/float\ndef num_check(question, error, num_type):\n valid = False\n while not valid:\n\n try:\n response = num_type(input(question))\n\n # if response is less or = to 0\n if response <= 0:\n print(error)\n else:\n return response\n\n except ValueError:\n print(error)\n\n# MAIN ROUTINE\nget_int = num_check(\"How much do you need? \",\n \"Please enter an integer (whole number) which is more than 0\\n\",\n int)\n\nget_cost = num_check(\"How much does it costs? $\",\n \"Please enter an number that is more than 0\\n\",\n float)\n\nprint(\"You need: {}\".format(get_int))\nprint(\"It coast: ${} \".format(get_cost))","repo_name":"bholaa72429/FRC","sub_path":"01_number_checker.py","file_name":"01_number_checker.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20058055348","text":"from brownie import accounts \nfrom brownie.network.account import ClefAccount\n\n\ndef encode_function_data(initializer=None, *args):\n \"\"\"Encodes the function call so we can work with an initializer.\n\n Args:\n initializer ([brownie.network.contract.ContractTx], optional):\n The initializer function we want to call. Example: `box.store`.\n Defaults to None.\n\n args (Any, optional):\n The arguments to pass to the initializer function\n\n Returns:\n [bytes]: Return the encoded bytes.\n \"\"\"\n if not len(args): args = b''\n\n if initializer: return initializer.encode_input(*args)\n\n return b''\n\n\ndef upgrade(\n account,\n proxy,\n newimplementation_address,\n proxy_admin_contract=None,\n initializer=None,\n *args\n):\n transaction = None\n if proxy_admin_contract:\n if initializer:\n encoded_function_call = encode_function_data(initializer, *args)\n transaction = proxy_admin_contract.upgradeAndCall(\n proxy.address,\n newimplementation_address,\n encoded_function_call,\n {\"from\": account},\n )\n else:\n transaction = proxy_admin_contract.upgrade(\n proxy.address, newimplementation_address, {\"from\": account}\n )\n else:\n if initializer:\n encoded_function_call = encode_function_data(initializer, *args)\n transaction = proxy.upgradeToAndCall(\n newimplementation_address, encoded_function_call, {\"from\": account}\n )\n else:\n transaction = proxy.upgradeTo(newimplementation_address, {\"from\": account})\n return transaction\n\ndef get_account(useClef):\n print(useClef)\n if useClef == False:\n print(\"Grabbing first embed account.\")\n return accounts[0]\n\n print(\"Connecting to Clef.\")\n accounts.connect_to_clef()\n for act in accounts:\n if type(act) is ClefAccount:\n print(\"Grabbing first Clef account.\")\n return act\n\n return None","repo_name":"kilogold/Door","sub_path":"brownie/scripts/helpful_scripts.py","file_name":"helpful_scripts.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10203772879","text":"cur_x = 3\nrate = 0.01 # Learning rate\nprecision = 0.00001\nprevious_step_size = 1\nmax_iters = 10000\niters = 0\ndf = lambda x: 2*x - 2.71 #Gradient\n\nwhile previous_step_size > precision and iters < max_iters:\n prev_x = cur_x # Store current x value in prev_x\n cur_x = cur_x - rate * df(prev_x) # Grad descent\n previous_step_size = abs(cur_x - prev_x) # Change in x\n iters = iters + 1 # iteration count\n print(\"Iteration\", iters, \"\\nX value is\", cur_x) # Print iterations\n\nprint(\"The local minimum occurs at\", cur_x)","repo_name":"szymonln/Mownit2","sub_path":"lab9gradient.py","file_name":"lab9gradient.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22802761958","text":"# -*- coding: utf-8 -*-\n\nimport simple_draw as sd\n\n\n# Запросить у пользователя желаемую фигуру посредством выбора из существующих\n# вывести список всех фигур с номерами и ждать ввода номера желаемой фигуры.\n# и нарисовать эту фигуру в центре экрана\n\n\ndef figura(nach, ugol, dlina, storana):\n i = int(360 / storana)\n for ug in range(0, 361, i):\n v = sd.get_vector(start_point=nach, angle=ugol + ug, length=dlina, width=3)\n v.draw(color=sd.COLOR_DARK_GREEN)\n nach = v.end_point\n\n\ndef treugol():\n storana = 3\n nach = sd.get_point(200, 200)\n dlina = 100\n ugol = 30\n return figura(nach=nach, ugol=ugol, dlina=dlina, storana=storana)\n\n\ndef kvadrat():\n storana = 4\n nach = sd.get_point(200, 200)\n dlina = 100\n ugol = 30\n return figura(nach=nach, ugol=ugol, dlina=dlina, storana=storana)\n\n\ndef pyti():\n nach = sd.get_point(200, 200)\n storana = 5\n dlina = 100\n ugol = 30\n return figura(nach=nach, ugol=ugol, dlina=dlina, storana=storana)\n\n\ndef shesty():\n nach = sd.get_point(200, 200)\n storana = 6\n dlina = 100\n ugol = 30\n return figura(nach=nach, ugol=ugol, dlina=dlina, storana=storana)\n\n\ndef print_them_all_v(**kwargs):\n print('Возможные фигуры:')\n for key, value in kwargs.items():\n print(key, ':', value)\n\n\ndef find_element(tree, elem):\n func = {'0': treugol, '1': kvadrat, '2': pyti, '3': shesty}\n if elem in tree:\n func[elem]()\n return\n\n\nnumer_figury = {\n '0': 'треугольник',\n '1': 'квадрат',\n '2': 'пятиугольник',\n '3': 'шестиугольник'\n}\n\nprint_them_all_v(**numer_figury)\n\nwhile True:\n n = input('Введите желаемую фигуру от 0 до 3: ')\n if n in numer_figury:\n find_element(numer_figury, n)\n break\n else:\n print('Вы ввели некорректную фигуры!!!')\n\nsd.pause()\n\n","repo_name":"nickolas-black/Python","sub_path":"разные программки/Запросить у пользователя желаемую фигуру посредством выбора из существующих.py","file_name":"Запросить у пользователя желаемую фигуру посредством выбора из существующих.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19996061523","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\nfrom datetime import datetime as dt \n\nfrom parameters import list_cities, cities_all\n\nclass InputContainerMain:\n\tdef __init__(\n\t\tself,\n\t\tpicked_city='all', picked_version=1,\n\t\tpicked_datetime=str(pd.to_datetime('2020-07-01')),\n\t\tmax_date_allowed=str(pd.to_datetime('2020-12-31')),\n\t\tpicked_hour= 16,\n\t\tfilter_keyword='', n_clicks=0,\n\t\ttask_in_progress=False):\n\n\t\tself.container = [\n\t\t\thtml.Label('Select City:', \n\t\t\t\tstyle={'margin': '5px 10px 5px 0'}),\n\t\t\tdcc.Dropdown(id='picked_city',\n\t\t\t\toptions = list_cities,\n\t\t\t\tvalue = picked_city,\n\t\t\t\tclearable = False,\n\t\t\t\tstyle = {'minWidth': '150px'}),\n\t\t\tdcc.Dropdown(id='picked_version',\n\t\t\t\toptions = [{'label': 'Version ' + str(i+1), 'value': i+1}\n\t\t\t\t for i, ver in enumerate(cities_all)],\n\t\t\t\tvalue = picked_version,\n\t\t\t\tstyle = {'minWidth': '100px'},\n\t\t\t\tclearable = False),\n\t\t\thtml.Label('Select Date:', \n\t\t\t\tstyle={'margin': '5px 10px 5px 25px'}),\n\t\t\tdcc.DatePickerSingle(\n\t\t id='picked_datetime',\n\t\t min_date_allowed=dt(2020, 5, 27),\n\t\t max_date_allowed=max_date_allowed,\n\t\t date=picked_datetime,\n\t\t\t),\n\t\t\tdcc.Dropdown(id='picked_hour', \n\t\t\t\toptions = [{'label': str(h) + ':00', 'value':h} for h in range(24)],\n\t\t\t\tvalue=picked_hour,\n\t\t\t\tclearable = False,\n\t\t\t\tstyle = {'minWidth': '100px'},\n\t\t\t\t),\n\t\t\tdcc.Input(id='filter_keyword', type='text', value=filter_keyword,\n\t\t\t\tplaceholder='Enter a keyword',\n\t\t\t\t#debounce=True,\n\t\t\t\tstyle = {'minWidth': '80px'}\n\t\t\t),\n\t\t\thtml.Button('Filter', id='filter_submit',\n\t\t\t\tn_clicks = n_clicks,\n\t\t\t\tstyle = {'padding': '0 20px'}),\n\t\t\t# hidden data\n\t\t\thtml.Div(id= 'task_in_progress', \n\t\t\t\tstyle={'display': 'none'}, children = task_in_progress)\n\t\t\t]\n\t\tself.assign_index()\n\n\tdef assign_index(self):\n\t\tself.idx = {} \n\t\tfor i, item in enumerate(self.container): \n\t\t\tif hasattr(item, 'id'): \n\t\t\t\tself.idx[item.id] = i\n\n\tdef set_picked_version_style(self):\n\t pos1 = self.idx['picked_city']\n\t pos2 = self.idx['picked_version'] \n\t if 'props' in self.container[pos1]:\n\t city = self.container[pos1]['props']['value']\n\t if city=='all': \n\t self.container[pos2]['props']['style'] = {'min-width': '100px', 'display': 'flex'}\n\t else:\n\t self.container[pos2]['props']['style'] = {'display': 'none'}\n\t elif hasattr(self.container[pos1],'value'):\n\t city = self.container[pos1].value \n\t if city=='all': \n\t self.container[pos2].style = {'min-width': '100px', 'display': 'flex'}\n\t else:\n\t self.container[pos2].style = {'display': 'none'}\n\n\n# def clear_filter_keyword(input_container):\n# \t pos = [i for i, item in enumerate(input_container) if getattr(item, 'id', '_na')=='filter_keyword'][0]\n# \t if 'props' in input_container[pos]:\n# \t input_container[pos]['props']['value'] = ''\n# \t elif hasattr(input_container[pos],'value'):\n# \t input_container,[pos].value = ''\n\ndef set_task_in_progress_false(input_container, in_progress=False):\n pos = [i for i, item in enumerate(input_container) if item['props'].get('id', '_na')=='task_in_progress'][0]\n input_container[pos]['props']['children'] = in_progress\n\n\n\ndef subscribe_button(subscribe=True, email=''):\n\tif subscribe:\n\t\tstyle1 = {'padding': '0 20px'}\n\t\tstyle2 = {'display': 'none'}\n\telse:\n\t\tstyle1 = {'display': 'none'}\n\t\tstyle2 = {'padding': '0 20px'}\n\n\treturn \t[dcc.Input(id='subscribe_email', type='text', value=email,\n\t\t\t\t\t\tplaceholder='Enter an email address',\n\t\t\t\t\t\tdebounce=True,\n\t\t\t\t\t\tstyle = {'minWidth': '50px'}\n\t\t\t\t\t),\n\t\t\t html.Button('Subscribe', id='button_subscribe',\n\t\t\t\t\t\tn_clicks = 0,\n\t\t\t\t\t\tstyle = style1),\n\t\t\t html.Button('Unsubscribe', id='button_unsubscribe',\n\t\t\t\t\t\tn_clicks = 0,\n\t\t\t\t\t\tstyle = style2)\n\t\t\t ]\n\n\n","repo_name":"kotamine/dash_BLM_tracker","sub_path":"container_app/dynamic_input_containers.py","file_name":"dynamic_input_containers.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15530438422","text":"#BotTalk \n\n#Created by Adam Brook on May 17 2023\n\n\nimport os\nimport openai\nimport gradio as gr\n\n\nopenai.api_key = \"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\"\n\nstart_sequence = \"\\nAI:\"\nrestart_sequence = \"\\nHuman: \"\nprompt = \"The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\\n\\nHuman: Hello, who are you?\\nAI: I am an AI created by OpenAI. How can I help you today?\\nHuman: \",\n\n# openai responses\ndef openai_new(ques):\n response = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt = prompt,\n temperature=0.9,\n max_tokens=150,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0.6,\n stop=[\" Human:\", \" AI:\"]\n )\n return response.choices[0].text\n\n#Human responses\ndef convo(input, old):\n old = old or []\n su = list(sum(old,()))\n su.append(input)\n inp = ''.join(su)\n out = openai_new(inp)\n old.append((input,out))\n return old, old\n\nblocks = gr.Blocks()\n\n#Textbox functionality\n\nwith blocks:\n chatb = gr.Chatbot()\n mess = gr.Textbox(placeholder=prompt)\n state = gr.State()\n submit = gr.Button(\"Tap\")\n submit.click(convo, inputs = [mess,state], outputs = [chatb,state])\n blocks.launch(debug=True)\n","repo_name":"adamb5/BotTalk","sub_path":"BtMAIN.py","file_name":"BtMAIN.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40925832725","text":"# Complete the function sum_m_to_n below\r\ndef sum_m_to_n(input_m, input_n):\r\n m = int(input_m)\r\n n = int(input_n)\r\n sumx = range(m,n+1)\r\n sumy = 0\r\n for sum in sumx:\r\n sumy = sumy+sum\r\n return sumy\r\n \r\n# Output Generation. You are not allowed to modify the following codes\r\ndef main():\r\n input_m = int(input(\"Enter an integer m please:\"))\r\n input_n = int(input(\"Enter an integer n please:\"))\r\n return_sum = sum_m_to_n(input_m, input_n)\r\n print(\"Sum of all integers in between\", input_m, \"and\", input_n, \"is\", return_sum)\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Kenson9999/5_2_4_1_2_2","sub_path":"lab5/lab6_5.py","file_name":"lab6_5.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3900847652","text":"import pygame\nfrom pygame.locals import *\nimport widget\nfrom util import *\n\nclass Button(widget.Widget):\n \"\"\"A button that uses a callback.\"\"\"\n\n focusable = False\n \n def __init__(self, **kwargs):\n \"\"\"Create a new button, options are:\n x, y = position\n width, height = size (calculated from image or label)\n label = The label on a text button (the default)\n font = pygame.font object to render text\n font_size = size of the font\n color = color of text and other foreground\n image = The image to place on an image button. (pygame surface)\n callback = A function of no arguments that is called when the button is pressed.\n padding = Padding around the text or image, set to 0 to have no border\n \"\"\"\n super(Button, self).__init__(**kwargs)\n\n self.padding = kwargs.get('padding', 4)\n self.callback = kwargs.get('callback', lambda: None)\n\n self.image = kwargs.get('image', None)\n self.label = kwargs.get('label', \"\")\n\n if self.image:\n self.is_text_button = False\n w, h = self.image.get_size()\n self.resize(w=w + self.padding * 2, h=h + self.padding * 2)\n else:\n self.is_text_button = True\n w, h = self.font.size(self.label)\n self.resize(w=w + self.padding * 2, h=h + self.padding * 2)\n\n self.back_color = (120, 120, 200)\n\n def display(self, surface):\n \"\"\"Display the selected widget, and the tabs.\"\"\"\n x, y, w, h = self.pos\n\n if self.padding > 0:\n pygame.draw.rect(surface, self.back_color, Rect(x, y, w - 1, h - 1), 2)\n \n if self.is_text_button:\n blit_text(surface, self.label, x + self.padding, y + self.padding, self.font, self.color)\n else:\n surface.blit(self.image, (x + self.padding, y + self.padding))\n\n def handle(self, event):\n \"\"\"self.selection handles the event dispatch.\n Except for tab mouse events which change the current selection.\n (Or go to the selection if in that space.\"\"\"\n if event.type == MOUSEBUTTONDOWN:\n self.callback()\n","repo_name":"vwood/Wind","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71847425360","text":"import pytorch_lightning as pl\n\nfrom utils.data import MNISTDataModule\nfrom utils.learner import ResnetModel\n\n\ndef test_train():\n dm = MNISTDataModule(\n batch_size=6, # low bs to fit on CPU if needed\n samples=50 # small size for the smoke test\n )\n dm.setup()\n\n model = ResnetModel()\n trainer = pl.Trainer(max_epochs=5, default_root_dir=\"resnet_checkpoints/\")\n trainer.fit(model, dm)\n","repo_name":"MarioProjects/wandb-cicd","sub_path":"tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22476161229","text":"from util.path_util import PathUtil\nimport json\nfrom component.model.sentence import Sentence\nfrom component.model.api_knowledge import APIKnowledge\n\n\nclass DataUtil:\n @classmethod\n def write_list_to_json(cls, data, file_name):\n json_obj = json.dumps(data, indent=4)\n file_object = open(file_name, 'w')\n file_object.write(json_obj)\n file_object.close()\n\n @classmethod\n def sentence_data(cls, path=PathUtil.all_sentence_dict()):\n with open(path, 'r', encoding='utf-8') as f:\n sentence_dict_list = json.load(f)\n seed_sentence = []\n for sentence in sentence_dict_list:\n seed_sentence.append(Sentence.from_dict(sentence))\n return seed_sentence\n\n @classmethod\n def seed_api_knowledge_data(cls, path=PathUtil.seed_api_knowledge_list()):\n with open(path, 'r') as f:\n api_knowledge_list = json.load(f)\n result = []\n for api_knowledge in api_knowledge_list:\n result.append(APIKnowledge.from_dict(api_knowledge))\n return result\n\n @classmethod\n def seed_sentence_data(cls, path=PathUtil.seed_sentence_list()):\n with open(path, 'r', encoding='utf-8') as f:\n sentence_dict_list = json.load(f)\n seed_sentence = []\n for sentence in sentence_dict_list:\n seed_sentence.append(Sentence.from_dict(sentence))\n return seed_sentence\n","repo_name":"Sirius5272/APIKnowledgeMining","sub_path":"util/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23517656511","text":"from django.urls import path\n\nfrom v1.userapp.views.privilege import PrivilegeList, Privilege, PrivilegeDetail\n\nurlpatterns = [\n\n path('', Privilege.as_view()),\n path('', PrivilegeDetail.as_view()),\n path('list/', PrivilegeList.as_view()),\n\n]","repo_name":"bynryTechnologies/Neovibe-API","sub_path":"api/v1/userapp/urls/privilege_urls.py","file_name":"privilege_urls.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18478132509","text":"import string\nletter = input()\ncorrect_list = string.ascii_lowercase + string.ascii_uppercase\nsecret_n = False\nsecret_o = False\nsecret_c = False\nresulting_string = str()\ntemp_string = \"\"\nwhile letter != \"End\":\n if letter in correct_list:\n if letter == \"n\" and secret_n:\n temp_string += letter\n elif letter == \"o\" and secret_o:\n temp_string += letter\n elif letter == \"c\" and secret_c:\n temp_string += letter\n elif letter != \"n\" and letter != \"o\" and letter != \"c\":\n temp_string += letter\n if letter == \"n\":\n secret_n = True\n elif letter == \"o\":\n secret_o = True\n elif letter == \"c\":\n secret_c = True\n if secret_n and secret_o and secret_c:\n secret_n = False\n secret_o = False\n secret_c = False\n resulting_string += temp_string\n resulting_string += \" \"\n temp_string = \"\"\n letter = input()\nprint(resulting_string)\n\n","repo_name":"RadoslavTs/SoftUni-Courses","sub_path":"1. Python Basics/4. While Loop/Additional/3. stream_of_letters.py","file_name":"3. stream_of_letters.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4697676769","text":"import demistomock as demisto\nfrom CommonServerPython import *\nfrom CommonServerUserPython import *\n''' IMPORTS '''\nimport json\nimport re\nimport requests\nimport os\nimport shlex\nimport mimetypes\nimport subprocess\nfrom datetime import date\nfrom distutils.util import strtobool\n\n# Disable insecure warnings\nrequests.packages.urllib3.disable_warnings()\n\n''' GLOBALS/PARAMS '''\n\nTOKEN = demisto.params().get('token')\n# Remove trailing slash to prevent wrong URL path to service\nSERVER = demisto.params()['url'][:-1] \\\n if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else demisto.params()['url']\n# Should we use SSL\nUSE_SSL = not demisto.params().get('insecure', False)\n# Service base URL\nBASE_URL = SERVER + '/api/v3/'\n# Headers to be sent in requests\nHEADERS = {\n 'Authorization': 'Token {}'.format(TOKEN),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n}\n\nPOST_HEADERS = {\n 'Authorization': 'Token {}'.format(TOKEN),\n 'Accept': 'application/json'\n # Exclude Content-Type as \"requests\" call must set boundary value used to delineate the parts in the POST body\n}\n\n# Remove proxy if not set to true in params\nif not demisto.params().get('proxy'):\n del os.environ['HTTP_PROXY']\n del os.environ['HTTPS_PROXY']\n del os.environ['http_proxy']\n del os.environ['https_proxy']\n\n\n''' HELPER FUNCTIONS '''\n\n\ndef http_request(method, url_suffix, params=None, data=None, files=None):\n # A wrapper for requests lib to send our requests and handle requests and responses better\n res = requests.request(\n method,\n BASE_URL + url_suffix,\n verify=USE_SSL,\n params=params,\n data=data,\n headers=HEADERS,\n files=files\n )\n # Handle error responses gracefully\n if res.status_code in {404}:\n # Specific case to prevent error message if file not in Viper\n return False\n elif res.status_code not in {200}:\n return_error('Error in API call to Viper [%d] - %s' % (res.status_code, res.reason))\n return res.json()\n\n\ndef http_post(url_fragment, params=None, data=None, files=None):\n res = requests.request(\n url=BASE_URL + url_fragment,\n method='POST',\n verify=USE_SSL,\n params=params,\n data=data,\n headers=POST_HEADERS,\n files=files\n )\n # Handle error responses gracefully\n if res.status_code not in {201}:\n # Add status for updating tag entries with incident id if file already in Viper\n if 'error' in res.json() and res.json()['error']['code'] == 'DuplicateFileHash':\n return 'add tags'\n else:\n warning = {\n 'Type': 11,\n 'Contents': 'Upload unsuccessful',\n 'ContentsFormat': formats['markdown']\n }\n demisto.results(warning)\n else:\n pass\n return res.json()\n\ndef get_first(iterable, default=None):\n \"\"\"\n Returns the first item for an iterable object\n\n :type iterable: ``obj``\n :param iterable: An iterable object, like a dict\n\n :type default: ``str``\n :param default: The default property to return\n\n :return: First item within an iterable, or the default if not iterable\n :rtype: ``dict``\n \"\"\"\n if iterable:\n for item in iterable:\n return item\n return default\n\n\n''' COMMANDS + REQUESTS FUNCTIONS '''\n\n\ndef test_module():\n \"\"\"\n Performs basic get request to get item samples\n \"\"\"\n samples = http_request('GET', 'test-auth/')\n if 'message' in samples:\n demisto.results('ok')\n else:\n return_error(samples)\n\n\ndef viper_search_command():\n \"\"\"\n Command called to index Viper database given a file hash\n \"\"\"\n\n # Collect SHA56 hash from demisto details\n hash_value = demisto.args().get('file')\n hash_type = get_hash_type(hash_value)\n\n # search and return Viper data\n raw = viper_hash_search(hash_value)\n if not raw:\n warning = {\n 'Type': 11,\n 'Contents': 'File not found in Viper',\n 'ContentsFormat': formats['markdown']\n }\n demisto.results(warning)\n else:\n data = get_first(raw)\n\n # Do string manipulation in url to navigate to web interface\n analysis = '[Viper Database Entry](' + str(raw['links']['web']) + ')'\n\n\n # Grab tag strings & format tag strings\n pretty_tags = [tag['data']['tag'] for tag in raw['data']['tag_set']]\n\n # Table of data to populate Viper.File\n table = {\n 'Viper ID': raw['data']['id'],\n 'Created at': raw['data']['created_at'],\n 'SHA256': raw['data']['sha256'],\n 'SHA1': raw['data']['sha1'],\n 'MD5': raw['data']['md5'],\n 'ssdeep': raw['data']['ssdeep'],\n 'Link': analysis,\n 'Tags': pretty_tags\n }\n\n # Version of table for context data - no markdown formatting for url\n cd_table = {\n 'Viper ID': raw['data']['id'],\n 'Created at': raw['data']['created_at'],\n 'SHA256': raw['data']['sha256'],\n 'SHA1': raw['data']['sha1'],\n 'MD5': raw['data']['md5'],\n 'ssdeep': raw['data']['ssdeep'],\n 'Link': raw['links']['web'],\n 'Tags': pretty_tags\n }\n hr = tableToMarkdown('Viper Search Results', table)\n\n\n # If it's in Viper, it's bad - right?\n # dbot score:\n # 0 -> Unknown\n # 1 -> Good\n # 2 -> Suspicious\n # 3 -> Bad, mmkay\n dbot_score = 3\n dbot_output = {\n 'Type': 'file',\n 'Indicator': hash_value,\n 'Vendor': 'Viper',\n 'Score': dbot_score\n }\n\n # Build indicator output for file entry context\n file_output = {\n hash_type.upper(): hash_value,\n 'ssdeep': raw['data']['ssdeep']\n }\n\n # If the dbot score is 3, the file is malicious\n if dbot_score == 3:\n file_output['Malicious'] = {\n 'Vendor': 'Viper',\n 'Description': pretty_tags\n }\n\n # Entry Context\n ec = {\n 'DBotScore': dbot_output,\n # This builds the 'Viper.File' context item - avoid duplicates with the value of the 'id' parameter\n 'Viper.File': createContext(cd_table, id=raw.get('id'), removeNull=True),\n # Using DT selectors to prevent duplicate context entry data\n 'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || val.SHA256 && val.SHA256 == obj.SHA256)': file_output\n }\n\n demisto.results({\n 'Type': entryTypes['note'],\n 'Contents': table,\n 'ContentsFormat': formats['json'],\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': hr,\n 'EntryContext': ec\n })\n\ndef viper_hash_search(hash_value):\n \"\"\"\n Performs search of Viper database given file hash\n \"\"\"\n url_fragment = 'project/default/malware/{}'.format(hash_value)\n response = http_request('GET', url_fragment, None, None, None)\n return response\n\n\ndef viper_upload_command():\n \"\"\"\n Command to upload a file to Viper database\n \"\"\"\n\n # Get entry id, filename and filepath\n file_entry = demisto.args().get('EntryID')\n filename = demisto.getFilePath(file_entry)['name']\n filepath = demisto.getFilePath(file_entry)['path']\n\n\n # Send file to Viper\n response = viper_upload(filepath, filename, file_entry.lower())\n\n \n # Update tags if necessary\n if response == 'add tags':\n curr_hash = demisto.context().get('File')[2]['SHA256']\n url_fragment = \"project/default/malware/{}/tag/\".format(curr_hash)\n curr_tags = [result['data']['tag'] for result in http_request('GET', url_fragment, None, None, None)['results']]\n if file_entry not in curr_tags:\n demisto.results(\"File already in Viper. Updating tags...\")\n data = {'tag': file_entry}\n add_tags = http_post(url_fragment, None, data=data)\n else:\n demisto.results(\"File already in Viper. Viper entry is up to date.\")\n\ndef viper_upload(path, name, entry_id):\n \"\"\"\n Performs upload of file to Viper database.\n \"\"\"\n # Get absolute filepath for upload\n new_path = os.path.abspath(path)\n files = {'file': (name, open(new_path, 'rb'))}\n incident_name = demisto.get(demisto.investigation(), 'name')\n\n\n # Create some basic demisto-related tags to attach to file details on initial upload\n data = {'tag_list': entry_id + ',' + str(date.today()) + ',' + 'demisto' + ',' + incident_name}\n upload = http_post('project/default/malware/upload/', None, data=data, files=files)\n return upload\n\n\n''' COMMANDS MANAGER / SWITCH PANEL '''\n\nLOG('Command being called is %s' % (demisto.command()))\n\ntry:\n if demisto.command() == 'test-module':\n # This is the call made when pressing the integration test button.\n test_module()\n elif demisto.command() == 'file':\n # Collect SHA56 hash from demisto details\n hash_value = demisto.args().get('file')\n hash_type = get_hash_type(hash_value)\n\n # Check if hash is SHA256 - Viper API only supports SHA256\n if hash_type.lower() != 'sha256':\n error = True\n else:\n error = False\n if not error:\n viper_search_command()\n else:\n warning = {\n 'Type': 11,\n 'Contents': 'Hash not recognized. Please use SHA256 hashes',\n 'ContentsFormat': formats['markdown']\n }\n demisto.results(warning)\n elif demisto.command() == 'upload':\n viper_upload_command()\n\n# Log exceptions\nexcept Exception as e:\n LOG(e.message)\n LOG.print_log()\n raise\n","repo_name":"somya-metron/demisto-content","sub_path":"Viper/Viper.py","file_name":"Viper.py","file_ext":"py","file_size_in_byte":9825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39731658416","text":"from functools import cached_property\nfrom hashlib import sha224\nfrom pathlib import Path\nimport re\nfrom typing import List\n\nimport msgspec\n\nSHA224 = re.compile(r'[a-f0-9]{56}')\n\n\nclass InvalidIDError(Exception):\n pass\n\n\nclass ID(str):\n def __new__(cls, content):\n if (not cls.isValidID(content)):\n raise InvalidIDError(\n f'{content} is not a Valid ID.\\nValid IDs are SSH224 Hashes. Please use the id generator attached to the ID Class you are trying to use.')\n return super().__new__(cls, content)\n\n @staticmethod\n def isValidID(id: str) -> bool:\n return bool(SHA224.fullmatch(id))\n\n\nclass ArticleID(ID):\n @classmethod\n def getArticleID(cls, label, content, editOf) -> 'ArticleID':\n if (editOf is None):\n _editOf = b''\n else:\n _editOf = str.encode(editOf.articleId)\n _label = str.encode(label.name)\n _content = str.encode(content)\n return cls(sha224(\n b''.join([\n _label,\n _content,\n _editOf,\n ])\n ).hexdigest())\n\n\nclass LabelID(ID):\n @classmethod\n def getLabelID(cls, path: tuple[str]) -> 'LabelID':\n package = b''.join([str.encode(crumb) for crumb in path])\n return cls(sha224(package).hexdigest())\n\n\nclass BaseLabel(msgspec.Struct, kw_only=True, dict=True, frozen=True):\n path: tuple[str]\n\n @classmethod\n def fromUnsafeString(cls, unsafe_raw_name: str):\n label_path = tuple([unsafe_raw_name])\n label_path = cls.clean_path(label_path)\n label = cls(path=label_path)\n return label\n\n @classmethod\n def fromUnsafeList(cls, unsafe_list: list[str]):\n label_path = tuple(unsafe_list)\n label_path = cls.clean_path(label_path)\n label = cls(path=label_path)\n return label\n\n @classmethod\n def fromPath(cls, path: Path, root: Path | None = None):\n if root is not None:\n root = root.resolve()\n path = path.relative_to(root)\n label_path = path.parts\n label_path = cls.clean_path(label_path)\n label = cls(path=label_path)\n return label\n\n @cached_property\n def labelId(self):\n return LabelID.getLabelID(self.path)\n\n @property\n def name(self) -> str:\n return self.path[-1]\n\n @property\n def parents(self) -> tuple[str]:\n return self.path[:-1]\n\n @classmethod\n def clean_path(cls, path: tuple[str]) -> tuple[str]:\n path = tuple(cls.as_safe_string(crumb) for crumb in path)\n if not (all(map(cls.is_valid, path))):\n raise AttributeError\n return path\n\n @classmethod\n def is_valid(cls, string: str) -> bool:\n if type(string) is not str:\n return False\n\n if not string:\n return False\n safe_str = cls.as_safe_string(string)\n return (\n (len(string) > 0) and\n (len(safe_str) > 0)\n )\n\n @ staticmethod\n def as_safe_string(string: str) -> str:\n s = str(string).strip().replace(\" \", \"_\")\n s = re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n if s in {\"\", \".\", \"..\"}:\n s = '-'\n return s\n\n\ndef Label(path: List[str]):\n return BaseLabel.fromUnsafeList(path)\n\n\ndef SimpleLabel(name: str):\n return BaseLabel.fromUnsafeString(name)\n\n\ndef PathLabel(path: Path, root: Path | None = None):\n return BaseLabel.fromPath(path, root)\n\n\ndef InoLabel(path: Path):\n stat = path.resolve().stat()\n return SimpleLabel(f\"{stat.st_ino}\")\n","repo_name":"roughdrafts-xyz/Linki","sub_path":"linki/id.py","file_name":"id.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"23834694625","text":"import pytest\n\nfrom panel.widgets.indicators import (\n Dial, Gauge, Number, Tqdm,\n)\n\n\ndef test_number_none(document, comm):\n number = Number(value=None, name='Value')\n\n model = number.get_root(document, comm)\n\n assert model.text.endswith(\"<div style="font-size: 54pt; color: black">-</div>\")\n\n number.nan_format = 'nan'\n\n assert model.text.endswith(\"<div style="font-size: 54pt; color: black">nan</div>\")\n\n\ndef test_number_thresholds(document, comm):\n number = Number(value=0, colors=[(0.33, 'green'), (0.66, 'yellow'), (1, 'red')])\n\n model = number.get_root(document, comm)\n\n assert 'green' in model.text\n\n number.value = 0.5\n\n assert 'yellow' in model.text\n\n number.value = 0.7\n\n assert 'red' in model.text\n\n\ndef test_dial_thresholds(document, comm):\n dial = Dial(value=0, colors=[(0.33, 'green'), (0.66, 'yellow'), (1, 'red')])\n\n model = dial.get_root(document, comm)\n\n cds = model.select(name='annulus_source')\n\n assert ['green', 'whitesmoke'] == cds.data['color']\n\n dial.value = 50\n\n assert ['yellow', 'whitesmoke'] == cds.data['color']\n\n dial.value = 72\n\n assert ['red', 'whitesmoke'] == cds.data['color']\n\n\ndef test_dial_none(document, comm):\n dial = Dial(value=None, name='Value')\n\n model = dial.get_root(document, comm)\n\n cds = model.select(name='annulus_source')\n\n assert list(cds.data['starts']) == [9.861110273767961, 9.861110273767961]\n assert list(cds.data['ends']) == [9.861110273767961, 5.846852994181004]\n\n text_cds = model.select(name='label_source')\n\n assert text_cds.data['text'] == ['Value', '-%', '0%', '100%']\n\n dial.nan_format = 'nan'\n\n assert text_cds.data['text'] == ['Value', 'nan%', '0%', '100%']\n\n\ndef test_dial_thresholds_with_bounds(document, comm):\n dial = Dial(value=25, colors=[(0.33, 'green'), (0.66, 'yellow'), (1, 'red')],\n bounds=(25, 75))\n\n model = dial.get_root(document, comm)\n\n cds = model.select(name='annulus_source')\n\n assert ['green', 'whitesmoke'] == cds.data['color']\n\n dial.value = 50\n\n assert ['yellow', 'whitesmoke'] == cds.data['color']\n\n dial.value = 75\n\n assert ['red', 'whitesmoke'] == cds.data['color']\n\n\ndef test_dial_bounds():\n dial = Dial(bounds=(0, 20))\n\n with pytest.raises(ValueError):\n dial.value = 100\n\n\ndef test_gauge_bounds():\n dial = Gauge(bounds=(0, 20))\n\n with pytest.raises(ValueError):\n dial.value = 100\n\ndef test_tqdm_color():\n tqdm = Tqdm()\n tqdm.text_pane.styles={'color': 'green'}\n for _ in tqdm(range(0,2)):\n pass\n assert tqdm.text_pane.styles[\"color\"]==\"green\"\n","repo_name":"holoviz/panel","sub_path":"panel/tests/widgets/test_indicators.py","file_name":"test_indicators.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":3266,"dataset":"github-code","pt":"3"} +{"seq_id":"3736671503","text":"\n\nclass Contacto:\n\n def __init__(self, name, phone, email):\n self._name = name\n self._phone = phone\n self._email = email\n\n\nclass Agenda:\n def __init__(self):\n self._contacto = []\n\n def add(self, name, phone, email):\n contacto = Contacto(name, phone, email)\n self._contacto.append(contacto)\n\n def show_contact(self):\n for obj in self._contacto:\n self._print_contacto(obj)\n\n def _print_contacto(self, contacto):\n print(\"Nombre: {}\".format(contacto._name))\n print(\"Teléfono: {}\".format(contacto._phone))\n print(\"Email: {}\".format(contacto._email))\n print(\"----------------------------------\")\n\n def show_contact_idx(self):\n for obj in self._contacto:\n index = self._contacto.index(obj) + 1\n print(\"{} ---- {}\".format(index, obj._name))\n\n def delete_contact_selected(self, num):\n idx = num - 1\n self._contacto.remove(self._contacto[idx])\n\n def modify_contact(self, num):\n idx = num-1\n name = input(\"Nombre a modificar: \")\n phone = input(\"Teléfono a modificar: \")\n email = input(\"Email a modificar: \")\n self._contacto[idx]._name = name\n self._contacto[idx]._phone = phone\n self._contacto[idx]._email = email\n\n\n\ndef run():\n agenda = Agenda()\n\n while True:\n command = input('''\n ¿Qué desea hacer?\n [a]gregar contacto\n [e]liminar contacto\n [m]odificar contacto\n [l]istar contactos\n [s]alir\n\n ''')\n\n if command.lower() == 'a':\n name = input(\"Escribe el nombre del contacto: \")\n phone = input(\"Escribe el teléfono del contacto: \")\n email = input(\"Escribe el email del contacto: \")\n agenda.add(name, phone, email)\n\n elif command.lower() == 'e':\n try:\n agenda.show_contact_idx()\n print(\"Escriba el número de la persona a la cual desea eliminar\")\n num = int(input())\n agenda.delete_contact_selected(num)\n except IndexError:\n print(\"Elige un número correcto\")\n\n elif command.lower() == 'm':\n print(\"¿A quién modificará?\")\n agenda.show_contact_idx()\n num = int(input())\n agenda.modify_contact(num)\n\n elif command.lower() == 'l':\n agenda.show_contact()\n\n else:\n break\n\n\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"CelsoEspinoza/Python.curso","sub_path":"first_course/contactos.py","file_name":"contactos.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27091668615","text":"import pygame, time\nfrom utils import Utils\nfrom setting import Setting\nfrom assets import Graphic\n\nclass Sprite_Pixel(pygame.sprite.Sprite):\n\tdef __init__(self, pos, size=(1, 1)):\n\t\tsuper().__init__()\n\t\tself.size = size\n\t\tself.image = pygame.Surface(size)\n\t\tself.rect = self.image.get_rect(center=pos)\n\nclass Rotate_Image(pygame.sprite.Sprite):\n\tdef __init__(self, image_filename, size, pos, deviation=(0, 0), color=None, parent=None):\n\t\tsuper().__init__()\n\t\tself.size = Utils.int_tuple(size)\n\t\tself.pos = pos\n\t\tself.deviation = deviation\n\t\tself.angle = 0\n\t\tself.speed = 0\n\t\tself.parent = parent\n\t\tself.original_image = pygame.image.load(image_filename)\n\t\tself.original_image = pygame.transform.scale(self.original_image, self.size)\n\t\tself.transition = self.original_image.copy()\n\t\tself.image = self.transition\n\t\tself.rect = self.image.get_rect(center=Setting.Environment.NULL_POINT)\n\t\tif color != None:\n\t\t\tself.change_color(color)\n\tdef change_color(self, color):\n\t\tcoloredSurface = self.original_image.copy()\n\t\tUtils.color_surface(coloredSurface, color)\n\t\tself.transition = coloredSurface\n\tdef update(self):\n\t\tself.image = pygame.transform.rotozoom(self.transition, self.angle, 1)\n\t\tcenter = Utils.rotate(Utils.mul_tuple(self.deviation, -1), self.angle)\n\t\tcenter = Utils.int_tuple(Utils.sum_tuple(center, self.pos))\n\t\tself.rect = self.image.get_rect(center=center)\n\tdef turn(self, angle):\n\t\tself.angle += angle\n\t\tself.angle %= 360\n\nclass Shell_Explosion(Rotate_Image):\n\tdef __init__(self, size, pos, player):\n\t\tsuper().__init__(Graphic.Tank.SHELL_EXPLOSION, size, pos)\n\t\tself.player = player\n\t\tself.last = round(Setting.Environment.SHELL_EXPLOSION_LAST / Setting.Window.MSPF)\n\tdef update(self):\n\t\tsuper().update()\n\t\tself.last -= 1\n\nclass Cannon_Explosion(Rotate_Image):\n\tdef __init__(self, size, pos, angle, player):\n\t\tsuper().__init__(Graphic.Tank.CANNON_EXPLOSION, size, pos)\n\t\tself.player = player\n\t\tself.angle = angle\n\t\tself.last = round(Setting.Environment.CANNON_EXPLOSION_LAST / Setting.Window.MSPF)\n\tdef update(self):\n\t\tsuper().update()\n\t\tself.last -= 1\n\nclass Shell(Rotate_Image):\n\tdef __init__(self, size, pos, angle, tank):\n\t\tsuper().__init__(Graphic.Tank.TANK_SHELL, size, pos, parent=tank)\n\t\tself.angle = angle\n\t\tself.init_pos = pos\n\t\tself.body = tank.body\n\t\tself.player = tank.player\n\t\tself.explo_width = tank.tank_gameplay.SHELL_EXPLOSION_WIDTH\n\t\tself.damage = tank.tank_gameplay.SHELL_DAMAGE\n\t\tself.acceleration = tank.tank_gameplay.SHELL_SPEED\n\tdef update(self):\n\t\tsuper().update()\n\t\ttotal_angle = self.angle + Setting.Tank_Model.Tank.INIT_ANGLE\n\t\tself.speed = Utils.next_speed(self.speed, Utils.acceleration(self.acceleration))\n\t\tdistance = Utils.scale(self.speed, (1000, Setting.Window.MSPF))\n\t\tself.pos = Utils.mirror(Utils.translate(Utils.mirror(self.pos), distance, total_angle))\n\tdef is_out_boundary(self):\n\t\tmax_dim = max(self.size)\n\t\twidth, height = Setting.Window.WIDTH, Setting.Window.HEIGHT\n\t\tif self.pos[0] - max_dim / 2 >= width or self.pos[0] + max_dim / 2 <= 0:\n\t\t\treturn True\n\t\tif self.pos[1] - max_dim / 2 >= height or self.pos[1] + max_dim / 2 <= 0:\n\t\t\treturn True\n\t\treturn False\n\nclass Arty_Shell(Rotate_Image):\n\tdef __init__(self, size, player, origin, land_point):\n\t\tsuper().__init__(Graphic.Tank.TANK_SHELL, size, origin)\n\t\tself.angle = Utils.angle_onscreen_seg(origin, land_point)\n\t\tself.init_pos = origin\n\t\tself.land_point = land_point\n\t\tself.player = player\n\t\tgameplay = Setting.Gameplay.SPG\n\t\tself.explo_width = gameplay.SHELL_EXPLOSION_WIDTH\n\t\tself.damage = gameplay.SHELL_DAMAGE\n\t\tself.acceleration = gameplay.SHELL_SPEED\n\tdef update(self):\n\t\tsuper().update()\n\t\tself.speed = Utils.next_speed(self.speed, Utils.acceleration(self.acceleration))\n\t\tdistance = Utils.scale(self.speed, (1000, Setting.Window.MSPF))\n\t\tnew_pos = Utils.mirror(Utils.translate(Utils.mirror(self.pos), distance, self.angle))\n\t\tif Utils.is_between(self.pos, new_pos, self.land_point):\n\t\t\tself.pos = self.land_point\n\t\telse:\n\t\t\tself.pos = new_pos\n\nclass Flame(pygame.sprite.Sprite):\n\tCHANGE = 100\n\tdef __init__(self, size, pos):\n\t\tsuper().__init__()\n\t\tself.images = []\n\t\tself.pos = pos\n\t\tmul = max(size) / max(Setting.Tank_Model.Tank.FLAME_ACTUAL)\n\t\tscale = Utils.int_tuple(Utils.mul_tuple(Setting.Tank_Model.Tank.FLAME_DIMENSION, mul))\n\t\tfor i in range(Graphic.Tank.Flame.NUMBER):\n\t\t\timage = pygame.image.load(Graphic.Tank.Flame.get_flame(i))\n\t\t\timage = pygame.transform.scale(image, scale)\n\t\t\tself.images.append(image)\n\t\tself.index = 0\n\t\tself.last_change = time.time() * 1000\n\tdef update(self):\n\t\tnow = time.time() * 1000\n\t\tif now - self.last_change > Flame.CHANGE:\n\t\t\tself.index += 1\n\t\t\tself.index %= Graphic.Tank.Flame.NUMBER\n\t\tself.image = self.images[self.index]\n\t\tself.rect = self.image.get_rect(center=self.pos)\n\nclass Wall(pygame.sprite.Sprite):\n\tdef __init__(self, wall_from, wall_to, wall_height):\n\t\tsuper().__init__()\n\t\tmin_tuple = tuple(min(tuple(x)) for x in zip(wall_from, wall_to))\n\t\tmax_tuple = tuple(max(tuple(x)) for x in zip(wall_from, wall_to))\n\t\tself.top_left = min_tuple\n\t\tself.down_right = max_tuple\n\t\tself.height = wall_height\n\t\tself.size = Utils.sum_tuple(max_tuple, Utils.mul_tuple(min_tuple, -1))\n\t\tself.size = tuple(max(Setting.Environment.INF_WALL, x) for x in self.size)\n\t\tself.image = pygame.Surface(self.size)\n\t\tself.image.fill(Setting.Environment.WALL_COLOR)\n\t\tself.rect = pygame.Rect(min_tuple, self.size)\n\nclass Boundary(pygame.sprite.Sprite):\n\tdef __init__(self, b_from, b_to):\n\t\tsuper().__init__()\n\t\tmin_tuple = tuple(min(tuple(x)) for x in zip(b_from, b_to))\n\t\tmax_tuple = tuple(max(tuple(x)) for x in zip(b_from, b_to))\n\t\tself.size = Utils.sum_tuple(max_tuple, Utils.mul_tuple(min_tuple, -1))\n\t\tself.image = pygame.Surface(self.size)\n\t\tself.rect = pygame.Rect(min_tuple, self.size)\n\t\t\nclass Progess_Bar(pygame.sprite.Sprite):\n\tdef __init__(self, color_full, color_miss, size):\n\t\tsuper().__init__()\n\t\tself.color_full = color_full\n\t\tself.color_miss = color_miss\n\t\tself.size = size\n\t\tself.percent = 1\n\t\tself.image = pygame.Surface(self.size)\n\t\tself.pos = Setting.Environment.NULL_POINT\n\t\tself.rect = self.image.get_rect(center=self.pos)\n\n\tdef update(self):\n\t\tfull_width = int(self.percent * self.image.get_size()[0])\n\t\tfor x in range(full_width):\n\t\t\tfor y in range(self.image.get_size()[1]):\n\t\t\t\tself.image.set_at([x, y], self.color_full)\n\t\tfor x in range(full_width, self.image.get_size()[0]):\n\t\t\tfor y in range(self.image.get_size()[1]):\n\t\t\t\tself.image.set_at([x, y], self.color_miss)\n\t\tself.rect = self.image.get_rect(center=self.pos)\n\nclass Name_Bar(pygame.sprite.Sprite):\n\tdef __init__(self, name, font_size, color):\n\t\tsuper().__init__()\n\t\tself.name = name\n\t\tself.font_size = font_size\n\t\tself.color = color\n\t\tself.font = pygame.font.SysFont(Setting.Window.FONT, font_size)\n\t\tself.image = self.font.render(name, True, color)\n\t\tself.pos = Setting.Environment.NULL_POINT\n\t\tself.rect = self.image.get_rect(center=self.pos)\n\n\tdef update(self):\n\t\tself.rect = self.image.get_rect(center=self.pos)","repo_name":"alm818/battle_tank_multiplayer","sub_path":"scripts/ingame/sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":6947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31728999962","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import ShareLink\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, FileResponse, HttpResponse, Http404\nfrom django.core.files.storage import FileSystemStorage\nfrom django.urls import reverse\nfrom Files.file_utils import recursive_file_list\nfrom Files.views import FileView\nimport os.path as p\nfrom io import BytesIO\nimport zipfile\nfrom django.views import View\n\n@login_required\ndef create_share_link(request, path):\n \"\"\"\n Creates a share link for a file : uses the file path & request user to identify\n the shared file, generates a 16-char string to use it as link, then sends this\n link back to client.\n \"\"\"\n fs = FileSystemStorage()\n full_path = p.join(request.user.username, 'files', path)\n if p.exists(p.join(fs.location, full_path)):\n link = ShareLink(link=ShareLink.link_generation(),\n file_path=path, creator=request.user)\n link.save()\n return JsonResponse({'link': link.link})\n else:\n raise Http404\n\nclass ShareDownloadView(View, FileView):\n \"\"\"\n Allows users (even unregistered) to download a shared file from its link\n \"\"\"\n def get(self, request, file_link):\n link = get_object_or_404(ShareLink, link=file_link)\n to_send = p.join(link.creator.username, \"files\", link.file_path)\n if p.exists(p.join(self.fs.location, to_send)):\n # Send file or folder if exists\n # Compress all folder into zip and return it\n if p.isdir(p.join(self.fs.location, to_send)):\n filenames = recursive_file_list(\n p.join(self.fs.location, to_send))\n zip_filename = f\"{p.basename(to_send)}.zip\"\n s = BytesIO()\n zf = zipfile.ZipFile(s, \"w\")\n for fpath in filenames:\n fdir, fname = p.split(fpath)\n zf.write(fpath, fpath.replace(p.join(\n self.fs.location, link.creator.username, 'files'), ''))\n zf.close()\n resp = HttpResponse(\n s.getvalue(), content_type=\"application/x-zip-compressed\")\n resp['Content-Disposition'] = f'attachment; filename={zip_filename}'\n return resp\n else:\n # Open and send file\n file = self.fs.open(to_send, 'rb')\n return FileResponse(file, file.name, as_attachment=True)\n file.close()\n else:\n raise Http404 # Return HTTP 404 error\n","repo_name":"HugoNeveux/django_file_storage","sub_path":"Django_cloud/Share/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74751993041","text":"from django.core.management.base import BaseCommand, CommandError\nfrom API.models import PictureTarget\nfrom random import randint\n\npictureNames = ['eraser', 'bag', 'notebook', 'pen', 'credit card', 'wallet', 'key',\n 'toothbrush', 'headphone', 'water bottle', 'pencil', 'mouse', 'glasses', \n 'clock', 'sandals', 'cellphone', 'keyboard']\n\nclass Command(BaseCommand):\n help = 'Changes picture target'\n\n def handle(self, *args, **options):\n current_picture = PictureTarget.objects.all()[0]\n current_name = current_picture.name\n current_picture.delete()\n newPicture = pictureNames[randint(0, len(pictureNames)-1)]\n while newPicture == current_name:\n newPicture = pictureNames[randint(0, len(pictureNames)-1)]\n PictureTarget.objects.create(name=newPicture)\n ","repo_name":"LeonardoGaldino/FastPicAPI","sub_path":"FastPicAPI/API/management/commands/changePictureTarget.py","file_name":"changePictureTarget.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33246380458","text":"###### ALL FEATURES -> VECTOR ######\n# start_date 'yyyy-mm-dd' -> pandas Datetime\n# day \"월,화\"/\"수,목\"/금/\"금,토\"/\"토,일\"/일(6종류) -> one hot encoding\n# time 'hh:mm:00' -> float (hh + (mm/60))\n## 10:10/20:55/21:15/21:30/21:40/21:50/21:55/22:00/22:15/22:45/23:00 11종류 밖에 없는데 categorical data로 해서 one hot encoding할까?\n# num_eps(number of episodes) int -> int\n# prev(선행드라마) -> 후반기 25% 시청률, 결측치는 k nearest neighbor 이용\n# kbs (1 or 2) -> one hot encoding\n\n# news_keyword 0~9 -> 방영 전 네이버 연예뉴스를 dataset으로 word2vec을 통해 드라마 제목과 코사인유사도가 높은 단어 10개의 벡터\n# pd (min=1, max=3) \n# 해당 드라마가 방영되기 이전까지의 이력을 이용하여 과거에 연출했던 드라마 중 최고 시청률이 20% 이상(지상파 외 채널 2% 이상)인 드라마의 수를 기록\n# writer (min=1, max=3) \n# 해당 드라마가 방영되기 이전까지의 이력을 이용하여 과거에 연출했던 드라마 중 최고 시청률이 20% 이상(지상파 외 채널 2% 이상)인 드라마의 수를 기록\n# 프로듀서의 전작 첫 회 평균 시청률(5년간)\n# 작가의 전작 첫 회 평균 시청률(5년간)\n# 프로듀서의 전작 평균 시청률(5년간)\n# 작가의 전작 평균 시청률(5년간)\n# 프로듀서의 수상 수(5년간)\n# 작가의 수상 수(5년간)\n\n# actor1,2,3,4,5 (min=2, max=5)\n # 5가지의 기준을 두었다. \n # 첫 번째와 두 번째 기준은 각각 해당 드라마 이전에 출연한 방송(드라마 및 예능과 같은 TV 브라운관에서의 방송활동)과 영화의 수이다. \n # 세 번째 기준은 수상 경력으로, 각종 시상식을 비롯하여 각 방송사에서 매년 주최하는 연말 시상식에서 수상한 상의 개수를 점수\n # 화하였다. 이 때 대상, 최우수·우수 연기상, 그 외의 상에 대하여 각각 3, 2, 1로 가중치를 주었다. \n # 네 번째 기준은 역대 한국 드라마 시청률 100위 안에 드는 드라마에 주연으로 출연한 횟수이다.\n # 다섯 번째 기준은 해당 드라마의 주연배우 수이며 앞선 4가지 변수는 모두 주연배우의 수로 나눈 평균 점수를 사용\n # 드라마 출연 배우 , 프로듀서 , 작가의 해당 작품 전 5 년간으로 제한을 두어 평균 시청률을 변수화\n# 주요 출연배우 4인의 수상 실적 점수(5년간)\n# 주요 출연배우 4인의 드라마 경력\n# 주요 출연배우 4인의 영화 출연 여부(1000만 관객 이상)\n# 주요 출연배우 4인 중 남자의 전작 평균 시청률 (5년간)\n# 주요 출연배우 4인 중 여자의 전작 평균 시청률 (5년간)\n# 주요 출연배우 4인의 전작 평균 시청률 (5년간)\n# 주요 출연배우 4인 중 남자의 전작 첫 회 평균 시청률 (5년간)\n# 주요 출연배우 4인 중 여자의 전작 첫 회 평균 시청률 (5년간)\n# 주요 출연배우 4인의 전작 첫 회 평균 시청률 (5년간)\n\n\n\n###### MORE FEATURES? ######\n# 네이버에서 제공하는 인터넷 기사를 기준으로 드라마 방영 전 날부터 3개월 이전까지, 방영한 날부터 1주일 이후까지의 기사 개수\n# 드라마 검색량 (네이버 트렌드)\n# 경쟁작의 시청률\n# 원작이 있는 경우 1, 없는 경우 0을 나타내는 원작 유무 변수를 고려\n\n\n#### 보정\n# 연출자, 작가 변수 사이에서는 명확한 선형관계가 나타ㄴ나지 않아 GMM(Gaussian Mixture Model)을 이용하여 군집분석을 수행\n# 드라마의 편성시간대를 나타내는 방송요일과 방송시간 모두 평균 시청률을 기준으로 하여 비슷한 값을 나타내는 범주들은 동일 범주로 묶었다\n\n### 초반시청률 예측의 중요 변수: 방송시간, 방송사, 이전작의 평균 시청률, 방영 전 드라마 검색량\n # 선행드라마 후반기 25% 시청률, 방송사, 방송요일, 방송시간, 프로듀서, 배우, 부작수\n# 이 중 나한테 없는 데이터: 이전작의 평균 시청률, 방영 전 드라마 검색량\n\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\n\npd.set_option('display.max_rows', 160)\npd.set_option('display.max_columns', 20)\n\ndef embedding(file_path):\n kbs_mini = pd.read_csv(file_path, encoding='utf-8')\n del kbs_mini['end_date']\n del kbs_mini['pd']\n del kbs_mini['writer']\n del kbs_mini['actor1']\n del kbs_mini['actor2']\n del kbs_mini['actor3']\n del kbs_mini['actor4']\n del kbs_mini['actor5']\n del kbs_mini['avg_rate']\n del kbs_mini['rate_25']\n del kbs_mini['prev']\n\n start_timestamp = pd.to_datetime(kbs_mini['start_date'], format='%Y-%m-%d').astype(int) / 10**11\n kbs_mini['start_date'] = start_timestamp\n\n time_to_datetime = pd.to_datetime(kbs_mini['time'], format='%H:%M:%S')\n kbs_mini['time'] = time_to_datetime.dt.hour + (time_to_datetime.dt.minute/60)\n\n day_one_enc = pd.get_dummies(kbs_mini, columns=['day'])\n kbs_mini = day_one_enc\n\n # # print(kbs_mini['kbs'])\n kbs_mini = pd.get_dummies(kbs_mini, columns=['kbs'])\n\n del kbs_mini['title']\n \n # column_names = kbs_mini.columns.values.tolist()\n imp_mean = IterativeImputer(missing_values=np.nan, skip_complete=True, random_state=0)\n imputed_prev_25 = imp_mean.fit_transform(kbs_mini.to_numpy())[:, 4]\n kbs_mini['prev_25_imputed'] = imputed_prev_25\n # kbs_mini = pd.DataFrame(imp_mean.fit_transform(kbs_mini.to_numpy()), columns=column_names)\n del kbs_mini['prev_25']\n\n # print(kbs_mini.loc[:,['prev_25','prev_25_imputed']])\n return kbs_mini\n # print(kbs_mini)\n\n# embedding('../kbs_mini.csv')","repo_name":"Park-Yegi/Drama-View-Rate-Prediction","sub_path":"Embedding/model1_embedding.py","file_name":"model1_embedding.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17759760023","text":"#Luke Mackay,November 30th,2020\nimport random\n\nprint(\"What is your name?\")\nname_input = input()\nsecretNumber = random.randrange(1,101)\nguess_number = 0\nnum_of_tries = 0\nlimited_tries = 7\ngame_over = False\n\nis_valid_input = False\nwhile is_valid_input == False:\n print (\"choose a difficulty:\\n medium is 1 to 100 \\n hard is 1 to 1000\")\n difficulty = input()\n if difficulty == 'hard':\n secretNumber = random.randrange(1,1001)\n limited_tries = 9\n is_valid_input = True\n elif difficulty == 'medium':\n is_valid_input = True\n else:\n print (\"pick 'medium' or 'hard' please\")\n\n\nprint (\"welcome\",name_input,\"you have\",limited_tries,\"tries, you got this!\")\n\nwhile game_over == False:\n guess_input = \"NONE\"\n while guess_input.isdigit() == False:\n if limited_tries == num_of_tries:\n game_over= True\n print (\"guess the secret number\")\n guess_input = input()\n if guess_input.isdigit() == False:\n print (\"Excuse me, try again\")\n \n guess_number = int(guess_input)\n num_of_tries = num_of_tries + 1\n \n if limited_tries < num_of_tries:\n game_over = True\n print (\"YOU USED\",limited_tries,\"TRIES\")\n print(\"you did not get the number HOW? im dissapointed\",name_input)\n\n elif guess_number < secretNumber:\n print(\"too low try again!\",name_input)\n elif guess_number > secretNumber:\n print(\"too high try again!\",name_input)\n elif guess_number == secretNumber:\n print(\"you got the number!!!\")\n game_over = True\n print (\"it took you\",num_of_tries,\"tries\",name_input)\n\nprint (\"GAME OVER\")\n \n\n","repo_name":"Luke1-cloud/Guessing-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36053516425","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nn, m = map(int, input().split())\narr = [[0] for _ in range(n + 1)]\nvisited = [[-1] * (m + 1) for _ in range(n + 1)]\n\narr[0] += [0] * m\nfor i in range(1, n + 1):\n arr[i] += list(map(int, input().split()))\n\nh, w, s_r, s_c, f_r, f_c = map(int, input().split())\n\nwall = []\nfor i in range(n + 1):\n for j in range(m + 1):\n if arr[i][j] == 1:\n wall.append([i, j])\n\nfor wa in wall:\n i, j = wa[0], wa[1]\n for a in range(1, h):\n if 0 < i - a:\n arr[i - a][j] = \"x\"\n if 0 < j - (w - 1):\n arr[i - a][j - (w - 1)] = \"x\"\n for b in range(1, w):\n if 0 < j - b:\n arr[i][j - b] = \"x\"\n if 0 < i - (h - 1):\n arr[i - (h - 1)][j - b] = \"x\"\n\n\nfor i in range(n + 1):\n print(*arr[i])\nprint()\n\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\narr[s_r][s_c] = - 1 # 시작점을 다시 탐색하지 않게 하기 위해\n\nfor i in range(n + 1):\n print(*arr[i])\nprint()\n\ndef bfs():\n q = deque()\n q.append((s_r, s_c))\n visited[s_r][s_c] = 0\n\n while q:\n x, y = q.popleft()\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 < nx <= (n - h + 1) and 0 < ny <= (m - w + 1):\n if arr[nx][ny] == 0 and visited[nx][ny] == -1:\n # arr[nx][ny] = arr[x][y] - 1\n visited[nx][ny] = visited[x][y] + 1\n q.append((nx, ny))\n\n for i in range(n + 1):\n print(*arr[i])\n print()\n\n\nbfs()\n\nfor i in range(n + 1):\n print(*arr[i])\nprint()\n\nfor i in range(n + 1):\n print(*visited[i])\nprint()\n\nprint(-arr[f_r][f_c] - 1)\nprint(visited[f_r][f_c])","repo_name":"Sangmin627/AlgoStudy2023","sub_path":"창재/백준/random/week14/16973 - BFS.py","file_name":"16973 - BFS.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18393069254","text":"# Copyright 2022 Luping Liu\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\r\nimport os\r\nimport sys\r\nimport math\r\nimport time\r\nimport copy\r\nimport faiss\r\nimport random\r\n\r\nimport torch as th\r\nimport numpy as np\r\nfrom einops import rearrange\r\nimport torchvision.utils as tvu\r\nimport torch.utils.data as data\r\nimport torch.distributed as dist\r\nimport torchvision.transforms.functional as F\r\nfrom torchvision import transforms\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom tqdm.auto import tqdm\r\n\r\nfrom dataset import get_dataset\r\nfrom runner.runner import Runner\r\nfrom detect.dataset.imglist_dataset import ImglistDataset\r\n\r\n\r\n@th.no_grad()\r\nclass KNN(object):\r\n def __init__(self, dim, net=None):\r\n self.dim = dim\r\n self.index = faiss.IndexFlatL2(dim)\r\n self.net = net\r\n self.y = None\r\n\r\n def encoder(self, y: th.Tensor):\r\n normalizer = lambda x: x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10)\r\n\r\n if self.net is not None:\r\n y = th.clamp(y * 0.5 + 0.5, 0, 1) # 设置标准输入!\r\n _, index = self.net(y, return_feature=True)\r\n index = index.cpu().numpy()\r\n # print(index.shape)\r\n else:\r\n index = y.mean(dim=1).cpu().numpy().reshape(len(y), -1)\r\n\r\n index = normalizer(index)\r\n assert index.shape[1] == self.dim\r\n\r\n return index\r\n\r\n def add(self, y: th.Tensor):\r\n index = self.encoder(y)\r\n\r\n self.index.add(index)\r\n self.y = th.cat([self.y, y], dim=0) if self.y is not None else y\r\n print('the shape of y in KNN', y.shape)\r\n\r\n def search(self, y: th.Tensor, k=1, return_y=False):\r\n index = self.encoder(y)\r\n\r\n if return_y:\r\n loss, ind = self.index.search(index, k)\r\n y = rearrange(self.y[ind.reshape(-1)], '(b1 b2) ... -> b1 b2 ...', b2=k) # todo check\r\n return loss, ind, y\r\n else:\r\n return self.index.search(index, k)\r\n\r\n\r\nclass OodDetection(Runner):\r\n def __init__(self, args, config, schedule, model, discriminator):\r\n super(OodDetection, self).__init__(args, config, schedule, model)\r\n # self-train version\r\n self.discriminator = discriminator\r\n state_dict = th.load(self.args.disc_path, map_location=self.device)\r\n self.discriminator.load_state_dict(state_dict, strict=True)\r\n self.discriminator.eval() # 兄弟,要eval啊!\r\n\r\n @th.no_grad()\r\n def diff_detect(self):\r\n batch_size = 250\r\n iter_size = 4\r\n iter_size = iter_size // self.world_size if self.world_size >= 2 else iter_size\r\n repeat_size = self.args.repeat_size\r\n\r\n id_name = self.config['Dataset']['name'].lower()\r\n timestep_list = [0, 120, 240, 360]\r\n ood_list = ['cifar10', 'cifar100', 'tin', 'svhn', 'texture', 'places365']\r\n # ood_list = ['imagenet', 'inaturalist', 'openimage_o', 'imagenet_o', 'species']\r\n\r\n model = self.model\r\n schedule = self.schedule\r\n device = self.device\r\n\r\n def gather(obj):\r\n if self.world_size >= 2:\r\n obj_ = obj.cuda()\r\n if self.rank == 0:\r\n obj_gather = [th.zeros_like(obj_) for _ in range(self.world_size)]\r\n dist.gather(obj_, obj_gather)\r\n obj = th.cat(obj_gather, dim=0).cpu()\r\n else:\r\n dist.gather(obj_)\r\n return obj\r\n\r\n # load model\r\n self.before_sample()\r\n\r\n # load dataset\r\n for ood_name in ood_list:\r\n dataset = ImglistDataset(id_name, 'test', 32,\r\n f'./benchmark_imglist/{id_name}/test_{ood_name}.txt',\r\n f'./images_classic/')\r\n if self.world_size >= 2:\r\n sampler = data.distributed.DistributedSampler(dataset)\r\n ood_loader = data.DataLoader(dataset, batch_size=batch_size, sampler=sampler,\r\n num_workers=4)\r\n else:\r\n ood_loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True,\r\n num_workers=4)\r\n # print(len(dataset))\r\n\r\n norm_dict = {'cifar10': [[0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]],\r\n 'cifar100': [[0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]],\r\n 'imagenet': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]], }\r\n mean, std = norm_dict[id_name]\r\n norm_fn = transforms.Normalize(mean=mean, std=std)\r\n\r\n # main detection process\r\n tn = len(timestep_list)\r\n num_classes = self.config['Model']['num_classes']\r\n imgr_np, feature_np, logit_np = [[] for _ in range(tn)], [[] for _ in range(tn)], [[] for _ in range(tn)]\r\n for i, output in enumerate(tqdm(ood_loader, total=iter_size, disable=self.rank + 1 - self.world_size,\r\n desc=f'process {ood_name} data')):\r\n\r\n if i == iter_size:\r\n for j in range(len(imgr_np)):\r\n imgr_np[j] = th.cat(imgr_np[j], dim=0)\r\n feature_np[j] = th.cat(feature_np[j], dim=0)\r\n logit_np[j] = th.cat(logit_np[j], dim=0)\r\n imgr_np[j] = gather(imgr_np[j]).numpy()\r\n feature_np[j] = gather(feature_np[j]).numpy()\r\n logit_np[j] = gather(logit_np[j]).numpy()\r\n\r\n if self.rank == 0:\r\n np.savez(f'temp/sample/{id_name}_{ood_name}_r{repeat_size}.npz', imgr=imgr_np,\r\n feature=feature_np, logit=logit_np)\r\n break\r\n\r\n img = output['data']\r\n ood_img = img.to(device) * 2 - 1\r\n\r\n # # generate random noise\r\n # loss, ind = index.search(ood_img, repeat_size+1)\r\n # noise = index.y[ind[:, -1].reshape(-1)]\r\n # noise = th.randn_like(ood_img)\r\n noise_list = [th.randn_like(ood_img) for _ in range(repeat_size)]\r\n if num_classes > 0:\r\n out_ = self.discriminator(norm_fn(img.cuda()), return_feature=False)\r\n score = th.softmax(out_, dim=1)\r\n _, y_pred = th.max(score, dim=1)\r\n else:\r\n y_pred = None\r\n\r\n tq = tqdm(total=tn * repeat_size, leave=False, desc='subprocess',\r\n disable=self.rank + 1 - self.world_size)\r\n for j, t in enumerate(timestep_list):\r\n imgr_list, feature_list, logit_list = [], [], []\r\n for k in range(repeat_size):\r\n img_n, _, _ = schedule.diffusion(ood_img, th.ones(batch_size, device=device, dtype=th.long) * t,\r\n noise=noise_list[k].cuda())\r\n # img_n = slerp(noise1, noise, t / 1000.0)\r\n if num_classes > 0:\r\n img_r = schedule.multi_iteration(img_n, t - 1, -1, model,\r\n y=y_pred, num_classes=num_classes, beta=2,\r\n last=True, fresh=True)\r\n else:\r\n img_r = schedule.multi_iteration(img_n, t - 1, -1, model,\r\n last=True, fresh=True)\r\n\r\n img_r = th.clamp(img_r * 0.5 + 0.5, 0, 1)\r\n img_r_ = norm_fn(img_r)\r\n logit, feature = self.discriminator(img_r_.cuda(), return_feature=True)\r\n\r\n imgr_list.append(img_r.cpu())\r\n feature_list.append(feature.cpu())\r\n logit_list.append(logit.cpu())\r\n\r\n tq.update()\r\n\r\n imgr_ = rearrange(th.stack(imgr_list, dim=1), 'b r ... -> (b r) ...')\r\n feature_ = rearrange(th.stack(feature_list, dim=1), 'b r ... -> (b r) ...')\r\n logit_ = rearrange(th.stack(logit_list, dim=1), 'b r ... -> (b r) ...')\r\n imgr_np[j].append(imgr_)\r\n feature_np[j].append(feature_) # 这种写反的错误都能犯???\r\n logit_np[j].append(logit_)\r\n\r\n tq.close()\r\n\r\n @th.no_grad()\r\n def interpolation(self):\r\n \"\"\"\r\n test image interpolation\r\n \"\"\"\r\n batch_size = 16\r\n\r\n model = self.model\r\n schedule = self.schedule\r\n device = self.device\r\n\r\n seq, skip, train_loader = self.before_sample()\r\n\r\n def slerp(z1, z2, alpha):\r\n theta = th.acos(th.sum(z1 * z2) / (th.norm(z1) * th.norm(z2)))\r\n return (th.sin((1 - alpha) * theta) / th.sin(theta) * z1\r\n + th.sin(alpha * theta) / th.sin(theta) * z2)\r\n\r\n img1, img2 = None, None\r\n for img, y in train_loader:\r\n img1 = img[:batch_size].to(device) * 2 - 1\r\n img2 = img[batch_size:batch_size * 2].to(device) * 2 - 1\r\n break\r\n\r\n img1_ = th.clamp(img1 * 0.5 + 0.5, 0, 1)\r\n img2_ = th.clamp(img2 * 0.5 + 0.5, 0, 1)\r\n for i in range(batch_size):\r\n tvu.save_image(img1_[i], os.path.join(self.args.image_path,\r\n f\"img100-{i + 1}.png\"))\r\n tvu.save_image(img2_[i], os.path.join(self.args.image_path,\r\n f\"img200-{i + 1}.png\"))\r\n\r\n noise1 = schedule.multi_iteration(img1, - 1, 49 * skip - 1, model,\r\n last=True, fresh=True).to(device)\r\n noise2 = schedule.multi_iteration(img2, - 1, 49 * skip - 1, model,\r\n last=True, fresh=True).to(device)\r\n\r\n timestep_list = list(range(5, 50, 5))\r\n\r\n for k in tqdm(timestep_list, desc='gen_edit1'):\r\n t = seq[k] * th.tensor([1] * batch_size).to(device)\r\n img_n, _, _ = schedule.diffusion(img1, t, noise=noise2)\r\n\r\n # noise_r = schedule.multi_iteration(img_n, k * skip - 1, 49 * skip - 1, model,\r\n # last=True, fresh=True, continuous=continuous)\r\n img_r = schedule.multi_iteration(img_n, k * skip - 1, 0 * skip - 1, model,\r\n last=True, fresh=True)\r\n\r\n img_r = th.clamp(img_r * 0.5 + 0.5, 0, 1)\r\n # noise_r = th.clamp(noise_r * 0.5 + 0.5, 0, 1)\r\n for i in range(batch_size):\r\n tvu.save_image(img_r[i], os.path.join(self.args.image_path,\r\n f\"img1-{i + 1}-{k}.png\"))\r\n # tvu.save_image(noise_r[i], os.path.join(self.args.image_path,\r\n # f\"noise-{i + 1}-{k}.png\"))\r\n\r\n for k in tqdm(timestep_list, desc='gen_edit2'):\r\n t = seq[k] * th.tensor([1] * batch_size).to(device)\r\n img_n, _, _ = schedule.diffusion(img2, t, noise=noise1)\r\n\r\n # noise_r = schedule.multi_iteration(img_n, k * skip - 1, 49 * skip - 1, model,\r\n # last=True, fresh=True, continuous=continuous)\r\n img_r = schedule.multi_iteration(img_n, k * skip - 1, 0 * skip - 1, model,\r\n last=True, fresh=True)\r\n\r\n img_r = th.clamp(img_r * 0.5 + 0.5, 0, 1)\r\n # noise_r = th.clamp(noise_r * 0.5 + 0.5, 0, 1)\r\n for i in range(batch_size):\r\n tvu.save_image(img_r[i], os.path.join(self.args.image_path,\r\n f\"img2-{i + 1}-{k}.png\"))\r\n # tvu.save_image(noise_r[i], os.path.join(self.args.image_path,\r\n # f\"noise-{i + 1}-{k}.png\"))\r\n\r\n for k in tqdm(timestep_list, desc='gen_edit3'):\r\n noise = slerp(noise1, noise2, k / 50.0)\r\n\r\n # noise_r = schedule.multi_iteration(img_n, k * skip - 1, 49 * skip - 1, model,\r\n # last=True, fresh=True, continuous=continuous)\r\n img_r = schedule.multi_iteration(noise, 49 * skip - 1, - 1, model,\r\n last=True, fresh=True)\r\n\r\n img_r = th.clamp(img_r * 0.5 + 0.5, 0, 1)\r\n # noise_r = th.clamp(noise_r * 0.5 + 0.5, 0, 1)\r\n for i in range(batch_size):\r\n tvu.save_image(img_r[i], os.path.join(self.args.image_path,\r\n f\"img3-{i + 1}-{k}.png\"))\r\n # tvu.save_image(noise_r[i], os.path.join(self.args.image_path,\r\n # f\"noise-{i + 1}-{k}.png\"))\r\n","repo_name":"luping-liu/DiffOOD","sub_path":"runner/ood_detect.py","file_name":"ood_detect.py","file_ext":"py","file_size_in_byte":13583,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"3"} +{"seq_id":"42087105733","text":"def xor(a, b):\n val = []\n for i in range(len(b)):\n if(a[i] == b[i]):\n val.append('0')\n else:\n val.append('1')\n \n return ''.join(val)\n\n\ndef crc_code(str, crc):\n \n lencrc = len(crc)\n \n while lencrc <= len(str):\n if(str[0] == '0'):\n xorval = xor('0'*lencrc, str[0:lencrc])\n else:\n xorval = xor(crc, str[0:lencrc])\n #print(xorval)\n if(lencrc == len(str)):\n return xorval[1:]\n str = xorval[1:] + str[lencrc:]\n\ndef checksum(a):\n while len(a) != 1:\n a = xor(a[0], a[1]) + a[2:]\n\n return a\n\n\ndef lrc(a, length):\n if len(a)%length != 0:\n a = a + '0'*(length - len(a)%length)\n lines = [a[i: i + length] for i in range(0, len(a), length)]\n lrc = '0' * length\n for line in lines:\n lrc = xor(lrc, line)\n return a, lrc\n\ndef vrc(a, frame_size, block_length):\n if len(a)%frame_size != 0:\n a = a + '0'*(frame_size - len(a)%frame_size)\n lines = [a[i: i + block_length] for i in range(0, len(a), block_length)]\n vrc = ''\n for line in lines:\n vrc = vrc + checksum(line)\n return a, vrc \n\n ","repo_name":"CubixPro/BCSE-III-Assignments","sub_path":"5th Semester/Computer Networks/Assignment 1/Titir/Codes/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"10057520520","text":"from discord.ext import commands\nimport discord\nimport requests\n\nclass Filter(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.Cog.listener()\n async def on_message(self, message):\n if message.author.bot:\n return\n filtered = self._profanity_filter(message.content)\n if filtered == message.content:\n return\n await message.delete()\n await message.channel.send(f'Hey you, watch your language')\n await message.channel.send(f'{message.author.mention} said: \"{filtered}\" (╯°□°)╯︵ ┻━┻')\n\n def _profanity_filter(self, msg):\n filtered = requests.get(\"https://www.purgomalum.com/service/json\", params={\"text\": msg}).json()[\"result\"]\n i = 0\n result = \"\"\n while i < len(msg):\n if (filtered[i] != \"*\" or (filtered[i] == \"*\" and msg[i] == \"*\")):\n result += filtered[i]\n else:\n result += \"\\*\"\n i += 1\n return result\n","repo_name":"nvbinh15/DisneyLang","sub_path":"cogs/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29966630232","text":"\n\n\nfrom typing import Union\nfrom pydantic import BaseModel\n\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\n\nclass Item(BaseModel):\n name: str\n price: float\n is_offer: Union[bool, None] = None\n\n\n@app.get(\"/\")\ndef transform_schedule(input_schedule):\n transformed_schedule = {}\n app_name = input_schedule[\"app_name\"]\n \n for entry in input_schedule[\"schedule\"]:\n start, end = entry[\"slot\"].split(\"-\")\n day = input_schedule[\"days\"][0] # Assuming only one day is provided\n \n for group in entry[\"groups\"]:\n group_name = group[\"group_name\"]\n sources = group[\"sources\"]\n \n if app_name not in transformed_schedule:\n transformed_schedule[app_name] = {}\n if day not in transformed_schedule[app_name]:\n transformed_schedule[app_name][day] = {}\n \n transformed_group = {\n \"sources\": sources,\n \"slots\": [{\"start\": start, \"end\": end}]\n }\n transformed_schedule[app_name][day][group_name] = transformed_group\n \n return transformed_schedule\n\n# Input schema from your example\ninput_schema = {\n \"app_name\": \"person_tresspassing\",\n \"schedule\": [\n {\n \"slot\": \"10:00-11:30\",\n \"groups\": [\n {\n \"group_name\": \"group_name_1\",\n \"sources\": [\n {\n \"source_id\": \"687655678\",\n \"source_name\": \"sensor2\",\n \"source_type\": \"iot\",\n \"source_subtype\": \"temperature\"\n },\n {\n \"source_id\": \"08765432\",\n \"source_name\": \"camera1\",\n \"source_type\": \"camera\",\n \"source_subtype\": None\n }\n ]\n },\n {\n \"group_name\": \"group_name_2\",\n \"sources\": [\n {\n \"source_id\": \"08765435\",\n \"source_name\": \"camera2\",\n \"source_type\": \"camera\",\n \"source_subtype\": None\n }\n ]\n }\n ]\n }\n ],\n \"days\": [\"monday\"]\n}\n\n# Transform the input schema\ntransformed_output = transform_schedule(input_schema)\n\n# Print the transformed output\nprint(transformed_output)\n\n\n\n","repo_name":"anshul-chaubey/exam","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38700586884","text":"\na = 1\nb = 'rabbit'\nc = 3\n\nmutableList = [a, b, c]\nprint('mutableList is: ', mutableList)\n\nimmutableTuple = (a, b, c)\nprint('immutableTuple is: ', immutableTuple)\n\n\nmutableList[1] = 9\nprint(mutableList)\n\n#immutableTuple[1] = 9\nprint(immutableTuple[1])\n\nst = \"Sesame Street\"\n\n\nx = '''Marcus Aurelis said \"A man's worth is no greater than his ambitions.\"'''\nprint(x)","repo_name":"paulrefalo/CS519","sub_path":"ExamTesting.py","file_name":"ExamTesting.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"5178057055","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'safety.views.home', name='home'),\n # url(r'^safety/', include('safety.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n (r'^register/', include('register.urls')),\n (r'^login/', include('login.urls')),\n (r'^profile/', include('users.urls')),\n url(r'^captcha/', include('captcha.urls')),\n url(r'^$', 'home.views.home', name='home'),\n url(r'^logout/', 'login.views.logout', name='logout'),\n \n \n )\nimport settings\nurlpatterns += patterns(\"django.views\",\n url(r'^media(?P.*)/$',\n \"static.serve\", {\n \"document_root\": settings.MEDIA_ROOT,\n })\n )\n\n\n\nhandler404 = 'util.views.error404'\nhandler500 = 'util.views.error500'\n\n","repo_name":"nishant-saxena/safety","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12163548419","text":"import time\nimport logging\nfrom concurrent import futures\n\nimport grpc\nimport location_pb2\nimport location_pb2_grpc\nfrom datetime import datetime\nimport location_service\nfrom random import randint\nimport json\n\nclass LocationServicer(location_pb2_grpc.LocationServiceServicer):\n def Create(self, request, context):\n # Correct formatting of creation_time\n timestamp_dt = datetime.fromtimestamp(request.creation_time.seconds + request.creation_time.nanos /1e9)\n\n timestamp_st=timestamp_dt.strftime('%Y-%m-%d %H:%M:%S.%f')\n\n request_value = {\n \"person_id\":request.person_id,\n \"creation_time\":request.creation_time,\n \"latitude\": str(request.latitude),\n \"longitude\": str(request.longitude)\n }\n logging.info(\"request_value=\", request_value)\n\n location_service.LocationService.Create(request_value)\n # return location_pb2.LocationMessageRequest(**request_value)\n\n def Retrieve(self, request, context):\n result_from_db = location_service.LocationService.Retrieve(request.id)\n\n if result_from_db:\n return location_pb2.LocationMessageResponse(\n id=result_from_db.id,\n person_id=result_from_db.person_id,\n coordinate=str(result_from_db.coordinate),\n creation_time=result_from_db.creation_time.strftime('%Y-%m-%d %H:%M:%S.%f')\n )\n else:\n context.set_code(grpc.StatusCode.NOT_FOUND)\n context.set_details('Location with id %s not found' % request.id)\n return location_pb2.Empty()\n\ndef serve():\n # Initialize gPRC server\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))\n location_pb2_grpc.add_LocationServiceServicer_to_server(LocationServicer(), server)\n\n logging.getLogger('').handlers = []\n logging.basicConfig(format='%(message)s', level=logging.DEBUG)\n logging.info(\"Server starting on port 5005...\")\n\n server.add_insecure_port(\"[::]:5005\")\n server.start()\n\n # Keep thread alive\n try:\n while True:\n time.sleep(86400)\n except KeyboardInterrupt:\n server.stop(0)\n\nif __name__ == \"__main__\":\n serve()","repo_name":"solomonndungu/nd064-c2-message-passing-projects-starter","sub_path":"modules/location_api/app/udaconnect/grpc_server.py","file_name":"grpc_server.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29256080187","text":"from unittest import mock\nimport socket\nimport time\n\nimport yatest.common\nimport pytest\nimport subprocess\n\nfrom maps.pylibs.utils.lib.common import RetryFailedException\n\nfrom maps.garden.sdk.module_rpc.common import Capabilities\nfrom maps.garden.sdk.module_rpc.proto import module_rpc_pb2 as module_rpc\nfrom maps.garden.libs_server.module import communicate\nfrom maps.garden.libs_server.module.runner import ModuleRunner\n\n\nMODULE_TO_TEST = 'maps/garden/libs_server/test_utils/test_module/test_module'\nMODULE_INFO_COMMAND = communicate.make_module_info_command()\n\n\ndef test_execute_proto():\n module_path = yatest.common.binary_path(MODULE_TO_TEST)\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n module_info = runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n assert module_info.name == \"test_module\"\n assert module_info.capabilities == Capabilities.ALL + [Capabilities.HANDLE_BUILD_STATUS]\n\n\ndef test_kill_and_restart():\n \"\"\"\n ModuleRunner restarts the module process in case it is killed by any reason.\n \"\"\"\n module_path = yatest.common.binary_path(MODULE_TO_TEST)\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n assert runner._process.is_alive()\n\n old_pid = runner._process._module_process.pid\n\n runner._process._module_process.kill()\n runner._process._module_process.wait(5)\n assert runner._process._module_process.returncode is not None\n\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n assert runner._process.is_alive()\n\n new_pid = runner._process._module_process.pid\n\n assert old_pid != new_pid\n\n\n@mock.patch(\"maps.garden.libs_server.module.runner.MODULE_PROCESS_LIFETIME_SEC\", 1)\ndef test_stop_process_by_timer():\n \"\"\"\n Module process should live no longer than 'process_lifetime' parameter.\n ModuleRunner kills the module process itself.\n \"\"\"\n module_path = yatest.common.binary_path(MODULE_TO_TEST)\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n time.sleep(2)\n\n assert runner._process is None\n\n\ndef test_run_two_runners_sequentially():\n \"\"\"\n ModuleRunner cleans any unix socket remained from previous runs.\n \"\"\"\n module_path = yatest.common.binary_path(MODULE_TO_TEST)\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n\ndef test_nonexistent_module():\n \"\"\"\n ModuleRunner throws if it fails to start the process.\n \"\"\"\n with ModuleRunner('wrong_path', Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n with pytest.raises(OSError):\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n\n@mock.patch(\"maps.garden.libs_server.module.runner.MODULE_SOCKET_TIMEOUT_SEC\", 1)\ndef test_socket_timeout():\n \"\"\"\n 'echo' is an example of a command that does not connect to any unix socket.\n Listener.accept() waits for an incoming connection no longer than parameter `socket_timeout`.\n \"\"\"\n with ModuleRunner('/bin/echo', Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n with pytest.raises(RetryFailedException) as e:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n assert isinstance(e.value.last_error, socket.timeout)\n\n\n@mock.patch(\"subprocess.Popen\", wraps=subprocess.Popen)\ndef test_environment_name(popen_mock):\n module_path = yatest.common.binary_path(MODULE_TO_TEST)\n\n with ModuleRunner(module_path, Capabilities.ALL, \"contour_name\", \"test_module\") as runner:\n runner.execute_proto(\n input_message=MODULE_INFO_COMMAND,\n output_message_type=module_rpc.ModuleInfoOutput,\n operation_name=\"module_info\",\n )\n\n popen_mock.assert_called_once()\n\n assert popen_mock.call_args.kwargs[\"env\"] == {\"ENVIRONMENT_NAME\": \"unstable\"}\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4009803500","text":"#Download deepforest before tests start\nimport comet_ml\nfrom deepforest.main import deepforest\nimport geopandas as gpd\nimport os\nimport glob\nimport rasterio as rio\nfrom src import data\nfrom src.models import year\nfrom src.models import dead, multi_stage\nfrom src import utils\nimport tempfile\nimport torch\nfrom pytorch_lightning import Trainer\nimport pandas as pd\nimport pytest\n\n#Set Env VARS\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\ndef pytest_sessionstart():\n # prepare something ahead of all tests\n m = deepforest()\n m.use_release() \n\n@pytest.fixture(scope=\"session\")\ndef ROOT():\n ROOT = os.path.dirname(os.path.dirname(data.__file__))\n \n return ROOT\n\n@pytest.fixture(scope=\"session\")\ndef rgb_pool(ROOT):\n rgb_pool = glob.glob(\"{}/tests/data/*.tif\".format(ROOT))\n \n return rgb_pool\n\n@pytest.fixture(scope=\"session\")\ndef rgb_path(ROOT):\n rgb_path = \"{}/tests/data/2019_D01_HARV_DP3_726000_4699000_image_crop_2019.tif\".format(ROOT)\n \n return rgb_path\n\n@pytest.fixture(scope=\"session\")\ndef sample_crowns(ROOT):\n data_path = \"{}/tests/data/sample.shp\".format(ROOT)\n \n return data_path\n\n@pytest.fixture(scope=\"session\")\ndef plot_data(ROOT, sample_crowns):\n plot_data = gpd.read_file(sample_crowns) \n \n return plot_data\n\n@pytest.fixture(scope=\"session\")\ndef config(ROOT):\n print(\"Creating global config\")\n #Turn of CHM filtering for the moment\n config = utils.read_config(config_path=\"{}/config.yml\".format(ROOT))\n config[\"min_CHM_height\"] = None\n config[\"iterations\"] = 1\n config[\"rgb_sensor_pool\"] = \"{}/tests/data/*.tif\".format(ROOT)\n config[\"HSI_sensor_pool\"] = \"{}/tests/data/*.tif\".format(ROOT)\n config[\"min_train_samples\"] = 1\n config[\"min_test_samples\"] = 1\n config[\"crop_dir\"] = \"{}/tests/data/110ac77ae89043898f618466359c2a2e\".format(ROOT)\n config[\"data_dir\"] = \"{}/tests/data/\".format(ROOT)\n config[\"bands\"] = 349\n config[\"classes\"] = 3\n config[\"top_k\"] = 1\n config[\"convert_h5\"] = False\n config[\"plot_n_individuals\"] = 1\n config[\"min_CHM_diff\"] = None \n config[\"dead_model\"] = None\n config[\"dead_threshold\"] = 0.95\n config[\"megaplot_dir\"] = None\n config[\"use_data_commit\"] = \"110ac77ae89043898f618466359c2a2e\"\n config[\"dead\"][\"epochs\"] = 1\n config[\"pretrain_state_dict\"] = None\n config[\"preload_images\"] = False\n config[\"batch_size\"] = 2\n config[\"gpus\"] = 0\n config[\"existing_test_csv\"] = None\n config[\"workers\"] = 0\n config[\"dead\"][\"num_workers\"] = 0\n config[\"dead\"][\"batch_size\"] = 2\n \n return config\n\n#Data module\n@pytest.fixture(scope=\"session\")\ndef dm(config, ROOT):\n csv_file = \"{}/tests/data/110ac77ae89043898f618466359c2a2e/train.csv\".format(ROOT)\n data_module = data.TreeData(config=config, csv_file=csv_file, data_dir=\"{}/tests/data/110ac77ae89043898f618466359c2a2e\".format(ROOT), debug=True) \n \n return data_module\n\n@pytest.fixture(scope=\"session\")\ndef experiment():\n if not \"GITHUB_ACTIONS\" in os.environ:\n from pytorch_lightning.loggers import CometLogger \n COMET_KEY = os.getenv(\"COMET_KEY\")\n comet_logger = CometLogger(api_key=COMET_KEY,\n project_name=\"DeepTreeAttention\", workspace=\"bw4sz\",auto_output_logging = \"simple\")\n return comet_logger.experiment\n else:\n return None\n\n#Training module\n@pytest.fixture(scope=\"session\")\ndef m(config, dm, ROOT):\n m = multi_stage.MultiStage(train_df=dm.train, test_df=dm.test, crowns=dm.crowns, config=config) \n m.ROOT = \"{}/tests/\".format(ROOT)\n \n return m","repo_name":"Candy-CY/Hyperspectral-Image-Classification-Models","sub_path":"DeepTreeAttention/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"3"} +{"seq_id":"2769199625","text":"from Common.CEnum import LEFT_SEG_METHOD\nfrom Common.ChanException import CChanException, ErrCode\n\n\nclass CSegConfig:\n def __init__(self, seg_algo=\"chan\", left_method=\"peak\"):\n self.seg_algo = seg_algo\n if left_method == \"all\":\n self.left_method = LEFT_SEG_METHOD.ALL\n elif left_method == \"peak\":\n self.left_method = LEFT_SEG_METHOD.PEAK\n else:\n raise CChanException(f\"unknown left_seg_method={left_method}\", ErrCode.PARA_ERROR)\n","repo_name":"Vespa314/chan.py","sub_path":"Seg/SegConfig.py","file_name":"SegConfig.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"3"} +{"seq_id":"71585234642","text":"from datetime import datetime\n\nfrom flask_wtf import Form\nfrom wtforms import StringField, SelectField, SelectMultipleField, DateTimeField, BooleanField, IntegerField\nfrom wtforms.validators import DataRequired, URL, Optional, ValidationError\n\nfrom . import enums\nfrom . import models\nfrom .validators import GeoValidateUsPhone\n\n\nclass ShowForm(Form):\n artist_id = IntegerField('artist_id')\n venue_id = IntegerField('venue_id')\n start_time = DateTimeField('start_time', validators=[DataRequired()], default=datetime.today())\n\n def validate_artist_id(self, field):\n if models.Artist.query.get(field.data) is None:\n raise ValidationError('Artist matching ID not found')\n\n def validate_venue_id(self, field):\n if models.Venue.query.get(field.data) is None:\n raise ValidationError('Venue matching ID not found')\n\n\nclass VenueForm(Form):\n name = StringField('name', validators=[DataRequired()])\n city = StringField('city', validators=[DataRequired()])\n state = SelectField(\n 'state', validators=[DataRequired()],\n choices=enums.State.choices()\n )\n address = StringField('address', validators=[DataRequired()])\n phone = StringField('phone', validators=[DataRequired(), GeoValidateUsPhone()])\n\n genres = SelectMultipleField(\n 'genres', validators=[DataRequired()],\n choices=enums.Genre.choices(), coerce=int,\n )\n seeking_talent = BooleanField('seeking_talent', default=True)\n seeking_description = StringField('seeking_description', validators=[Optional()])\n\n website = StringField('website', validators=[Optional(), URL()])\n facebook_link = StringField('facebook_link', validators=[Optional(), URL()])\n image_link = StringField('image_link', validators=[Optional(), URL()])\n\n\nclass ArtistForm(Form):\n name = StringField('name', validators=[DataRequired()])\n city = StringField('city', validators=[DataRequired()])\n state = SelectField(\n 'state', validators=[DataRequired()],\n choices=enums.State.choices()\n )\n phone = StringField('phone', validators=[DataRequired(), GeoValidateUsPhone()])\n\n genres = SelectMultipleField(\n 'genres', validators=[DataRequired()],\n choices=enums.Genre.choices(), coerce=int,\n )\n seeking_venue = BooleanField('seeking_venue', default=True)\n seeking_description = StringField('seeking_description', validators=[Optional()])\n\n website = StringField('website', validators=[Optional(), URL()])\n facebook_link = StringField('facebook_link', validators=[Optional(), URL()])\n image_link = StringField('image_link', validators=[Optional(), URL()])\n","repo_name":"DustinFischer/udacity-fsnd","sub_path":"projects/01_fyyur/starter_code/fyyur/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17299535994","text":"# all_success - trigger task when all upstream task succeeds. (default trigger rule) \n# all_failed - trigger task if all upstream task have failed\n# all_done - trigger task when all upstream tasks are triggered. It does not depends on status.\n# one_success - trigger task as soon as one of the upstream task succeed\n# one_failed - trigger task as soon as one of the upstream task faiis\n# none_failed - trigger task when all upstream tasks either succeeds or get skipped\n# none_failed_or_skipped - triggers tasks when atleast one parent task suceed , other can succeed or skipped \n\n\nimport random\nfrom datetime import datetime\nfrom airflow.operators.bash_operator import BashOperator\n\nfrom airflow import DAG\n\nwith DAG ('trigger_rules', start_date=datetime(2022,1,1),\n schedule_interval='@daily', catchup=False,tags=['trigger_rules']) as dag:\n \n task1 = BashOperator(\n task_id='task1',\n bash_command ='exit 0'\n )\n \n \n task2 = BashOperator(\n task_id='task2',\n bash_command ='exit 99'\n )\n \n task3 = BashOperator(\n task_id='task3',\n bash_command ='exit 0',\n trigger_rule= 'none_failed_or_skipped'\n )\n \n [task1,task2] >> task3","repo_name":"technoavengers/airflow-training","sub_path":"dags/trigger_rules/triggers_rules.py","file_name":"triggers_rules.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38653288614","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\n# 初期条件に使う関数\ndef phi(x):\n return -x * (x - 1)\n\n# 定数\nh = 0.1\nk = 0.1\ntn = int(1 / h) + 1\nxn = int(1 / k) + 1\n\n# 解となる配列\nU = np.zeros((tn, xn))\nfor j in range(xn):\n #for i in range(tn):\n U[0][j] = phi(j * k)\n\n# 陰的有限差分法(ガウス・ザイデル法を使用)\nr = h / k ** 2\nthreshold = 1e-4\nfor i in range(tn - 1):\n flag = True\n while flag:\n flag = False\n pU = [j for j in U[i + 1]]\n for j in range(1, xn - 1):\n U[i + 1][j] = 1 / (1 + 2 * r) * (U[i][j] + r * U[i + 1][j - 1] + r * U[i + 1][j + 1])\n if abs(U[i + 1][j] - pU[j]) > threshold:\n flag = True\n\n# グラフの表示\nt = np.linspace(0, 1, xn)\nx = np.linspace(0, 1, tn)\ntaxis, xaxis = np.meshgrid(t, x)\nuaxis = U.reshape(taxis.shape)\nfig = plt.figure()\nax = Axes3D(fig)\nax.set_xlabel(\"t\")\nax.set_ylabel(\"x\")\nax.set_zlabel(\"u\")\nax.plot_wireframe(xaxis, taxis, uaxis, color='black')\nplt.show()","repo_name":"Nyanyan/DifferentialEquation","sub_path":"negative-fdm.py","file_name":"negative-fdm.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"2772591800","text":"salarios = list()\r\nabonos = list()\r\nfuncioarios = total_abono = funcionario_menor = maior_abono = 0\r\nprint('Projeção de Gastos com Abono'\r\n '='*15)\r\nwhile True:\r\n salario = int(input('Salario: '))\r\n if salario == 0:\r\n break\r\n salarios.append(salario)\r\n abono = salario * 0.2\r\n if abono <= 100:\r\n abono = 100\r\n funcionario_menor += 1\r\n abonos.append(abono)\r\n funcioarios += 1\r\n total_abono += abono\r\n if abono > maior_abono:\r\n maior_abono = abono\r\nprint('\\n\\nSalario - Abono')\r\nfor x, salario in enumerate(salarios):\r\n print(f'MT Salario: {salario:4} - MT {abonos[x]:.5f}')\r\nprint(f'\\nForam processados {funcioarios} colaboradores'\r\n f'\\nTotal gasto com abonos: {total_abono}'\r\n f'\\nValor mínimo pago a {funcionario_menor} colaboradores'\r\n f'\\nMaior valor de abono pago: {maior_abono}')","repo_name":"natalinoqueba/exercicios-python","sub_path":"04_Listas/20_abono.py","file_name":"20_abono.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"573296064","text":"import json, requests\n\n\ncity_list = [\n'кыргызстан', 'таджикистан', 'казахстан', 'узбекистан', 'украина', 'беларусь',\n'беларусия', 'армения', 'грузия', 'азербайджан', 'киргизия', 'молдавия', 'туркмения',\n]\n\nnew_city_list = []\nurl = \"https://ws3.morpher.ru/russian/declension\"\n\nfor i in range(len(city_list)):\n s = city_list[i]\n\n params = dict (\n s=str(s),\n format=\"json\",\n #token= #Не обязателен. Подробнее: http://morpher.ru/ws3/#authentication\n )\n\n response = requests.get(url=url, params=params)\n data = json.loads(response.text)\n new_city_list.extend([\n data.get('Р'),\n data.get('Д'),\n data.get('В'),\n data.get('Т'),\n data.get('П'),\n ])\n\nprint(new_city_list)\n","repo_name":"iakovleva/VU_keys","sub_path":"morpher_script.py","file_name":"morpher_script.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73826433362","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# how many times we want to plot for each r step\nnplot = 100\nones = np.ones((nplot,1))\n# 1600 r steps and 100 points for each step\n# matrix to hold each point, restarts for each \nmat = np.zeros((nplot,1))\n# iterate the solve for each r\nfor r in np.arange(2,4,0.005):\n # Starts us somehow to converge later\n mat[0] = 0.6\n # does first 200 iterations to converge before saving\n for n in range(1200):\n mat[0] = r*mat[0]*(1-mat[0])\n # solves next step in matrix and save previous to array\n for n in range(nplot-1):\n mat[n+1] = r*mat[n]*(1-mat[n])\n plt.scatter(r*ones,mat,s=1)\n plt.title(\"Bifurcation Diagram\")\n plt.xlabel(\"r\")\n plt.ylabel(\"x\")\nplt.savefig('hw4.png')\n","repo_name":"emilyjcosta5/mathematical_modeling","sub_path":"assignment4/hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70512322963","text":"from tqdm import tqdm\r\nimport numpy as np\r\nimport math\r\n\r\ndataname = 'WOS5736'\r\nnum_of_sample = str(1000)\r\nfeature_numbers = 500\r\nfs_method = 'DFS'\r\nwith open('e:/pythonwork/newclassification/dataset_after_preprocessing/' + dataname + '_' + num_of_sample +'.txt', 'r', encoding='utf-8') as a:\r\n texts = [i.strip().lower() for i in a.readlines()]\r\nwith open('e:/pythonwork/newclassification/dataset_after_preprocessing/' + dataname + '_' + num_of_sample +'_label.txt', 'r', encoding='utf-8') as a:\r\n labels = [i.strip() for i in a.readlines()]\r\n\r\n# with open('e:/pythonwork/newclassification/dataset_after_preprocessing/' + dataname + '.txt', 'r', encoding='utf-8') as a:\r\n# texts = [i.strip().lower() for i in a.readlines()]\r\n# with open('e:/pythonwork/newclassification/dataset_after_preprocessing/' + dataname +'_label.txt', 'r', encoding='utf-8') as a:\r\n# labels = [i.strip() for i in a.readlines()]\r\n\r\n\r\n\r\nfrom nltk.tokenize import word_tokenize\r\ntexts = [word_tokenize(sentence) for sentence in texts]\r\n\r\n# 获得词汇表\r\ndef get_vocab(text):\r\n voc = []\r\n for i in text:\r\n for j in i:\r\n if j not in voc:\r\n voc.append(j)\r\n return voc\r\n\r\n\r\nvocab = get_vocab(texts)\r\n# print(len(vocab))\r\nlabels_class = list(set(labels))\r\n# print(labels_class)\r\nlabels_number = {i: 0 for i in labels_class}\r\nfor i in labels: # 1、-1\r\n labels_number[i] += 1\r\n# print(labels_number)\r\nN = len(texts)\r\n\r\n\r\ndef df():\r\n DF = {i: 0 for i in vocab}\r\n for i in range(len(vocab)): # 14000+\r\n for j in range(len(texts)): # 10600+\r\n if vocab[i] in texts[j]:\r\n DF[vocab[i]] += 1\r\n return DF\r\n\r\n\r\ndef ig():\r\n IG = {}\r\n for i in tqdm(range(len(vocab))):\r\n n_tc = np.zeros((2, len(labels_class))) # [[0, 0], [0, 0]]\r\n for j in range(len(texts)):\r\n if vocab[i] in texts[j]:\r\n n_tc[0, labels_class.index(labels[j])] += 1\r\n else:\r\n n_tc[1, labels_class.index(labels[j])] += 1\r\n # print(n_tc)\r\n n_t = sum(n_tc[0, :])\r\n n_minorst = sum(n_tc[1, :])\r\n a = -sum([labels_number[i] / N * math.log(labels_number[i] / N + 1e-5) for i in labels_class])\r\n b = n_t / N * sum([n_tc[0, iii] / n_t * math.log(n_tc[0, iii] / n_t + 1e-5) for iii in\r\n range(len(labels_class))]) if n_t != 0 else 0\r\n c = n_minorst / N * sum([n_tc[1, iii] / n_minorst * math.log(n_tc[1, iii] / n_minorst + 1e-5) for iii in\r\n range(len(labels_class))]) if n_minorst != 0 else 0\r\n ig = a + b + c\r\n IG[vocab[i]] = ig\r\n return IG\r\n\r\n\r\ndef dfs():\r\n DFS = {}\r\n for i in tqdm(range(len(vocab))):\r\n n_tc = np.zeros((2, len(labels_class))) # [[0, 0], [0, 0]]\r\n for j in range(len(texts)):\r\n if vocab[i] in texts[j]:\r\n n_tc[0, labels_class.index(labels[j])] += 1\r\n else:\r\n n_tc[1, labels_class.index(labels[j])] += 1\r\n n_t = sum(n_tc[0, :])\r\n dfs = sum([n_tc[0, iii]/n_t/ # 分子\r\n (n_tc[1, iii]/labels_number[labels_class[iii]] + # 分母第一项\r\n (n_t - n_tc[0, iii])/((N - labels_number[labels_class[iii]])/N) + # 分母第二项\r\n 1) for iii in range(len(labels_class))])\r\n DFS[vocab[i]] = dfs\r\n return DFS\r\n\r\n\r\ndef get_features():\r\n feature_weights = {}\r\n if fs_method == 'IG':\r\n feature_weights = ig()\r\n elif fs_method == 'DF':\r\n feature_weights = df()\r\n elif fs_method == 'DFS':\r\n feature_weights = dfs()\r\n else:\r\n print('wrong name of method')\r\n features_after_sorting = sorted(feature_weights.items(), key=lambda x: x[1], reverse=True)\r\n features = [word[0] for word in features_after_sorting[:feature_numbers]]\r\n return features\r\n\r\n\r\nfeatures = get_features()\r\n\r\n# with open('D:/成都理工大学重要文件夹/课程《文本分析与挖掘》相关/数据/Pang&Lee_features.txt', 'w') as a:\r\n# for i in features:\r\n# a.write(i + '\\n')\r\ntext_after_feature_selection = []\r\nfor i in texts:\r\n sentence = []\r\n for j in i:\r\n if j in features:\r\n sentence.append(j)\r\n text_after_feature_selection.append(sentence)\r\n\r\ntext_after_feature_selection2 = []\r\nlabels_after_feature_selection = []\r\nfor i in range(len(text_after_feature_selection)):\r\n if len(text_after_feature_selection[i]) > 0:\r\n text_after_feature_selection2.append(text_after_feature_selection[i])\r\n labels_after_feature_selection.append(labels[i])\r\n\r\nfeature_numbers = str(feature_numbers)\r\n\r\nwith open('e:/pythonwork/newclassification/dataset_after_feature_selection/' + dataname + '_' + num_of_sample + '_' + fs_method + '_' + feature_numbers +'.txt', 'w', encoding='utf-8') as a:\r\n for i in range(len(text_after_feature_selection2)):\r\n a.write(' '.join(text_after_feature_selection2[i]) + '\\n')\r\nwith open('e:/pythonwork/newclassification/dataset_after_feature_selection/' + dataname + '_' + num_of_sample + '_' + fs_method + '_' + feature_numbers +'_label.txt', 'w', encoding='utf-8') as a:\r\n for i in range(len(labels_after_feature_selection)):\r\n a.write(' '.join(labels_after_feature_selection[i]) + '\\n')\r\n\r\n# with open('e:/pythonwork/newclassification/dataset_after_feature_selection/' + dataname + '_' + 'IG' + '_' + feature_numbers + '.txt', 'w', encoding='utf-8') as a:\r\n# for i in range(len(text_after_feature_selection2)):\r\n# a.write(' '.join(text_after_feature_selection2[i]) + '\\n')\r\n# with open('e:/pythonwork/newclassification/dataset_after_feature_selection/' + dataname + '_' + 'IG' + '_' + feature_numbers +'_label.txt', 'w', encoding='utf-8') as a:\r\n# for i in range(len(labels_after_feature_selection)):\r\n# a.write(' '.join(labels_after_feature_selection[i]) + '\\n')\r\n\r\n","repo_name":"yangpeiailong/Improving-Deep-Learning-based-Few-shot-Text-Classification-with-Feature-Selection-methods","sub_path":"codes/preprocessing/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10984185248","text":"from keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ReduceLROnPlateau\nimport os\nfrom model import model\n\n# Define constants\nDATASET_PATH = './English/Fnt/'\nMODEL_PATH = '.'\nBATCH_SIZE = 128\nEPOCHS = 20\nTARGET_WIDTH = 128\nTARGET_HEIGHT = 128\nTARGET_DEPTH = 3\n\n# Set up the data generator to flow data from disk\nprint(\"[INFO] Setting up Data Generator...\")\ndata_gen = ImageDataGenerator(validation_split=0.2, rescale=1./255)\n\ntrain_generator = data_gen.flow_from_directory(\n DATASET_PATH, \n subset='training',\n target_size = (TARGET_WIDTH, TARGET_HEIGHT),\n batch_size = BATCH_SIZE\n)\n\nval_generator = data_gen.flow_from_directory(\n DATASET_PATH,\n subset='validation',\n target_size = (TARGET_WIDTH, TARGET_HEIGHT),\n batch_size = BATCH_SIZE\n)\n\n# Build model\nprint(\"[INFO] Compiling model...\")\nalexnet = model(train_generator.num_classes, (TARGET_WIDTH, TARGET_HEIGHT, TARGET_DEPTH))\n\n# Compile the model\nalexnet.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Train the network\nprint(\"[INFO] Training network ...\")\n# Set the learning rate decay\nreduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=2, min_lr=0.001)\nH = alexnet.fit_generator(\n\ttrain_generator,\n\tvalidation_data=val_generator,\n\tsteps_per_epoch=train_generator.samples // BATCH_SIZE,\n\tvalidation_steps = val_generator.samples // BATCH_SIZE,\n\tepochs=EPOCHS, verbose=1, callbacks=[reduce_lr])\n\n# save the model to disk\nprint(\"[INFO] Serializing network...\")\nalexnet.save(MODEL_PATH + os.path.sep + \"trained_model\")\n\nprint(\"[INFO] Done!\")","repo_name":"minhthangdang/CharactersRecognition","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"29046730937","text":"# coding: utf-8\n\nimport datetime\nimport time\n\nimport balance.balance_api as api\nimport btestlib.reporter as reporter\nimport temp.MTestlib.MTestlib as mtl\nfrom balance import balance_steps as steps\nfrom btestlib.data import defaults\n\nbenchmark_series = []\neffort = []\n\n# Timer decorator\ndef time_deco(func):\n\n global effort\n global benckmark_series\n\n if func.__name__ == 'stop':\n benchmark_series.append(effort)\n effort = []\n return func\n\n def time_wrapper(*args, **kwargs):\n start = time.clock()\n res = func(*args, **kwargs)\n end = time.clock()\n effort.append('{0:<16} : {1:>10.6f} |'.format(func.__name__, end - start))\n reporter.log(('{0:<16} : {1:>10.6f} |'.format(func.__name__, end - start)))\n return res\n\n return time_wrapper\n\n# Signal method\ndef stop():\n '''\n Just signal for time_deco to stop current iteration and push data to benchmark_series\n '''\n pass\n\n# Result representation\ndef get_stats ():\n for num, step in enumerate(benchmark_series[0]):\n reporter.log((step + ' '.join([effort[num][20:] for effort in benchmark_series[1:]])))\n# ---------------------------------------------------------------------------------------------------------------------\n\ndef new():\n\n for run in xrange(3):\n SERVICE_ID = 7\n PRODUCT_ID = 1475\n PAYSYS_ID = 1003\n QTY = 100.1234\n BASE_DT = datetime.datetime.now()\n\n client_id = time_deco(steps.ClientSteps.create)()\n agency_id = time_deco(steps.ClientSteps.create)({'IS_AGENCY': 1})\n order_owner = client_id\n invoice_owner = agency_id or client_id\n person_id = time_deco(steps.PersonSteps.create)(invoice_owner, 'ur')\n\n contract_id, _ = time_deco(steps.ContractSteps.create_contract)('opt_agency_prem_post',\n {'CLIENT_ID': invoice_owner,\n 'PERSON_ID': person_id,\n 'DT': '2015-04-30T00:00:00',\n 'FINISH_DT': '2016-06-30T00:00:00',\n 'IS_SIGNED': '2015-01-01T00:00:00',\n 'SERVICES': [7],\n # 'COMMISSION_TYPE': 57,\n # 'NON_RESIDENT_CLIENTS': 0,\n # 'REPAYMENT_ON_CONSUME': 0,\n 'PERSONAL_ACCOUNT': 1,\n 'LIFT_CREDIT_ON_PAYMENT': 1,\n 'PERSONAL_ACCOUNT_FICTIVE': 1\n })\n\n service_order_id = time_deco(steps.OrderSteps.next_id)(SERVICE_ID)\n time_deco(steps.OrderSteps.create)(order_owner, service_order_id, service_id=SERVICE_ID, product_id=PRODUCT_ID,\n params={'AgencyID': agency_id})\n service_order_id2 = time_deco(steps.OrderSteps.next_id)(SERVICE_ID)\n time_deco(steps.OrderSteps.create)(order_owner, service_order_id2, service_id=SERVICE_ID, product_id=PRODUCT_ID,\n params={'AgencyID': agency_id})\n orders_list = [\n {'ServiceID': SERVICE_ID, 'ServiceOrderID': service_order_id, 'Qty': QTY, 'BeginDT': BASE_DT}]\n request_id = time_deco(steps.RequestSteps.create)(invoice_owner, orders_list)\n invoice_id, _, _ = time_deco(steps.InvoiceSteps.create)(request_id, person_id, PAYSYS_ID, credit=1,\n contract_id=contract_id,\n overdraft=0, endbuyer_id=None)\n time_deco(steps.InvoiceSteps.pay)(invoice_id)\n\n response = time_deco(api.medium().CreateTransferMultiple)(defaults.PASSPORT_UID,\n [\n {\"ServiceID\": SERVICE_ID,\n \"ServiceOrderID\": service_order_id,\n \"QtyOld\": 100.1234, \"QtyNew\": 20}\n ],\n [\n {\"ServiceID\": SERVICE_ID,\n \"ServiceOrderID\": service_order_id2,\n \"QtyDelta\": 1}\n ], 1, None)\n\n time_deco(steps.CampaignsSteps.do_campaigns)(SERVICE_ID, service_order_id, {'Bucks': 20.12, 'Money': 0}, 0, BASE_DT)\n time_deco(steps.CampaignsSteps.do_campaigns)(SERVICE_ID ,service_order_id2, {'Bucks': 19.84, 'Money': 0}, 0, BASE_DT)\n time_deco(steps.ActsSteps.generate)(invoice_owner, force=1, date=BASE_DT)\n\n time_deco(stop)()\n\n\ndef old():\n SERVICE_ID = 7\n PRODUCT_ID = 503162\n PAYSYS_ID = 1003\n QTY = 100.1234\n BASE_DT = datetime.datetime.now()\n\n client_id = time_deco(mtl.create_client)({'IS_AGENCY': 0, 'NAME': u'Petrov3'})\n person_id = time_deco(mtl.create_person)(client_id, 'ur')\n\n service_order_id = time_deco(mtl.get_next_service_order_id)(SERVICE_ID)\n order_id = time_deco(mtl.create_or_update_order)(client_id, PRODUCT_ID, SERVICE_ID, service_order_id,\n {'TEXT': 'Py_Test order'})\n service_order_id2 = time_deco(mtl.get_next_service_order_id)(SERVICE_ID)\n order_id = time_deco(mtl.create_or_update_order)(client_id, PRODUCT_ID, SERVICE_ID, service_order_id2,\n {'TEXT': 'Py_Test order'})\n orders_list = [\n {'ServiceID': SERVICE_ID, 'ServiceOrderID': service_order_id, 'Qty': QTY, 'BeginDT': BASE_DT}]\n request_id = time_deco(mtl.create_request)(client_id, orders_list, BASE_DT)\n\n invoice_id = time_deco(mtl.create_invoice)(request_id, person_id, PAYSYS_ID, credit=0, contract_id=None,\n overdraft=0, endbuyer_id=None)\n time_deco(mtl.OEBS_payment)(invoice_id)\n\n response = time_deco(mtl.rpc.Balance.CreateTransferMultiple)(defaults.PASSPORT_UID,\n [\n {\"ServiceID\": SERVICE_ID,\n \"ServiceOrderID\": service_order_id,\n \"QtyOld\": 100.1234, \"QtyNew\": 0}\n ],\n [\n {\"ServiceID\": SERVICE_ID,\n \"ServiceOrderID\": service_order_id2,\n \"QtyDelta\": 1}\n ], 1, None)\n\n\nif __name__ == \"__main__\":\n new()\n get_stats ()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/btestlib/tools/torvald_test_speed.py","file_name":"torvald_test_speed.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30753753909","text":"from cx_Freeze import setup, Executable\n\n# Dependencies are automatically detected, but it might need\n# fine tuning.\nbuild_options = {'packages': ['pychoacoustics',\n 'pychoacoustics.default_experiments',\n 'pychoacoustics.nnresample',\n ],\n 'excludes': ['tkinter',\n 'PyQt5.QtQml',\n 'PyQt5.QtBluetooth',\n 'PyQt5.QtQuickWidgets',\n 'PyQt5.QtSensors',\n 'PyQt5.QtSerialPort',\n 'PyQt5.QtSql'\n ]}\n\n\nimport sys\nbase = 'Win32GUI' if sys.platform=='win32' else None\n\nexecutables = [\n Executable('pychoacoustics\\\\__main__.py',\n base=base,\n target_name = 'pychoacoustics',\n icon='icons/Machovka_Headphones.ico')\n]\n\nsetup(name='pychoacoustics',\n version=\"0.6.8\",\n description = '',\n options = {'build_exe': build_options},\n executables = executables)\n","repo_name":"sam81/pychoacoustics","sub_path":"setup_cx.py","file_name":"setup_cx.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"} +{"seq_id":"7681362982","text":"from flectra import fields, models\r\nclass AdraAssetsReports(models.TransientModel):\r\n _name = 'adra.assets.reports'\r\n _description = 'Generador de Reportes en PDF y Excel para activos.'\r\n\r\n x_all_projects = fields.Selection([('one', 'Un proyecto'), ('all', 'Todos los proyectos')],\r\n string='Proyecto', default='one')\r\n x_account_analytic_account_id = fields.Many2one('account.analytic.account', string='Seleccione un proyecto')\r\n x_sort_by_project = fields.Selection([('fecha_ingreso', 'Fecha')], string='Ordenado por', default='fecha_ingreso')\r\n x_sort_by_all = fields.Selection([('fecha_proyecto', 'Fecha/Proyecto'), ('proyecto_fecha', 'Proyecto/fecha')],\r\n string='Ordenado por', default='fecha_proyecto')\r\n x_status_active = fields.Selection([('general', 'General'), ('vigente', 'Vigente'), ('dado_baja', 'Dado de baja')],\r\n string='Estado activo;', default='general')\r\n\r\n def generate_pdf_report(self):\r\n sort_by = self.x_sort_by_project if self.x_all_projects == 'one' else self.x_sort_by_all\r\n projects_quantity = self.x_all_projects\r\n data = {\r\n 'project_code': self.x_account_analytic_account_id.code,\r\n 'project_name': self.x_account_analytic_account_id.name,\r\n 'sort_by': sort_by,\r\n 'projects_quantity': projects_quantity,\r\n 'x_status_active': self.x_status_active\r\n }\r\n report = self.env.ref('adra_account_extended.report_pdf_assets')\r\n return report.report_action(self, data=data)\r\n\r\n def generate_excel_report(self):\r\n sort_by = self.x_sort_by_project if self.x_all_projects == 'one' else self.x_sort_by_all\r\n projects_quantity = self.x_all_projects\r\n data = {'project_name': self.x_account_analytic_account_id.name,\r\n 'project_code': self.x_account_analytic_account_id.code,\r\n 'sort_by': sort_by,\r\n 'projects_quantity': projects_quantity,\r\n 'x_status_active': self.x_status_active\r\n }\r\n report = self.env.ref('adra_account_extended.report_xlsx_assets')\r\n return report.report_action(self, data=data)\r\n","repo_name":"cv2310/flectra20-adra-programas","sub_path":"models/adra_assets_reports.py","file_name":"adra_assets_reports.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38778623782","text":"\"\"\"External stats\n\nRevision ID: c8c17d4869a8\nRevises: 933060cd8eeb\nCreate Date: 2019-09-18 14:52:42.006646\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c8c17d4869a8\"\ndown_revision = \"933060cd8eeb\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"daily_statistic\",\n sa.Column(\n \"externally_shared\", sa.Integer(), server_default=\"0\", nullable=False\n ),\n )\n op.add_column(\n \"daily_statistic\",\n sa.Column(\"show_results\", sa.Integer(), server_default=\"0\", nullable=False),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"daily_statistic\", \"show_results\")\n op.drop_column(\"daily_statistic\", \"externally_shared\")\n # ### end Alembic commands ###\n","repo_name":"Nukesor/ultimate-poll-bot","sub_path":"migrations/versions/2019_09_18_c8c17d4869a8_external_stats.py","file_name":"2019_09_18_c8c17d4869a8_external_stats.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"3"} +{"seq_id":"71225058321","text":"import logging\nimport random\n\n\nclass Song:\n # id\n # file_id\n # name\n\n def __init__(self, m_id, db):\n self.__id = m_id\n self.__file_id = None\n self.__name = None\n song = None\n try:\n song = db.exec_select(\"\"\"SELECT * FROM music WHERE \"M_ID\"='%s';\"\"\", (self.__id,))[0]\n # format [(4, 'AwADAgADRQMAAoFAiUmLYiTyfoFvtwI', 'J-Five - Fing a way')]\n logging.debug(\"selectSong=\" + str(song))\n except:\n logging.error(\"Request to db failed when select a song!\", exc_info=True)\n if not song or len(song) < 3:\n logging.error(\"Song wasn't extracted!(Song.__init__)\")\n self.__file_id = song[1]\n self.__name = song[2]\n\n def getFileId(self):\n return self.__file_id\n\n def getName(self):\n return self.__name\n\n def getId(self):\n return self.__id\n\n # get 'count' number of songs names from the database(table 'music')\n # with the exclude of song with ID equal to 'ignore'\n @staticmethod\n def getRandomNames(count, ignore, db):\n names = []\n logging.info(\"Generating random song names...\")\n max = db.count_rows('music');\n ids = random.sample([x for x in range(1, max) if x != ignore], count)\n logging.debug(\"Songs ids generated:\" + str(ids))\n for id in ids:\n # format [(\"Bon Jovi - It's my life\",)]\n new_name = db.exec_select(\"\"\"SELECT \"NAME\" FROM music WHERE \"M_ID\"=%s;\"\"\", (id,))[0][0]\n logging.debug(\"New name selected=\" + str(new_name))\n logging.debug(\"New name type=\" + str(type(new_name)))\n names.append(new_name)\n logging.debug(\"Generated set of name=\" + str(names))\n return names\n","repo_name":"ihnashchenka/TelegramBot","sub_path":"objects/Song.py","file_name":"Song.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19229616288","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 20:29:09 2020\n\n@author: yogendra jaiswal\n\"\"\"\n\ncol_map = {'a': 0, 'b': 1, 'c': 2,\n 'd': 3, 'e': 4, 'f': 5,\n 'g': 6, 'h': 7}\n\ndef index(cell):\n global col_map\n return (col_map[cell[0]], 8 - int(cell[1]))\n\ns = input()\nt = input()\n\ns = index(s)\nt = index(t)\n\nstep_map = {'L': (-1, 0), 'R': (1, 0),\n 'U': (0, -1), 'D': (0, 1),\n 'LU': (-1, -1), 'RU': (1, -1),\n 'LD': (-1, 1), 'RD': (1, 1)}\n\ndef dist(s, t):\n return (s[0] - t[0]) ** 2 + (s[1] - t[1]) ** 2\n\ndef best_step(s, t):\n min_dis = 9999\n step_ = None\n for step, change in step_map.items():\n if dist((s[0] + change[0], s[1] + change[1]), t) < min_dis:\n min_dis = dist((s[0] + change[0], s[1] + change[1]), t)\n step_ = step\n return step_\n\ncurr_dis = dist(s, t)\nsteps = []\n\nwhile curr_dis:\n step_ = best_step(s, t)\n change = step_map[step_]\n s = (s[0] + change[0], s[1] + change[1])\n steps.append(step_)\n curr_dis = dist(s, t)\n\nprint(len(steps))\nfor i in steps:\n print(i)\n \n ","repo_name":"yogendra-j/comp_python","sub_path":"ShortestPathOfTheKing.py","file_name":"ShortestPathOfTheKing.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7433097343","text":"from sqlalchemy.orm import Session\n\nfrom models.academic import Subjects, Career\nfrom schemas.subjects import SubjectsCreateSchema, SubjectCareerSchema\n\n\ndef create_subject(session: Session, subject: SubjectsCreateSchema):\n db_subject = Subjects(**subject.dict())\n session.add(db_subject)\n session.commit()\n session.refresh(db_subject)\n session.close()\n return db_subject\n\n\ndef get_subjects_by_careers(session: Session, skip, limit):\n subjects_with_careers = session.query(\n Subjects.id,\n Subjects.name,\n Subjects.study_duration,\n Career.name\n ).join(Career).offset(skip).limit(limit).all()\n\n result = []\n for _id, subject_name, study_duration, career_name in subjects_with_careers:\n subject = SubjectCareerSchema(\n id=_id,\n name=subject_name,\n study_duration=study_duration,\n career_name=career_name\n )\n result.append(subject)\n return result\n\n","repo_name":"dariomolina/edmachina","sub_path":"backend/services/db/subjects.py","file_name":"subjects.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70727669841","text":"from turtle import *\nimport math\n\njohn = Turtle()\n\nsetup(500,300)\nx_pos = -250\ny_pos = -150\n\njohn.penup()\njohn.setposition(x_pos,y_pos)\n\ndef drawShape(sides, length):\n\n #john = Turtle()\n ex_angle = 180-((180*(sides-2)/sides))\n john.pendown()\n john.speed(10)\n\n #john.begin_fill()\n for x in range(sides):\n\n john.right(ex_angle)\n john.forward(length)\n\n #john.end_fill()\n\ndef tessalate(sides, length, x, y):\n\n xloc = x\n yloc = y\n\n in_rads = math.radians(180/sides)\n degree_tan = math.tan(in_rads)\n apothem = int(length/(2*degree_tan))\n height = apothem*2\n\n for columns in range(10):\n # Draw one full coulumn.\n for row in range(10):\n\n drawShape(sides, length)\n\n john.penup()\n john.left(90)\n john.forward(height)\n john.right(90)\n\n # Get into position for the next column.\n john.penup()\n x = xloc + (height)*(columns+1)\n\n if columns % 2 == 0:\n y = yloc - apothem\n else:\n y = yloc\n\n john.setposition(x, y)\n\n\ntessalate(6,20, x_pos, y_pos)\n\n\n\n\n#john.setposition(150,150)\n#print(john.position())\n#john.speed(3)\n#john.pensize(10)\n#john.pencolor(\"black\")\n#john.down()\n#john.forward(20)\n#john.stamp()\n\n\n# Close Window\nexitonclick()\n","repo_name":"GirlsFirst/SIP-2017","sub_path":"Unit1_Foundations/shapes2/pythonshapes.py","file_name":"pythonshapes.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"23457373617","text":"from gensim.corpora import Dictionary\nfrom gensim.models import ldamodel\nimport numpy\n\ntexts = [['bank','river','shore','water'],\n ['river','water','flow','fast','tree'],\n ['bank','water','fall','flow'],\n ['bank','bank','water','rain','river'],\n ['river','water','mud','tree'],\n ['money','transaction','bank','finance'],\n ['bank','borrow','money'],\n ['bank','finance'],\n ['finance','money','sell','bank'],\n ['borrow','sell'],\n ['bank','loan','sell']]\n\ndictionary = Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\n\nnumpy.random.seed(1) # setting random seed to get the same results each time.\nmodel = ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=2)\n\n\nbow_water = ['bank','water','bank']\nbow_finance = ['money','transaction','bank','finance']\n\nbow = model.id2word.doc2bow(bow_finance) # convert to bag of words format first\ndoc_topics, word_topics, phi_values = model.get_document_topics(bow, per_word_topics=True)\n\n\n#print(word_topics)\nprint(doc_topics)\n#print(phi_values)\n\nbow = model.id2word.doc2bow(bow_water) # convert to bag of words format first\ndoc_topics, word_topics, phi_values = model.get_document_topics(bow, per_word_topics=True)\n\n\n#print(word_topics)\nprint(doc_topics)\n#print(phi_values)\n\n# bow = model.id2word.doc2bow(bow_finance) # convert to bag of words format first\n# doc_topics, word_topics, phi_values = model.get_document_topics(bow, per_word_topics=True)\n#\n# word_topics","repo_name":"WChengGitHub/python","sub_path":"project_LDA/python3.py","file_name":"python3.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29183910177","text":"import gevent\nimport logging\nimport py\nimport pyjack\nimport pytest\nimport threading\nimport time\n\nfrom skybone.rbtorrent import utils as rbutils\n\n\ndef test_human_size():\n hs = rbutils.human_size\n\n kb = 1024\n mb = kb * 1024\n gb = mb * 1024\n tb = gb * 1024\n\n assert hs(-1) == '0'\n assert hs(0) == '0'\n assert hs(123) == '123B'\n assert hs(123 * kb + 120) == '123.12KiB'\n assert hs(123 * mb + 120 * kb) == '123.12MiB'\n assert hs(1.5 * gb) == '1.50GiB'\n assert hs(15.2 * gb) == '15.20GiB'\n assert hs(123.12 * gb) == '123GiB'\n assert hs(1.5 * tb) == '1.50TiB'\n assert hs(15.2 * tb) == '15.20TiB'\n assert hs(123.12 * tb) == '123TiB'\n\n\ndef test_human_time():\n ht = rbutils.human_time\n\n assert ht(0) == '0ms'\n assert ht(0.5) == '500ms'\n assert ht(1) == '1s'\n assert ht(55) == '55s'\n assert ht(120) == '2m00s'\n assert ht(125) == '2m05s'\n assert ht(150) == '2m30s'\n assert ht(180) == '3m00s'\n assert ht(86399) == '23h59m59s'\n assert ht(86400) == '1d00h00m00s'\n assert ht(86400 * 100500) == '100500d00h00m00s'\n\n\ndef test_human_speed():\n hs = rbutils.human_speed\n\n assert hs(-10) == '0'\n assert hs(0) == '0'\n assert hs(1) == '0'\n assert hs(2) == '0.02kbps'\n assert hs(123 / 8) == '0.12kbps'\n assert hs(123123 / 8) == '123kbps'\n assert hs(123123123 / 8) == '123Mbps'\n\n\ndef test_slotted_dict():\n md = rbutils.SlottedDict({'some': {'sub': 'key'}})\n assert md['some']['sub'] == md.some.sub == 'key'\n\n md.some.sub = 'yek'\n\n assert md['some']['sub'] == md.some.sub == 'yek'\n\n assert md == {'some': {'sub': 'yek'}}\n assert md.some == md['some'] == {'sub': 'yek'}\n\n\ndef test_getaddrinfo_g():\n import _socket\n meth = rbutils.getaddrinfo_g\n\n any_ip = ['1.2.3.4']\n fake_ip = ['']\n\n def _res(hostname, port):\n assert port == 0\n if hostname == 'any.yandex.ru':\n if isinstance(any_ip[0], Exception):\n raise any_ip[0]\n return [['', '', '', '', [any_ip[0]]]]\n\n assert hostname.startswith('thost')\n return [['', '', '', '', [fake_ip[0]]]]\n\n # In other ips resolving should be as usual\n fake_ip[0] = '1.2.3.5'\n assert meth('thost1', 0, getaddrinfo=_res) == [['', '', '', '', [fake_ip[0]]]]\n\n # If we will return any.yandex.ru ip -- it should raise gaierror\n fake_ip[0] = '1.2.3.4'\n with pytest.raises(_socket.gaierror):\n meth('thost2', 0, getaddrinfo=_res)\n\n # Also any.yandex.ru could be not found at all\n any_ip[0] = _socket.gaierror(_socket.EAI_NONAME, 'Host was not found')\n\n # We also check anyips caching here\n with pytest.raises(_socket.gaierror):\n meth('thost3', 0, getaddrinfo=_res)\n\n del rbutils.getaddrinfo.anyips\n assert meth('thost4', 0, getaddrinfo=_res) == [['', '', '', '', [fake_ip[0]]]]\n\n # Also we could be unable to resolve any.yandex.ru with some different error\n class PEX(Exception):\n pass\n\n del rbutils.getaddrinfo.anyips\n any_ip[0] = PEX()\n with pytest.raises(PEX):\n meth('thost5', 0, getaddrinfo=_res)\n\n any_ip[0] = _socket.gaierror(100500, 'aaa')\n try:\n meth('thost6', 0, getaddrinfo=_res)\n except _socket.gaierror as ex:\n if ex.errno != 100500:\n raise\n\n\ndef test_gethostbyaddr_g():\n meth = rbutils.gethostbyaddr_g\n\n def _res(*args, **kwargs):\n assert args == (1, )\n assert kwargs == {'a': 2}\n return 3\n\n assert meth(1, a=2, gethostbyaddr=_res) == 3\n\n class MyEx(Exception):\n pass\n\n def _res():\n raise MyEx('errr')\n\n with pytest.raises(MyEx):\n meth(gethostbyaddr=_res)\n\n def _res():\n return gevent.getcurrent(), threading.currentThread()\n\n grn1, thr1 = _res()\n grn2, thr2 = meth(gethostbyaddr=_res)\n\n assert grn1 != grn2\n assert thr1 != thr2\n assert thr2.name == 'gethostbyaddr'\n\n for i in range(10):\n if thr2.is_alive():\n time.sleep(0.1)\n\n assert not thr2.is_alive()\n\n\ndef test_fastbonize_hostname():\n meth = rbutils.fastbonize_ip\n meth = rbutils.fastbonize_hostname\n import _socket\n\n fake_ip = '1.2.3.4'\n fake_getaddrinfo = {}\n\n def _getaddrinfo_fake(hostname, a, b, c, getaddrinfo):\n assert getaddrinfo == _socket.getaddrinfo\n value = fake_getaddrinfo[hostname]\n if isinstance(value, Exception):\n raise value\n return value\n\n noaddr_ex = _socket.gaierror(_socket.EAI_NONAME, 'Name or service not known')\n noaddr_ex2 = _socket.gaierror(_socket.EAI_NODATA, 'No data')\n\n try:\n getaddrinfo_g = pyjack.replace_all_refs(rbutils.getaddrinfo_g, _getaddrinfo_fake)\n\n log = logging.getLogger('fastbonize_ip')\n\n # CASE1: got thost.yandex.ru\n # fastbone.thost.yandex.ru not available\n # fb-thost.yandex.ru not available\n # thost.fb.yandex.ru not available\n log.debug('CASE1')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['fb-thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['thost.fb.yandex.ru'] = noaddr_ex\n assert meth('thost.yandex.ru', log) == (None, None)\n\n # CASE2: got thost.yandex.ru\n # fastbone.thost.yandex.ru not available (v2)\n # fb-thost.yandex.ru not available (v2)\n # thost.fb.yandex.ru not available\n log.debug('CASE2')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = noaddr_ex2\n fake_getaddrinfo['fb-thost.yandex.ru'] = noaddr_ex2\n fake_getaddrinfo['thost.fb.yandex.ru'] = noaddr_ex\n assert meth('thost.yandex.ru', log) == (None, None)\n\n # CASE3: got thost.yandex.ru\n # fastbone.thost.yandex.ru not available\n # fb-thost.yandex.ru not available\n # thost.fb.yandex.ru available\n log.debug('CASE3')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['fb-thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['thost.fb.yandex.ru'] = [\n (_socket.AF_INET, '', '', '', ['1']),\n (_socket.AF_INET6, '', '', '', ['2'])\n ]\n assert meth('thost.yandex.ru', log) == ('thost.fb.yandex.ru', [['1'], ['2']])\n\n # CASE4: got thost.yandex.ru\n # fb-thost.yandex.ru available\n # thost.fb.yandex.ru available\n log.debug('CASE4')\n fake_getaddrinfo['fb-thost.yandex.ru'] = fake_getaddrinfo['thost.fb.yandex.ru']\n assert meth('thost.yandex.ru', log) == ('fb-thost.yandex.ru', [['1'], ['2']])\n\n # CASE5: got thost.yandex.ru\n # fastbone.thost.yandex.ru available\n # fb-thost.yandex.ru available\n # thost.fb.yandex.ru available\n log.debug('CASE5')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = fake_getaddrinfo['thost.fb.yandex.ru']\n assert meth('thost.yandex.ru', log) == ('fb-thost.yandex.ru', [['1'], ['2']])\n\n # CASE6: got thost.yandex.ru\n # fastbone.thost.yandex.ru available\n # fb-thost.yandex.ru not available\n # thost.fb.yandex.ru available\n log.debug('CASE6')\n fake_getaddrinfo['fb-thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['thost.yandex.ru'] = noaddr_ex\n assert meth('thost.yandex.ru', log) == ('fastbone.thost.yandex.ru', [['1'], ['2']])\n\n # CASE7: got thost\n # fastbone.thost not available\n # fb-thost not available\n # thost.fb.yandex.ru available\n log.debug('CASE7')\n fake_getaddrinfo.pop('fb-thost.yandex.ru')\n fake_getaddrinfo['fastbone.thost'] = noaddr_ex\n fake_getaddrinfo['fb-thost'] = noaddr_ex\n assert meth('thost', log) == ('thost.fb.yandex.ru', [['1'], ['2']])\n\n # CASE8: got thost.yandex.net\n # fastbone.thost.yandex.net not available\n # fb-thost.yandex.net not available\n log.debug('CASE8')\n fake_getaddrinfo.clear()\n fake_getaddrinfo['fastbone.thost.yandex.net'] = noaddr_ex\n fake_getaddrinfo['fb-thost.yandex.net'] = noaddr_ex\n assert meth('thost.yandex.net', log) == (None, None)\n\n # CASE9: got thost.yandex.net\n # getaddrinfo raised weird exception\n log.debug('CASE9')\n\n class FEX(Exception):\n pass\n\n fake_getaddrinfo['fb-thost.yandex.net'] = _socket.gaierror(100500, 'ff')\n try:\n meth('thost.yandex.net', log)\n except _socket.gaierror as ex:\n if ex.errno != 100500:\n raise\n\n fake_getaddrinfo['fb-thost.yandex.net'] = FEX()\n with pytest.raises(FEX):\n meth('thost.yandex.net', log)\n\n # CASE10: got thost.yandex.ru (SKYDEV-459)\n # fastbone.thost.yandex.ru available, but result is the same as thost.yandex.ru\n # fb-thost.yandex.ru not available\n # thost.fb.yandex.ru not available\n log.debug('CASE10')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = [\n (_socket.AF_INET, '', '', '', [fake_ip]),\n (_socket.AF_INET6, '', '', '', ['2'])\n ]\n fake_getaddrinfo['fb-thost.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['thost.fb.yandex.ru'] = noaddr_ex\n fake_getaddrinfo['thost.yandex.ru'] = [\n (_socket.AF_INET, '', '', '', [fake_ip])\n ]\n assert meth('thost.yandex.ru', log) == (None, None)\n\n # CASE11: same as CASE11, but we have only ipv6\n log.debug('CASE11')\n fake_getaddrinfo['fastbone.thost.yandex.ru'] = [\n (_socket.AF_INET6, '', '', '', [fake_ip])\n ]\n assert meth('thost.yandex.ru', log) == (None, None)\n\n finally:\n pyjack.replace_all_refs(rbutils.getaddrinfo_g, getaddrinfo_g)\n\n\ndef test_fastbonize_ip():\n meth = rbutils.fastbonize_ip\n import _socket\n\n fake_ip = '1.2.3.4'\n fake_hostname = ['']\n\n def _gethostbyaddr_fake(ip, gethostbyaddr):\n assert ip == fake_ip\n if isinstance(fake_hostname[0], Exception):\n raise fake_hostname[0]\n return fake_hostname[0], 'fake'\n\n nohost_ex = _socket.herror(1, 'Unknown host')\n\n try:\n gethostbyaddr_g = pyjack.replace_all_refs(rbutils.gethostbyaddr_g, _gethostbyaddr_fake)\n\n assert rbutils.gethostbyaddr_g('1.2.3.4', _socket.gethostbyaddr) == ('', 'fake')\n\n log = logging.getLogger('fastbonize_ip')\n\n # CASE1: unable to grab ip hostname\n log.debug('CASE1')\n fake_hostname[0] = nohost_ex\n assert meth(fake_ip, log) == (None, None, None)\n\n finally:\n pyjack.replace_all_refs(rbutils.gethostbyaddr_g, gethostbyaddr_g)\n\n\ndef test_path(tmpdir):\n p = rbutils.Path(tmpdir.join('test1'))\n p.ensure(dir=1).chmod(0o700)\n assert p.stat().mode & 0o700 == 0o700\n\n p = rbutils.Path(tmpdir.join('test2'))\n p.mksymlinkto('test3')\n assert p.readlink() == 'test3'\n\n p2 = rbutils.Path(tmpdir.join('test2'))\n p2.ensure(dir=1, nolink=1)\n assert p2.check(link=0)\n p2.ensure(dir=1, nolink=1)\n\n p = rbutils.Path(tmpdir.join('file1').ensure(file=1))\n with pytest.raises(py.error.EEXIST):\n p.ensure(dir=1)\n p.ensure(dir=1, force=1)\n assert p.check(dir=1, file=0)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/test_rbtorrent_utils.py","file_name":"test_rbtorrent_utils.py","file_ext":"py","file_size_in_byte":11345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1268991926","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 02:44:36 2019\n\n@author: Publico\n\"\"\"\n\nclass Milista:\n\n def __init__(self, contenido):\n self.contenido = contenido\n \n def duplicar(self):\n y=[]\n for elemento in self.contenido:\n y.append(2 * elemento)\n return y\n \nx = Milista([1, 2, 3])\nprint(x)\nprint (x.duplicar())\n ","repo_name":"fcaprile/instrumentacion","sub_path":"codigos de aprendizaje/usar clases y objetos/ejemplo mylista.py","file_name":"ejemplo mylista.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42702257622","text":"from __future__ import absolute_import\n\nimport platform\nimport sys\nfrom distutils.util import get_platform\n\n\n# This is the same as distutils uses.\nbasic_platform_spec = '%s-%s' % (get_platform(), sys.version[:3])\n\n# extended_platform_spec is a little more platform specific as it allows us to isolate\n# the different versions of Fedora we use.\n_, _, _, _, machine, _ = platform.uname()\nif sys.platform == 'darwin':\n osx_v, _, _ = platform.mac_ver()\n extended_platform_spec = 'macosx-%s-%s-%s' % ('.'.join(osx_v.split('.')[:2]), machine, sys.version[:3])\nelif sys.platform.startswith('linux'):\n name, version, nick = platform.linux_distribution()\n extended_platform_spec = '%s-%s-%s-%s' % (name.lower(), version, machine, sys.version[:3])\nelse:\n extended_platform_spec = basic_platform_spec\n\n","repo_name":"mikeboers/sitetools","sub_path":"sitetools/platform.py","file_name":"platform.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18989888793","text":"#!/usr/bin/python \r\n# -- coding: utf-8 --\r\nimport sys \r\nreload(sys) \r\nsys.setdefaultencoding('utf8')\r\nimport datetime\r\nimport requests\r\nimport json\r\nimport time\r\ndef sleeptime(hour,min,sec):\r\n return hour*3600 + min*60 + sec;\r\nsecond = sleeptime(0,30,0);\r\ndef get_access_token():\r\n \"\"\"\r\n 获取微信全局接口的凭证(默认有效期俩个小时)\r\n 如果不每天请求次数过多, 通过设置缓存即可\r\n \"\"\"\r\n result = requests.get(\r\n url=\"https://api.weixin.qq.com/cgi-bin/token\",\r\n params={\r\n \"grant_type\": \"client_credential\",\r\n \"appid\": \"输入你的appid\",\r\n \"secret\": \"输入你的appidsecret\",\r\n }\r\n ).json()\r\n\r\n if result.get(\"access_token\"):\r\n access_token = result.get('access_token')\r\n else:\r\n access_token = None\r\n return access_token\r\n\r\ndef sendmsg(openid,msg):\r\n\r\n access_token = get_access_token()\r\n\r\n body = {\r\n \"touser\": openid,\r\n \"template_id\":\"输入你的模板id\",\r\n \"url\":\"www.caiyunapp.com/map\",\r\n \"data\": {\r\n \"weather\":{\r\n \"value\": msg,\r\n \"color\":\"#173177\"\r\n }\r\n }\r\n }\r\n response = requests.post(\r\n url=\"https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=\"+access_token,\r\n \r\n data=bytes(json.dumps(body, ensure_ascii=False))\r\n )\r\n # 这里可根据回执code进行判定是否发送成功(也可以根据code根据错误信息)\r\n result = response.json()\r\n print(result)\r\nwhile True: \r\n json_text = requests.get(str.format(\"https://api.caiyunapp.com/v2/输入你的彩云天气api_token/输入经纬度,用英文逗号分隔/minutely.json\")).content\r\n self_realtime_data = json.loads(json_text)\r\n status=self_realtime_data['status']\r\n json_text=requests.get(str.format(\"https://www.tianqiapi.com/api/?version=v6&appid=输入你的tianqiqpi控制台显示的appid&appsecret=输入你的tianqiapi控制台显示的appsecret&city=输入城市名\")).content\r\n data = json.loads(json_text)\r\n cityid=data['cityid']\r\n city=data['city']\r\n date=data['date']\r\n utime=data['update_time']\r\n week=data['week']\r\n wea=data['wea']\r\n h_tem=data['tem1']\r\n l_tem=data['tem2']\r\n n_tem=data['tem']\r\n win=data['win']\r\n win_speed=data['win_speed']\r\n win_meter=data['win_meter']\r\n hum=data['humidity']\r\n visit=data['visibility']\r\n pressure=data['pressure']\r\n air=data['air']\r\n pm25=data['air_pm25']\r\n air_level=data['air_level']\r\n air_tips=data['air_tips']\r\n alarm=data['alarm']['alarm_content']\r\n alarm_type=data['alarm']['alarm_type']\r\n alarm_level=data['alarm']['alarm_level']\r\n if status=='failed':\r\n send_data=city+\"天气预报:\\n\"+\"\\n天气:\"+wea+\" \"+n_tem+\"℃\\n最高/低温:\"+h_tem+\"℃ /\"+l_tem+\"℃\\n湿度:\"+hum+\"\\n\"+win+\" \"+win_speed+\" \"+win_meter+\"\\n能见度:\"+visit+\"\\n空气质量:\"+air+\" \"+air_level+\" \"+air_tips+\"\\npm2.5:\"+pm25+\"\\n预警消息:\"+alarm+\"\\n更新时间:\"+date+\" \"+week+\" \"+utime+\"\\n数据来源:中国天气网\\n当您看到这条消息时,彩云天气api已经到达配额上限,请点击详情手动查询降雨预报\"\r\n else: \r\n now_data=self_realtime_data['result']['forecast_keypoint']\r\n nowTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n send_data=\"降雨预报:\\n\"+now_data+\"\\n更新于:\"+nowTime+\"\\n天气:\"+wea+\" \"+n_tem+\"℃\\n最高/低温:\"+h_tem+\"℃ /\"+l_tem+\"℃\\n湿度:\"+hum+\"\\n\"+win+\" \"+win_speed+\" \"+win_meter+\"\\n能见度:\"+visit+\"\\n空气质量:\"+air+\" \"+air_level+\" \"+air_tips+\"\\npm2.5:\"+pm25+\"\\n预警消息:\"+alarm+\"\\n数据来源:彩云科技、中国天气网\"\r\n if __name__ == '__main__':\r\n sendmsg('输入接收信息的微信号',send_data)\r\n time.sleep(second);\r\n\r\n","repo_name":"zzy5156/-werobot-","sub_path":"caiyun.py","file_name":"caiyun.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"23024256802","text":"import os\n\nworldIds = ['0001', '0002', '0006', '0018', '0020']\nsceneIds = ['15-deg-left', '15-deg-right', '30-deg-left', '30-deg-right', 'clone', 'fog',\n 'morning', 'overcast', 'rain', 'sunset']\nworldSizes = [446, 232, 269, 338, 836] # 0-446, including 446\ncategory = ['Misc', 'Building', 'Car', 'GuardRail', 'Pole', 'Road', 'Sky', 'Terrain',\n 'TrafficLight', 'TrafficSign', 'Tree', 'Truck', 'Van', 'Vegetation']\n\n\ndef get_tables(opt, datadir):\n \"\"\"\n Get the mapping from (worldId, sceneId, rgb) to the semantic/instance ID.\n The instance ID is uniquely assigned to each car and van in the dataset.\n :param opt: 'segm' or 'inst'\n :param datadir: the dataset root\n :return:\n \"\"\"\n global_obj_id = 0\n table_inst = {}\n table_segm = {}\n for worldId in worldIds:\n for sceneId in sceneIds:\n with open(os.path.join(datadir, \"vkitti_1.3.1_scenegt\",\n \"%s_%s_scenegt_rgb_encoding.txt\" % (worldId, sceneId)), 'r') as fin:\n first_line = True\n for line in fin:\n if first_line:\n first_line = False\n else:\n name, r, g, b = line.split(' ')\n r, g, b = int(r), int(g), int(b)\n if name.find(':') == -1:\n table_segm[(worldId, sceneId, r, g, b)] = category.index(name)\n table_inst[(worldId, sceneId, r, g, b)] = category.index(name)\n else:\n global_obj_id += 1\n table_segm[(worldId, sceneId, r, g, b)] = category.index(name.split(':')[0])\n table_inst[(worldId, sceneId, r, g, b)] = 5000 * category.index(\n name.split(':')[0]) + global_obj_id\n\n return table_segm if opt == 'segm' else table_inst\n\n\ndef get_lists(opt):\n \"\"\"\n Get the training/testing split for Virtual KITTI.\n :param opt: 'train' or 'test'\n :return:\n \"\"\"\n splitRanges = {'train': [range(0, 356), range(0, 185), range(69, 270), range(0, 270), range(167, 837)],\n 'test': [range(356, 447), range(185, 233), range(0, 69), range(270, 339), range(0, 167)],\n 'all': [range(0, 447), range(0, 233), range(0, 270), range(0, 339), range(0, 837)]}\n _list = []\n for worldId in worldIds:\n for sceneId in sceneIds:\n for imgId in splitRanges[opt][worldIds.index(worldId)]:\n _list += ['%s/%s/%05d.png' % (worldId, sceneId, imgId)]\n return _list\n\n","repo_name":"ysymyth/3D-SDN","sub_path":"datasets/vkitti_utils.py","file_name":"vkitti_utils.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":264,"dataset":"github-code","pt":"3"} +{"seq_id":"17346411935","text":"import requests\n\nfrom flask import jsonify, Response\nfrom flask_restful import Resource\n\nfrom token import StrideJWT\n\n\nclass Lifecycle(Resource):\n def post(self, webhook_type=None):\n s_jwt = StrideJWT()\n context = s_jwt.stride_context\n url = 'https://api.atlassian.com/site/{cloudId}/conversation/{conversationId}/message'.format(**context)\n headers = {\n 'authorization': 'Bearer {}'.format(context['access_token']),\n 'content-type': 'text/plain'\n }\n data = 'I\\'ve been deployed'\n\n r = requests.post(url, headers=headers, data=data)\n","repo_name":"raajitr/rubick-stride-bot","sub_path":"api/lifecycle.py","file_name":"lifecycle.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38658205767","text":"from PyQt6 import QtWidgets, QtGui, QtCore\nimport sys\n\napp = QtWidgets.QApplication(sys.argv)\nForm = QtWidgets.QWidget()\nForm.setWindowTitle(\"About Qlabel\")\nform_width = 800\nform_height = 600\nForm.resize(form_width, form_height)\n# 宣告Qlabel物件\nlabel1 = QtWidgets.QLabel(Form)\n# 设定标签text\nlabel1.setText(\"first Qlabel\")\n\nlabel2 = QtWidgets.QLabel(Form)\nlabel2.setText(\"first Qlabel move(50, 50)\")\n# [Qlabel物件].move([x座标]: num, [y座标]: num)\nlabel2.move(50, 50)\n\nlabel3 = QtWidgets.QLabel(Form)\nlabel3.setText(\"first Qlabel setGeometry()\")\n# [Qlabel物件].setGeometry([x座标]: num, [y座标]: num, [物件x大小限制]: num, [物件y大小限制]: num)\nlabel3.setGeometry(50, 80, 100, 100)\n\nlabel4 = QtWidgets.QLabel(Form)\nlabel4.setText(\"label4 is toooooooooooooooooooooooooooooooooooooooooooo long\")\n# 设定是否可以换行\nlabel4.setWordWrap(True)\n# 设定对齐方式\nlabel4.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)\n\nlabel5 = QtWidgets.QLabel(Form)\n# 字体设定\n## 宣告字体物件\nfont1 = QtGui.QFont()\n## 设定字体\nfont1.setFamily(\"System\")\n## 设定字体大小\nfont1.setPointSize(20)\n## 粗体\nfont1.setBold(True)\n## 斜体\nfont1.setItalic(True)\n## 删除线\nfont1.setStrikeOut(True)\n## 底线\nfont1.setUnderline(True)\n# 设定字体样式\nlabel5.setFont(font1)\n\nForm.show()\nsys.exit(app.exec())\n","repo_name":"ChiaYuanChang/Python-GUI-Note","sub_path":"PyQt6/02_Qt_Qlabel.py","file_name":"02_Qt_Qlabel.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31324875883","text":"# Owner(s): [\"module: nvfuser\"]\n\nimport unittest\nimport warnings\n\nimport torch\nimport torch._dynamo as torchdynamo\nfrom torch.testing import make_tensor\nfrom torch.testing._internal.common_utils import (\n run_tests,\n skipIfTorchDynamo,\n TEST_WITH_ROCM,\n TestCase,\n IS_WINDOWS,\n)\nfrom torch.testing._internal.jit_utils import RUN_CUDA\n\nRUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM\n\n\ndef is_pre_volta():\n if not RUN_NVFUSER:\n return False\n prop = torch.cuda.get_device_properties(torch.cuda.current_device())\n return prop.major < 7\n\n\n@skipIfTorchDynamo(\"Not a suitable test for TorchDynamo\")\n@unittest.skipIf(IS_WINDOWS, \"TorchDynamo is not supported on Windows\")\n@unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n@unittest.skipIf(is_pre_volta(), \"Only supported on Volta and newer devices.\")\nclass TestNvFuserDynamo(TestCase):\n def test_basic(self):\n input1 = make_tensor((2, 4, 8), device=\"cuda\", dtype=torch.float32)\n input2 = make_tensor((2, 4, 8), device=\"cuda\", dtype=torch.float32)\n\n @torchdynamo.optimize(\"nvprims_nvfuser\")\n def func(a, b):\n return a.sin() + b.cos()\n\n # No warnings and no errors\n with warnings.catch_warnings(record=True) as w:\n nvfuser_result = func(input1, input2)\n self.assertEqual(len(w), 0)\n eager_result = func.__wrapped__(input1, input2)\n self.assertEqual(eager_result, nvfuser_result)\n\n\nif __name__ == \"__main__\":\n run_tests()\n","repo_name":"USTCKAY/pytorch","sub_path":"test/test_nvfuser_dynamo.py","file_name":"test_nvfuser_dynamo.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"37434016358","text":"from keras.layers import Embedding\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import LSTM\nfrom keras.models import Sequential\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.cross_validation import train_test_split\nfrom keras.models import load_model\nimport numpy as np\nimport os\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom sklearn.utils import shuffle\n\ndef to_vector(sentence_list):\n MAX_TOKEN_NUM_WORDS = 2000\n MAX_SEQUENCES_LEN = 1000\n\n tokenizer = Tokenizer(num_words=MAX_TOKEN_NUM_WORDS)\n tokenizer.fit_on_texts(sentence_list)\n sequences = tokenizer.texts_to_sequences(sentence_list)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n\n sentence_vector = pad_sequences(sequences, maxlen=MAX_SEQUENCES_LEN)\n return sentence_vector\n\ndef clean_data(data_pandas):\n data_pandas.loc[data_pandas.label == 4, 'label'] = 1\n X = np.array(data_pandas['sentence'])\n y = np.array(data_pandas['label'])\n return X, y\n\n\ndef read_data(frac=1.0):\n FILE_PATH = 'data'\n FILE_NAME = 'emotion_data.csv'\n\n data_pandas = pd.read_csv(os.path.join(FILE_PATH, FILE_NAME), \\\n names=['label','time_1','time_2','if_query','if_special','sentence'], \\\n usecols=['label', 'sentence'])\n data_pandas = shuffle(data_pandas)\n data_pandas = data_pandas.sample(frac=frac)\n return data_pandas \n\ndef split_data(X, y, *, train_size=0.99, val_size=0.008):\n test_size = 1 - train_size - val_size\n X_train, X_val_test, y_train, y_val_test = train_test_split(X, y, train_size=train_size)\n X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, train_size=val_size/(val_size + test_size))\n return X_train, X_val, X_test, y_train, y_val, y_test\n\n\ndef build_model(x_train, y_train, x_test, y_test):\n MAX_FEATURES = 2000\n BATCH_SIZE = 32\n EPOCHS = 16\n print('Build model...')\n model = Sequential()\n model.add(Embedding(MAX_FEATURES, 128))\n model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\n model.add(Dense(1, activation='sigmoid'))\n\n # try using different optimizers and different optimizer configs\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n print('Train...')\n model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=(x_test, y_test))\n score, acc = model.evaluate(x_test, y_test,\n batch_size=BATCH_SIZE)\n print('Test score:', score)\n print('Test accuracy:', acc)\n return model\n\n\ndef predict(x):\n model = load_model('model/test_model')\n y_hat = model.predict(x).reshape(x.shape[0])\n y_hat_binary = np.array(list(map(lambda x: 1 if (x > 0.5) else 0, y_hat)))\n return y_hat_binary\n\ndef save_test_result(X_test, y_test, y_hat):\n test_data = pd.DataFrame({'x_test': X_test, 'y_test': y_test, 'y_hat': y_hat})\n test_data.to_csv('test/test_result', index=False)\n\ndef calculate_acc():\n result_df = pd.read_csv('test/test_result')\n result_df['if_equals'] = result_df.y_test == result_df.y_hat\n equals_num = result_df[result_df.if_equals == True].count(numeric_only=True)['if_equals']\n all_num = result_df.count(numeric_only=True)['if_equals']\n return equals_num / all_num\n\n\ndef main():\n # lable:1 = positive label:0 = negavite\n data_df = read_data(0.05)\n X, y = clean_data(data_df)\n X_train_text, X_val_text, X_test_text, y_train, y_val, y_test = split_data(X, y)\n X_train, X_val, X_test = to_vector(X_train_text), to_vector(X_val_text), to_vector(X_test_text)\n model = build_model(X_train, y_train, X_val, y_val)\n model.save('model/test_model')\n\n y_hat = predict(X_test)\n save_test_result(X_test_text, y_test, y_hat)\n rate = calculate_acc()\n print(rate)\n\n\nif __name__ == '__main__':\n main()","repo_name":"PalanQu/palanwork","sub_path":"algorithm/deeplearning/keras/emotion_classification.py","file_name":"emotion_classification.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36157323452","text":"#%%\nimport numpy as np\nimport pandas as pd\nfrom heartDBMD import *\nfrom heartMLMD import *\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.manifold import TSNE\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport matplotlib.pyplot as plt\n\nimport warnings; warnings.filterwarnings(\"ignore\")\n\n\nrsql = \"select * from heart\"\ndf = readDb(\"*\", \"*\", \"*\", \"*\", rsql)\nprint(f\"original df shape: {df.shape}\")\n\ndf.columns = df.columns.str.lower()#컬럼명 변경\ndf.rename(columns={\"heartdiseaseorattack\": \"target\"}, inplace=True)\n\nprint(\"target distribution:\\n\", df[\"target\"].value_counts() / len(df))#target distribution\n\nX = df.drop([\"target\"], axis=1)\ny = df[\"target\"]\n\n\n## oversampling\nsm = SMOTE()\nX_res, y_res = sm.fit_resample(X, y)\nresDf = pd.concat([X_res, y_res], axis=1)\nprint(\"\\nresample df shape\", resDf.shape)\nprint(\"resampled target distribution:\\n,\", resDf[\"target\"].value_counts() / len(resDf))\n\nskf = StratifiedKFold(n_splits=40)\nfor tridx, teidx in skf.split(X, y):\n x_train, x_test = X.iloc[tridx], X.iloc[teidx]\n y_train, y_test = y.iloc[tridx], y.iloc[teidx]\n\nprint(\"\\nstratfiedKFold x_test shape\", x_test.shape)\nprint(\"stratfiedKFold y_test shape\", y_test.shape)\n\nplotTsne(x_test, y_test, 10, 10)\n\n\n##UnderSampling\ndf = df.sample(frac=1)\n\ndesDf = df[df[\"target\"]==1]\nnondesDf = df[df[\"target\"]==0][:len(desDf)]\n\nnewDf = pd.concat([desDf, nondesDf], axis=0)\nprint(\"newDf shape:\", newDf.shape)\nprint(\"newDf target distribution:\\n\", newDf[\"target\"].value_counts() / len(newDf))\n\nnewX = newDf.drop([\"target\"], axis=1)\nnewy = newDf[\"target\"]\n\nplotTsne(newX, newy, 10, 10)\n\n\n##scaled undersampling\nmms = MinMaxScaler()\n\nscale = [\"bmi\"]\nnewX[scale] = mms.fit_transform(newX[scale])\n\nplotTsne(newX, newy, 10, 10)\n","repo_name":"RyuMinSu/HeartDiseasePrediction","sub_path":"heartos.py","file_name":"heartos.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23551854159","text":"\"\"\"\nDjango settings for web project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport sys\nfrom time import gmtime, strftime\n\nimport string\nimport random\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Add path of klee-web python code to allow importing of worker code\nsys.path.insert(0, os.path.join(BASE_DIR, \"..\"))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DEVELOPMENT\") is not None\n\n# If we're in debug mode, generate a random key\n# so that we don't need to provide one\n\nkey = ''\nif DEBUG:\n # Long but silences flake8\n k = [random.SystemRandom().choice(string.letters + string.digits)\n for _ in range(50)]\n key = ''.join(k)\n\nSECRET_KEY = key if DEBUG else os.environ.get(\"DJANGO_SECRET_KEY\")\n\nALLOWED_HOSTS = ['*']\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # Paths for Django to find templates\n 'DIRS': [\n os.path.join(BASE_DIR, 'frontend/templates'),\n os.path.join(BASE_DIR, 'control_panel/templates')\n ],\n # Controls whether Django checks for templates inside apps\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n \"frontend.context_processors.global_vars\"\n ],\n }\n }\n]\n\n# Application definition\nINSTALLED_APPS = (\n 'frontend',\n 'control_panel',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'bootstrapform',\n 'rest_framework',\n 'rest_framework_nested',\n 'oauth2_provider',\n 'social.apps.django_app.default',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.environ[\"DB_NAME\"],\n 'USER': os.environ[\"DB_USER\"],\n 'PASSWORD': os.environ[\"DB_PASSWORD\"],\n 'HOST': os.environ[\"DB_HOST\"],\n 'PORT': os.environ[\"DB_PORT\"],\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = strftime(\"%Z\", gmtime())\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nAUTHENTICATION_BACKENDS = (\n 'social.backends.facebook.FacebookOAuth2',\n 'social.backends.google.GoogleOAuth2',\n 'social.backends.github.GithubOAuth2',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nGEOIP_PATH = os.path.join(BASE_DIR, 'geoip')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nAUTH_USER_MODEL = 'frontend.User'\n\n\n# Keys and secrets used to authenticate with Google, GitHub and Facebook\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('GOOGLE_OAUTH_KEY') or ''\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('GOOGLE_OAUTH_SECRET') or ''\nSOCIAL_AUTH_GITHUB_KEY = os.environ.get('GITHUB_OAUTH_KEY') or ''\nSOCIAL_AUTH_GITHUB_SECRET = os.environ.get('GITHUB_OAUTH_SECRET') or ''\nSOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FB_OAUTH_KEY') or ''\nSOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FB_OAUTH_SECRET') or ''\n# Facebook login automatically redirects to /accounts/profile,\n# which does not exist, so we enforce redirection to /\nLOGIN_REDIRECT_URL = '/'\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n}\n","repo_name":"chubbymaggie/klee-web","sub_path":"src/klee_web/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"74740564534","text":"import gzip # pragma: no cover\nimport bz2 # pragma: no cover\nimport lzma # pragma: no cover\n\n\nclass RawFile(object): # pragma: no cover\n def __init__(self, filename):\n self.filename = filename\n if filename.endswith(\".gz\"):\n self.handle = gzip.open(filename, \"rt\")\n elif filename.endswith(\"bz2\"):\n self.handle = bz2.open(filename, \"rt\")\n elif filename.endswith(\"xz\"):\n self.handle = lzma.open(filename, \"rt\")\n else:\n self.handle = open(filename, \"r\")\n\n def __enter__(self):\n return self.handle\n\n def __exit__(self, dtype, value, traceback):\n self.handle.close()\n","repo_name":"LinkageIO/Minus80","sub_path":"minus80/RawFile.py","file_name":"RawFile.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"38965602754","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom selenium import webdriver\n\n#获取电影详情url的函数\ndef getdetailurl(url, cookie):\n #模拟浏览器,并用cookie实现登录\n headers = {'cookie':cookie,'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.49'}\n #获取html文件\n response = requests.get(url,headers = headers)\n #使用beautiful soup解析html文件\n soup = BeautifulSoup(response.text,\"html.parser\")\n #所有的电影标签\n items = soup.findAll(\"div\",class_ = \"hd\")\n res = []#存储结果的列表\n for item in items:\n urls = item.a.get('href')#获得电影的url\n #print(urls)\n temp = getdetail(urls,cookie)#获取电影的详细信息\n res.append(temp)\n for i in res:\n print(i)\n return res\n\ndef getdetail(url,cookie):\n # 模拟浏览器,并用cookie实现登录\n headers = {'cookie': cookie,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.49'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.text, \"html.parser\")\n tmp = []\n #获取电影的名称\n name = soup.find(\"span\",property = \"v:itemreviewed\")\n tmp.append(name.string)\n #获取电影的导演、编剧、主演\n creator = soup.find(\"div\", id = \"info\").findAll(\"span\",class_ = \"attrs\")\n for i in creator :\n ts = ''\n t = i.findAll(\"a\")\n for s in t :\n ts = ts + s.string + \"/\"\n ts=ts[ : -1]\n tmp.append(ts)\n #获取电影的简介\n discription = soup.find(\"div\",id = \"link-report\").find(\"span\",property = \"v:summary\").text\n tmp.append(discription)\n #获得电影的打分\n score = soup.find(\"strong\",class_ = \"ll rating_num\").text\n tmp.append(score)\n #获得电影的获奖情况\n awards = soup.findAll(\"ul\" , class_ = \"award\")\n award_string = ''\n for award in awards:\n award_string = award_string + award.li.a.text\n award_string += ' '\n award_string = award_string + award.findAll(\"li\")[1].text\n award_string += '/'\n award_string = award_string[ : -1]\n tmp.append(award_string)\n return tmp\n\ndef main():\n cookiestr = login ()#调用登录的函数\n res = []#存储结果\n while (True):\n #输入需要爬取的页数\n s = input(\"请输入需要获取的页数(1-10):\")\n if (s not in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']):#输入不合法\n print('输入不合法!')\n else :\n num = int(s) - 1\n for i in range(num + 1):\n url = \"https://movie.douban.com/top250?start=\" + str(i * 25) + \"&filter=\"#具体的url\n restmp = getdetailurl(url,cookiestr)\n for item in restmp :\n res.append(item)\n break\n with open(\"res.csv\", \"w\", newline=\"\", encoding='utf-8-sig') as datacsv:\n # dialect为打开csv文件的方式,默认是excel,delimiter=\"\\t\"参数指写入的时候的分隔符\n csvwriter = csv.writer(datacsv, dialect=(\"excel\"))\n # 把数据写入csv文件\n csvwriter.writerow([\"影片名称\", \"导演\", \"编剧\", \"主演\", \"简介\", \"评分\", \"获奖情况\"])\n for i in res:\n csvwriter.writerow(i)\n\ndef login ():\n driver = webdriver.Chrome('E:\\chromedriver_win32\\chromedriver.exe')\n driver.get('https://www.douban.com/')#打开豆瓣的主页\n iframe = driver.find_element_by_tag_name(\"iframe\")#找到iframe\n driver.switch_to.frame(iframe)\n #填入手机号\n username = input(\"请输入手机号:\")\n driver.find_element_by_name('phone').send_keys(username)\n #点击获取验证码\n driver.find_element_by_xpath(\"//div[@class = 'account-form-field-code']/a\").click()\n #输入验证码\n password = input(\"请输入验证码:\")\n driver.find_element_by_id('code').send_keys(password)\n #登录\n driver.find_element_by_xpath(\"//div[@class = 'account-form-field-submit ']/a\").click()\n #获取cookie\n cookies = driver.get_cookies()\n print(cookies)\n #转换cookie的格式\n cookie = [item[\"name\"] + \"=\" + item[\"value\"] for item in cookies]\n cookiestr = '; '.join(item for item in cookie)\n return cookiestr\n\nif __name__ == '__main__':\n main()","repo_name":"lt974982407/Course-in-SJTU","sub_path":"IS303 数据挖掘/hw1 爬虫/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6436107855","text":"import numpy as np\nimport cv2 as cv\nimport os\nfrom skimage.measure import find_contours\n\n\nimg_dir = \"AgateDataToContour\"\nimages_list = []\nwith os.scandir(img_dir) as entries:\n for entry in entries:\n if entry.name[-4:] == \".png\":\n images_list.append(entry.path)\nimages_list.sort()\n\nimages = []\nfor entry in images_list:\n im = cv.imread(entry, cv.IMREAD_UNCHANGED)\n images.append(im.copy())\nimages = np.asarray(images)\nmatrix = np.load(\"Outer.npy\")\n\nnum = 9\ncurrent_image = images[num]\ncurrent_image_copy = images[num]\n\ndef on_trackbar(val):\n val *= np.max((int(np.amax(matrix[num * 20 + 20])), 1)) / 100\n bcontours = find_contours(matrix[num * 20 + 20], val)\n global current_image_copy\n if len(bcontours) == 1:\n bcontours = np.roll(bcontours, 1, axis=2).astype(np.int32)\n current_image_copy = current_image.copy()\n cv.drawContours(current_image_copy, bcontours, 0, (0, 0, 255), 1)\n\n\ncv.namedWindow(\"3D AgateEvolver\", cv.WINDOW_GUI_NORMAL)\ncv.createTrackbar(\"Surface value\", \"3D AgateEvolver\", 0, 100, on_trackbar)\nwhile True:\n cv.imshow(\"3D AgateEvolver\", current_image_copy)\n button = cv.waitKey(30)\n if button == 27:\n break\n elif button == 46:\n num += 1\n num %= len(images)\n current_image = images[num]\n current_image_copy = images[num]\n elif button == 44:\n num -= 1\n num %= len(images)\n current_image = images[num]\n current_image_copy = images[num]\n","repo_name":"MaksymilianGlowacki/Agate3DEvolver","sub_path":"SCRIPTS/show_results.py","file_name":"show_results.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73881157496","text":"import json\nfrom tqdm import tqdm\n\n\nINP_PATH = '../bin/Apps_for_Android_5.json'\nOUT_PATH = '../bin/c&p_data_all.json'\n\nkey_map = {'reviewerID': 'customerID', 'asin': 'productID', 'overall': 'score'}\n\n\nwith open(INP_PATH, 'r') as f1:\n with open(OUT_PATH, 'w') as f2:\n for l in tqdm(f1.readlines()):\n data = eval(l)\n extract = lambda data: dict((key_map[i], data[i]) for i in key_map)\n temp = json.dumps(extract(data))\n f2.write(temp)\n f2.write('\\n')","repo_name":"CyranoLien/CS5344-RecommenderSystem","sub_path":"src/review_extractor.py","file_name":"review_extractor.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5154349671","text":"#!/usr/bin/python3\nfrom ububot.Motor.MotorPair import MotorIdentifier, MotorPairDirection\nfrom ububot.Sensor.IR import SensorEvent\nfrom ububot.Initializer import UBUBOT\nfrom argparse import ArgumentParser\nfrom time import sleep\n\nif __name__ == '__main__':\n parser = ArgumentParser(description=__doc__)\n parser.add_argument('N', nargs='?', type=int, default=2)\n\n ububot = None\n with UBUBOT(motors=True, sensors=True) as ububot:\n for i in range(parser.parse_args().N * 4):\n ububot.motors.run(MotorIdentifier.BOTH, speed=100)\n ububot.sensors.get_north().wait(SensorEvent.DETECT_START, timeout=1)\n ububot.motors.advance_cm(-5, speed=50)\n sleep(1)\n ububot.motors.turn_sharp(MotorPairDirection.SHARP_RIGHT, speed=100, angle=90)\n sleep(1)\n","repo_name":"rorik/UBUBOT","sub_path":"Raspberry/UBUBOT/v2/python/examples/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71470939256","text":"# import pymongo\nfrom pymongo import MongoClient\nimport logging\nimport pandas as pd\nfrom datetime import timedelta\nfrom math import *\nfrom flask import Flask, request\nimport numpy as np\n\napp = Flask(__name__)\n\nclass trend_results:\n def __init__(self):\n self.top_review_last_week = {}\n self.top_user_last_week = {}\n self.popular_review_last_month = {}\n self.popular_user_last_month = {}\n self.myclient = MongoClient(host=None, port=None)\n self.mydb = self.myclient['realreview']\n pass\n\n def MergeDataframeUpdate(self, category_id, user_id, search_text):\n reviews = self.mydb['reviews']\n likes = self.mydb['likes']\n blockusers = self.mydb['blockusers']\n\n ##### Blockusersdata\n cur = blockusers.find({}, {'blockUserId': 1, 'fromUserId': 1})\n block_users_dict_list = [doc for doc in cur]\n try_list = []\n block_list = [([new['blockUserId'], new['fromUserId']]) for new in block_users_dict_list]\n block_list = [(y) for x in block_list for y in x]\n self.block_list = [str(bloc) for bloc in block_list]\n self.block_list = [str(block) for block in self.block_list if block != user_id]\n self.block_list = list(set(self.block_list))\n\n ##### searchtext part\n # extract fields where review is approved and not deleted, also selecting only required fields\n reviews_filter = {\"isApprove\": 'approved', \"isDeleted\": False, \"categoryId\": {\"$exists\": True}}\n\n\n\n # ##### categoryid part\n # if len(category_id) > 0:\n # category_id_list = [rev['categoryId'] for rev in list(reviews.find({\"isApprove\": 'approved', \"isDeleted\": False, \"categoryId\": {\"$exists\": True}}, {'categoryId': 1}))]\n # category_id_list = [str(cat) for cat in category_id_list]\n # category_id = str(category_id)\n # if category_id in category_id_list:\n # reviews_filter['categoryId']['$eq'] = f'{category_id}'\n # else:\n # raise\n\n print(f'reviews_filter: {reviews_filter}')\n self.df_reviews = pd.DataFrame(list(reviews.find(reviews_filter, {'_id': 1, \"title\": 1,\n 'createdAt': 1, 'updatedAt': 1, 'fromUserId': 1,\n 'categoryId': 1})))\n # convert all columns to string and make title phrases lower\n self.df_reviews['_id'] = self.df_reviews['_id'].astype(str)\n self.df_reviews['title'] = self.df_reviews['title'].apply(lambda x: str(x).lower())\n self.df_reviews['createdAt'] = self.df_reviews['createdAt'].astype(str)\n self.df_reviews['updatedAt'] = self.df_reviews['updatedAt'].astype(str)\n self.df_reviews['fromUserId'] = self.df_reviews['fromUserId'].astype(str)\n self.df_reviews['categoryId'] = self.df_reviews['categoryId'].astype(str)\n\n category_id_list = list(self.df_reviews['categoryId'])\n self.category_id_list = list(set(category_id_list))\n self.combine_title = list(self.df_reviews[\"title\"])\n self.user_id_list = list(self.df_reviews['fromUserId'])\n self.user_id_list = list(set(self.user_id_list))\n\n # #### blockusers part\n # if len(user_id) > 0:\n # block_list = list(set(block_list))\n # block_list = [str(block) for block in block_list if block != user_id]\n # user_id_list = list(df_reviews['fromUserId'])\n # user_id_list = list(set(user_id_list))\n #\n # if user_id in user_id_list:\n # self.df_reviews = df_reviews[~df_reviews['fromUserId'].isin(block_list)]\n # if len(df_reviews) == 0:\n # raise\n # else:\n # raise\n\n # # print(df_reviews.head())\n # if search_text != None:\n # print(f'INSIDE IF SEARCHTEXT LOOP: {search_text}')\n # combine_title = list(df_reviews[\"title\"])\n # if [s for s in combine_title if all(xs in s for xs in search_text)]:\n # contains = [df_reviews['title'].str.contains(i) for i in search_text]\n # df_reviews = df_reviews[np.all(contains, axis=0)]\n # else:\n # raise KeyError\n #\n #\n #\n # ##### categoryid part\n # if len(category_id) > 0:\n # if category_id in category_id_list:\n # df_reviews = df_reviews[df_reviews['categoryId'] == str(category_id)]\n # if len(df_reviews) == 0:\n # raise KeyError\n # else:\n # raise\n\n\n # from likes table only review _id and resourceId field\n df_likes = pd.DataFrame(list(likes.find({}, {'_id': 1, 'resourceId': 1})))\n df_likes['_id'] = df_likes['_id'].astype(str)\n df_likes['resourceId'] = df_likes['resourceId'].astype(str)\n self.df_reviews.set_index('_id', inplace=True)\n df_likes.set_index('resourceId', inplace=True)\n # df_likes['_id'] = df_likes['_id'].astype(str)\n\n self.df_merge = self.df_reviews.join(df_likes, how='left')\n self.df_merge.dropna(inplace=True)\n self.df_merge['created_dates'] = self.df_merge['createdAt'].apply(lambda x: str(x.split('T')[0]))\n self.df_merge['updated_dates'] = self.df_merge['updatedAt'].apply(lambda x: str(x.split('T')[0]))\n self.df_merge['created_dates'] = pd.to_datetime(self.df_merge['created_dates'], dayfirst=True)\n self.df_merge['updated_dates'] = pd.to_datetime(self.df_merge['updated_dates'], dayfirst=True)\n self.df_merge.drop(labels=['createdAt', 'updatedAt', \"_id\"], inplace=True, axis=1)\n self.df_merge['resourceId'] = self.df_merge.index\n self.df_merge.reset_index(drop=True, inplace=True)\n print(self.df_merge)\n\n def TargetUserId(self, target_userid):\n empty_list = []\n reviews = self.mydb['reviews']\n if target_userid != None:\n targetuserid_reviewlist = pd.DataFrame(list(reviews.find({}, {'_id': 1, 'fromUserId': 1})))\n targetuserid_reviewlist['_id'] = targetuserid_reviewlist['_id'].astype(str)\n targetuserid_reviewlist['fromUserId'] = targetuserid_reviewlist['fromUserId'].astype(str)\n targetuserid_reviewlist = targetuserid_reviewlist[targetuserid_reviewlist['fromUserId'] == target_userid]\n targetuserid_reviewlist = list(targetuserid_reviewlist[\"_id\"])\n self.targetuserid_reviewlist = [str(reviews) for reviews in targetuserid_reviewlist]\n return self.targetuserid_reviewlist\n else:\n return empty_list\n pass\n\n def TopicsTrending(self):\n search_history = self.mydb['searchhistories']\n df_1 = pd.DataFrame(list(search_history.find({}, {\"_id\":1, \"searchTerm\": 1})))\n sort_dict = (df_1['searchTerm'].value_counts()).to_dict()\n trending_list = list(sort_dict.keys())\n trending_list = [str(trend) for trend in trending_list]\n return trending_list\n\n def CategoriesTrending(self):\n reviews = self.mydb['reviews']\n likes = self.mydb['likes']\n reviews_filter = {\"isApprove\": 'approved', \"isDeleted\": False}\n df_reviews = pd.DataFrame(list(reviews.find(reviews_filter, {'_id': 1, 'updatedAt': 1, 'categoryId': 1})))\n df_likes = pd.DataFrame(list(likes.find({}, {'_id': 1, 'resourceId': 1})))\n df_reviews.set_index('_id', inplace=True)\n df_likes.set_index('resourceId', inplace=True)\n df_merge_cat = df_reviews.join(df_likes, how='left')\n df_merge_cat['updatedAt'] = df_merge_cat['updatedAt'].apply(lambda x: str(x))\n df_merge_cat['updated_dates'] = df_merge_cat['updatedAt'].apply(lambda x: str(x.split('T')[0]))\n df_merge_cat['updated_dates'] = pd.to_datetime(df_merge_cat['updated_dates'], dayfirst=True)\n categories_count_df = (df_merge_cat.groupby(['categoryId'])['updated_dates'].count().reset_index().rename(\n columns={'updated_dates': 'ReviewLikeCount'}))\n categories_count_df = (categories_count_df.sort_values(['ReviewLikeCount'], ascending=False))\n return categories_count_df['categoryId'].unique()\n pass\n\n def TopTrendingResults(self,df_merge_cat, num_days, column_name):\n today = pd.to_datetime('today').floor('D')\n week_prior = today - timedelta(days=num_days)\n df_last_week = df_merge_cat[\n (df_merge_cat['updated_dates'] <= today) & (df_merge_cat['updated_dates'] >= week_prior)]\n top_10_last_week_df = (df_last_week.groupby([column_name])['updated_dates'].count().reset_index().rename(\n columns={'updated_dates': 'ReviewLikeCount'}))\n top_10_reviews_last_week = (top_10_last_week_df.sort_values(['ReviewLikeCount'], ascending=False))\n week_num = num_days\n while (len(top_10_reviews_last_week[column_name].unique()) < 10):\n week_num += num_days\n week_prior = today - timedelta(days=week_num)\n df_last_week = df_merge_cat[\n (df_merge_cat['updated_dates'] <= today) & (df_merge_cat['updated_dates'] >= week_prior)]\n top_10_last_week_df = (df_last_week.groupby([column_name])['updated_dates'].count().reset_index().rename(\n columns={'updated_dates': 'ReviewLikeCount'}))\n top_10_reviews_last_week = (top_10_last_week_df.sort_values(['ReviewLikeCount'], ascending=False))\n if (week_prior < df_merge_cat['updated_dates'].min()):\n break\n return (top_10_reviews_last_week[column_name].unique())\n\n def InsideIfFixedParameter(self, input_parameter_in_parameter_list, dataframe, actual_topreviews):\n if input_parameter_in_parameter_list:\n df_reviews = dataframe\n if len(df_reviews) == 0:\n raise KeyError\n else:\n reviewlist = list(df_reviews.index)\n actual_topreviews = list(set(reviewlist).intersection(actual_topreviews))\n else:\n raise\n return actual_topreviews\n pass\n\n def IfOptionalParameter(self, input_parameter_in_parameter_list, step_1, step_2, actual_topreviews):\n if input_parameter_in_parameter_list:\n ab = step_1\n df_reviews = step_2\n if len(df_reviews) == 0:\n raise KeyError\n else:\n reviewlist = list(df_reviews.index)\n actual_topreviews = list(set(reviewlist).intersection(actual_topreviews))\n else:\n raise KeyError\n return actual_topreviews\n pass\n\n def ReviewsResult(self, category_id, user_id, search_text, target_userid, actual_topreviews):\n if len(category_id) > 0:\n if category_id in self.category_id_list:\n df_reviews = self.df_reviews[self.df_reviews['categoryId'] == str(category_id)]\n if len(df_reviews) == 0:\n raise KeyError\n else:\n category_id_reviews = list(df_reviews.index)\n actual_topreviews = list(set(category_id_reviews).intersection(actual_topreviews))\n else:\n raise\n if len(user_id) > 0:\n if user_id in self.user_id_list:\n df_reviews = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n if len(df_reviews) == 0:\n raise KeyError\n else:\n userid_reviews = list(df_reviews.index)\n actual_topreviews = list(set(userid_reviews).intersection(actual_topreviews))\n else:\n raise\n if search_text != None:\n if [s for s in self.combine_title if all(xs in s for xs in search_text)]:\n contains = [self.df_reviews['title'].str.contains(i) for i in search_text]\n df_reviews = self.df_reviews[np.all(contains, axis=0)]\n if len(df_reviews) == 0:\n raise KeyError\n else:\n search_text_reviews = list(df_reviews.index)\n actual_topreviews = list(set(search_text_reviews).intersection(actual_topreviews))\n else:\n raise KeyError\n if target_userid != None:\n if target_userid in self.user_id_list:\n df_reviews = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n df_reviews = df_reviews[df_reviews['fromUserId'] == target_userid]\n if len(df_reviews) == 0:\n raise KeyError\n else:\n targetuserid_reviewlist = list(df_reviews.index)\n actual_topreviews = list(set(targetuserid_reviewlist).intersection(actual_topreviews))\n else:\n raise KeyError\n\n return actual_topreviews\n pass\n\n def TopReviews(self, category_id, user_id, search_text, target_userid):\n self.MergeDataframeUpdate(category_id, user_id, search_text)\n # return self.TopTrendingResults(self.df_merge, 7, 'resourceId')\n actual_topreviews = (self.TopTrendingResults(self.df_merge, 7, 'resourceId'))[:50]\n if len(category_id) > 0:\n input_parameter_i_parameter_list = category_id in self.category_id_list\n dataframe = self.df_reviews[self.df_reviews['categoryId'] == str(category_id)]\n actual_topreviews = self.InsideIfFixedParameter(input_parameter_i_parameter_list, dataframe, actual_topreviews)\n # if category_id in self.category_id_list:\n # df_reviews = self.df_reviews[self.df_reviews['categoryId'] == str(category_id)]\n # if len(df_reviews) == 0:\n # raise KeyError\n # else:\n # category_id_reviews = list(df_reviews.index)\n # actual_topreviews = list(set(category_id_reviews).intersection(actual_topreviews))\n # else:\n # raise\n\n if len(user_id) > 0:\n input_parameter_n_parameter_list = user_id in self.user_id_list\n dataframe = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n actual_topreviews = self.InsideIfFixedParameter(input_parameter_n_parameter_list, dataframe, actual_topreviews)\n # if user_id in self.user_id_list:\n # df_reviews = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n # if len(df_reviews) == 0:\n # raise KeyError\n # else:\n # userid_reviews = list(df_reviews.index)\n # actual_topreviews = list(set(userid_reviews).intersection(actual_topreviews))\n # else:\n # raise\n\n if search_text != None:\n input_parameter_in_parameter_lis = [s for s in self.combine_title if all(xs in s for xs in search_text)]\n step_1 = [self.df_reviews['title'].str.contains(i) for i in search_text]\n step_2 = self.df_reviews[np.all(step_1, axis=0)]\n actual_topreviews = self.IfOptionalParameter(input_parameter_in_parameter_lis, step_1, step_2, actual_topreviews)\n # if [s for s in self.combine_title if all(xs in s for xs in search_text)]:\n # contains = [self.df_reviews['title'].str.contains(i) for i in search_text]\n # df_reviews = self.df_reviews[np.all(contains, axis=0)]\n # if len(df_reviews) == 0:\n # raise KeyError\n # else:\n # search_text_reviews = list(df_reviews.index)\n # actual_topreviews = list(set(search_text_reviews).intersection(actual_topreviews))\n # else:\n # raise KeyError\n\n if target_userid != None:\n input_parameter_in_parameter_li = target_userid in self.user_id_list\n step_1 = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n step_2 = step_1[step_1['fromUserId'] == target_userid]\n actual_topreviews = self.IfOptionalParameter(input_parameter_in_parameter_li, step_1, step_2, actual_topreviews)\n # if target_userid in self.user_id_list:\n # df_reviews = self.df_reviews[~self.df_reviews['fromUserId'].isin(self.block_list)]\n # df_reviews = df_reviews[df_reviews['fromUserId'] == target_userid]\n # if len(df_reviews) == 0:\n # raise KeyError\n # else:\n # targetuserid_reviewlist = list(df_reviews.index)\n # actual_topreviews = list(set(targetuserid_reviewlist).intersection(actual_topreviews))\n # else:\n # raise KeyError\n # targetuserid_reviewlist = self.TargetUserId(target_userid)\n # # print(f'lengh of targetuser id list: {len(targetuserid_reviewlist)}')\n # if len(targetuserid_reviewlist) > 0:\n # actual_topreviews = list(set(targetuserid_reviewlist).intersection(actual_topreviews))\n # else:\n # raise KeyError\n return actual_topreviews\n\n\n\n def TopUsers(self, category_id, user_id, search_text, target_userid):\n self.MergeDataframeUpdate(category_id, user_id, search_text)\n return self.TopTrendingResults(self.df_merge, 7, 'fromUserId')[:50]\n\n def PopularReviews(self, category_id, user_id, search_text, target_userid):\n self.MergeDataframeUpdate(category_id, user_id, search_text)\n # return self.TopTrendingResults(self.df_merge, 30, 'resourceId')\n all_results = self.TopTrendingResults(self.df_merge, 30, 'resourceId')[:50]\n print(all_results)\n if target_userid != None:\n targetuserid_reviewlist = self.TargetUserId(target_userid)\n # print(f'lengh of targetuser id list: {len(targetuserid_reviewlist)}')\n print(targetuserid_reviewlist)\n if len(targetuserid_reviewlist) > 0:\n return list(set(all_results).intersection(targetuserid_reviewlist))\n else:\n empty_list = []\n return empty_list\n else:\n return all_results\n\n def PopularUsers(self, category_id, user_id, search_text, target_userid):\n self.MergeDataframeUpdate(category_id, user_id, search_text)\n return self.TopTrendingResults(self.df_merge, 30, 'fromUserId')\n\n\n@app.route('/trending-review', methods=['GET', 'POST'])\ndef main_1():\n category_id = request.args.get('categoryid')\n user_id = request.args.get('userid')\n search_text = request.args.get('searchtext', default = None)\n target_userid = request.args.get('targetuserid', default = None)\n if search_text:\n search_text = search_text.lower()\n search_text = list(search_text.split())\n print(f'category_id: {category_id}, user_id: {user_id}, search_text: {search_text}, target_userid: {target_userid}')\n try:\n result = trend_results()\n top_review_last_week = result.TopReviews(category_id, user_id, search_text, target_userid)\n top_review_last_week = [str(top) for top in top_review_last_week][:50]\n if category_id == '':\n return {'combined': top_review_last_week}\n elif category_id != '':\n try:\n return {f'{category_id}': top_review_last_week}\n except:\n return {f'{category_id}': f'This category {category_id} has no results'}\n except KeyError:\n return {'empty_result': []}\n except Exception as e:\n # print(f'Exception: {type(e).__name__}')\n return {'error': f'user_id: {user_id} or {search_text} does not exist in our records'}\n\n\n@app.route('/trending-user', methods=['GET', 'POST'])\ndef main_2():\n category_id = request.args.get('categoryid')\n user_id = request.args.get('userid')\n search_text = request.args.get('searchtext', default = None)\n target_userid = request.args.get('targetuserid', default = None)\n if search_text:\n search_text = search_text.lower()\n search_text = list(search_text.split())\n print(search_text)\n try:\n result = trend_results()\n top_user_last_week = result.TopUsers(category_id, user_id, search_text, target_userid)\n top_user_last_week = [str(top) for top in top_user_last_week][:50]\n if category_id == '':\n return {'combined': top_user_last_week}\n elif category_id != '':\n try:\n return {f'{category_id}': top_user_last_week}\n except:\n return {f'{category_id}': f'This category {category_id} has no results'}\n except KeyError:\n return {'empty_result': []}\n except Exception as e:\n # print(f'Exception: {type(e).__name__}')\n return {'error': f'user_id: {user_id} or {search_text} does not exist in our records'}\n\n\n@app.route('/popular-review', methods=['GET', 'POST'])\ndef main_3():\n category_id = request.args.get('categoryid')\n user_id = request.args.get('userid')\n search_text = request.args.get('searchtext', default = None)\n target_userid = request.args.get('targetuserid', default = None)\n print(f'user_id: {user_id}')\n if search_text:\n search_text = search_text.lower()\n search_text = list(search_text.split())\n print(search_text)\n try:\n result = trend_results()\n popular_review_last_month = result.PopularReviews(category_id, user_id, search_text, target_userid)\n popular_review_last_month = [str(top) for top in popular_review_last_month][:50]\n if category_id == '':\n return {'combined': popular_review_last_month}\n elif category_id != '':\n try:\n return {f'{category_id}': popular_review_last_month}\n except Exception as e:\n print(f'Exception: {e}')\n return {f'{category_id}': f'This category {category_id} has no results'}\n except KeyError:\n return {'empty_result': []}\n except Exception as e:\n # print(f'Exception: {type(e).__name__}')\n print(f'Exception: {e}')\n return {'error': f'user_id: {user_id} or {search_text} does not exist in our records'}\n\n@app.route('/popular-user', methods=['GET', 'POST'])\ndef main_4():\n category_id = request.args.get('categoryid')\n user_id = request.args.get('userid')\n search_text = request.args.get('searchtext', default = None)\n target_userid = request.args.get('targetuserid', default = None)\n if search_text:\n search_text = search_text.lower()\n search_text = list(search_text.split())\n print(search_text)\n try:\n result = trend_results()\n popular_user_last_month = result.PopularUsers(category_id, user_id, search_text, target_userid)\n popular_user_last_month = [str(top) for top in popular_user_last_month][:50]\n if category_id == '':\n return {'combined': popular_user_last_month}\n elif category_id != '':\n try:\n return {f'{category_id}': popular_user_last_month}\n except:\n return {f'{category_id}': f'This category {category_id} has no results'}\n except KeyError:\n return {'empty_result': []}\n except Exception as e:\n # print(f'Exception: {type(e).__name__}')\n return {'error': f'user_id: {user_id} or {search_text} does not exist in our records'}\n\n@app.route('/trending-category', methods=['GET', 'POST'])\ndef main_45():\n result = trend_results()\n category_trend = result.CategoriesTrending()\n new_dic = {}\n category_trend = list(category_trend)\n category_trend = [str(cat) for cat in category_trend]\n new_dic['category_trend_results'] = category_trend[:50]\n # print(new_dic['category_trend_results'])\n try:\n return {'category_trend_results': new_dic['category_trend_results']}\n except:\n return {'error': f'category results are not available'}\n\n@app.route('/trending-topics', methods=['GET', 'POST'])\ndef main_46():\n result = trend_results()\n topics_trend = result.TopicsTrending()\n new_dic = {}\n new_dic['topics_trend_results'] = topics_trend\n # print(new_dic['category_trend_results'])\n try:\n return {'category_trend_results': new_dic['topics_trend_results'][:50]}\n except:\n return {'error': f'category results are not available'}\n\nif __name__ == '__main__':\n app.run(debug=True, port=6050)\n","repo_name":"arjunjanamatti/recommend_system","sub_path":"fill_collections/copy_of_main_script.py","file_name":"copy_of_main_script.py","file_ext":"py","file_size_in_byte":24492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9236723258","text":"from typing import List\nimport os\nimport os.path\nimport xml.etree.ElementTree as ET\nfrom FslBuildGen import IOUtil\nfrom FslBuildGen.Log import Log\nfrom FslBuildGen.Exceptions import FileNotFoundException\nfrom FslBuildGen.Xml.Exceptions import XmlException\nfrom FslBuildGen.Xml.Exceptions import XmlInvalidRootElement\nfrom FslBuildGen.Xml.XmlBase import XmlBase\n#from FslBuildGen.DataTypes import *\n#from FslBuildGen.Exceptions import *\n#from FslBuildGen.Xml.XmlStuff import *\n#from FslBuildGen import IOUtil, Util, PackageConfig\n\n\nclass XmlNewTemplate(XmlBase):\n __AttribNoInclude = 'NoInclude'\n __AttribForce = 'Force'\n __AttribWarning = 'Warning'\n\n def __init__(self, log: Log, xmlElement: ET.Element) -> None:\n super().__init__(log, xmlElement)\n self._CheckAttributes({self.__AttribNoInclude, self.__AttribForce, self.__AttribWarning})\n self.NoInclude = self._ReadBoolAttrib(xmlElement, self.__AttribNoInclude, False)\n self.Force = self._ReadBoolAttrib(xmlElement, self.__AttribForce, False)\n self.Warning = self._ReadAttrib(xmlElement, self.__AttribWarning, '')\n\n\nclass XmlNewTemplateFile(XmlBase):\n __AttribVersion = 'Version'\n\n def __init__(self, log: Log, filename: str) -> None:\n if not os.path.isfile(filename):\n raise FileNotFoundException(\"Could not locate config file %s\", filename)\n\n tree = ET.parse(filename)\n elem = tree.getroot()\n if elem.tag != 'FslBuildNewTemplate':\n raise XmlInvalidRootElement(\"The file did not contain the expected root tag 'FslBuildGenConfig'\")\n\n super().__init__(log, elem)\n #self._CheckAttributes({self.__AttribVersion})\n fileVersion = self._ReadAttrib(elem, self.__AttribVersion)\n if fileVersion != '1':\n raise Exception(\"The template file version was not correct\")\n\n xmlTemplate = self.__LoadTemplateConfiguration(log, elem)\n if len(xmlTemplate) != 1:\n raise XmlException(\"The file did not contain exactly one Template element\")\n\n self.Name = IOUtil.GetFileName(IOUtil.GetDirectoryName(filename))\n self.Id = self.Name.lower()\n self.Version = int(fileVersion) # type: int\n self.Template = xmlTemplate[0]\n self.Path = IOUtil.GetDirectoryName(filename)\n self.Prefix = (\"%s_\" % (self.Name)).upper()\n\n\n def __LoadTemplateConfiguration(self, log: Log, element: ET.Element) -> List[XmlNewTemplate]:\n res = []\n foundElements = element.findall(\"Template\")\n for foundElement in foundElements:\n res.append(XmlNewTemplate(log, foundElement))\n return res\n","repo_name":"nxp-imx/gtec-demo-framework","sub_path":".Config/FslBuildGen/Xml/XmlNewTemplateFile.py","file_name":"XmlNewTemplateFile.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"22"} +{"seq_id":"32883238373","text":"class Node:\r\n def __init__(self, inf, priority):\r\n self.inf = inf # полезная информация\r\n self.priority = priority # приоритет\r\n self.next = None # ссылка на следующий элемент\r\n\r\nhead = None # указатель на первый элемент списка\r\nlast = None # указатель на последний элемент списка\r\nf = None # другой указатель (например, для операций)\r\ndlinna = 0 # переменная для хранения длины списка\r\n\r\ndef get_struct():\r\n s = input(\"Введите название объекта: \") # вводим данные\r\n priority = int(input(\"Введите приоритет объекта: \")) # вводим приоритет\r\n if not s:\r\n print(\"Запись не была произведена\")\r\n return None\r\n\r\n p = Node(s, priority)\r\n p.next = None\r\n\r\n return p # возвращаем экземпляр созданной структуры Node\r\n\r\ndef spstore():\r\n global head, last\r\n p = get_struct()\r\n if head is None and p is not None: # если списка нет, то устанавливаем голову списка\r\n head = p\r\n last = p\r\n elif head is not None and p is not None: # список уже есть, то вставляем в соответствии с приоритетом\r\n if p.priority > head.priority: # если новый элемент имеет более высокий приоритет, то он становится первым\r\n p.next = head\r\n head = p\r\n else:\r\n current = head\r\n while current.next is not None and current.next.priority >= p.priority:\r\n current = current.next\r\n p.next = current.next\r\n current.next = p\r\n\r\ndef review():\r\n struc = head\r\n if head is None:\r\n print(\"Список пуст\")\r\n while struc:\r\n print(f\"Имя - {struc.inf}, Приоритет - {struc.priority}\")\r\n struc = struc.next\r\n\r\ndef delete(name):\r\n global head\r\n struc = head # указатель, проходящий по списку установлен на начало списка\r\n prev = None # указатель на предшествующий удаляемому элементу\r\n flag = 0 # индикатор отсутствия удаляемого элемента в списке\r\n\r\n if head is None: # если голова списка равна None, то список пуст\r\n print(\"Список пуст\")\r\n return\r\n\r\n if name == struc.inf: # если удаляемый элемент - первый\r\n flag = 1\r\n head = struc.next # устанавливаем голову на следующий элемент\r\n struc = head # устанавливаем указатель для продолжения поиска\r\n else:\r\n prev = struc\r\n struc = struc.next\r\n\r\n while struc: # проход по списку и поиск удаляемого элемента\r\n if name == struc.inf: # если нашли, то\r\n flag = 1 # выставляем индикатор\r\n if struc.next: # если найденный элемент не последний в списке\r\n prev.next = struc.next # меняем указатели\r\n struc = struc.next # устанавливаем указатель для продолжения поиска\r\n else: # если найденный элемент последний в списке\r\n prev.next = None # обнуляем указатель предшествующего элемента\r\n return\r\n else: # если не нашли, то\r\n prev = struc # устанавливаем указатели для продолжения поиска\r\n struc = struc.next\r\n\r\n if flag == 0: # если флаг = 0, значит нужный элемент не найден\r\n print(\"Элемент не найден\")\r\nloop = 0\r\ngg = 0\r\nwhile loop != 1:\r\n print(\"\")\r\n print(\" - - - - - - - - - - - - - - - \")\r\n print(\"\")\r\n print(\"Выберите номер действия: \")\r\n print(\"Создать структуру - 1\")\r\n print(\"Посмотреть список - 2\")\r\n print(\"Удалить элемент - 3\")\r\n print(\"Завершить программу - 4\")\r\n gg = int(input())\r\n if gg == 1: # Создать структуру\r\n spstore()\r\n if gg == 2: # Посмотреть список\r\n review()\r\n if gg == 3: # Удалить элемент\r\n print(\"Введите имя элемента: \")\r\n name = input()\r\n delete(name)\r\n if gg == 4: # Выход из программы\r\n loop = 1\r\n \r\n \r\n \r\n \r\n","repo_name":"Danila58/logicaaaa","sub_path":"3 log/3.1 log.py","file_name":"3.1 log.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26442132898","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport pytest\nimport cucoslib.workers.downstream\nimport cucoslib.utils\nfrom cucoslib.workers import DownstreamUsageTask\nfrom cucoslib.enums import EcosystemBackend\nfrom cucoslib.models import Ecosystem\nfrom requests import Response\nimport json, os\n\nexample_projects = [\n ('pypi', 'six', 'python-six', '1.9.0'),\n ('npm', 'serve-static', 'nodejs-serve-static', '1.10.0'),\n # TODO: Figure out what's wrong with the Java case\n #('maven', 'junit', 'junit', '4.12.0'),\n ('rubygems', 'nokogiri', 'rubygem-nokogiri', '1.5.11'),\n]\n\n\ndef _make_ecosystem(name):\n return Ecosystem(name=name, backend=getattr(EcosystemBackend, name))\n\n\ndef _make_brew_command(srpms_to_report):\n stdout_template = \"\"\"\\\n {{\n \"packages\": {packages},\n \"response\": {{\n \"brew\": {brew},\n \"registered_srpms\": {srpms}\n }}\n }}\"\"\"\n raw_srpm_metadata = [srpm.copy() for srpm in srpms_to_report]\n for entry in raw_srpm_metadata:\n entry.pop('filename')\n entry['epoch'] = 0\n count_fields = ('patch_count', 'modified_line_count', 'modified_file_count')\n for field in count_fields:\n entry[field] = -1\n srpm_metadata = json.dumps(raw_srpm_metadata)\n raw_brew_metadata = [{'package': srpm['filename']} for srpm in srpms_to_report]\n for entry in raw_brew_metadata:\n entry['patch_files'] = []\n entry['diff'] = {\n 'files': -1,\n 'lines': -1,\n 'changes': []\n }\n brew_metadata = json.dumps(raw_brew_metadata)\n\n class MockBrewCommand(object):\n def __init__(self, command):\n self.command = command\n\n def run(self, timeout=None, **kwargs):\n stdout = stdout_template.format(\n packages=str(self.command[-1:]).replace('\\'', '\"'),\n brew=brew_metadata,\n srpms=srpm_metadata,\n )\n return 0, stdout, \"\"\n return MockBrewCommand\n\ndef _make_pulp_client(usage_to_report):\n class MockPulpClient(object):\n def get_cdn_metadata_for_srpm(self, srpm_filename):\n metadata = usage_to_report[srpm_filename].copy()\n metadata[\"srpm_filename\"] = srpm_filename\n return metadata\n return MockPulpClient\n\n\nclass TestDownstreamUsage(object):\n @pytest.mark.usefixtures(\"dispatcher_setup\")\n @pytest.mark.timeout(20)\n @pytest.mark.parametrize(('ecosystem', 'project', 'package', 'version'), example_projects)\n def test_execute_no_anitya(self, rdb, ecosystem, project, package, version, monkeypatch):\n rdb.add(_make_ecosystem(ecosystem))\n rdb.commit()\n monkeypatch.setattr(cucoslib.workers.downstream,\n \"TimedCommand\",\n _make_brew_command([]))\n # ensure we return None for digests\n monkeypatch.setattr(cucoslib.workers.downstream.DownstreamUsageTask,\n \"parent_task_result\",\n lambda x, y: None)\n task = DownstreamUsageTask.create_test_instance(task_name='redhat_downstream')\n args = {'ecosystem': ecosystem, 'name': project, 'version': version}\n results = task.execute(arguments=args)\n assert results is not None\n assert isinstance(results, dict)\n assert set(results.keys()) == {'details', 'status', 'summary'}\n assert results['status'] == 'error'\n task.validate_result(results)\n\n @pytest.mark.usefixtures(\"dispatcher_setup\")\n @pytest.mark.timeout(20)\n @pytest.mark.parametrize(('ecosystem', 'project', 'package', 'version'), example_projects)\n def test_execute_mock_services(self, rdb, ecosystem, project, package, version, monkeypatch):\n # Mock the attempted access to Anitya\n expected_suffix = \"{}/{}/\".format(ecosystem, project)\n # Mock the result\n dummy_packages = [\n {\n 'distro': ecosystem,\n 'package_name': package\n }\n ]\n\n def _query_anitya_url(host_url, api_path):\n assert api_path.endswith(expected_suffix)\n result = Response()\n result.status_code = 200\n result.encoding = 'utf-8'\n dummy_data = {\n 'api_path': api_path,\n 'packages': dummy_packages\n }\n result._content = json.dumps(dummy_data).encode(result.encoding)\n return result\n monkeypatch.setattr(cucoslib.workers.downstream,\n \"_query_anitya_url\",\n _query_anitya_url)\n\n # Mock the attempted access to Brew\n dummy_releases = ['1.el6', '1.el7']\n dummy_srpm_names = [\n \"{}-{}-{}.src.rpm\".format(package, version, dummy_releases[0]),\n \"{}-{}-{}.src.rpm\".format(package, version, dummy_releases[1]),\n ]\n dummy_srpm_details = [\n {\n 'package_name': package,\n 'version': version,\n 'release': dummy_releases[0],\n 'filename': dummy_srpm_names[0],\n },\n {\n 'package_name': package,\n 'version': version,\n 'release': dummy_releases[1],\n 'filename': dummy_srpm_names[1],\n },\n ]\n monkeypatch.setattr(cucoslib.workers.downstream,\n \"TimedCommand\",\n _make_brew_command(dummy_srpm_details))\n # Mock the attempted access to Pulp (these are not real product names)\n dummy_usage_details = {\n dummy_srpm_names[0]: {\n \"rhsm_product_names\": [\"RHEL 6\"],\n \"rhn_channels\": [\"rhn-rhel-6\"],\n \"rhsm_content_sets\": [\"rhsm-rhel-6\"],\n },\n dummy_srpm_names[1]: {\n \"rhsm_product_names\": [\"RHEL 7\"],\n \"rhn_channels\": [\"rhn-rhel-7\"],\n \"rhsm_content_sets\": [\"rhsm-rhel-7\"],\n },\n }\n monkeypatch.setattr(cucoslib.workers.downstream,\n \"Pulp\",\n _make_pulp_client(dummy_usage_details))\n # ensure we return None for digests\n monkeypatch.setattr(cucoslib.workers.downstream.DownstreamUsageTask,\n \"parent_task_result\",\n lambda x, y: None)\n\n # Check the rest of the task reacts as expected\n rdb.add(_make_ecosystem(ecosystem))\n rdb.commit()\n task = DownstreamUsageTask.create_test_instance(task_name='redhat_downstream')\n args = {'ecosystem': ecosystem, 'name': project, 'version': version}\n results = task.execute(arguments=args)\n assert results is not None\n assert isinstance(results, dict)\n assert set(results.keys()) == {'details', 'status', 'summary'}\n assert results['status'] == 'success'\n task.validate_result(results)\n # We rely on the task's schema self-validation to verify output structure\n # Check the summary metadata\n summary = results['summary']\n assert summary['package_names'] == [package]\n srpm_releases = [srpm['release'] for srpm in summary['registered_srpms']]\n assert set(srpm_releases) == set(dummy_releases)\n assert len(srpm_releases) == 2\n assert summary['all_rhn_channels'] == ['rhn-rhel-6', 'rhn-rhel-7']\n assert summary['all_rhsm_content_sets'] == ['rhsm-rhel-6', 'rhsm-rhel-7']\n assert summary['all_rhsm_product_names'] == ['RHEL 6', 'RHEL 7']\n # Check our dummy data is reported as the response from Anitya\n anitya_response = results['details']['redhat_anitya']\n assert anitya_response['api_path'].endswith(expected_suffix)\n assert anitya_response['packages'] == dummy_packages\n # 'brew' should contain a corresponding entry for each registered SRPM\n brew_responses = results['details']['brew']\n for expected_name, brew_response in zip(dummy_srpm_names, brew_responses):\n assert brew_response['package'] == expected_name\n assert len(brew_responses) == len(dummy_srpm_names)\n # 'pulp_cdn' should also contain an entry for each registered SRPM\n pulp_responses = results['details']['pulp_cdn']\n for expected_name, pulp_response in zip(dummy_srpm_names, pulp_responses):\n assert pulp_response['srpm_filename'] == expected_name\n assert len(pulp_responses) == len(dummy_srpm_names)\n","repo_name":"geetikabatra/fabric8-analytics-worker","sub_path":"tests/workers/test_downstream.py","file_name":"test_downstream.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"33176303114","text":"from flask import Flask, jsonify\nfrom utils.data_processing import extractLanguageFeatures, extractVisualFeatures, language_feature_process_dict\nfrom utils.utils import read_json\nimport caffe\nimport numpy as np\napp = Flask(__name__)\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\ntest_h5 = '/home/ubuntu/LocalizingMoments/data/average_fc7.h5'\n\nvisual_feature = 'feature_process_context'\nlanguage_feature = 'recurrent_embedding'\nmax_iter = 30000\nsnapshot_interval = 30000\nloc = True\nsnapshot_dir = '/home/ubuntu/LocalizingMoments/snapshots'\n\nlanguage_extractor_fcn = extractLanguageFeatures\nvisual_extractor_fcn = extractVisualFeatures\n\nlanguage_process = language_feature_process_dict[language_feature]\n#language_processor = language_process()\ndata_orig = read_json('static/10_vid.json')\n#Flow Things\nflow_deploy_net = '/home/ubuntu/LocalizingMoments/prototxts/deploy_clip_retrieval_flow_iccv_release_feature_process_context_recurrent_embedding_lfTrue_dv0.3_dl0.0_nlv2_nlllstm_no_embed_edl1000-100_edv500-100_pmFalse_losstriplet_lwInter0.2.prototxt'\nflow_snapshot_tag = 'flow_iccv_release_feature_process_context_recurrent_embedding_lfTrue_dv0.3_dl0.0_nlv2_nlllstm_no_embed_edl1000-100_edv500-100_pmFalse_losstriplet_lwInter0.2'\nflow_test_h5 = '/home/ubuntu/LocalizingMoments/data/average_global_flow.h5'\nflow_snapshot = '%s/%s_iter_%%d.caffemodel' % (snapshot_dir, flow_snapshot_tag)\nflow_net = caffe.Net(flow_deploy_net, flow_snapshot % 30000, caffe.TEST)\n#RGB Things\nrgb_deploy_net = '/home/ubuntu/LocalizingMoments/prototxts/deploy_clip_retrieval_rgb_iccv_release_feature_process_context_recurrent_embedding_lfTrue_dv0.3_dl0.0_nlv2_nlllstm_no_embed_edl1000-100_edv500-100_pmFalse_losstriplet_lwInter0.2.prototxt'\nrgb_snapshot_tag = 'rgb_iccv_release_feature_process_context_recurrent_embedding_lfTrue_dv0.3_dl0.0_nlv2_nlllstm_no_embed_edl1000-100_edv500-100_pmFalse_losstriplet_lwInter0.2'\nrgb_test_h5 = '/home/ubuntu/LocalizingMoments/data/average_fc7.h5'\nrgb_snapshot = '%s/%s_iter_%%d.caffemodel' % (snapshot_dir, rgb_snapshot_tag)\nrgb_net = caffe.Net(rgb_deploy_net, rgb_snapshot % 30000, caffe.TEST)\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/query//')\ndef serve(model_type, user_query):\n response = {'results':[]}\n params = {'feature_process': visual_feature, 'loc_feature': loc, 'loss_type': 'triplet',\n 'batch_size': 120, 'features': test_h5, 'oversample': False, 'sentence_length': 50,\n 'query_key': 'query', 'cont_key': 'cont', 'feature_key_p': 'features_p',\n 'feature_time_stamp_p': 'feature_time_stamp_p',\n 'feature_time_stamp_n': 'feature_time_stampe_n'}\n if model_type == 'rgb':\n net = rgb_net\n else:\n net = flow_net\n for el in data_orig:\n el['description'] = user_query\n language_processor = language_process(data_orig)\n data = language_processor.preprocess(data_orig)\n params['vocab_dict'] = language_processor.vocab_dict\n num_glove_centroids = language_processor.get_vector_dim()\n params['num_glove_centroids'] = num_glove_centroids\n thread_result = {}\n\n visual_feature_extractor = visual_extractor_fcn(data, params, thread_result)\n textual_feature_extractor = language_extractor_fcn(data, params, thread_result)\n possible_segments = visual_feature_extractor.possible_annotations\n\n visual_feature_extractor = visual_extractor_fcn(data, params, thread_result)\n textual_feature_extractor = language_extractor_fcn(data, params, thread_result)\n possible_segments = visual_feature_extractor.possible_annotations\n\n all_scores = {}\n for iter in range(snapshot_interval, max_iter + 1, snapshot_interval):\n all_scores[iter] = {}\n\n # determine score for segments in each video\n for id, d in enumerate(data):\n vis_features, loc_features = visual_feature_extractor.get_data_test({'video': d['video']})\n lang_features, cont = textual_feature_extractor.get_data_test(d)\n\n net.blobs['image_data'].data[...] = vis_features.copy()\n net.blobs['loc_data'].data[...] = loc_features.copy()\n\n for i in range(vis_features.shape[0]):\n net.blobs['text_data'].data[:, i, :] = lang_features\n net.blobs['cont_data'].data[:, i] = cont\n\n top_name = 'rank_score'\n net.forward()\n sorted_segments = [possible_segments[i] for i in np.argsort(net.blobs[top_name].data.squeeze())]\n all_scores[iter][d['annotation_id']] = net.blobs[top_name].data.squeeze().copy()\n response['results'].append({\n 'video': d['dl_link'],\n 'segments': sorted_segments[:5]\n })\n # if id % 10 == 0:\n # sys.stdout.write('\\r%d/%d' % (id, len(data)))\n return jsonify(response)\n # eval_predictions(sorted_segments_list, data)\n #\n # if not os.path.exists(result_dir):\n # os.mkdir(result_dir)\n #\n # pkl.dump(all_scores, open('%s/%s_%s.p' % (result_dir, snapshot_tag, split), 'w'))\n # print(\"Dumped results to: %s/%s_%s.p\" % (result_dir, snapshot_tag, split))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","repo_name":"Naman-Bhalla/ir_server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28874280264","text":"from faker import Faker\n\nfrom django.apps import apps as django_apps\nfrom django.test import TestCase, tag\n\nfrom ..models import IdentifierModel\nfrom ..subject_identifier import SubjectIdentifier\nfrom ..infant_identifier import InfantIdentifier\nfrom django.test.utils import override_settings\nfrom django.contrib.sites.models import Site\nfrom edc_registration.models import RegisteredSubject\n\nfake = Faker()\n\n\nclass TestInfantIdentifier(TestCase):\n\n def get_maternal_identifier(self):\n site = Site.objects.get_current()\n maternal_identifier = SubjectIdentifier(\n identifier_type='subject',\n requesting_model='edc_identifier.enrollment',\n protocol_number='000',\n site=site,\n device_id='99',\n last_name=fake.last_name())\n return maternal_identifier\n\n def test_create_singleton(self):\n maternal_identifier = self.get_maternal_identifier()\n self.assertEqual(maternal_identifier.identifier, '000-40990001-6')\n infant_identifier = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=1,\n live_infants=1)\n self.assertEqual(\n infant_identifier.identifier, '000-40990001-6-10')\n\n def test_create_twins(self):\n maternal_identifier = self.get_maternal_identifier()\n infant_identifier1 = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=1,\n live_infants=2)\n infant_identifier2 = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=2,\n live_infants=2)\n self.assertEqual(\n infant_identifier1.identifier, '000-40990001-6-25')\n self.assertEqual(\n infant_identifier2.identifier, '000-40990001-6-26')\n\n @override_settings(SITE_ID=20)\n def test_create_triplets(self):\n Site.objects.create(pk=20)\n maternal_identifier = self.get_maternal_identifier()\n\n infant_identifier1 = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=1,\n live_infants=3)\n infant_identifier2 = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=2,\n live_infants=3)\n infant_identifier3 = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=3,\n live_infants=3)\n\n self.assertEqual(\n infant_identifier1.identifier, '000-20990001-8-36')\n self.assertEqual(\n infant_identifier2.identifier, '000-20990001-8-37')\n self.assertEqual(\n infant_identifier3.identifier, '000-20990001-8-38')\n\n self.assertEqual(\n IdentifierModel.objects.filter(\n identifier_type='infant',\n model='edc_identifier.maternallabdel',\n protocol_number='000',\n site=Site.objects.get_current()).count(), 3)\n\n try:\n RegisteredSubject.objects.get(\n subject_identifier=infant_identifier1.identifier)\n except RegisteredSubject.DoesNotExist:\n self.fail('RegisteredSubject.DoesNotExist unexpectedly raised')\n\n try:\n RegisteredSubject.objects.get(\n subject_identifier=infant_identifier2.identifier)\n except RegisteredSubject.DoesNotExist:\n self.fail('RegisteredSubject.DoesNotExist unexpectedly raised')\n\n try:\n RegisteredSubject.objects.get(\n subject_identifier=infant_identifier3.identifier)\n except RegisteredSubject.DoesNotExist:\n self.fail('RegisteredSubject.DoesNotExist unexpectedly raised')\n\n def test_create_triplets_only_registered_2nd_born(self):\n maternal_identifier = self.get_maternal_identifier()\n infant_identifier = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=2,\n live_infants=3)\n self.assertEqual(\n infant_identifier.identifier, '000-40990001-6-37')\n\n def test_create_triplets_only_registered_1st_born(self):\n maternal_identifier = self.get_maternal_identifier()\n infant_identifier = InfantIdentifier(\n maternal_identifier=maternal_identifier,\n requesting_model='edc_identifier.maternallabdel',\n birth_order=1,\n live_infants=3)\n self.assertEqual(\n infant_identifier.identifier, '000-40990001-6-36')\n","repo_name":"botswana-harvard/edc-identifier","sub_path":"edc_identifier/tests/test_infant_identifier.py","file_name":"test_infant_identifier.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19262199924","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('tasks', views.get_usertask, name='tasks'),\n path('add', views.save_task, name='add'),\n path('description-', views.DescriptionDetail.as_view(), name='description'),\n path('updatetask-', views.TaskDetailView.as_view()),\n path('deletetask-', views.TaskDeleteView.as_view())\n]\n\n","repo_name":"paNoNi/WebLab","sub_path":"sod/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4832332456","text":"from __future__ import annotations\n\nimport uuid\nfrom typing import Union\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.db.models.expressions import Case, Value, When\nfrom django.db.models.functions import Now\nfrom packaging.version import parse\n\n\ndef _game_sort_key(game):\n version = parse(game.version)\n return not version.is_prerelease, version\n\n\nclass GameExperienceManager(models.Manager):\n def from_experience_id(self, experience_id: Union[uuid.UUID, str]):\n \"\"\"\n Return a list of game experiences with the same experience_id, ordered\n from oldest to latest.\n \"\"\"\n games = self.sort_games_by_version(\n self.filter(experience_id=experience_id).order_by(\"name\")\n )\n for game in games:\n game.is_latest = game == games[-1]\n return games\n\n def sort_games_by_version(self, games):\n \"\"\"\n Sorting considers valid pre-release versions to be oldest, then\n non-valid versions, then other valid versions. Here's an example list of\n experience versions sorted from oldest to latest:\n\n 1. \"1.10a1\"\n 2. \"\"\n 3. \"1.10-copy\"\n 4. \"1.9\n 5. \"1.10\"\n \"\"\"\n games = list(games)\n games.sort(key=_game_sort_key)\n return games\n\n\nclass PlayerQuerySet(models.QuerySet):\n def unbalanced(self):\n return self.ready().filter(lobby=None)\n\n def active(self):\n return self.filter(inactive=False)\n\n def inactive(self):\n return self.filter(inactive=True)\n\n def ready(self):\n return (\n self.active()\n .has_user()\n .filter(character=None, ready=True)\n .select_related(\"user\")\n )\n\n def has_user(self, value=True):\n return self.filter(user__isnull=not value)\n\n\nclass LobbyQuerySet(models.QuerySet):\n def empty(self, no_players: bool = True):\n qs = self.annotate(num_players=models.Count(\"player\"))\n if no_players:\n return qs.filter(num_players=0)\n return qs.filter(num_players__gt=0)\n\n def availability(self, max_players: int = None, exclude_started=True):\n \"\"\"\n Filter to only available lobbies.\n\n Pass a ``max_players`` integer to filter to lobbies with less players\n than this amount.\n\n Pass ``exclude_started=False`` if you don't want to exclude lobbies\n which already have a created instance.\n \"\"\"\n qs = self\n if exclude_started:\n qs = self.alias(\n num_playing=models.Count(\"player\", filter=~Q(player__character=None)),\n ).filter(num_playing=0)\n if max_players:\n qs = qs.annotate(num_available=max_players - models.Count(\"player\")).filter(\n num_available__gt=0\n )\n return qs\n\n # Django <3.2 support\n def alias(self, *args, **kwargs):\n return getattr(super(), \"alias\", self.annotate)(*args, **kwargs)\n\n def prepare_ready(self):\n qs = self.annotate(player_count=models.Count(\"player\"))\n qs = qs.alias(\n unready_players=models.Count(\n \"player\", filter=Q(player__character=None) & Q(player__ready=False)\n ),\n )\n return qs.annotate(\n ready=Case(\n When(player_count__gt=0, unready_players=0, then=Value(True)),\n default=Value(False),\n output_field=models.BooleanField(),\n ),\n not_ready=Case(\n When(unready_players__gt=0, then=Value(True)),\n default=Value(False),\n output_field=models.BooleanField(),\n ),\n )\n\n def ready(self, value: bool = True, include_linked: bool = False):\n \"\"\"\n Filter to lobbies with at least one ready player, and no unready\n players.\n\n Checking for ``.ready(False)`` is not quite the inverse -- it will only\n include lobbies with at least one unready player.\n\n By default, both cases will only include lobbies not yet linked to an\n instance. Use ``include_linked=True`` to include all lobbies.\n \"\"\"\n qs = self.filter(instance=None) if not include_linked else self.all()\n qs = qs.prepare_ready()\n if value:\n return qs.filter(ready=True)\n return qs.filter(not_ready=True)\n\n\nclass CharacterQuerySet(models.QuerySet):\n\n def annotate_status(self):\n # This should always be in sync with BaseCharacterData.status\n from .models import BaseCharacterData\n\n return self.annotate(\n _status=models.Case(\n models.When(\n models.Q(_date_finished__lte=Now()) | models.Q(instance__date_end__lte=Now()),\n then=BaseCharacterData.STATUS.COMPLETE,\n ),\n default=BaseCharacterData.STATUS.PLAY,\n output_field=models.PositiveSmallIntegerField(),\n ),\n )\n","repo_name":"Wharton-Interactive/simpl-cloud","sub_path":"simpl/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"86576277498","text":"import math\nimport re\nfrom typing import List, Dict\n\nfrom wbtools.db.generic import WBGenericDBManager\nfrom wbtools.lib.nlp.common import EntityType\nfrom wbtools.lib.nlp.literature_index.abstract_index import AbstractLiteratureIndex\n\nALL_VAR_REGEX = r'({designations}|m|p|It)(_)?([A-z]+)?([0-9]+)([a-zA-Z]{{1,4}}[0-9]*)?(\\[[0-9]+\\])?([a-zA-Z]{{1,4}}' \\\n r'[0-9]*)?(\\[.+\\])?'\n\nNEW_VAR_REGEX = r'[\\(\\s]({designations}|m|p)([0-9]+)((?:{designations}|m|p|ts|gf|lf|d|sd|am|cs)[0-9]+)?[\\)\\s\\[]'\n\nSTRAIN_REGEX = r'[\\(\\s,\\.:;\\'\\\"]({designations})([0-9]+)[\\)\\s\\,\\.:;\\'\\\"]'\n\n\nOPENING_REGEX_STR = \"[\\\\.\\\\n\\\\t\\\\'\\\\/\\\\(\\\\)\\\\[\\\\]\\\\{\\\\}:;\\\\,\\\\!\\\\?> ]\"\nCLOSING_REGEX_STR = \"[\\\\.\\\\n\\\\t\\\\'\\\\/\\\\(\\\\)\\\\[\\\\]\\\\{\\\\}:;\\\\,\\\\!\\\\?> ]\"\n\n\nOPENING_CLOSING_REGEXES = {\n EntityType.VARIATION: [r'[\\(\\s](', r')[\\)\\s\\[]'],\n EntityType.STRAIN: [r'[\\(\\s,\\.:;\\'\\\"](', r')[\\)\\s,\\.:;\\'\\\"]']\n}\n\n\nclass NttExtractor:\n\n def __init__(self, db_manager: WBGenericDBManager = None):\n self.db_manager = db_manager\n self.curated_entities = {}\n for entity_type in EntityType:\n self.curated_entities[entity_type] = None\n allele_designations = self.db_manager.get_allele_designations()\n new_var_regex = NEW_VAR_REGEX.format(designations=\"|\".join(allele_designations))\n strain_regex = STRAIN_REGEX.format(designations=\"|\".join(self.db_manager.get_strain_designations()))\n self.entity_type_regex_map = {\n EntityType.VARIATION: new_var_regex,\n EntityType.STRAIN: strain_regex\n }\n\n def get_curated_entities(self, entity_type: EntityType, exclude_id_used_as_name: bool = True):\n if not self.curated_entities[entity_type]:\n self.curated_entities[entity_type] = self.db_manager.get_curated_entities(\n entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name)\n return self.curated_entities[entity_type]\n\n @staticmethod\n def match_entities_regex(text, regex):\n res = re.findall(regex, \" \" + text + \" \")\n return [\"\".join(entity_arr) for entity_arr in res]\n\n @staticmethod\n def count_keyword_matches_regex(keyword, text, case_sensitive: bool = True,\n match_uppercase: bool = False) -> int:\n keyword = keyword if case_sensitive else keyword.upper()\n text = text if case_sensitive else text.upper()\n match_uppercase = False if keyword.upper() == keyword else match_uppercase\n if keyword in text or match_uppercase and keyword.upper() in text:\n try:\n match_count = len(re.findall(OPENING_REGEX_STR + re.escape(keyword) + CLOSING_REGEX_STR, text))\n if match_uppercase:\n match_count += len(re.findall(OPENING_REGEX_STR + re.escape(keyword.upper()) +\n CLOSING_REGEX_STR, text))\n return match_count\n except:\n pass\n return 0\n\n @staticmethod\n def is_entity_meaningful(entity_keywords: List[str], text, lit_index: AbstractLiteratureIndex,\n match_uppercase: bool = False, min_num_occurrences: int = 1,\n tfidf_threshold: float = 0.0) -> bool:\n min_num_occurrences = 1 if min_num_occurrences < 1 else min_num_occurrences\n raw_count = sum(NttExtractor.count_keyword_matches_regex(keyword=keyword, text=text,\n match_uppercase=match_uppercase) for\n keyword in entity_keywords)\n return True if raw_count >= min_num_occurrences and (\n tfidf_threshold <= 0 or 0 < tfidf_threshold < NttExtractor.tfidf(entity_keywords=entity_keywords,\n raw_count=raw_count,\n lit_index=lit_index)) else False\n\n @staticmethod\n def tfidf(entity_keywords: List[str], raw_count, lit_index: AbstractLiteratureIndex) -> float:\n doc_counter = sum(lit_index.count_matching_documents(keyword) for keyword in entity_keywords)\n idf = math.log(float(lit_index.num_documents()) / (doc_counter if doc_counter > 0 else 0.5))\n return raw_count * idf\n\n @staticmethod\n def extract_meaningful_entities_by_keywords(keywords: List[str], text: str,\n lit_index: AbstractLiteratureIndex = None,\n match_uppercase: bool = False, min_matches: int = 1,\n tfidf_threshold: float = 0.0,\n blacklist: List[str] = None) -> List[str]:\n blacklist = set(blacklist) if blacklist else set()\n return [keyword for keyword in set(keywords) if keyword not in blacklist and\n NttExtractor.is_entity_meaningful(\n entity_keywords=[keyword], text=text, match_uppercase=match_uppercase, min_num_occurrences=min_matches,\n tfidf_threshold=tfidf_threshold, lit_index=lit_index)]\n\n def extract_species_regex(self, text: str, taxon_id_name_map: Dict[str, List[str]] = None,\n blacklist: List[str] = None,\n whitelist: List[str] = None, min_matches: int = 1, tfidf_threshold: float = 0.0,\n lit_index: AbstractLiteratureIndex = None):\n blacklist = set(blacklist) if blacklist else set()\n whitelist = set(whitelist) if whitelist else set()\n if taxon_id_name_map is None:\n taxon_id_name_map = self.db_manager.get_taxon_id_names_map()\n return [regex_list[0].replace(\"\\\\\", \"\") for taxon_id, regex_list in taxon_id_name_map.items() if\n taxon_id not in blacklist and (taxon_id in whitelist or\n NttExtractor.is_entity_meaningful(entity_keywords=regex_list, text=text,\n match_uppercase=False,\n lit_index=lit_index,\n min_num_occurrences=min_matches,\n tfidf_threshold=tfidf_threshold))]\n\n @staticmethod\n def get_entity_ids_from_names(entity_names: List[str], entity_name_id_map: Dict[str, str]):\n return list(set([(entity_name_id_map[entity_name], entity_name) for entity_name in entity_names]))\n\n def extract_all_entities_by_type(self, text: str, entity_type: EntityType, include_new: bool = True,\n match_curated: bool = False, exclude_curated: bool = False,\n match_entities: List[str] = None, exclude_entities: List[str] = None,\n exclude_id_used_as_name: bool = True):\n \"\"\"\n extract entities mentioned in text\n\n Args:\n text (str): the input text\n entity_type (EntityType): the type of entities to extract\n include_new (bool): whether to include possibly new entities not yet in the curation database\n match_curated (bool): whether to extract curated entities obtained from the provided DB manager\n exclude_curated (bool): whether to remove curated entities obtained from the provided DB manager from the\n extracted ones\n match_entities (List[str]): match the provided entities\n exclude_entities (List[str]): exclude the provided entities from the results\n exclude_id_used_as_name (bool): do not extract entity ids when used as names in the DB\n\n Returns:\n list: the list of entities extracted from text\n\n \"\"\"\n entities = set()\n if include_new:\n entities.update(NttExtractor.match_entities_regex(text, self.entity_type_regex_map[entity_type]))\n if match_curated:\n entities.update(NttExtractor.match_entities_regex(\n text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(self.db_manager.get_curated_entities(\n entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name)) +\n OPENING_CLOSING_REGEXES[entity_type][1]))\n if exclude_curated:\n entities -= set(self.get_curated_entities(entity_type=entity_type, exclude_id_used_as_name=exclude_id_used_as_name))\n if match_entities:\n entities.update(NttExtractor.match_entities_regex(\n text, OPENING_CLOSING_REGEXES[entity_type][0] + '|'.join(match_entities) +\n OPENING_CLOSING_REGEXES[entity_type][1]))\n if exclude_entities:\n entities -= set(exclude_entities)\n return sorted(list(entities))\n","repo_name":"WormBase/wbtools","sub_path":"wbtools/lib/nlp/entity_extraction/ntt_extractor.py","file_name":"ntt_extractor.py","file_ext":"py","file_size_in_byte":9003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"4317625340","text":"##########################################################\n# Based on Announce.py and Echo.py #\n# Rnode Setting Test receiver #\n# github.com/faragher/RNode_Setting_Test/ #\n##########################################################\n\nimport argparse\nimport random\nimport RNS\n\nAPP_NAME = \"setting_test\"\nrcd_packets = 0\nrcd_announce = 0\n\n# This initialisation is executed when the program is started\ndef program_setup(configpath):\n reticulum = RNS.Reticulum(configpath)\n identity = RNS.Identity()\n\n destination_1 = RNS.Destination(\n identity,\n RNS.Destination.IN,\n RNS.Destination.SINGLE,\n APP_NAME,\n \"GP\"\n )\n RNS.log(\n \"Server addess: \"+\n RNS.prettyhexrep(destination_1.hash)\n )\n\n announce_handler = ExampleAnnounceHandler(\n aspect_filter=None\n )\n\n # We register the announce handler with Reticulum\n RNS.Transport.register_announce_handler(announce_handler)\n destination_1.set_packet_callback(server_callback)\n # Everything's ready!\n # Let's hand over control to the announce loop\n announceLoop()\n\n\ndef announceLoop():\n global rcd_packets\n global rcd_announce\n print(\"Setting test receiver: Expects 10 trials.\")\n print(\"Waiting for data. (Enter to end test, Ctrl-C to abort)\")\n\n entered = input()\n print(\"Recieved \"+str(rcd_announce)+\" announces. \"+str(((10-rcd_announce)/10)*100)+\"% lost\")\n print(\"Recieved \"+str(rcd_packets)+\" packets. \"+str(((10-rcd_packets)/10)*100)+\"% lost\")\n\n\nclass ExampleAnnounceHandler:\n def __init__(self, aspect_filter=None):\n self.aspect_filter = aspect_filter\n\n def received_announce(self, destination_hash, announced_identity, app_data):\n global rcd_announce\n\n if app_data:\n rcd_announce = rcd_announce + 1\n print(\n \"Received announce \"+\n app_data.decode(\"utf-8\")\n )\n\ndef server_callback(message, packet):\n global rcd_packets\n rcd_packets = rcd_packets + 1\n\n print(\"Received packet \"+message.decode(\"utf-8\"))\n\n##########################################################\n#### Program Startup #####################################\n##########################################################\n\n# This part of the program gets run at startup,\n# and parses input from the user, and then starts\n# the desired program mode.\nif __name__ == \"__main__\":\n try:\n parser = argparse.ArgumentParser(\n description=\"Reticulum setting test receiver. Expects 10 announces and packets from a single source.\"\n )\n\n parser.add_argument(\n \"--config\",\n action=\"store\",\n default=None,\n help=\"path to alternative Reticulum config directory\",\n type=str\n )\n\n args = parser.parse_args()\n\n if args.config:\n configarg = args.config\n else:\n configarg = None\n\n program_setup(configarg)\n\n except KeyboardInterrupt:\n print(\"\")\n exit()\n","repo_name":"faragher/RNode_Setting_Test","sub_path":"Receiver.py","file_name":"Receiver.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"31969453442","text":"import requests\nimport yaml\n\nwith open('config.yaml', encoding='utf-8') as f:\n data = yaml.safe_load(f)\n address = data['address_posts']\n\nS = requests.Session()\n\n\ndef test_rest(user_login, post_title):\n result = S.get(url=address, headers={\"X-Auth-Token\": user_login}, params={'owner': 'notMe'}).json()['data']\n r = [i['title'] for i in result]\n assert post_title in r, 'Test_rest FAILED'\n\n\ndef test_post(user_login, post_title1, post_description1, post_content1):\n post_data = {\n 'title': post_title1,\n 'description': post_description1,\n 'content': post_content1\n }\n response = S.post(url=address, headers={\"X-Auth-Token\": user_login}, json=post_data)\n assert response.status_code == 200, 'Не удалось создать новый пост'\n\n result = S.get(url=address, headers={\"X-Auth-Token\": user_login}).json()['data']\n descriptions = [i['description'] for i in result]\n assert post_description1 in descriptions, 'Новый пост не найден на сервере'\n","repo_name":"NKompa/Autotest_Web","sub_path":"HW1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44396467792","text":"\"\"\"\nRotate Matrix: Given an image represented by an NxN matrix, where each pixel in the image is 4 \nbytes, write a method to rotate the image by 90 degrees. (Can you do this in place? \nHints: #51, #100\n[[0, 1], [x, y] -> [y, x]\n [1, 0]]\n cos(theta), 1\n 1, cos(theta)\n \n matrix = [[1, 2, 3], \n [4, 5, 6], \n [7, 8, 9]]\n matrix =[[3, 2, 1], \n [6, 5, 4], \n [9, 8, 7]]\n\nrotated =[[3, 6, 9], \n [2, 5, 8], \n [1, 4, 7]]\n\"\"\"\nimport copy\n \ndef rotate_matrix(image):\n # Copy method one\n copy_image_one = copy.deepcopy(image)\n print(\"Original\", matrix)\n print(\"Copy of original\", copy_image_one)\n N = len(matrix)\n\n # Part 1, reverse order within each row\n for row in range(N):\n for column in range(N):\n copy_image_one[row][column] = image[row][N-column-1]\n\n print(\"After modification\")\n print(\"Original\", matrix) \n print(\"Copy\", copy_image_one)\n\n\n # Copy method two\n copy_image_two = [list(row) for row in copy_image_one]\n # Test on what happens when you remove list? from the above code.\n\n\n # Part 2, transpose\n for row in range(N):\n for column in range(N):\n copy_image_two[column][row] = copy_image_one[row][column]\n \n return copy_image_two\n\n\ndef rotate_matrix_inplace(image):\n # Copy method one\n print(\"Original\", matrix)\n N = len(matrix)\n\n # Part 1, reverse order within each row\n for row in range(N):\n for column in range(N//2):\n tmp = image[row][column]\n image[row][column] = image[row][N-column-1]\n image[row][N-column-1] = tmp\n\n print(\"After reversing\")\n print(\"Original\", matrix)\n\n\n # Part 2, transpose\n for row in range(N):\n for column in range(row, N):\n tmp = matrix[column][row]\n matrix[column][row] = matrix[row][column]\n matrix[row][column] = tmp\n \n return matrix\n\n\nif __name__ == \"__main__\":\n matrix = [[1,2,3],\n [4,5,6],\n [7,8,9]]\n print(\"Rotated image\", rotate_matrix_inplace(matrix))\n ","repo_name":"MubashirullahD/cracking-the-coding-interview","sub_path":"chapter1/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"69874394296","text":"import datetime\r\n\r\nimport psycopg2.errors\r\nfrom flask import Blueprint, jsonify, g\r\nfrom flask_jwt_extended import create_access_token\r\n\r\nfrom calendar_lib.data_classes.user import UserLoginRequest, CreateUserRequest\r\nfrom calendar_api.decorators.validate_request import validate_json\r\n\r\napp = Blueprint('auth', __name__, url_prefix='/auth')\r\n\r\n\r\n@app.route('/login', methods=['POST'])\r\n@validate_json(UserLoginRequest)\r\ndef login(user_request: UserLoginRequest):\r\n try:\r\n user = g.core.auth_use_case().login(user_request)\r\n access_token = create_access_token(\r\n identity=user.uuid,\r\n additional_claims={\r\n 'group': user.group_name,\r\n 'created_at': datetime.datetime.now()\r\n }\r\n )\r\n return jsonify(access_token=access_token), 200\r\n except Exception as e:\r\n return jsonify(message=str(e)), 401\r\n\r\n\r\n@app.route('/register', methods=['POST'])\r\n@validate_json(CreateUserRequest)\r\ndef register(user_request: CreateUserRequest):\r\n try:\r\n result, error = g.core.auth_use_case().register(user_request)\r\n if error:\r\n return jsonify({'errors': error}), 409\r\n return jsonify({'message': 'success'}), 200\r\n except psycopg2.Error as e:\r\n return jsonify(message=e.pgerror), 400\r\n","repo_name":"lucasaraujo1301/calendar_api","sub_path":"calendar_api/blueprints/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"32835436895","text":"from django.shortcuts import render\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django import forms\r\nfrom kinase_predictor import ml_model\r\nfrom kinase_predictor import GNN_explain\r\nimport rdkit\r\nfrom rdkit import Chem\r\nimport pandas as pd\r\nimport os\r\nfrom rdkit.Chem import PandasTools\r\nfrom django.http import JsonResponse, request\r\nimport json\r\nfrom rdkit import Chem\r\nfrom rdkit.Chem import Draw\r\nimport numpy as np\r\nimport plotly.express as px\r\nimport plotly.offline as opy\r\nfrom django.http import FileResponse\r\n\r\n\r\n# Create your views here.\r\nfrom django.http import HttpResponse\r\nimport datetime\r\n\r\n\r\nerror_msg_dict = {\"error_msg\": None}\r\nBase_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\r\nos.makedirs(os.path.join(Base_dir, r\"static/file/download_file/grobal_result\"), exist_ok=True)\r\nos.makedirs(os.path.join(Base_dir, r\"static/file/download_file/invalid_smiles\"), exist_ok=True)\r\nos.makedirs(os.path.join(Base_dir, r\"static/file/download_file/single_mol\"), exist_ok=True)\r\nupload_path = os.path.join(Base_dir, r\"static/file/upload_dir\")\r\ndf_uniprot = pd.read_csv(os.path.join(Base_dir, r\"static/file/UniProt.csv\"))\r\ndf_uniprot.columns = [\"UniProt\", \"Group\", \"Family\", \"Name\", \"Symbol\"]\r\n\r\n\r\n@csrf_exempt\r\ndef download(request):\r\n # https://www.zhangshengrong.com/p/Z9a28xMkXV/\r\n # if request.is_ajax():\r\n if True:\r\n if request.method == \"POST\":\r\n # mode = data[\"mode\"]\r\n # index = data[\"index\"]\r\n mode = request.POST.get(\"mode\")\r\n index = request.POST.get(\"index\", \"\")\r\n file_index = request.POST.get(\"file_index\", \"\") # single_mol\r\n print(\"mode\", mode, \"index\", index, \"file_index\", file_index)\r\n\r\n if mode == \"gobal_result\":\r\n grobal_path = os.path.join(Base_dir, r\"static/file/download_file/grobal_result/result_%d.csv\"% (int(index)),)\r\n file = open(grobal_path, \"rb\")\r\n response = FileResponse(file)\r\n response[\"Content-Type\"] = \"text/csv\"\r\n response[\"Content-Disposition\"] = 'attachment;filename=\"result.csv\"'\r\n print(\"response----->\", response)\r\n return response\r\n\r\n if mode == \"invalid_smiles\":\r\n invalid_path = os.path.join(Base_dir,r\"static/file/download_file/invalid_smiles/invalid_smiles_%d.csv\"% (int(index)),)\r\n file = open(invalid_path, \"rb\")\r\n response = FileResponse(file)\r\n response[\"Content-Type\"] = \"text/csv\"\r\n response[\r\n \"Content-Disposition\"\r\n ] = 'attachment;filename=\"invalid_smiles.csv\"'\r\n print(\"response----->\", response)\r\n return response\r\n\r\n if mode == \"single_mol\":\r\n invalid_path = os.path.join(Base_dir, r\"static/file/download_file/single_mol/result_%d.csv\"% (int(index)),)\r\n file = open(invalid_path, \"rb\")\r\n response = FileResponse(file)\r\n response[\"Content-Type\"] = \"text/csv\"\r\n response[\"Content-Disposition\"] = \"attachment;filename=%s\" % (\"result_%d.csv\" % (int(file_index)))\r\n print(\"response----->\", response)\r\n return response\r\n\r\n\r\ndef homepage(request):\r\n return render(request, \"homepage.html\")\r\n\r\n\r\ndef submit(request):\r\n return render(request, \"submit.html\")\r\n\r\n\r\n# @csrf_exempt\r\ndef result(request):\r\n dic = {\"status\": 201, \"error_msg\": None}\r\n\r\n print(\"ajax\", request.is_ajax())\r\n if request.is_ajax():\r\n\r\n if request.method == \"POST\":\r\n smiles = None\r\n file = request.FILES.get(\"file\")\r\n print(file, \"file\")\r\n # 判断文件是否能存在\r\n if file:\r\n file_name = file.name\r\n file_type = file_name.split(\".\")[-1]\r\n print(\"file\", file)\r\n print(\"file_name\", file_name)\r\n print(\"file_type\", file_type)\r\n file_path = os.path.join(Base_dir, r\"static/file/upload_dir\", file_name)\r\n\r\n # 先把文件写入\r\n with open(file_path, \"wb\") as f:\r\n for line in file:\r\n f.write(line)\r\n\r\n # 检查sdf文件\r\n if file_type == \"sdf\":\r\n suppl = Chem.SDMolSupplier(file_path)\r\n mols = [Chem.MolToSmiles(mol) for mol in suppl if mol]\r\n\r\n # 如果sdf文件中没有合法的分子就舍弃\r\n if len(mols) > 0:\r\n dic[\"status\"] = 200\r\n # 如果sdf文件中没有一个合法的分子\r\n elif len(mols) > 100000:\r\n dic[\"error_msg\"] = \"Too much molecule (>100000) in the file for computation\"\r\n else:\r\n dic[\"error_msg\"] = \"SDF file must contain at least one valid mol\"\r\n\r\n # 检查csv文件\r\n elif file_type == \"csv\":\r\n\r\n df0 = pd.read_csv(file_path)\r\n # 如果文件中不包含smiles列,就返回错误\r\n if \"smiles\" not in df0.columns:\r\n dic[\"error_msg\"] = \"A column with the header named ' smiles ' not in CSV file!\"\r\n\r\n else:\r\n PandasTools.AddMoleculeColumnToFrame(df0, smilesCol=\"smiles\")\r\n df0 = df0[~df0.ROMol.isnull()]\r\n PandasTools.RemoveSaltsFromFrame(df0)\r\n del df0[\"ROMol\"]\r\n\r\n # 如果文件中没有一个合法的分子\r\n if len(df0) == 0:\r\n dic[\"error_msg\"] = \"CSV file must contain at least one valid molecule\"\r\n elif len(df0) > 100000:\r\n dic[\"error_msg\"] = \"Too much molecule (>100000) in the file for computation\"\r\n else:\r\n dic[\"status\"] = 200\r\n\r\n df0 = None\r\n\r\n # 双重保险\r\n else:\r\n dic[\"error_msg\"] = \"Input file is not sdf file or csv file\"\r\n\r\n # smiles 模式\r\n else:\r\n data = json.loads(request.body)\r\n if \"drawing\" in data:\r\n drawing = data[\"drawing\"]\r\n print(\"drawing\", drawing)\r\n drawing_path = os.path.join(Base_dir, r\"static/file/upload_dir/drawing.mol\")\r\n\r\n # 先把文件写入\r\n with open(drawing_path, \"w\") as f:\r\n for line in drawing:\r\n f.write(line)\r\n\r\n try:\r\n mol = Chem.MolFromMolFile(drawing_path)\r\n print(\"drawing\", Chem.MolToSmiles(mol))\r\n dic[\"status\"] = 200\r\n\r\n except:\r\n dic[\"error_msg\"] = \"please draw a correct molecule\"\r\n\r\n else:\r\n smiles = data[\"smiles\"]\r\n print(\"smiles-->\", smiles)\r\n mol = Chem.MolFromSmiles(smiles)\r\n try:\r\n smiles1 = Chem.MolToSmiles(mol)\r\n dic[\"status\"] = 200\r\n\r\n except:\r\n dic[\"error_msg\"] = \"please input correct smiles\"\r\n\r\n return HttpResponse(json.dumps(dic))\r\n\r\n else:\r\n result_dict = {\r\n \"invalid_smiles\": None,\r\n \"index_result\": None,\r\n \"result\": [],\r\n \"smiles_list\": [],\r\n \"mol_graph\": [],\r\n }\r\n if request.method == \"POST\":\r\n file = request.FILES.get(\"file\")\r\n smiles = request.POST.get(\"smiles\", \"\")\r\n drawing = request.POST.get(\"drawing\", \"\")\r\n print(\"smiles\", smiles, \"file\", file)\r\n print(\"drawing\", drawing)\r\n\r\n if drawing:\r\n\r\n drawing_path = os.path.join(Base_dir, r\"static/file/upload_dir/drawing1.mol\")\r\n\r\n # 先把文件写入\r\n with open(drawing_path, \"w\") as f:\r\n for line in drawing:\r\n f.write(line)\r\n\r\n mol = Chem.MolFromMolFile(drawing_path)\r\n smiles = Chem.MolToSmiles(mol)\r\n print(\"smiles\", smiles)\r\n\r\n try:\r\n file_name = file.name\r\n file_type = file_name.split(\".\")[-1]\r\n print(\"file_name\", file_name, \"file_type\", file_type)\r\n file_path = os.path.join(Base_dir, r\"static/file/upload_dir\", file_name)\r\n # 先把文件写入\r\n with open(file_path, \"wb\") as f:\r\n for line in file:\r\n f.write(line)\r\n\r\n if file_type == \"sdf\":\r\n suppl = Chem.SDMolSupplier(file_path)\r\n mols = [Chem.MolToSmiles(mol) for mol in suppl if mol]\r\n df = pd.DataFrame(mols, columns=[\"smiles\"])\r\n\r\n elif file_type == \"csv\":\r\n df0 = pd.read_csv(file_path)\r\n df = pd.DataFrame(df0[\"smiles\"])\r\n except:\r\n df = pd.DataFrame([smiles], columns=[\"smiles\"])\r\n\r\n (result_df, index_result, invalid_smiles, length_invilid_fold, ) = ml_model.model_output(df)\r\n result_dict[\"smiles_list\"] = [_ for _ in result_df[\"canonical_smiles\"]]\r\n print(\"check\", result_dict[\"smiles_list\"])\r\n for _, smi in enumerate(result_dict[\"smiles_list\"]):\r\n mol = Chem.MolFromSmiles(smi)\r\n d2d = Draw.MolDraw2DSVG(300, 210)\r\n d2d.DrawMolecule(mol)\r\n d2d.FinishDrawing()\r\n text = d2d.GetDrawingText()\r\n text = text.replace(\"fill:#FFFFFF;\", \"fill:transparent;\")\r\n text_index = text.find(r\"\") - 2\r\n text = (\r\n text[:text_index]\r\n + \"style=' width: 100%; max-width: 300px; height: auto; '\"\r\n + text[text_index:]\r\n )\r\n result_dict[\"mol_graph\"].append(text)\r\n\r\n result_dict[\"result\"] = result_df.values[:, -204:].tolist()\r\n\r\n return_dict = {\r\n \"item_result\": [{\"smiles\": t[0], \"mol_graph\": t[1], \"result\": t[2]} for t in zip(result_dict[\"smiles_list\"], result_dict[\"mol_graph\"], result_dict[\"result\"],)],\r\n }\r\n return_dict[\"index_result\"] = index_result\r\n return_dict[\"invalid_smiles\"] = invalid_smiles\r\n return_dict[\"length_invilid_fold\"] = length_invilid_fold\r\n print(\"return_dict\", return_dict)\r\n return render(request, \"result.html\", return_dict)\r\n\r\ndef molecule(request):\r\n print(\"body-->\", request.body)\r\n print(\"request.is_ajax()\", request.is_ajax())\r\n\r\n # explain part\r\n if request.is_ajax():\r\n if request.method == \"POST\":\r\n data = json.loads(request.body)\r\n print(\"data\", data)\r\n smiles = data[\"smiles\"]\r\n mode = \"group\"\r\n\r\n if \"group\" in data:\r\n group = data[\"group\"]\r\n print(\"group\", group)\r\n mode_select = float(data[\"mode_select\"])\r\n df_explain = df_uniprot.copy()\r\n\r\n predict_result_str = data[\"predict_result_str\"]\r\n print(\"predict_result_str\", predict_result_str)\r\n predict_result = [round(float(_), 3) for _ in predict_result_str.split(\",\")]\r\n print(\"predict_result\", predict_result)\r\n df_explain[\"predict_result\"] = predict_result\r\n df_explain = df_explain[df_explain[\"predict_result\"] >= mode_select]\r\n del df_explain[\"predict_result\"]\r\n\r\n if group != \"All\":\r\n df_explain = df_explain[df_explain[\"Group\"] == group]\r\n\r\n else:\r\n uniprot = data[\"uniprot\"]\r\n print(\"uniprot\", uniprot)\r\n df_explain = df_uniprot.copy()\r\n df_explain = df_explain[df_explain[\"UniProt\"] == uniprot]\r\n mode = \"uniprot\"\r\n\r\n iter_result = []\r\n for _ in df_explain.index:\r\n uniprot = df_explain.loc[_, \"UniProt\"]\r\n group = df_explain.loc[_, \"Group\"]\r\n family = df_explain.loc[_, \"Family\"]\r\n name = df_explain.loc[_, \"Name\"]\r\n symbol = df_explain.loc[_, \"Symbol\"]\r\n explain, prediction = GNN_explain.explain_GNN(smiles, uniprot, mode)\r\n iter_result.append(\r\n {\r\n \"uniprot\": uniprot,\r\n \"group\": group,\r\n \"name\": name,\r\n \"symbol\": symbol,\r\n \"family\": family,\r\n \"explain\": explain,\r\n \"prediction\": round(prediction, 3), # 如果有需要请在此处添加\r\n }\r\n )\r\n\r\n return HttpResponse(json.dumps(iter_result))\r\n\r\n else:\r\n if request.method == \"POST\":\r\n smiles = request.POST.get(\"smiles\", \"\")\r\n result = request.POST.get(\"result\", \"\")\r\n index = request.POST.get(\"index\", \"\")\r\n\r\n result_float = [float(_) for _ in result[1:-1].split(\",\")]\r\n\r\n df_result = df_uniprot.copy()\r\n df_result[\"Prediction\"] = np.array(result_float)\r\n\r\n # group_div\r\n df_result[\"Count\"] = np.array(np.array(df_result[\"Prediction\"]) >= 0.5).astype(np.int)\r\n df_result.columns = [\r\n \"UniProt\",\r\n \"Group\",\r\n \"Family\",\r\n \"Name\",\r\n \"Symbol\",\r\n \"Prediction\",\r\n \"Count\",\r\n ]\r\n\r\n df_result_count = pd.DataFrame(df_result.groupby(\"Group\").sum()[\"Count\"])\r\n df_result_count = df_result_count.reset_index()\r\n\r\n group_array = [_ for _ in df_result_count[\"Group\"]]\r\n group_count = [_ for _ in df_result_count[\"Count\"]]\r\n\r\n # uniprot_div\r\n uniprot_fig = px.sunburst(\r\n df_result,\r\n path=[\"Group\", \"Family\", \"UniProt\"],\r\n values=\"Prediction\",\r\n # width=800, height=800,\r\n color_continuous_scale=\"reds\",\r\n color=\"Prediction\",\r\n branchvalues=\"total\",\r\n )\r\n uniprot_div = uniprot_fig.to_html(full_html=False, default_height=711, default_width=728)\r\n\r\n mol = Chem.MolFromSmiles(smiles)\r\n d2d = Draw.MolDraw2DSVG(360, 324)\r\n d2d.DrawMolecule(mol)\r\n d2d.FinishDrawing()\r\n mol_graph = d2d.GetDrawingText()\r\n mol_graph = mol_graph.replace(\"fill:#FFFFFF;\", \"fill:transparent;\")\r\n # mol_graph_index=mol_graph.find(r\"\")-2\r\n # mol_graph = mol_graph[:mol_graph_index]+\"style=' width: 100%; max-width: 480px; height: auto; '\"+mol_graph[mol_graph_index:]\r\n\r\n result_list = []\r\n for _ in df_result.index:\r\n uniprot = df_result.loc[_, \"UniProt\"]\r\n group = df_result.loc[_, \"Group\"]\r\n family = df_result.loc[_, \"Family\"]\r\n prediction = round(df_result.loc[_, \"Prediction\"], 3)\r\n name = df_result.loc[_, \"Name\"]\r\n symbol = df_result.loc[_, \"Symbol\"]\r\n result_list.append(\r\n {\r\n \"uniprot\": uniprot,\r\n \"group\": group,\r\n \"family\": family,\r\n \"name\": name,\r\n \"symbol\": symbol,\r\n \"prediction\": prediction,\r\n }\r\n )\r\n\r\n predict_result = [round(_, 3) for _ in df_result[\"Prediction\"]]\r\n predict_result_str = \"\"\r\n for idx, num in enumerate(predict_result):\r\n if idx < len(predict_result) - 1:\r\n predict_result_str += str(num) + \",\"\r\n else:\r\n predict_result_str += str(num)\r\n\r\n df_download = df_result[\r\n [\"UniProt\", \"Group\", \"Family\", \"Name\", \"Symbol\", \"Prediction\"]\r\n ]\r\n df_download[\"Canonical_Smiles\"] = smiles\r\n df_download = df_download[\r\n [\r\n \"Canonical_Smiles\",\r\n \"UniProt\",\r\n \"Group\",\r\n \"Family\",\r\n \"Name\",\r\n \"Symbol\",\r\n \"Prediction\",\r\n ]\r\n ]\r\n df_download[\"Prediction\"] = df_download[\"Prediction\"].round(decimals=4)\r\n single_length = len(os.listdir(os.path.join(Base_dir, r\"static/file/download_file/single_mol\")))\r\n df_download.to_csv(os.path.join(Base_dir, r\"static/file/download_file/single_mol/result_%d.csv\"%(single_length),), index=False,)\r\n\r\n return render(\r\n request,\r\n \"molecule.html\",\r\n {\r\n \"index\": index,\r\n \"smiles\": smiles,\r\n \"mol_graph\": mol_graph,\r\n \"group_array\": group_array,\r\n \"group_count\": group_count,\r\n \"uniprot_div\": uniprot_div,\r\n \"result\": result_list,\r\n \"predict_result_str\": predict_result_str,\r\n \"single_length\": single_length,\r\n },\r\n )\r\n\r\ndef help(request):\r\n return render(request, \"help.html\")\r\n\r\ndef contact(request):\r\n return render(request, \"contact.html\")\r\n\r\ndef trysth(request):\r\n return render(request, \"trysth.html\")\r\n","repo_name":"LingjieBao1998/kip","sub_path":"mysite2/kinase_predictor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13327532772","text":"import pandas as pd\nanimals = ['Tiger', 'Bear', 'Moose']\ns = pd.Series(animals)\nprint(s)\n\nnumbers = [1,2,3]\nt = pd.Series(numbers)\nprint(t)\n\nsports = {'Archery': 'Bhutan',\n 'Golf': 'Scotland',\n 'Sumo': 'Japan',\n 'Taekwondo': 'South Korea'}\nv = pd.Series(sports)\nresult = v.loc['Golf']\nprint(v)\nprint(result)\nthird = v.iloc[3]\nprint(third)","repo_name":"ebtrader/test","sub_path":"test_pandas.py","file_name":"test_pandas.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23318843673","text":"import cx_Oracle\nimport csv\nimport os\nfrom pathlib import Path\nimport requests\nfrom datetime import datetime, timedelta\nimport threading\nimport time\nimport numpy as np\nimport pandas as pd\nimport asyncio\n\n\noracle_client = \"C:\\instantclient_19_5\"\nos.environ[\"ORACLE_HOME\"] = oracle_client\nos.environ[\"PATH\"] = oracle_client+os.pathsep+os.environ[\"PATH\"]\nos.environ[\"NLS_LANG\"] = \"AMERICAN_AMERICA.TH8TISASCII\"\n\ntime_start = datetime.now()\n\n\ndef printttime(txt):\n dateTimeObj = datetime.now()\n timestampStr = dateTimeObj.strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n now = datetime.now()\n duration = now - time_start\n print(timestampStr + ' ' + str(duration.total_seconds()) + ' ' + txt)\n\n\nclass CLS_SO_ORDER_COSTSHEET(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n def run(self):\n SO_ORDER_COSTSHEET()\n\n\ndef SO_ORDER_COSTSHEET():\n my_dsn = cx_Oracle.makedsn(\"172.16.6.83\", port=1521, sid=\"NYTG\")\n conn = cx_Oracle.connect(user=\"nygm\", password=\"nygm\",\n dsn=my_dsn, encoding=\"UTF-8\", nencoding=\"UTF-8\")\n\n printttime('SO_ORDER_COSTSHEET Start')\n\n sql = \"\"\"SELECT SO_NO,SO_YEAR,OU_CODE,SO_NO_DOC,COST_SHEETID FROM OE_SO \"\"\"\n\n df = pd.read_sql_query(sql, conn)\n\n df.to_excel(\n r'C:\\QVD_DATA\\COM_GARMENT\\NYG\\SO_ORDER_COSTSHEET.xlsx', index=False)\n\n\n df.to_excel(\n r'C:\\QVDatacenter\\SCM\\GARMENT\\NYG\\SO_ORDER_COSTSHEET.xlsx', index=False)\n\n conn.close()\n\n printttime('SO_ORDER_COSTSHEET Complete')\n\n\nthreads = []\n\nthread1 = CLS_SO_ORDER_COSTSHEET()\nthread1.start()\nthreads.append(thread1)\n\n\n\nfor t in threads:\n t.join()\n\n\n\n","repo_name":"mosjikung/py_schedule","sub_path":"qvd/nyg/costsheet/costsheet.py","file_name":"costsheet.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43973941421","text":"import numpy as np\nimport code_of_project.helper_methods as helper\nfrom code_of_project.prediction_object.PredictionObject import PredictionObject\nimport logging\n\n\nclass IgControllerAbstract:\n '''\n\n '''\n def __init__(self, model_to_explain, data, arguments, **kwargs):\n self.data = data\n self.prediction_object = PredictionObject(model_to_explain=model_to_explain,\n arguments=arguments)\n self.allowed_error_approximation = arguments.allowed_error_approximation\n self.num_steps_to_calculate_path_integral = arguments.num_steps_to_calculate_path_integral\n self.kwargs = kwargs\n\n def explain_single_data_to_explain(self, data_to_explain_index, baseline_indices):\n '''\n Calculate IG between a data to explain and a list of baselines\n :param data_to_explain_index: int\n index of the data point to be explained\n :param baseline_indices: [int]\n list with indices of data points used as baseline\n :return: dictionary with IG's between baselines and data to explain\n '''\n results_baselines = {}\n for baseline_index in baseline_indices:\n results_baselines['baseline_index_{}'.format(baseline_index)] = \\\n self.calculate_ig_from_baseline_to_data_to_explain(\n baseline_index=baseline_index,\n data_to_explain_index=data_to_explain_index\n )\n return results_baselines\n\n def calculate_ig_from_baseline_to_data_to_explain(self, baseline_index, data_to_explain_index):\n data_to_explain = self.data.iloc[data_to_explain_index]\n baseline = self.data.iloc[baseline_index]\n\n result_integrated_gradient_dic = \\\n self.calculate_integrated_gradient(baseline, data_to_explain)\n\n result_integrated_gradient_dic['data_to_explain_index'] = data_to_explain_index\n result_integrated_gradient_dic['baseline_index'] = baseline_index\n\n return result_integrated_gradient_dic\n\n def calculate_integrated_gradient(self, baseline, data_to_explain):\n raise NotImplementedError('This method has to be defined in child class')\n\n def error_of_approximation(self, ig_weight, dif_pred, m_steps):\n integral = np.abs(np.sum(ig_weight))\n error = np.abs(dif_pred - integral)\n if error >= self.allowed_error_approximation:\n m_steps_new = m_steps * 2\n logging.debug('f\"Integration approximation is with a error of {error}'\n ' to high, number m_steps = {m_steps} is risen to {m_steps_new} for next try')\n return error, m_steps_new\n else:\n return error, m_steps\n\n\ndef interpolate(data_to_explain, baseline, alphas):\n raise NotImplementedError('This method has to be defined in concrete class')\n\n\ndef compute_gradients(interpolation, model):\n raise NotImplementedError('This method has to be defined in concrete class')\n\n\ndef integral_approximation(gradients):\n raise NotImplementedError('This method has to be defined in concrete class')\n\n\ndef weighted_ig(data_to_explain, baseline, ig):\n raise NotImplementedError('This method has to be defined in concrete class')\n\n","repo_name":"wwjbrugger/stingrai-widget","sub_path":"code_of_project/calculate_ig/IG_Controller_Classes/IGControllerAbstract.py","file_name":"IGControllerAbstract.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73792221175","text":"import setuptools\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='code4th-datasource-air4thai',\n version='0.2',\n description='A datasource package for retrieving data from http://air4thai.pcd.go.th',\n author='Pattarawat Chormai',\n author_email='pat.chormai@gmail.com',\n license='MIT',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/pypa/sampleproject\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"codeforthailand/datasource-air4thai","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"37314276135","text":"import socket\nimport os\nfrom _thread import *\nimport math\n\nServerSideSocket = socket.socket()\nhost = '127.0.0.1'\nport = 3000\nThreadCount = 0\nIterator = 1\nN = 5\nclientMap = {}\nqueue = []\nvotes=0\ntry:\n ServerSideSocket.bind((host, port))\nexcept socket.error as e:\n print(str(e))\n\nprint('Socket is listening..')\nServerSideSocket.listen(N)\n\ndef multi_threaded_client(connection):\n global Iterator\n global clientMap\n global queue\n global resource\n connection.send(str.encode('Server is working\\n'))\n while True:\n data = connection.recv(2048)\n lines = data.decode('utf-8').split(\"\\n\")\n for line in lines:\n if( len(line) > 0 ):\n # print(line)\n decodedData = line.split(\"\\t\")\n\n if decodedData[0] == \"REG\":\n pid = decodedData[1]\n # priority = decodedData[2]\n clientMap[pid] = {\n \"pid\": pid,\n \"connectionNumber\": Iterator,\n \"connection\": connection,\n \"inUse\" : False\n }\n response = 'Server connected: ' + data.decode('utf-8') + \"\\n\"\n connection.send(str.encode(response))\n connection.send(str.encode(\"connectionNumber\\t\" + str(Iterator) + \"\\n\"))\n data = \"ThreadCount\\t\" + str(ThreadCount)+ \"\\n\"\n for individualConnection in clientMap:\n clientMap[ individualConnection ][\"connection\"].send( str.encode(data) )\n Iterator += 1\n for requestEntry in queue:\n connection.send( requestEntry )\n \n\n elif decodedData[0] == \"REQ\":\n # broadcasting\n # resource = decodedData[1]\n clientMap[pid][\"inUse\"] = True\n for individualConnection in clientMap:\n clientMap[ individualConnection ][\"connection\"].send( data )\n queue.append(data)\n\n \n elif decodedData[0] == \"OK\":\n individualConnection = decodedData[1]\n clientMap[ individualConnection ][\"connection\"].send( data )\n\n \n\n connection.close()\n\n\nwhile True:\n Client, address = ServerSideSocket.accept()\n print('Connected to: ' + address[0] + ':' + str(address[1]))\n start_new_thread(multi_threaded_client, (Client, ))\n ThreadCount += 1\n # print('Thread Number: ' + str(ThreadCount))\nServerSideSocket.close()\n\n\n# Ref: https://laptrinhx.com/python-multithreading-example-create-socket-server-with-multiple-clients-4022956390/#:~:text=Python%20Multithreading%20Example%3A%20Create%20Socket%20Server%20with%20Multiple,...%206%20Client-Side%20Multithreading%20Full%20Code%20Example%20","repo_name":"himanisirohiya/Distributed-Systems","sub_path":"Ordering_Events/DistributedLocking/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21875255342","text":"#from pprint import pprint\r\ndef algo(w, v, n, C):\r\n arr = [[None] * (C + 1) for i in range(n)]\r\n for i in range(n):\r\n for c in range(0, C + 1):\r\n if i == 0:\r\n arr[i][c] = 0 if w[i] > c else v[i]\r\n else:\r\n if w[i] > c:\r\n arr[i][c] = arr[i - 1][c]\r\n else:\r\n arr[i][c] = max(arr[i - 1][c], arr[i - 1][c - w[i]] + v[i])\r\n #pprint(arr)\r\n return arr[-1][-1]\r\nS, N = map(int, input().rstrip().split())\r\nw = []\r\nv = []\r\nfor i in range(N):\r\n inp = input().rstrip().split()\r\n w.append(int(inp[0]))\r\n v.append(int(inp[1]))\r\nprint(algo(w, v, N, S))\r\n","repo_name":"djdjhappy/Learning","sub_path":"2018-8-5/Knapsack.py","file_name":"Knapsack.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6051149942","text":"import numpy as np\n\nprint(\"kick\")\nk = input()\nx = np.array([0])\nwhile 1:\n print(\"give me number\")\n a = raw_input()\n if a == 'q' or a == 'Q':\n break ;\n x = np.append(x, int(a))\n\nx = np.delete(x, 0)\n\nx = np.sort(x)\n\nm = np.array([0])\nfor i in range(k):\n ll = x.size -1\n m = np.append(m, x[0])\n m = np.append(m, x[ll])\n x = np.delete(x, ll)\n x = np.delete(x, 0)\n\nm = np.delete(m, 0)\nprint(x)\nprint(m)\n","repo_name":"wallace96813/wallace96813","sub_path":"RemoveOutliers.ipynb.py","file_name":"RemoveOutliers.ipynb.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28903940203","text":"import numpy as np\nfrom numpy import *\nimport cv2\nimport foreground_separation.ImgProcessingHelper as img_helper\nimport foreground_separation.ClusteringHelper as clustering_helper\n\n\ndef cluster_color_dict(k):\n # b, g, r\n dict = [\n ['red', [0, 0, 225]],\n ['green', [0, 255, 0]],\n ['blue', [255, 0, 0]],\n ['white', [255, 255, 255]],\n ['yellow', [0, 255, 255]],\n ['cerise', [255, 0, 255]],\n ['black', [0, 0, 0]],\n ['orange', [51, 153, 255]],\n ['purple', [102, 0, 102]],\n ['cyan', [204, 204, 51]]\n ]\n return dict[k]\n\n\ndef scale_range(input, in_min, in_max, out_min, out_max):\n return ((input - in_min) / (in_max - in_min)) * (out_max - out_min) + out_min;\n\n\ndef generate_labelled_img(labels, shape):\n out = np.empty(shape, dtype=uint8)\n for i in range(shape[0]):\n for j in range(shape[1]):\n color_code = cluster_color_dict(labels[i * shape[1] + j])[1]\n rgb_color = np.uint8([[color_code]])\n out[i, j] = img_helper.cvt_gbr_2_hsv(rgb_color)[0, 0]\n return out\n\n\ndef foreground_selection(k):\n print('Please select the foreground. Use white space to separate the numbers.')\n for i in range(k):\n print('%d. %s' % (i, cluster_color_dict(i)[0]))\n return [int(x) for x in input().split()]\n\n\ndef is_point_in_foreground(row, col, foreground_clusters, labels, img_size):\n upper = row\n lower = row\n left = col\n right = col\n\n while upper >= 0 and labels[upper * img_size[1] + col] not in foreground_clusters:\n upper -= 1\n while lower < img_size[0] and labels[lower * img_size[1] + col] not in foreground_clusters:\n lower += 1\n while left >= 0 and labels[row * img_size[1] + left] not in foreground_clusters:\n left -= 1\n while right < img_size[1] and labels[row * img_size[1] + right] not in foreground_clusters:\n right += 1\n\n return upper >= 0 and lower < img_size[0] and left >= 0 and right < img_size[1]\n\n\ndef is_foreground(row, col, foreground_clusters, labels, img_size):\n return labels[row * img_size[1] + col] in foreground_clusters\n\n\ndef generate_masked_img(foreground_clusters, labels, shape):\n out = np.zeros(shape, dtype=uint8)\n img_size = [shape[0], shape[1]]\n for i in range(shape[0]):\n for j in range(shape[1]):\n if is_foreground(i, j, foreground_clusters, labels, img_size):\n out[i, j] = [255, 255, 255]\n return out\n\n\n# edge_len must be odd\ndef get_block(row, col, matrix, edge_len):\n size = matrix.shape\n upper = row - edge_len // 2\n lower = row + edge_len // 2\n left = col - edge_len // 2\n right = col + edge_len // 2\n\n if upper < 0:\n upper = 0\n if lower >= size[0]:\n lower = size[0] - 1\n if left < 0:\n left = 0\n if right >= size[1]:\n right = size[1] - 1\n\n return matrix[upper:lower+1, left:right+1].copy()\n\n\ndef smoothing_masked_img(masked_img_gray, img_size):\n out = np.zeros(img_size, dtype=np.uint8)\n block_edge_len = 5\n for i in range(img_size[0]):\n for j in range(img_size[1]):\n block = get_block(i, j, masked_img_gray, edge_len=15)\n block_size = block.shape\n out[i, j] = (sum(sum(arr) for arr in block) - masked_img_gray[i, j]) // (block_size[0] * block_size[1] - 1)\n return out\n\n\ndef generate_img_contoured_foreground(img, h):\n img = img_helper.reduce_noise(img_helper.cvt_gbr_2_grayscale(img), h)\n _, contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n out = np.zeros_like(img)\n cv2.drawContours(out, contours, -1, 255, thickness=-1)\n out = img_helper.reduce_noise(out, h)\n return out\n\n\ndef separate_foreground(img, k=8, coord_features_scale=0.25, clustering_max_iter=1000, clustering_n_init=10, denoise_level=50):\n img_hsv = img_helper.cvt_gbr_2_hsv(img)\n s = img_hsv.shape\n\n img_hsv = img_helper.features_append_coord(img_hsv, coord_features_scale)\n\n labels = clustering_helper.k_means_clustering(\n img_hsv,\n shape=[s[0], s[1], s[2] + 2],\n n_clusters=k,\n max_iter=clustering_max_iter,\n n_init=clustering_n_init)\n\n labelled_img = generate_labelled_img(labels, s)\n labelled_img = img_helper.cvt_hsv_2_gbr(labelled_img)\n\n img_helper.save_img(labelled_img, 'result.png')\n img_helper.display_img_file('result.png')\n\n foreground_clusters = foreground_selection(k)\n masked_img = generate_masked_img(foreground_clusters, labels, s)\n masked_img_gray = generate_img_contoured_foreground(masked_img, denoise_level)\n masked_img_gray = smoothing_masked_img(masked_img_gray, [s[0], s[1]])\n\n img_helper.save_img(masked_img_gray, 'masked_result.png')\n img_helper.display_img_file('masked_result.png')\n\n return masked_img_gray\n\n\nif __name__ == '__main__':\n img = img_helper.load_img('../data/selfie.jpeg', [600, 800])\n # separate_foreground('../data/selfie.jpeg')\n separate_foreground(img, k=8)\n","repo_name":"fabulousdj/cse5523project","sub_path":"foreground_separation/ForegroundSeparator.py","file_name":"ForegroundSeparator.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"35986766693","text":"# exercise!\nfrom random import randint\nfrom sys import argv\n\nfirst = argv[1]\nlast = argv[2]\n\nran = str(randint(int(first), int(last)))\n\nwhile True:\n ans = input(f\"Guess the number between {first} and {last}: \")\n\n if ans == ran:\n print(\"You got it!\")\n break\n else:\n print(\"Try again!\")\n","repo_name":"Ann-pixel/learn-python","sub_path":"random_game.py","file_name":"random_game.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28886089193","text":"from collections import deque\n\n\ndef time_in_seconds(time_data):\n result = 0\n hour, minutes, seconds = time_data.split(':')\n result += (int(hour) * 60 * 60) + (int(minutes) * 60) + int(seconds)\n return result\n\n\ndef time_converter(seconds):\n seconds %= 24 * 60 * 60\n hours = seconds // (60 * 60)\n seconds %= (60 * 60)\n minutes = seconds // 60\n seconds %= 60\n return f'{hours:02d}:{minutes:02d}:{seconds:02d}'\n\n\nrobots_data = input().split(';')\n\nrobots = {}\nrobots_processing_times = {}\ntime = time_in_seconds(input())\nproducts = deque()\n\nfor robot in robots_data:\n name, process_time = robot.split('-')\n robots[name] = int(process_time)\n robots_processing_times[name] = -1\n\ncurrent_product = input()\nwhile current_product != 'End':\n products.append(current_product)\n current_product = input()\n\nwhile products:\n time += 1\n current_product = products.popleft()\n\n for name, busy_time in robots_processing_times.items():\n if time >= busy_time:\n print(f\"{name} - {current_product} [{time_converter(time)}]\")\n robots_processing_times[name] = time + robots[name]\n break\n else:\n products.append(current_product)\n","repo_name":"sasho132/softuni-courses","sub_path":"python-advanced-may-2022/lists_as_stacks_and_queues/12_robotics.py","file_name":"12_robotics.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1797587438","text":"import urllib.request\nimport json\nimport collections\nimport time\nimport hashlib\nimport hmac\nimport tkinter as tk\n\nnonce1 = str(time.time())\napikey = ''\napisecret = ''\ncurrency = 'USD'\nurl = 'https://bittrex.com/api/v1.1/account/getbalances?apikey='+apikey+'&nonce='+nonce1#+'¤cy='+currency\nsign=hmac.new(bytes(apisecret,encoding='utf-8'),bytes(url,encoding='utf-8'),hashlib.sha512).hexdigest()\n\nreq = urllib.request.Request(url, headers={\"apisign\" : sign})\naccountreq=urllib.request.urlopen(req).read()\naccountreqjson=json.loads(accountreq)\n\n#print(accountreqjson)\n\nurl = 'https://bittrex.com/api/v1.1/account/getbalance?apikey='+apikey+'&nonce='+nonce1+'¤cy='+currency\nsign=hmac.new(bytes(apisecret,encoding='utf-8'),bytes(url,encoding='utf-8'),hashlib.sha512).hexdigest()\n\nreq = urllib.request.Request(url, headers={\"apisign\" : sign})\naccountreq1=urllib.request.urlopen(req).read()\naccountreqjson1=json.loads(accountreq1)\n\n#print(accountreqjson1)\n\nwin = tk.Tk()\nwin.title(\"Bittrex Account Holdings\")\nwin.geometry(\"900x600\")\nframe1 = tk.Frame(win)\n\n\nframe1.grid(column = 0, row = 0)\nwith open('data.txt', 'w') as outfile:\n json.dump(accountreqjson, outfile)\n\n\nwith open('data.txt') as json_file:\n data = json.load(json_file)\n#print(data[\"result\"][0][\"Currency\"])\n\nfor i in data[\"result\"]:\n currency = i[\"Currency\"]\n #print(i[\"Currency\"])\n label1 = tk.Label(frame1, text = currency, font=(\"Times New Roman\", 20))\n label1.pack()\n\nframe2 = tk.Frame(win)\nframe2.grid(column = 1, row = 0)\n\nfor i in data[\"result\"]:\n balance = i['Balance']\n #print(i[\"Balance\"])\n label2 = tk.Label(frame2, text = balance, font=(\"Times New Roman\", 20))\n label2.pack()\n\nwin.mainloop()\n","repo_name":"lucasg321/BittrexAPI-Display","sub_path":"PythonTest/PythonTest/PythonTest.py","file_name":"PythonTest.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13985991064","text":"from typing import Callable\n\nimport numpy as np\n\n\ndef incremental_search(\n f: Callable[[float], float],\n max_step: float,\n starting_point: float = 1e-18,\n step: float = 0.01,\n max_iter_equal: int = 50,\n) -> tuple[float, float]:\n \"\"\"It uses accelerated incremental search to find the upper limit where a local\n maximum is contained.\n\n Parameters\n ----------\n f: (float) -> float\n The objective function.\n max_step: float\n The maximum possible step in the gradient direction.\n starting_point: float, optional\n The starting point of the incremental search.\n step: float, optional\n The step size for incremental search.\n\n Returns\n -------\n float\n The lower bound of the estimated interval.\n float\n The upper bound of the estimated interval.\n \"\"\"\n\n # Initialize\n lower = 0\n upper = starting_point\n prev_val = f(starting_point)\n i = 1\n equal_counter = 0\n shift = lambda i: (i - 1) * step\n\n # Start search\n while shift(i) < max_step:\n # Update point\n current_point = starting_point + shift(i)\n current_val = f(current_point)\n\n # Check for local maximum\n if current_val < prev_val:\n break\n\n # Counter if the function is constant (avoid infinite loops)\n elif current_val == prev_val:\n equal_counter += 1\n if equal_counter > max_iter_equal:\n break\n\n prev_val = current_val\n i += 1\n\n lower = starting_point + shift(i - 1)\n upper = starting_point + shift(i + 1)\n return lower, upper\n\n\ndef golden_section_search(\n f: Callable[[float], float], bounds: tuple[float, float], tol: float = 1e-7\n) -> tuple[float, float]:\n \"\"\"It uses Golden section search to shrink the interval where the maximum is\n contained.\n\n Parameters\n ----------\n f: (float) -> float\n The objective function.\n bounds: tuple[float, float]\n The interval where a local maximum is guaranteed.\n tol: float\n The relative width of the resulting interval.\n\n Returns\n -------\n float\n The lower bound of the estimated interval.\n float\n The upper bound of the estimated interval.\n \"\"\"\n\n lower, upper = bounds\n if not lower < upper:\n raise ValueError(\"Invalid interval.\")\n\n # Estimate number of iterations\n ratio = 2 / (1 + np.sqrt(5))\n n = np.ceil(np.log(tol) / np.log(ratio))\n\n # Initialize values\n x1 = lower + (1 - ratio) * (upper - lower)\n x2 = upper - (1 - ratio) * (upper - lower)\n i = 1\n while i <= n:\n if f(x1) < f(x2):\n lower = x1\n x1 = x2\n x2 = upper - (1 - ratio) * (upper - lower)\n else:\n upper = x2\n x2 = x1\n x1 = lower + (1 - ratio) * (upper - lower)\n i += 1\n return (lower, upper)\n\n\ndef gradient(\n f: Callable[[np.ndarray], float], point: np.ndarray, delta: float = 1e-9\n) -> np.ndarray:\n \"\"\"A finite difference gradient approximation.\n\n Parameters\n ----------\n f: (numpy.ndarray) -> float\n The function to calculate the gradient of.\n point: numpy.ndarray\n The point to evaluate the gradient at.\n delta: float\n The step to make on each direction.\n\n Returns\n -------\n numpy.ndarray\n The gradient vector.\n \"\"\"\n\n n = point.shape[0]\n grad = np.zeros(n)\n f_val = f(point)\n\n for i in range(n):\n shift = np.zeros(n)\n shift[i] = delta\n shifted_f = f(point + shift)\n grad[i] = (shifted_f - f_val) / delta\n return grad\n\n\ndef project(point: np.ndarray, intervals: np.ndarray) -> np.ndarray:\n \"\"\"It projects a point inside the input intervals.\n\n Parameters\n ----------\n point: numpy.ndarray\n The point in question.\n intervals: numpy.ndarray\n The domain intervals.\n\n Returns\n -------\n numpy.ndarray\n The projected point.\n \"\"\"\n\n dim = point.shape[0]\n for i in range(dim):\n lower = intervals[i, 0]\n upper = intervals[i, 1]\n if point[i] < lower:\n point[i] = lower\n elif point[i] > upper:\n point[i] = upper\n return point\n","repo_name":"Daples/opt-for-ML","sub_path":"project2/src/utils/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"22253154525","text":"\"\"\"ncvoter URL Configuration\"\"\"\nfrom django.urls import path, include\nfrom django.contrib import admin\nfrom django.conf import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include('voter.urls')),\n\n path('', include('drilldown.urls')),\n]\n\nif settings.ENVIRONMENT != \"production\":\n urlpatterns += [\n path('_qa/', include('qadashboard.urls')),\n ]\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n","repo_name":"NCVotes/voters-ingestor","sub_path":"ncvoter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"13973019878","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# See http://doc.qt.io/qt-5/qwebengineview.html#details\n\n# This class replace the deprecated QWebView (based on QtWebKit).\n# See:\n# - https://stackoverflow.com/questions/29055475/qwebview-or-qwebengineview\n# - https://wiki.qt.io/QtWebEngine/Porting_from_QtWebKit\n\nHTML = r'''\n\n\n \n Marked in the browser\n\n\n
    \n \n \n\n\n'''\n\n# The next two lines are a workaround to fix an issue with QWebEngineView (see https://github.com/ContinuumIO/anaconda-issues/issues/9199#issuecomment-383842265)\nimport ctypes\nctypes.CDLL(\"libGL.so.1\", mode=ctypes.RTLD_GLOBAL)\n\nimport sys\nfrom PySide6.QtCore import *\nfrom PySide6.QtWebEngineWidgets import QWebEngineView\nfrom PySide6.QtWidgets import QApplication\n\napp = QApplication(sys.argv)\n\nweb = QWebEngineView()\nweb.setHtml(HTML)\nweb.show()\n\n# The mainloop of the application. The event handling starts from this point.\n# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.\nexit_code = app.exec_()\n\n# The sys.exit() method ensures a clean exit.\n# The environment will be informed, how the application ended.\nsys.exit(exit_code)\n","repo_name":"jeremiedecock/snippets","sub_path":"python/pyside/pyside6/widget_QWebEngineView_with_marked_online.py","file_name":"widget_QWebEngineView_with_marked_online.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"} +{"seq_id":"28579453394","text":"import math\nimport numpy as np\nfrom scipy import optimize\nfrom .._shared.utils import skimage_deprecation, warn\n\n\ndef _check_data_dim(data, dim):\n if data.ndim != 2 or data.shape[1] != dim:\n raise ValueError('Input data must have shape (N, %d).' % dim)\n\n\ndef _check_data_atleast_2D(data):\n if data.ndim < 2 or data.shape[1] < 2:\n raise ValueError('Input data must be at least 2D.')\n\n\ndef _norm_along_axis(x, axis):\n \"\"\"NumPy < 1.8 does not support the `axis` argument for `np.linalg.norm`.\"\"\"\n return np.sqrt(np.einsum('ij,ij->i', x, x))\n\n\nclass BaseModel(object):\n\n def __init__(self):\n self.params = None\n\n\nclass LineModel(BaseModel):\n\n \"\"\"Total least squares estimator for 2D lines.\n\n Lines are parameterized using polar coordinates as functional model::\n\n dist = x * cos(theta) + y * sin(theta)\n\n This parameterization is able to model vertical lines in contrast to the\n standard line model ``y = a*x + b``.\n\n This estimator minimizes the squared distances from all points to the\n line::\n\n min{ sum((dist - x_i * cos(theta) + y_i * sin(theta))**2) }\n\n A minimum number of 2 points is required to solve for the parameters.\n\n **Deprecated class**. Use ``LineModelND`` instead.\n\n Attributes\n ----------\n params : tuple\n Line model parameters in the following order `dist`, `theta`.\n\n \"\"\"\n\n def __init__(self):\n self.params = None\n warn(skimage_deprecation('`LineModel` is deprecated, '\n 'use `LineModelND` instead.'))\n\n def estimate(self, data):\n \"\"\"Estimate line model from data using total least squares.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n X0 = data.mean(axis=0)\n\n if data.shape[0] == 2: # well determined\n theta = np.arctan2(data[1, 1] - data[0, 1],\n data[1, 0] - data[0, 0])\n elif data.shape[0] > 2: # over-determined\n data = data - X0\n # first principal component\n _, _, v = np.linalg.svd(data)\n theta = np.arctan2(v[0, 1], v[0, 0])\n else: # under-determined\n raise ValueError('At least 2 input points needed.')\n\n # angle perpendicular to line angle\n theta = (theta + np.pi / 2) % np.pi\n # line always passes through mean\n dist = X0[0] * math.cos(theta) + X0[1] * math.sin(theta)\n\n self.params = (dist, theta)\n\n return True\n\n def residuals(self, data):\n \"\"\"Determine residuals of data to model.\n\n For each point the shortest distance to the line is returned.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n residuals : (N, ) array\n Residual for each data point.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n dist, theta = self.params\n\n x = data[:, 0]\n y = data[:, 1]\n\n return dist - (x * math.cos(theta) + y * math.sin(theta))\n\n def predict_x(self, y, params=None):\n \"\"\"Predict x-coordinates using the estimated model.\n\n Parameters\n ----------\n y : array\n y-coordinates.\n params : (2, ) array, optional\n Optional custom parameter set.\n\n Returns\n -------\n x : array\n Predicted x-coordinates.\n\n \"\"\"\n\n if params is None:\n params = self.params\n dist, theta = params\n return (dist - y * math.sin(theta)) / math.cos(theta)\n\n def predict_y(self, x, params=None):\n \"\"\"Predict y-coordinates using the estimated model.\n\n Parameters\n ----------\n x : array\n x-coordinates.\n params : (2, ) array, optional\n Optional custom parameter set.\n\n Returns\n -------\n y : array\n Predicted y-coordinates.\n\n \"\"\"\n\n if params is None:\n params = self.params\n dist, theta = params\n return (dist - x * math.cos(theta)) / math.sin(theta)\n\n\nclass LineModelND(BaseModel):\n \"\"\"Total least squares estimator for N-dimensional lines.\n\n Lines are defined by a point (origin) and a unit vector (direction)\n according to the following vector equation::\n\n X = origin + lambda * direction\n\n Attributes\n ----------\n params : tuple\n Line model parameters in the following order `origin`, `direction`.\n\n \"\"\"\n\n def estimate(self, data):\n \"\"\"Estimate line model from data.\n\n Parameters\n ----------\n data : (N, dim) array\n N points in a space of dimensionality dim >= 2.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n \"\"\"\n\n _check_data_atleast_2D(data)\n\n X0 = data.mean(axis=0)\n\n if data.shape[0] == 2: # well determined\n u = data[1] - data[0]\n norm = np.linalg.norm(u)\n if norm > 0:\n u /= norm\n elif data.shape[0] > 2: # over-determined\n data = data - X0\n # first principal component\n # Note: without full_matrices=False Python dies with joblib\n # parallel_for.\n _, _, u = np.linalg.svd(data, full_matrices=False)\n u = u[0]\n else: # under-determined\n raise ValueError('At least 2 input points needed.')\n\n self.params = (X0, u)\n\n return True\n\n def residuals(self, data):\n \"\"\"Determine residuals of data to model.\n\n For each point the shortest distance to the line is returned.\n It is obtained by projecting the data onto the line.\n\n Parameters\n ----------\n data : (N, dim) array\n N points in a space of dimension dim.\n\n Returns\n -------\n residuals : (N, ) array\n Residual for each data point.\n \"\"\"\n\n X0, u = self.params\n return _norm_along_axis((data - X0) -\n np.dot(data - X0, u)[..., np.newaxis] * u,\n axis=1)\n\n def predict(self, x, axis=0, params=None):\n \"\"\"Predict intersection of the estimated line model with a hyperplane\n orthogonal to a given axis.\n\n Parameters\n ----------\n x : array\n coordinates along an axis.\n axis : int\n axis orthogonal to the hyperplane intersecting the line.\n params : (2, ) array, optional\n Optional custom parameter set in the form (`origin`, `direction`).\n\n Returns\n -------\n y : array\n Predicted coordinates.\n\n If the line is parallel to the given axis, a ValueError is raised.\n \"\"\"\n\n if params is None:\n params = self.params\n\n X0, u = params\n\n if u[axis] == 0:\n # line parallel to axis\n raise ValueError('Line parallel to axis %s' % axis)\n\n l = (x - X0[axis]) / u[axis]\n return X0 + l[..., np.newaxis] * u\n\n def predict_x(self, y, params=None):\n \"\"\"Predict x-coordinates for 2D lines using the estimated model.\n\n Alias for::\n\n predict(y, axis=1)[:, 0]\n\n Parameters\n ----------\n y : array\n y-coordinates.\n params : (2, ) array, optional\n Optional custom parameter set in the form (`origin`, `direction`).\n\n Returns\n -------\n x : array\n Predicted x-coordinates.\n\n \"\"\"\n return self.predict(y, axis=1, params=params)[:, 0]\n\n def predict_y(self, x, params=None):\n \"\"\"Predict y-coordinates for 2D lines using the estimated model.\n\n Alias for::\n\n predict(x, axis=0)[:, 1]\n\n Parameters\n ----------\n x : array\n x-coordinates.\n params : (2, ) array, optional\n Optional custom parameter set in the form (`origin`, `direction`).\n\n Returns\n -------\n y : array\n Predicted y-coordinates.\n\n \"\"\"\n return self.predict(x, axis=0, params=params)[:, 1]\n\n\nclass CircleModel(BaseModel):\n\n \"\"\"Total least squares estimator for 2D circles.\n\n The functional model of the circle is::\n\n r**2 = (x - xc)**2 + (y - yc)**2\n\n This estimator minimizes the squared distances from all points to the\n circle::\n\n min{ sum((r - sqrt((x_i - xc)**2 + (y_i - yc)**2))**2) }\n\n A minimum number of 3 points is required to solve for the parameters.\n\n Attributes\n ----------\n params : tuple\n Circle model parameters in the following order `xc`, `yc`, `r`.\n\n \"\"\"\n\n def estimate(self, data):\n \"\"\"Estimate circle model from data using total least squares.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n x = data[:, 0]\n y = data[:, 1]\n # pre-allocate jacobian for all iterations\n A = np.zeros((3, data.shape[0]), dtype=np.double)\n # same for all iterations: r\n A[2, :] = -1\n\n def dist(xc, yc):\n return np.sqrt((x - xc)**2 + (y - yc)**2)\n\n def fun(params):\n xc, yc, r = params\n return dist(xc, yc) - r\n\n def Dfun(params):\n xc, yc, r = params\n d = dist(xc, yc)\n A[0, :] = -(x - xc) / d\n A[1, :] = -(y - yc) / d\n # same for all iterations, so not changed in each iteration\n #A[2, :] = -1\n return A\n\n xc0 = x.mean()\n yc0 = y.mean()\n r0 = dist(xc0, yc0).mean()\n params0 = (xc0, yc0, r0)\n params, _ = optimize.leastsq(fun, params0, Dfun=Dfun, col_deriv=True)\n\n self.params = params\n\n return True\n\n def residuals(self, data):\n \"\"\"Determine residuals of data to model.\n\n For each point the shortest distance to the circle is returned.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n residuals : (N, ) array\n Residual for each data point.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n xc, yc, r = self.params\n\n x = data[:, 0]\n y = data[:, 1]\n\n return r - np.sqrt((x - xc)**2 + (y - yc)**2)\n\n def predict_xy(self, t, params=None):\n \"\"\"Predict x- and y-coordinates using the estimated model.\n\n Parameters\n ----------\n t : array\n Angles in circle in radians. Angles start to count from positive\n x-axis to positive y-axis in a right-handed system.\n params : (3, ) array, optional\n Optional custom parameter set.\n\n Returns\n -------\n xy : (..., 2) array\n Predicted x- and y-coordinates.\n\n \"\"\"\n if params is None:\n params = self.params\n xc, yc, r = params\n\n x = xc + r * np.cos(t)\n y = yc + r * np.sin(t)\n\n return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)\n\n\nclass EllipseModel(BaseModel):\n\n \"\"\"Total least squares estimator for 2D ellipses.\n\n The functional model of the ellipse is::\n\n xt = xc + a*cos(theta)*cos(t) - b*sin(theta)*sin(t)\n yt = yc + a*sin(theta)*cos(t) + b*cos(theta)*sin(t)\n d = sqrt((x - xt)**2 + (y - yt)**2)\n\n where ``(xt, yt)`` is the closest point on the ellipse to ``(x, y)``. Thus\n d is the shortest distance from the point to the ellipse.\n\n This estimator minimizes the squared distances from all points to the\n ellipse::\n\n min{ sum(d_i**2) } = min{ sum((x_i - xt)**2 + (y_i - yt)**2) }\n\n Thus you have ``2 * N`` equations (x_i, y_i) for ``N + 5`` unknowns (t_i,\n xc, yc, a, b, theta), which gives you an effective redundancy of ``N - 5``.\n\n The ``params`` attribute contains the parameters in the following order::\n\n xc, yc, a, b, theta\n\n A minimum number of 5 points is required to solve for the parameters.\n\n Attributes\n ----------\n params : tuple\n Ellipse model parameters in the following order `xc`, `yc`, `a`,\n `b`, `theta`.\n\n \"\"\"\n\n def estimate(self, data):\n \"\"\"Estimate circle model from data using total least squares.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n success : bool\n True, if model estimation succeeds.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n x = data[:, 0]\n y = data[:, 1]\n\n N = data.shape[0]\n\n # pre-allocate jacobian for all iterations\n A = np.zeros((N + 5, 2 * N), dtype=np.double)\n # same for all iterations: xc, yc\n A[0, :N] = -1\n A[1, N:] = -1\n\n diag_idxs = np.diag_indices(N)\n\n def fun(params):\n xyt = self.predict_xy(params[5:], params[:5])\n fx = x - xyt[:, 0]\n fy = y - xyt[:, 1]\n return np.append(fx, fy)\n\n def Dfun(params):\n xc, yc, a, b, theta = params[:5]\n t = params[5:]\n\n ct = np.cos(t)\n st = np.sin(t)\n ctheta = math.cos(theta)\n stheta = math.sin(theta)\n\n # derivatives for fx, fy in the following order:\n # xc, yc, a, b, theta, t_i\n\n # fx\n A[2, :N] = - ctheta * ct\n A[3, :N] = stheta * st\n A[4, :N] = a * stheta * ct + b * ctheta * st\n A[5:, :N][diag_idxs] = a * ctheta * st + b * stheta * ct\n # fy\n A[2, N:] = - stheta * ct\n A[3, N:] = - ctheta * st\n A[4, N:] = - a * ctheta * ct + b * stheta * st\n A[5:, N:][diag_idxs] = a * stheta * st - b * ctheta * ct\n\n return A\n\n # initial guess of parameters using a circle model\n params0 = np.empty((N + 5, ), dtype=np.double)\n xc0 = x.mean()\n yc0 = y.mean()\n r0 = np.sqrt((x - xc0)**2 + (y - yc0)**2).mean()\n params0[:5] = (xc0, yc0, r0, 0, 0)\n params0[5:] = np.arctan2(y - yc0, x - xc0)\n\n params, _ = optimize.leastsq(fun, params0, Dfun=Dfun, col_deriv=True)\n\n self.params = params[:5]\n\n return True\n\n def residuals(self, data):\n \"\"\"Determine residuals of data to model.\n\n For each point the shortest distance to the ellipse is returned.\n\n Parameters\n ----------\n data : (N, 2) array\n N points with ``(x, y)`` coordinates, respectively.\n\n Returns\n -------\n residuals : (N, ) array\n Residual for each data point.\n\n \"\"\"\n\n _check_data_dim(data, dim=2)\n\n xc, yc, a, b, theta = self.params\n\n ctheta = math.cos(theta)\n stheta = math.sin(theta)\n\n x = data[:, 0]\n y = data[:, 1]\n\n N = data.shape[0]\n\n def fun(t, xi, yi):\n ct = math.cos(t)\n st = math.sin(t)\n xt = xc + a * ctheta * ct - b * stheta * st\n yt = yc + a * stheta * ct + b * ctheta * st\n return (xi - xt)**2 + (yi - yt)**2\n\n # def Dfun(t, xi, yi):\n # ct = math.cos(t)\n # st = math.sin(t)\n # xt = xc + a * ctheta * ct - b * stheta * st\n # yt = yc + a * stheta * ct + b * ctheta * st\n # dfx_t = - 2 * (xi - xt) * (- a * ctheta * st\n # - b * stheta * ct)\n # dfy_t = - 2 * (yi - yt) * (- a * stheta * st\n # + b * ctheta * ct)\n # return [dfx_t + dfy_t]\n\n residuals = np.empty((N, ), dtype=np.double)\n\n # initial guess for parameter t of closest point on ellipse\n t0 = np.arctan2(y - yc, x - xc) - theta\n\n # determine shortest distance to ellipse for each point\n for i in range(N):\n xi = x[i]\n yi = y[i]\n # faster without Dfun, because of the python overhead\n t, _ = optimize.leastsq(fun, t0[i], args=(xi, yi))\n residuals[i] = np.sqrt(fun(t, xi, yi))\n\n return residuals\n\n def predict_xy(self, t, params=None):\n \"\"\"Predict x- and y-coordinates using the estimated model.\n\n Parameters\n ----------\n t : array\n Angles in circle in radians. Angles start to count from positive\n x-axis to positive y-axis in a right-handed system.\n params : (5, ) array, optional\n Optional custom parameter set.\n\n Returns\n -------\n xy : (..., 2) array\n Predicted x- and y-coordinates.\n\n \"\"\"\n\n if params is None:\n params = self.params\n xc, yc, a, b, theta = params\n\n ct = np.cos(t)\n st = np.sin(t)\n ctheta = math.cos(theta)\n stheta = math.sin(theta)\n\n x = xc + a * ctheta * ct - b * stheta * st\n y = yc + a * stheta * ct + b * ctheta * st\n\n return np.concatenate((x[..., None], y[..., None]), axis=t.ndim)\n\n\ndef _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):\n \"\"\"Determine number trials such that at least one outlier-free subset is\n sampled for the given inlier/outlier ratio.\n Parameters\n ----------\n n_inliers : int\n Number of inliers in the data.\n n_samples : int\n Total number of samples in the data.\n min_samples : int\n Minimum number of samples chosen randomly from original data.\n probability : float\n Probability (confidence) that one outlier-free sample is generated.\n Returns\n -------\n trials : int\n Number of trials.\n \"\"\"\n if n_inliers == 0:\n return np.inf\n\n nom = 1 - probability\n if nom == 0:\n return np.inf\n\n inlier_ratio = n_inliers / float(n_samples)\n denom = 1 - inlier_ratio ** min_samples\n if denom == 0:\n return 1\n elif denom == 1:\n return np.inf\n\n nom = np.log(nom)\n denom = np.log(denom)\n if denom == 0:\n return 0\n\n return int(np.ceil(nom / denom))\n\n\ndef ransac(data, model_class, min_samples, residual_threshold,\n is_data_valid=None, is_model_valid=None,\n max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0,\n stop_probability=1):\n \"\"\"Fit a model to data with the RANSAC (random sample consensus) algorithm.\n\n RANSAC is an iterative algorithm for the robust estimation of parameters\n from a subset of inliers from the complete data set. Each iteration\n performs the following tasks:\n\n 1. Select `min_samples` random samples from the original data and check\n whether the set of data is valid (see `is_data_valid`).\n 2. Estimate a model to the random subset\n (`model_cls.estimate(*data[random_subset]`) and check whether the\n estimated model is valid (see `is_model_valid`).\n 3. Classify all data as inliers or outliers by calculating the residuals\n to the estimated model (`model_cls.residuals(*data)`) - all data samples\n with residuals smaller than the `residual_threshold` are considered as\n inliers.\n 4. Save estimated model as best model if number of inlier samples is\n maximal. In case the current estimated model has the same number of\n inliers, it is only considered as the best model if it has less sum of\n residuals.\n\n These steps are performed either a maximum number of times or until one of\n the special stop criteria are met. The final model is estimated using all\n inlier samples of the previously determined best model.\n\n Parameters\n ----------\n data : [list, tuple of] (N, D) array\n Data set to which the model is fitted, where N is the number of data\n points and D the dimensionality of the data.\n If the model class requires multiple input data arrays (e.g. source and\n destination coordinates of ``skimage.transform.AffineTransform``),\n they can be optionally passed as tuple or list. Note, that in this case\n the functions ``estimate(*data)``, ``residuals(*data)``,\n ``is_model_valid(model, *random_data)`` and\n ``is_data_valid(*random_data)`` must all take each data array as\n separate arguments.\n model_class : object\n Object with the following object methods:\n\n * ``success = estimate(*data)``\n * ``residuals(*data)``\n\n where `success` indicates whether the model estimation succeeded\n (`True` or `None` for success, `False` for failure).\n min_samples : int\n The minimum number of data points to fit a model to.\n residual_threshold : float\n Maximum distance for a data point to be classified as an inlier.\n is_data_valid : function, optional\n This function is called with the randomly selected data before the\n model is fitted to it: `is_data_valid(*random_data)`.\n is_model_valid : function, optional\n This function is called with the estimated model and the randomly\n selected data: `is_model_valid(model, *random_data)`, .\n max_trials : int, optional\n Maximum number of iterations for random sample selection.\n stop_sample_num : int, optional\n Stop iteration if at least this number of inliers are found.\n stop_residuals_sum : float, optional\n Stop iteration if sum of residuals is less than or equal to this\n threshold.\n stop_probability : float in range [0, 1], optional\n RANSAC iteration stops if at least one outlier-free set of the\n training data is sampled with ``probability >= stop_probability``,\n depending on the current best model's inlier ratio and the number\n of trials. This requires to generate at least N samples (trials):\n\n N >= log(1 - probability) / log(1 - e**m)\n\n where the probability (confidence) is typically set to a high value\n such as 0.99, and e is the current fraction of inliers w.r.t. the\n total number of samples.\n\n Returns\n -------\n model : object\n Best model with largest consensus set.\n inliers : (N, ) array\n Boolean mask of inliers classified as ``True``.\n\n References\n ----------\n .. [1] \"RANSAC\", Wikipedia, http://en.wikipedia.org/wiki/RANSAC\n\n Examples\n --------\n\n Generate ellipse data without tilt and add noise:\n\n >>> t = np.linspace(0, 2 * np.pi, 50)\n >>> a = 5\n >>> b = 10\n >>> xc = 20\n >>> yc = 30\n >>> x = xc + a * np.cos(t)\n >>> y = yc + b * np.sin(t)\n >>> data = np.column_stack([x, y])\n >>> np.random.seed(seed=1234)\n >>> data += np.random.normal(size=data.shape)\n\n Add some faulty data:\n\n >>> data[0] = (100, 100)\n >>> data[1] = (110, 120)\n >>> data[2] = (120, 130)\n >>> data[3] = (140, 130)\n\n Estimate ellipse model using all available data:\n\n >>> model = EllipseModel()\n >>> model.estimate(data)\n True\n >>> model.params # doctest: +SKIP\n array([ -3.30354146e+03, -2.87791160e+03, 5.59062118e+03,\n 7.84365066e+00, 7.19203152e-01])\n\n\n Estimate ellipse model using RANSAC:\n\n >>> ransac_model, inliers = ransac(data, EllipseModel, 5, 3, max_trials=50)\n >>> ransac_model.params\n array([ 20.12762373, 29.73563063, 4.81499637, 10.4743584 , 0.05217117])\n >>> inliers\n array([False, False, False, False, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True], dtype=bool)\n\n Robustly estimate geometric transformation:\n\n >>> from skimage.transform import SimilarityTransform\n >>> np.random.seed(0)\n >>> src = 100 * np.random.rand(50, 2)\n >>> model0 = SimilarityTransform(scale=0.5, rotation=1,\n ... translation=(10, 20))\n >>> dst = model0(src)\n >>> dst[0] = (10000, 10000)\n >>> dst[1] = (-100, 100)\n >>> dst[2] = (50, 50)\n >>> model, inliers = ransac((src, dst), SimilarityTransform, 2, 10)\n >>> inliers\n array([False, False, False, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True, True, True, True, True,\n True, True, True, True, True], dtype=bool)\n\n \"\"\"\n\n best_model = None\n best_inlier_num = 0\n best_inlier_residuals_sum = np.inf\n best_inliers = None\n\n if min_samples < 0:\n raise ValueError(\"`min_samples` must be greater than zero\")\n\n if max_trials < 0:\n raise ValueError(\"`max_trials` must be greater than zero\")\n\n if stop_probability < 0 or stop_probability > 1:\n raise ValueError(\"`stop_probability` must be in range [0, 1]\")\n\n if not isinstance(data, list) and not isinstance(data, tuple):\n data = [data]\n\n # make sure data is list and not tuple, so it can be modified below\n data = list(data)\n # number of samples\n num_samples = data[0].shape[0]\n\n for num_trials in range(max_trials):\n\n # choose random sample set\n samples = []\n random_idxs = np.random.randint(0, num_samples, min_samples)\n for d in data:\n samples.append(d[random_idxs])\n\n # check if random sample set is valid\n if is_data_valid is not None and not is_data_valid(*samples):\n continue\n\n # estimate model for current random sample set\n sample_model = model_class()\n\n success = sample_model.estimate(*samples)\n\n if success is not None: # backwards compatibility\n if not success:\n continue\n\n # check if estimated model is valid\n if is_model_valid is not None and not is_model_valid(sample_model,\n *samples):\n continue\n\n sample_model_residuals = np.abs(sample_model.residuals(*data))\n # consensus set / inliers\n sample_model_inliers = sample_model_residuals < residual_threshold\n sample_model_residuals_sum = np.sum(sample_model_residuals**2)\n\n # choose as new best model if number of inliers is maximal\n sample_inlier_num = np.sum(sample_model_inliers)\n if (\n # more inliers\n sample_inlier_num > best_inlier_num\n # same number of inliers but less \"error\" in terms of residuals\n or (sample_inlier_num == best_inlier_num\n and sample_model_residuals_sum < best_inlier_residuals_sum)\n ):\n best_model = sample_model\n best_inlier_num = sample_inlier_num\n best_inlier_residuals_sum = sample_model_residuals_sum\n best_inliers = sample_model_inliers\n if (\n best_inlier_num >= stop_sample_num\n or best_inlier_residuals_sum <= stop_residuals_sum\n or num_trials\n >= _dynamic_max_trials(best_inlier_num, num_samples,\n min_samples, stop_probability)\n ):\n break\n\n # estimate final model using all inliers\n if best_inliers is not None:\n # select inliers for each data array\n for i in range(len(data)):\n data[i] = data[i][best_inliers]\n best_model.estimate(*data)\n\n return best_model, best_inliers\n","repo_name":"ryfeus/lambda-packs","sub_path":"Skimage_numpy/source/skimage/measure/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":28119,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"4272118851","text":"\"\"\"Module for sampling using [PTMCMCSampler][] and [enterprise_extensions][].\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport warnings\n\nfrom astropy.utils.exceptions import AstropyDeprecationWarning\n\nwarnings.filterwarnings('ignore', category=AstropyDeprecationWarning)\n\nimport logging\nimport platform\nimport shutil\nimport sys\nimport time\nfrom types import ModuleType\nfrom typing import Any\n\n# Fix missinng astropy.erfa in some of our older dependencies\nimport erfa\n\nsys.modules[\"astropy.erfa\"] = erfa\n\nimport numpy as np\nimport rich\nfrom ceffyl import Sampler\nfrom enterprise.pulsar import Pulsar\nfrom enterprise.signals.signal_base import PTA\nfrom enterprise_extensions import hypermodel\nfrom numpy._typing import _ArrayLikeFloat_co as array_like\nfrom numpy.typing import NDArray\nfrom PTMCMCSampler.PTMCMCSampler import PTSampler\nfrom rich import print\nfrom rich.console import Console\nfrom rich.panel import Panel\n\nfrom ptarcade import input_handler, pta_importer, signal_builder\nfrom ptarcade.input_handler import bcolors\nfrom ptarcade.models_utils import ParamDict\nfrom ptarcade import console\n\nlog = logging.getLogger(\"rich\")\n\ndef cpu_model() -> str:\n \"\"\"Get CPU info.\"\"\"\n try:\n import cpuinfo\n return cpuinfo.get_cpu_info()[\"brand_raw\"]\n except ModuleNotFoundError:\n return \"unknown CPU (for better info install py-cpuinfo)\"\n\n\ndef get_user_args() -> tuple[dict[str, ModuleType], dict[str, Any]] :\n \"\"\"Get CLI arguments\n\n Returns\n -------\n inputs : dict[str, ModuleType]\n Dictionary of loaded user-supplied modules.\n input_options : dict[str, Any]\n Dictionary of user-supplied input options.\n\n Raises\n ------\n SystemExit\n If CLI input is missing required args.\n\n \"\"\"\n # parse command line inputs\n input_options, cmd_input_okay = input_handler.get_cmdline_arguments()\n\n if not cmd_input_okay:\n\n error = (f\"Model file must be present\\n\"\n \"\\t- This is added with the -[blue bold]m[/] input flags. Add -[blue bold]h[/] (--[blue bold]help[/]) flags for more help.\\n\")\n\n log.error(error, extra={\"markup\":True})\n raise SystemExit\n\n inputs = input_handler.load_inputs(input_options)\n input_handler.check_config(inputs['config'])\n\n if not hasattr(inputs[\"model\"], \"group\"):\n pars_dic = inputs[\"model\"].parameters\n group = [par for par in pars_dic.keys() if pars_dic[par].common]\n\n setattr(inputs[\"model\"], \"group\", group)\n\n inputs[\"model\"].parameters = ParamDict(inputs[\"model\"].parameters)\n\n return inputs, input_options\n\n\ndef get_user_pta_data(inputs: dict[str, Any]) -> tuple[list[Pulsar], dict | None, array_like | None ]:\n \"\"\"Import user-specified PTA data.\n\n Parameters\n ----------\n inputs : dict[str, Any]\n User supplied modules\n\n Returns\n -------\n psrs : list[Pulsar]\n List of Pulsar objects\n noise_params : dict | None\n Dictionary containing noise data\n emp_dist : array_like | None\n The empirical distribution to use for sampling\n\n \"\"\"\n # import pta data\n psrs, noise_params, emp_dist = pta_importer.pta_data_importer(inputs['config'].pta_data)\n\n return psrs, noise_params, emp_dist\n\n\ndef initialize_pta(inputs: dict[str, Any], psrs: list[Pulsar] | None, noise_params : dict | None ) -> dict[int, PTA]:\n \"\"\"Initialize the PTA with the user input\n\n Parameters\n ----------\n psrs : list[Pulsar]\n list of pulsar objects\n inputs : dict[str, Any]\n User specified modules\n noise_params : dict, optional\n User specified noise params\n\n Returns\n -------\n dict[int, PTA]\n Dictionary of [enterprise.signals.signal_base.PTA][] objects configured with user inputs\n\n \"\"\"\n\n input_handler.check_model(\n model=inputs['model'],\n psrs=psrs,\n red_components=inputs['config'].red_components,\n gwb_components=inputs['config'].gwb_components,\n mode=inputs[\"config\"].mode)\n\n\n if inputs[\"config\"].mode == \"enterprise\":\n pta = {}\n\n pta[0] = signal_builder.ent_builder(\n psrs=psrs,\n model=inputs['model'],\n noisedict=noise_params,\n pta_dataset=inputs['config'].pta_data,\n bhb_th_prior=inputs['config'].bhb_th_prior,\n gamma_bhb=inputs['config'].gamma_bhb,\n A_bhb_logmin=inputs['config'].A_bhb_logmin,\n A_bhb_logmax=inputs['config'].A_bhb_logmax,\n corr=inputs['config'].corr,\n red_components=inputs[\"config\"].red_components,\n gwb_components=inputs[\"config\"].gwb_components)\n\n if inputs[\"config\"].mod_sel:\n pta[1] = pta[0]\n\n pta[0] = signal_builder.ent_builder(\n psrs=psrs,\n model=None,\n noisedict=noise_params,\n pta_dataset=inputs['config'].pta_data,\n bhb_th_prior=inputs['config'].bhb_th_prior,\n gamma_bhb=inputs['config'].gamma_bhb,\n A_bhb_logmin=inputs['config'].A_bhb_logmin,\n A_bhb_logmax=inputs['config'].A_bhb_logmax,\n corr=inputs['config'].corr,\n red_components=inputs[\"config\"].red_components,\n gwb_components=inputs[\"config\"].gwb_components)\n \n elif inputs[\"config\"].mode == \"ceffyl\":\n\n pta = signal_builder.ceffyl_builder(inputs)\n \n return pta\n\n\ndef setup_sampler(\n inputs: dict[str, ModuleType],\n input_options: dict[str, Any],\n pta: dict[int, PTA] | None,\n emp_dist: array_like | None,\n) -> tuple[PTSampler, NDArray]:\n \"\"\"Setup the PTMCMC sampler\n\n Parameters\n ----------\n inputs : dict[str, ModuleType]\n Dictionary of loaded user-supplied modules.\n input_options : dict[str, Any]\n Dictionary of user-supplied input options.\n pta : dict[int, PTA]\n Dictionary of [enterprise.signals.signal_base.PTA][] objects configured with user inputs\n emp_dist : array_like, optional\n The empirical distribution to use for sampling\n\n Returns\n -------\n sampler : PTMCMCSampler.PTSampler\n Configured [PTMCMCSampler.PTSampler][]\n x0 : NDArray\n Initial sample.\n\n \"\"\"\n out_dir = os.path.join(\n inputs[\"config\"].out_dir, inputs[\"model\"].name, f'chain_{input_options[\"n\"]}')\n \n if not inputs[\"config\"].resume and os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n\n if inputs['config'].mode == \"enterprise\":\n super_model = hypermodel.HyperModel(pta)\n\n groups = signal_builder.unique_sampling_groups(super_model)\n\n if inputs[\"model\"].group:\n idx_params = [super_model.param_names.index(pp) for pp in inputs[\"model\"].group]\n [groups.append(idx_params) for _ in range(5)] # type: ignore\n\n # add nmodel index to group structure\n groups.extend([[len(super_model.param_names)-1]])\n\n sampler = super_model.setup_sampler(\n resume=inputs[\"config\"].resume,\n outdir=out_dir,\n sample_nmodel=inputs[\"config\"].mod_sel,\n groups=groups,\n empirical_distr=emp_dist)\n\n x0 = super_model.initial_sample()\n\n super_model.get_lnlikelihood(x0) # Cache now to make timing more accurate\n\n elif inputs[\"config\"].mode == \"ceffyl\":\n\n sampler = Sampler.setup_sampler(pta,\n outdir=out_dir,\n logL=pta.ln_likelihood,\n logp=pta.ln_prior)\n\n x0 = pta.initial_samples()\n\n return sampler, x0\n\n\ndef do_sample(inputs: dict[str, Any], sampler: PTSampler, x0: NDArray) -> None:\n \"\"\"Run the configured sampler.\n\n Parameters\n ----------\n inputs : dict[str, Any]\n The user specified modules.\n sampler : PTMCMCSampler.PTSampler\n The configured [PTMCMCSampler.PTSampler][].\n\n x0 : NDArray\n The inital sample.\n\n \"\"\"\n N_samples = inputs[\"config\"].N_samples\n\n console.print(f\"[bold green]Starting to sample {N_samples} samples\\n\")\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n category=RuntimeWarning,\n message=\"invalid value encountered in scalar subtract\",\n module=\"PTMCMCSampler\",\n lineno=567,\n )\n\n warnings.filterwarnings(\n \"ignore\",\n category=RuntimeWarning,\n message=\"All-NaN axis encountered\",\n module=\"PTMCMCSampler\",\n lineno=464,\n )\n\n warnings.filterwarnings(\n \"ignore\",\n category=RuntimeWarning,\n message=\"divide by zero encountered in log\",\n module=\"enterprise.signals.parameter\",\n lineno=62,\n )\n try:\n sampler.sample(\n x0,\n N_samples,\n SCAMweight=inputs[\"config\"].scam_weight,\n AMweight=inputs[\"config\"].am_weight,\n DEweight=inputs[\"config\"].de_weight,\n )\n except RuntimeError as e:\n err = (\"There was an error while sampling. If this error involves autocorrelation time,\\n\"\n \"a temporary fix is to increase the number of samples in the configuration file.\\n\"\n \"We are actively working to upgrade the autocorrelation routines in our sampler.\\n\\n\")\n console.print(\"\\n\\n\")\n log.exception(err)\n raise SystemExit from None\n\n console.print()\n console.print(Panel.fit(\"[bold green]Done sampling[/]\", border_style=\"green\"))\n console.print()\n\n\ndef main():\n \"\"\"Read user inputs, set up sampler and models, and run sampler.\"\"\"\n console.print(Panel.fit('[bold green]Starting to run[/]', border_style=\"green\"))\n console.print()\n table = rich.table.Table(title=\"Node Information\", title_justify=\"left\",box=rich.box.ROUNDED)\n\n table.add_column(\"Node\", style=\"cyan\", no_wrap=True)\n table.add_column(\"CPU\", style=\"magenta\")\n\n table.add_row(platform.node(), cpu_model())\n\n console.print(table)\n console.print()\n\n start_cpu = time.process_time()\n start_real = time.perf_counter()\n\n inputs, input_options = get_user_args()\n\n psrs = None\n noise_params = None\n emp_dist = None\n\n if inputs[\"config\"].mode == \"enterprise\":\n with console.status(\"Loading Pulsars and noise data...\", spinner=\"bouncingBall\"):\n\n # import pta data\n psrs, noise_params, emp_dist = get_user_pta_data(inputs)\n\n console.print(f\"[bold green]Done loading [blue]{len(psrs)}[/] Pulsars and noise data :heavy_check_mark:\\n\")\n\n\n with console.status(\"Initializing PTA...\", spinner=\"bouncingBall\"):\n pta = initialize_pta(inputs, psrs, noise_params)\n console.print(\"[bold green]Done initializing PTA :heavy_check_mark:\\n\")\n\n\n with console.status(\"Initializing Sampler...\", spinner=\"bouncingBall\"):\n sampler, x0 = setup_sampler(inputs, input_options, pta, emp_dist)\n console.print(\"[bold green]Done initializing Sampler :heavy_check_mark:\\n\")\n\n console.print(\"Done with all initializtions.\\nSetup times (including first sample) {:.2f} seconds real, {:.2f} seconds CPU\\n\".format(\n time.perf_counter()-start_real, time.process_time()-start_cpu));\n\n start_cpu = time.process_time()\n start_real = time.perf_counter()\n\n do_sample(inputs, sampler, x0)\n\n real_time = time.perf_counter()-start_real\n cpu_time = time.process_time()-start_cpu\n\n N_samples = inputs[\"config\"].N_samples\n\n summary_table = rich.table.Table(title=\"Run Summary\", title_justify=\"left\", box=rich.box.ROUNDED)\n\n summary_table.add_column(\"Time (real)\", style=\"cyan\")\n summary_table.add_column(\"Time (real)/sample\", style=\"cyan\")\n summary_table.add_column(\"Time (CPU)\", style=\"magenta\")\n summary_table.add_column(\"Time (CPU)/sample\", style=\"magenta\")\n\n\n summary_table.add_row(f\"{real_time:.2f}\", f\"{real_time/N_samples:.4f}\", f\"{cpu_time:.2f}\", f\"{cpu_time/N_samples:.4f}\")\n console.print(summary_table)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andrea-mitridate/PTArcade","sub_path":"src/ptarcade/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":12031,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"28734992218","text":"import sqlite3\r\nimport subprocess as sp\r\n\r\n\"\"\"\r\nCódigo do banco de dados.\r\n\"\"\"\r\n\r\ndef create_table():\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t CREATE TABLE IF NOT EXISTS aluno(\r\n\t \tid INTEGER PRIMARY KEY, \r\n\t \troll INTEGER, \r\n\t \tname TEXT\r\n\t )\r\n\t'''\r\n\tcursor.execute(query)\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n### Cria o banco de dados ###\r\n\r\ndef adiciona_aluno(roll,name):\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t INSERT INTO aluno( roll, name )\r\n\t \t VALUES ( ?,? )\r\n\t'''\r\n\r\n\tcursor.execute(query,(roll,name))\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\ndef obtem_alunos():\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t SELECT roll, name\r\n\t FROM aluno\r\n\t'''\r\n\r\n\tcursor.execute(query)\r\n\tall_rows = cursor.fetchall()\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n\treturn all_rows\r\n\r\ndef obtem_aluno_by_roll(roll):\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t SELECT roll, name\r\n\t FROM aluno\r\n\t WHERE roll = {}\r\n\t''' .format(roll)\r\n\r\n\tcursor.execute(query)\r\n\tall_rows = cursor.fetchall()\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n\treturn all_rows\r\n\r\ndef atualiza_aluno(roll,name):\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t UPDATE aluno\r\n\t SET name = ?\r\n\t WHERE roll = ?\r\n\t'''\r\n\r\n\tcursor.execute(query,(name,roll))\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n\r\ndef apaga_aluno(roll):\r\n\tconn = sqlite3.connect('alunos.sqlite')\r\n\tcursor = conn.cursor()\r\n\tquery = '''\r\n\t DELETE\r\n\t FROM aluno\r\n\t WHERE roll = {}\r\n\t''' .format(roll)\r\n\r\n\tcursor.execute(query)\r\n\tall_rows = cursor.fetchall()\r\n\r\n\tconn.commit()\r\n\tconn.close()\r\n\r\n\treturn all_rows\r\n\r\n\r\n\r\ncreate_table()\r\n\r\n\r\n\r\ndef adiciona_dados(id_,name):\r\n\tadiciona_aluno(id_,name)\r\ndef obtem_dados():\r\n\treturn obtem_alunos()\r\n\r\ndef exibe_dados():\r\n\talunos = obtem_dados()\r\n\tfor aluno in alunos:\r\n\t\tprint(aluno)\r\n\r\ndef exibe_dados_by_id(id_):\r\n\talunos = obtem_aluno_by_roll(id_)\r\n\tif not alunos:\r\n\t\tprint(\"Nenhum dado encontrado\",id_)\r\n\telse:\r\n\t\tprint (alunos)\r\n\r\ndef select():\r\n\r\n\tsel = input(\"1.Adicionar aluno\\n 2.Exibir alunos cadastrados\\n 3.Atualizar aluno\\n 4.Apagar aluno do sistema\\n 5.Sair\\n\\n\")\r\n\t\r\n\tif sel=='1':\r\n\t\tid_ = int(input('CPF: '))\r\n\t\tname = input('Nome: ')\r\n\t\tadiciona_dados(id_,name)\r\n\telif sel=='2':\r\n\t\tsp.call('clear',shell=True)\r\n\t\texibe_dados()\r\n\t\tinput(\"\\n\\n Digite enter para retornar\")\r\n\t\r\n\telif sel=='3':\r\n\t\tid__ = int(input('Informar CPF: '))\r\n\t\texibe_dados_by_id(id__)\r\n\t\tprint()\r\n\t\tname = input('Nome: ')\r\n\t\tatualiza_aluno(id__,name)\r\n\t\tinput(\"\\n\\n Aluno devidamente atualizado \\nDigite enter para retornar\")\r\n\telif sel=='4':\r\n\t\tid__ = int(input('Informar CPF: '))\r\n\t\texibe_dados_by_id(id__)\r\n\t\tapaga_aluno(id__)\r\n\t\tinput(\"\\n\\nO aluno foi apagado do sistema \\nDigite enter para retornar\")\r\n\telse:\r\n\t\treturn 0;\r\n\treturn 1;\r\n\r\nwhile(select()):\r\n\tpass\r\n","repo_name":"laizgamaa/crud-sqlite-projetoIP","sub_path":"aluno.py","file_name":"aluno.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22037718134","text":"#部分和問題に対するビットを用いる全探索解法\n\n#入力受け取り\nN,W=map(int,input().split())\na=list(map(int,input().split()))\n\n#bitは2^N通りの部分集合全体を動きます\nexist=False\nfor bit in range(1< YYYYMMDDTHHMM (use UTC)\", required=True)\n parser.add_argument(\"-f\", \"--endtime\", metavar=\"END_TIME\", type=str, help=\"End time in ISO format-> YYYYMMDDTHHMM (use UTC)\", required=True)\n args = parser.parse_args()\n inputfile = args.inputfile\n starttime = args.starttime\n endtime = args.endtime\n\n workflow = rdhmWorkflow(inputfile, starttime, endtime)\n workflow.generate_workflow()\n\n","repo_name":"CASAelyons/rdhmworkflow","sub_path":"run_rdhm_new.py","file_name":"run_rdhm_new.py","file_ext":"py","file_size_in_byte":13595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31392744416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 9 18:55:08 2016\n\n@author: TK_adm\n\"\"\"\n\ndef annotate(word, dicentry, w):\n w.write('\\n')\n if type(dicentry) == list:\n wordList = dicentry\n for entry in wordList:\n transcr, sem = entry\n w.write('') \n \n else:\n transcr, sem = dicentry\n sem = sem.replace('\"', \"'\")\n w.write('') \n \n w.write(word+'')\n #\n\ndef createChineseDic(dicfile):\n Dict = {}\n #k = 0\n f = open(dicfile, 'r', encoding='utf-8')\n w = open('chin-eng.csv', 'w', encoding='utf-8')\n for line in f:\n #if k > 100:\n #break\n if line[0] == '#':\n continue\n splitLine = line.split()\n try: # in case there are faulty lines\n lex = splitLine[1]\n except:\n continue\n \n transcr = ''\n t = 0\n for word in splitLine:\n if word[0] == '[' and t == 0:\n transcr += word[1:]\n t = 1\n if word[-1] == ']':\n break\n elif t == 1:\n transcr += (' '+ word)\n if word[-1] == ']':\n \n break\n transcr = transcr [:-1] \n \n sem = ''\n s = 0\n for word in splitLine:\n if word[0] == '/' and s == 0 :\n sem += word[1:]\n s = 1\n elif s == 1:\n sem += (' '+ word)\n sem = sem[:-1]\n sem = sem.replace('/', ', ')\n \n #w.write(lex +'\\t'+ transcr +'\\t'+ sem+'\\n')\n if lex not in Dict:\n Dict[lex] = (transcr, sem)\n else:\n if type(Dict[lex]) != list:\n sem1 = Dict[lex]\n Dict[lex] = []\n Dict[lex].append(sem1)\n Dict[lex].append((transcr, sem))\n else:\n Dict[lex].append((transcr, sem))\n \n f.close() \n w.close() \n return Dict\n\ndef proccessChinese(file2proccess, Dict):\n\n f = open(file2proccess, 'r', encoding='utf-8')\n w = open(file2proccess[:-4]+'_result.xml', 'w', encoding='utf-8')\n #k = 10\n for line in f:\n #while k:\n #line = f.readline()\n if line[:4] != '':\n w.write(line)\n continue \n else:\n w.write('')\n \n chLine = line[4:-6]\n print('line to proccess: '+chLine)\n \n symbs = chLine\n procSymb = 0\n T = 1\n while T:\n for x in range(len(chLine)):\n if symbs in Dict:\n print(symbs+' in!') \n annotate(symbs, Dict[symbs], w)\n procSymb += len(symbs)\n break\n elif len(symbs) == 1 and symbs not in Dict:\n print(symbs+' not in, just punct')\n w.write(symbs)\n #T = 0\n procSymb += 1\n break\n else:\n print(symbs+' cut to')\n symbs = symbs[:-1]\n print(symbs)\n symbs = chLine[procSymb:]\n if procSymb == len(chLine):\n print('end line')\n break\n w.write('\\n')\n #k -= 1\n \n w.close()\n f.close()\n #dif meanings\n\ndef main():\n dicfile = 'cedict_ts.u8'\n Dict = createChineseDic(dicfile)\n file2proccess = 'stal.xml'\n proccessChinese(file2proccess, Dict)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tlkononova/python-hse","sub_path":"chinese/chinaParser.py","file_name":"chinaParser.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2840075833","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport argparse\nimport pandas as pd\nimport re\nimport sys\n\n\ndef read_input_files(x, category=['data', 'treatment']):\n tmp = pd.read_csv(x, sep=None, engine='python')\n if category=='data':\n ftype = x.split('/')[-1].split('_')[0]\n tmp['experiment'] = ftype\n else:\n if re.search('control', x):\n ftype = 'control'\n elif re.search('treatment', x):\n ftype = 'treatment'\n else:\n sys.exit('STOP! {} is not a valid treatment/control file.'.format(x))\n tmp['treat_status'] = ftype\n\n return tmp\n\n\ndef gather_ids(x):\n return pd.DataFrame({\n 'id': (x['data value']\n [(x.event=='clientLogIn') &\n (x['data name']=='clientId')]\n .unique()\n .tolist()),\n 'experiment': x.experiment[0],\n })\n\n\ndef gather_exp_status(x):\n return pd.DataFrame({\n 'id': [row.split('/')[-1] for row in x.ROUTER_URL],\n 'treat_status': x.treat_status[0],\n })\n\n\ndef run(args_dict):\n # read in data\n d = [read_input_files(x, 'data') for x in args_dict['data']]\n s = [read_input_files(x, 'treatment') for x in args_dict['treatment']]\n\n # process ids from experiment\n ids = pd.concat([gather_ids(x) for x in d], axis=0)\n\n # process randomized lists\n stat = pd.concat([gather_exp_status(x) for x in s], axis=0)\n\n # merge\n res = ids.merge(stat, on='id')\n\n # write to disk\n res.to_csv(args_dict['output'], index=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Identify post-hoc '\n 'participation by treatment status.')\n parser.add_argument('-d', '--data', required=True, nargs='*',\n help='Path/name to experiment results.')\n parser.add_argument('-t', '--treatment', required=True, nargs=2,\n help='Path/name to treatment status lists.')\n parser.add_argument('-o', '--output', required=True, help='Path/name '\n 'for location to write data.')\n args_dict = vars(parser.parse_args())\n\n run(args_dict)\n","repo_name":"GallupGovt/ngs2","sub_path":"misc/calculate_incentive_statistics.py","file_name":"calculate_incentive_statistics.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"6161136385","text":"#!/usr/bin/python3\nc = 1\ndiff = 0\nfor i in reversed(range(97, 123)):\n print(\"{}\".format(chr(i - diff)), end='')\n c += 1\n if not c % 2:\n diff = 32\n else:\n diff = 0\n","repo_name":"moostafa1/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/100-print_tebahpla.py","file_name":"100-print_tebahpla.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29500367157","text":"import unittest\r\n\r\nimport sys\r\n\r\nsys.path.append(\"../ATOM3Types\")\r\nsys.path.append(\"../UserInterface\")\r\nsys.path.append(\"../GraphGrammar\")\r\n\r\nfrom types import *\r\nfrom Tkinter import *\r\nfrom ATOM3MSEnum import *\r\nfrom ATOM3Exceptions import *\r\n\r\nclass MSEnumUnitTest(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.mainwindow = Tk()\r\n self.atc = ATOM3MSEnum()\r\n\r\n def tearDown(self):\r\n self.atc.destroy()\r\n self.atc = None\r\n\r\n # --- SOME setNone() - isNone() TESTS ---\r\n\r\n def setNone_Test(self):\r\n \"\"\"Tries setNone() and isNone()\"\"\"\r\n self.atc.setNone()\r\n assert self.atc.isNone()\r\n\r\n def show_setNone_Test(self):\r\n \"\"\"Tries setNone() show() isNone()\"\"\"\r\n self.atc.setNone()\r\n self.atc.show(self.mainwindow)\r\n assert self.atc.isNone()\r\n\r\n def show_destroy_setNone_Test(self):\r\n \"\"\"Tries setNone() show() isNone()\"\"\"\r\n self.atc.setNone()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert self.atc.isNone()\r\n\r\n # --- SOME getValue() TESTS ---\r\n\r\n def getValueTest_isTuple(self):\r\n \"\"\"getValue() should return a tuple\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert type(ret) == TupleType\r\n\r\n def getValueTest_has2Elements(self):\r\n \"\"\"getValue() should return a 2 elements tuple\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert len(ret) == 2\r\n\r\n # --- SOME setValue() TESTS ---\r\n\r\n def setValueTest_SetValidValues_1 (self):\r\n \"\"\"Tries Seting the VALID values ( [\"e1\", \"e2\"], [1, 0] )\"\"\"\r\n self.atc.setValue(( [\"e1\", \"e2\"], [1, 0] ))\r\n res = self.atc.getValue()\r\n assert res == ([\"e1\", \"e2\"], [1, 0])\r\n\r\n def show_setValueTest_SetValidValues_1 (self):\r\n \"\"\"Tries Seting the VALID values ( [\"e1\", \"e2\"], [1, 0] ), with show\"\"\"\r\n self.atc.setValue(( [\"e1\", \"e2\"], [1, 0] ))\r\n self.atc.show(self.mainwindow)\r\n res = self.atc.getValue()\r\n assert res == ([\"e1\", \"e2\"], [1, 0])\r\n\r\n def show_destroy_setValueTest_SetValidValues_1 (self):\r\n \"\"\"Tries Seting the VALID values ( [\"e1\", \"e2\"], [1, 0] ), with show\"\"\"\r\n self.atc.setValue(( [\"e1\", \"e2\"], [1, 0] ))\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n res = self.atc.getValue()\r\n assert res == ([\"e1\", \"e2\"], [1, 0])\r\n\r\n def setValueTest_SetInValidValues_1 (self):\r\n \"\"\"Tries Seting the INVALID values ( [\"e1\", \"e2\"], 1 ) ( 1 is not a list of selected things)\"\"\"\r\n self.assertRaises ( ATOM3BadAssignmentValue, self.atc.setValue, ( [\"e1\", \"e2\"], 1 ))\r\n\r\n def setValueTest_SetInValidValues_2 (self):\r\n \"\"\"Tries Seting the INVALID values ( [\"e1\", \"e2\"], [1, 0, 0] ) ( wrong list sizes )\"\"\"\r\n self.assertRaises ( ATOM3BadAssignmentValue, self.atc.setValue, ( [\"e1\", \"e2\"], [1, 0, 0] ))\r\n\r\n def setValueTest_SetInValidValues_3 (self):\r\n \"\"\"Tries Seting the INVALID values ( [1, \"e2\"], [1, 0] ) ( 1 is not a string)\"\"\"\r\n self.assertRaises ( ATOM3BadAssignmentValue, self.atc.setValue, ( [1, \"e2\"], [1, 0] ))\r\n\r\n # --- toString() TEST ---\r\n\r\n def toStringTest(self):\r\n \"\"\"The toString() method must return a string!\"\"\"\r\n self.atc.setValue(( [\"e1\", \"e2\"], [1, 0] ))\r\n c = self.atc.toString()\r\n assert (type(c) == StringType)\r\n\r\nclass MSEnumUnitTest1(MSEnumUnitTest):\r\n\r\n def setUp(self):\r\n self.mainwindow = Tk()\r\n self.atc = ATOM3MSEnum([\"e0\", \"e1\"])\t# a valid value!\r\n\r\n def getValueTest_1(self):\r\n \"\"\"getValue() should return ([\"e0\", \"e1\"], [0, 0])\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert ret == ([\"e0\", \"e1\"], [0, 0])\r\n\r\nclass MSEnumUnitTest2(MSEnumUnitTest):\r\n\r\n def setUp(self):\r\n self.mainwindow = Tk()\r\n self.atc = ATOM3MSEnum([\"e0\", \"e1\"], [1, 1])\t# a valid value!\r\n\r\n def getValueTest_1(self):\r\n \"\"\"getValue() should return ([\"e0\", \"e1\"], [1, 1])\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert ret == ([\"e0\", \"e1\"], [1, 1])\r\n\r\nclass MSEnumUnitTest3(MSEnumUnitTest):\r\n\r\n def setUp(self):\r\n self.mainwindow = Tk()\r\n self.atc = ATOM3MSEnum([\"e0\", \"e1\"], [1, 1], 1)\t# a valid value!\r\n\r\n def getValueTest_1(self):\r\n \"\"\"getValue() should return ([\"e0\", \"e1\"], [1, 1])\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert ret == ([\"e0\", \"e1\"], [1, 1])\r\n\r\nclass MSEnumUnitTest4(MSEnumUnitTest):\r\n\r\n def setUp(self):\r\n self.mainwindow = Tk()\r\n self.atc = ATOM3MSEnum([\"e0\", \"e1\"], [1, 1], 0, ATOM3MSEnum.LISTBOX)\t# a valid value!\r\n\r\n def getValueTest_1(self):\r\n \"\"\"getValue() should return ([\"e0\", \"e1\"], [1, 1])\"\"\"\r\n ret = self.atc.getValue()\r\n self.atc.show(self.mainwindow)\r\n self.atc.destroy()\r\n assert ret == ([\"e0\", \"e1\"], [1, 1])\r\n\r\n\r\ncts = unittest.TestSuite()\r\ncts.addTest(MSEnumUnitTest(\"setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest(\"show_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest(\"show_destroy_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest(\"getValueTest_isTuple\"))\r\ncts.addTest(MSEnumUnitTest(\"getValueTest_has2Elements\"))\r\ncts.addTest(MSEnumUnitTest(\"setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest(\"show_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest(\"show_destroy_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest(\"setValueTest_SetInValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest(\"setValueTest_SetInValidValues_2\"))\r\ncts.addTest(MSEnumUnitTest(\"setValueTest_SetInValidValues_3\"))\r\ncts.addTest(MSEnumUnitTest(\"toStringTest\"))\r\n\r\ncts.addTest(MSEnumUnitTest1(\"getValueTest_1\"))\r\ncts.addTest(MSEnumUnitTest1(\"setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest1(\"show_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest1(\"show_destroy_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest1(\"getValueTest_isTuple\"))\r\ncts.addTest(MSEnumUnitTest1(\"getValueTest_has2Elements\"))\r\ncts.addTest(MSEnumUnitTest1(\"setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest1(\"show_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest1(\"show_destroy_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest1(\"setValueTest_SetInValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest1(\"setValueTest_SetInValidValues_2\"))\r\ncts.addTest(MSEnumUnitTest1(\"setValueTest_SetInValidValues_3\"))\r\ncts.addTest(MSEnumUnitTest1(\"toStringTest\"))\r\n\r\ncts.addTest(MSEnumUnitTest2(\"getValueTest_1\"))\r\ncts.addTest(MSEnumUnitTest2(\"setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest2(\"show_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest2(\"show_destroy_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest2(\"getValueTest_isTuple\"))\r\ncts.addTest(MSEnumUnitTest2(\"getValueTest_has2Elements\"))\r\ncts.addTest(MSEnumUnitTest2(\"setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest2(\"show_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest2(\"show_destroy_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest2(\"setValueTest_SetInValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest2(\"setValueTest_SetInValidValues_2\"))\r\ncts.addTest(MSEnumUnitTest2(\"setValueTest_SetInValidValues_3\"))\r\ncts.addTest(MSEnumUnitTest2(\"toStringTest\"))\r\n\r\ncts.addTest(MSEnumUnitTest3(\"getValueTest_1\"))\r\ncts.addTest(MSEnumUnitTest3(\"setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest3(\"show_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest3(\"show_destroy_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest3(\"getValueTest_isTuple\"))\r\ncts.addTest(MSEnumUnitTest3(\"getValueTest_has2Elements\"))\r\ncts.addTest(MSEnumUnitTest3(\"setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest3(\"show_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest3(\"show_destroy_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest3(\"setValueTest_SetInValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest3(\"setValueTest_SetInValidValues_2\"))\r\ncts.addTest(MSEnumUnitTest3(\"setValueTest_SetInValidValues_3\"))\r\ncts.addTest(MSEnumUnitTest3(\"toStringTest\"))\r\n\r\ncts.addTest(MSEnumUnitTest4(\"getValueTest_1\"))\r\ncts.addTest(MSEnumUnitTest4(\"setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest4(\"show_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest4(\"show_destroy_setNone_Test\"))\r\ncts.addTest(MSEnumUnitTest4(\"getValueTest_isTuple\"))\r\ncts.addTest(MSEnumUnitTest4(\"getValueTest_has2Elements\"))\r\ncts.addTest(MSEnumUnitTest4(\"setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest4(\"show_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest4(\"show_destroy_setValueTest_SetValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest4(\"setValueTest_SetInValidValues_1\"))\r\ncts.addTest(MSEnumUnitTest4(\"setValueTest_SetInValidValues_2\"))\r\ncts.addTest(MSEnumUnitTest4(\"setValueTest_SetInValidValues_3\"))\r\ncts.addTest(MSEnumUnitTest4(\"toStringTest\"))\r\n\r\nrunner = unittest.TextTestRunner()\r\nrunner.run(cts)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AILab-FOI/LSMASOMM","sub_path":"atom3/Kernel/unitTests/testMSEnum.py","file_name":"testMSEnum.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36624261796","text":"from WoWCharacters import Character\nfrom random import randrange\n\n\nclass Arena:\n def __init__(self, team1, team2):\n self.team1 = team1\n self.team2 = team2\n\n def __str__(self):\n st = \"(===============)\\nTEAM A\\n\"\n for character in self.team1:\n st += str(character)\n st += \"(===============)\\nTEAM B\\n\"\n for character in self.team2:\n st += str(character)\n st += \"(===============)\"\n return st\n\n def __repr__(self):\n st = 'Arena('\n for character in self.team1:\n st += \"[\" + repr(character) + \"]\"\n st += \"), (\"\n for character in self.team2:\n st += \"[\" + repr(character) + \"]\"\n st += \")\"\n return st\n\n def play(self):\n global defending\n time = -1\n while True:\n print(self)\n time += 1\n print(\"=\" * 20)\n print(f'Time = {time}')\n print(\"=\" * 20)\n\n # create list of character to play\n\n ready_characters = []\n for character in self.team1:\n if character.delay == 0:\n ready_characters.append((\"A\", character))\n for character in self.team2:\n if character.delay == 0:\n ready_characters.append((\"B\", character))\n\n # active characters attack\n\n for character in ready_characters:\n attacking = character[1]\n if character[0] == \"A\":\n defending = self.team2[randrange(len(self.team2))]\n elif character[0] == \"B\":\n defending = self.team1[randrange(len(self.team1))]\n damage = attacking.attack()\n defending.health -= damage\n print(f'{attacking.name} dealt {damage} damage to {defending.name}')\n\n # check for dead characters\n\n for pos in range(len(self.team1) - 1, -1, -1):\n if self.team1[pos].is_dead():\n print(f'{self.team1[pos].name} is dead')\n self.team1.pop(pos)\n for pos in range(len(self.team2) - 1, -1, -1):\n if self.team2[pos].is_dead():\n print(f'{self.team2[pos].name} is dead')\n self.team2.pop(pos)\n\n # check for winning team\n\n if len(self.team1) == 0:\n print('Team B has won!')\n break\n elif len(self.team2) == 0:\n print('Team A has won!')\n break\n\n # end round\n\n for character in self.team1:\n character.end_round()\n for character in self.team2:\n character.end_round()\n\n input(\"Press any key to continue: \")\n\n\n\n","repo_name":"MarioMatsas/WoWMMORPG","sub_path":"WoWMMORPG/WoWArena.py","file_name":"WoWArena.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10999477891","text":"import pytest\nimport os\nfrom tests.test_settings import tests_dir\n\ntimer_configuration_file=os.path.join(os.path.dirname(os.path.realpath(__file__) ), \"timer_configfile.txt\")\n\n\ndef get_files_dirs(no_cpu, no_gpu, slow):\n #note that the slow directories INCLUDE all the files from the fast directories as well.\n generate_path=os.path.join(tests_dir, 'reviewer_tests', 'files_for_tests', 'generate')\n fast_gpu_files_dir=os.path.join(generate_path, 'gpu', 'short')\n fast_cpu_files_dir = os.path.join(generate_path, 'cpu', 'short')\n slow_gpu_files_dir=os.path.join(generate_path, 'gpu', 'long')\n slow_cpu_files_dir = os.path.join(generate_path, 'cpu', 'long')\n files_with_cache = os.path.join(tests_dir, 'time_tests', 'files_for_tests_with_cache','gpu')\n file_dirs = []\n #file_dirs.append(files_with_cache)\n if slow:\n if not no_cpu:\n file_dirs.append(slow_cpu_files_dir)\n if not no_gpu:\n file_dirs.append(slow_gpu_files_dir)\n else:\n if not no_cpu:\n file_dirs.append(fast_cpu_files_dir)\n if not no_gpu:\n file_dirs.append(fast_gpu_files_dir)\n return file_dirs\n\ndef pytest_generate_tests(metafunc):\n num_iter = 1\n if \"test_folder_path\" in metafunc.fixturenames:\n if metafunc.config.option.specific:\n result_params, result_ids = folder_test(metafunc)\n else:\n result_params, result_ids = standard_tests(metafunc)\n #result_params, result_ids=specific_tests()\n if metafunc.config.option.percent:\n max_per_error = int(metafunc.config.option.percent)\n else:\n max_per_error = 4\n with open(timer_configuration_file, \"r\") as file:\n all_lines = file.readlines()\n avg_var_list = [ line.split() for line in all_lines]\n updated_result_params = []\n for result in result_params:\n is_exists = False\n for item in avg_var_list:\n if result == item[0]: # same test\n updated_result_params.append([num_iter, result, float(item[1]), float(item[2]), max_per_error])\n is_exists = True\n break\n if not is_exists:\n updated_result_params.append([num_iter, result, -1, -1, max_per_error])\n\n metafunc.parametrize(\"iter, test_folder_path, exp_avg, exp_var, max_per_error\", updated_result_params, ids=result_ids)\n\ndef specific_tests():\n generate_path=os.path.join(tests_dir, 'reviewer_tests', 'files_for_tests', 'generate')\n fast_gpu_files_dir=os.path.join(generate_path, 'cpu', 'fast')\n failed_tests=[\n \"Single_PDB_Impl_No_Charges_With_Hydr_Ga_Kr\",\n\t\t\"Single_PDB_Impl_No_Charges_With_Hydr_VEGAS\",\n\t\t\"Single_PDB_Impl_No_Charges_With_Hydr_MC\"\n ]\n file_dirs=[]\n for test in failed_tests:\n file_dirs.append(os.path.join(fast_gpu_files_dir, test))\n\n return file_dirs, failed_tests\n\ndef folder_test(metafunc):\n path = metafunc.config.option.specific\n file_dirs = [path]\n test_name = [os.path.basename(path)]\n return file_dirs, test_name\n\ndef standard_tests(metafunc):\n slow = metafunc.config.option.slow\n no_gpu = metafunc.config.option.no_gpu\n no_cpu = metafunc.config.option.no_cpu\n result_params = []\n result_ids=[]\n file_dirs=get_files_dirs(no_cpu, no_gpu, slow)\n for test_dir in file_dirs:\n for (dirpath, dirnames, filenames) in os.walk(test_dir):\n for folder in dirnames:\n result_params.append(os.path.join(dirpath, folder))\n result_ids.append(folder)\n break\n #parametrize: argsnames, argvalues, ids\n return result_params, result_ids\n","repo_name":"uri-raviv-lab/dplus-dev","sub_path":"PythonInterface/tests/time_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"25690019817","text":"import websockets\nimport asyncio\nimport json\nimport datetime\nfrom pydantic import BaseModel\nimport datetime\nimport pandas as pd\n\nfrom .data_models import BinanceModel\n\nfrom data_processing.bybit_data_processing import get_bybit_unordered_bids_and_ask\nfrom data_processing.binance_data_processing import get_binance_unordered_bids_and_asks\nfrom data_processing.kraken_data_processing import get_kraken_unordered_bids_and_ask\n\n\ndef kafka_serializer(message) -> None:\n \"\"\"\n Serializer for kafka message\n \"\"\"\n return json.dumps(message).encode(\"utf-8\")\n\n\nasync def generate_ordered_trades_for_each_exchange(exchanges_data: list):\n bids_unordered = {}\n asks_unordered = {}\n\n for exchange_data_item in exchanges_data:\n exchange_name = exchange_data_item[\"exchange_name\"]\n\n if exchange_name == \"binance\":\n\n bids_unordered, asks_unordered = await get_binance_unordered_bids_and_asks(\n exchange_name, exchange_data_item, bids_unordered, asks_unordered\n )\n\n elif exchange_name == \"bybit\":\n bids_unordered, asks_unordered = await get_bybit_unordered_bids_and_ask(\n exchange_name, exchange_data_item, bids_unordered, asks_unordered\n )\n\n elif exchange_name == \"kraken\":\n bids_unordered, asks_unordered = await get_kraken_unordered_bids_and_ask(\n exchange_name, exchange_data_item, bids_unordered, asks_unordered\n )\n # sorting bids and asks\n bids_ordered = {}\n asks_ordered = {}\n\n for key, value in bids_unordered.items():\n\n value.sort(key=lambda x: float(x[1]), reverse=True)\n\n bids_ordered[key] = value\n\n for key, value in asks_unordered.items():\n value = value.sort(key=lambda x: float(x[1]))\n asks_ordered[key] = value\n\n return bids_ordered, asks_unordered\n\n\n# stream = websockets.connect('wss://stream.binance.com:9443/stream?streams=adausdt@ticker')\n\n\n# def generate_data_model(data):\n\n# binance_ticker = BinanceModel(\n# event_time=data[\"E\"],\n# symbol=str(data['s']),\n# last_price=float(data['c']),\n# last_quantity=float(data['q']),\n# price_change=float(data['p']),\n# price_change_percent=float(data['P']),\n# open_price = float(data['o']),\n# high = float(data['h']),\n# low = float(data['l']),\n# volume = float(data['v']),\n# quote_volume = float(data['Q'])\n# )\n\n# return binance_ticker\n\n# def create_data_frame(data):\n# df = pd.DataFrame([data])\n# df.event_time = pd.to_datetime(df.event_time, unit='ms')\n# print(df.tail())\n\n# async def main():\n# while True:\n# async with stream as receiver:\n# data_dict = {}\n# data = await receiver.recv()\n\n# data = json.loads(data)[\"data\"]\n\n# binance_ticker = generate_data_model(data)\n# create_data_frame(binance_ticker.dict())\n","repo_name":"johnnfujita/livre-arbitrio","sub_path":"src/data_processing/multiapi_data_normalizer.py","file_name":"multiapi_data_normalizer.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23389373109","text":"'''\n测试函数\n'''\nfrom name import get_formatted_name\n\nfirst = input(\"input your first name\\n\")\nlast = input(\"input your last name\\n\")\nprint(get_formatted_name(first, last))\n\n# %%\n# 各种测试\n# **单元测试**用于核实函数的某个方面没有问题\n# **测试用例**是一组单元测试,一道核实函数在各种情形下的行为都符合要求。\n# **全覆盖**的测试用例包含一整套单元测试,古代各种可能的函数使用方式\nimport unittest\nfrom name import get_formatted_name\n\nclass NamesTestCase(unittest.TestCase): # 必须继承自测试类\n \"\"\"测试name.py\n\n Args:\n unittest (_type_): _description_\n \"\"\"\n\n def test_first_last_name(self):\n \"\"\"能够正确处理像Janis Joplin这样的姓名吗?\n \"\"\"\n formatted_name = get_formatted_name(\"janis\", \"joplin\")\n self.assertEqual(formatted_name, \"Janis Joplin\")\n\n# 若这个文件作为主程序执行,__name__将被设置为\"__main__\",将自动运行冒号后面的内容\n# 若这个文件/.py/module被测试框架导入,变量__name__将不是\"__main__\",因此不会运行后面的内容\nif __name__ == \"__main__\": \n unittest.main()\n# %%\n","repo_name":"yingmuzhi/artificial_intelligence","sub_path":"Learning/Python/section11_test_code/11_1_test_function.py","file_name":"11_1_test_function.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25968491226","text":"import tkinter as tk\nimport Config\nfrom UserInput import userInput\nfrom Menus.MenuModo import MenuModo\n\n\nclass MenuPasaBajos(tk.Frame): # heredamos de tk.Frame, padre de MenuPasaBajos\n def __init__(self, parent, controller):\n # parent representa el Frame principal del programa, tenemos que indicarle\n # cuando MenuInputOutput será dibujado\n\n # controller lo utilizamos cuando necesitamos que el controlador principal del programa haga algo\n\n # llamamos al constructor\n tk.Frame.__init__(self, parent)\n\n self.controller = controller\n self.parent = parent\n\n self.title = tk.Label(\n self,\n height=1,\n width=50,\n text=\"Configuración parámetros pasa bajo\",\n font=Config.LARGE_FONT,\n background=\"#ffccd5\"\n )\n\n self.title.pack(side=tk.TOP, fill=tk.BOTH)\n\n self.titleFo = tk.Label(\n self,\n height=1,\n width=50,\n text=\"Frecuencia de corte (kHz)\",\n font=Config.SMALL_FONT,\n background=\"#ccffd5\"\n )\n\n self.titleFo.pack(side=tk.TOP, fill=tk.BOTH, pady=30)\n\n self.w2 = tk.Scale(self, from_=0, to=100, resolution = 0.1, orient=tk.HORIZONTAL)\n self.w2.pack(side=tk.TOP, fill=tk.BOTH)\n\n self.buttonContinuar = tk.Button(\n self,\n height=2,\n width=50,\n text=\"Continuar\",\n font=Config.SMALL_FONT,\n background=\"#ccffd5\",\n command=self.continuar\n )\n\n self.buttonContinuar.pack(side=tk.TOP, fill=tk.BOTH, pady=20)\n\n def continuar(self):\n # configuramos modos\n userInput[\"f0\"] = self.w2.get() * 1000\n self.controller.showFrame(MenuModo)\n\n def focus(self):\n pass\n","repo_name":"newtonis/22.02-Electrical-Engineering-Examples","sub_path":"ejemplo9_ui/Menus/MenuPasaBajos.py","file_name":"MenuPasaBajos.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"33284234222","text":"import multiprocessing\n\nfrom flask import logging\nfrom kafka import KafkaConsumer\n\n\nclass Consumer(multiprocessing.Process):\n def __init__(self):\n multiprocessing.Process.__init__(self)\n self.stop_event = multiprocessing.Event()\n\n def stop(self):\n self.stop_event.set()\n\n def run(self):\n consumer = KafkaConsumer(bootstrap_servers='localhost:9092',\n auto_offset_reset='earliest',\n consumer_timeout_ms=1000)\n consumer.subscribe(['temp'])\n\n while not self.stop_event.is_set():\n for message in consumer:\n print(message)\n if self.stop_event.is_set():\n break\n\n consumer.close()\n\n\ndef main():\n tasks = [\n Consumer()\n ]\n\n for t in tasks:\n t.start()\n\n # time.sleep(10)\n\n # for task in tasks:\n # task.stop()\n #\n # for task in tasks:\n # task.join()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"v-tec2706/TK-project","sub_path":"client2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37858772811","text":"import versioneer\n\nfrom setuptools import setup, find_packages\n\n# Add README as description\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='domain-event-broker',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description='Send and receive domain events via RabbitMQ',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Ableton AG',\n author_email='webteam@ableton.com',\n url='https://github.com/AbletonAG/domain-event-broker',\n license='MIT',\n packages=find_packages(),\n package_data={'domain_event_broker': ['py.typed']},\n install_requires=[\"pika >= 1.2.0\"],\n tests_require=[\"pytest >= 3.6.0\"],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n","repo_name":"AbletonAG/domain-event-broker","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"20073399434","text":"import os\n\nimport cv2 as cv\nimport numpy as np\nfrom omegaconf import DictConfig\nfrom silk.config.core import instantiate_and_ensure_is_instance\nfrom silk.transforms.abstract import NamedContext\nfrom silk.transforms.cv.homography import RandomHomographicSampler\nfrom skimage import io\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\ndef draw_crops(image, coords):\n \"\"\"Draw homographic crops in image.\"\"\"\n if image.shape[2] == 1:\n image = image.repeat(3, axis=2)\n\n color = (255, 0, 0)\n thickness = 2\n coords[..., 0] = (coords[..., 0] + 1.0) * 0.5 * image.shape[1]\n coords[..., 1] = (coords[..., 1] + 1.0) * 0.5 * image.shape[0]\n coords = coords.detach().cpu().numpy().astype(np.uint16)\n\n for i in range(coords.shape[0]):\n line = [\n (coords[i][0], coords[i][1]),\n (coords[i][1], coords[i][3]),\n (coords[i][3], coords[i][2]),\n (coords[i][2], coords[i][0]),\n ]\n for j in range(len(line)):\n cv.line(image, line[j][0], line[j][1], color, thickness)\n return image\n\n\ndef main(config: DictConfig):\n \"\"\"Check the homographic sampler on real images by randomly generating homographic crops and saving them to disk.\"\"\"\n\n loader = instantiate_and_ensure_is_instance(config.mode.loader, DataLoader)\n\n sampler = instantiate_and_ensure_is_instance(\n config.mode.sampler, RandomHomographicSampler\n )\n\n it = iter(loader)\n for i in tqdm(range(config.mode.output.n_batches)):\n batch_output_dir = os.path.join(config.mode.output.directory, f\"{i:04d}\")\n os.makedirs(batch_output_dir)\n\n batch: NamedContext = next(it)\n batch.ensure_exists(\"image\")\n\n image_samples = sampler(batch[\"image\"])\n\n for j in range(batch[\"image\"].shape[0]):\n # save images coming from loader\n image = batch[\"image\"][j].permute(1, 2, 0)\n image = image.detach().cpu().numpy().astype(np.uint8)\n\n crop_per_image = sampler.batch_size // batch[\"image\"].shape[0]\n s = j * crop_per_image\n e = (j + 1) * crop_per_image\n image = draw_crops(image, sampler.src_coords[s:e])\n\n image_path = os.path.join(batch_output_dir, f\"{j:04d}_image.png\")\n io.imsave(image_path, image)\n\n # save homographic crops\n for k in range(crop_per_image):\n image = image_samples[j * crop_per_image + k].permute(1, 2, 0)\n image = image.detach().cpu().numpy().astype(np.uint8)\n\n image_path = os.path.join(batch_output_dir, f\"{j:04d}.{k:04d}_crop.png\")\n io.imsave(image_path, image)\n","repo_name":"facebookresearch/silk","sub_path":"lib/cli/check/homographic_sampler.py","file_name":"homographic_sampler.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":528,"dataset":"github-code","pt":"3"} +{"seq_id":"31424750204","text":"from sys import exit\nfrom mongoengine import connect\nfrom pymongo.errors import ServerSelectionTimeoutError\n\n\nclass MongodbHandler(object):\n\n def __init__(self):\n self.connection = None\n\n def init_app(self, app):\n self.connection = connect(host=app.config['MONGO_URI'])\n try:\n app.logger.info(\"Trying to connect to mongodb ...\")\n self.connection.server_info()\n except ServerSelectionTimeoutError:\n app.logger.critical(\"Cannot connect to mongodb. Will exit\")\n exit(1)\n app.logger.info(\"Successfully connected to mongodb..\")\n return self.connection\n\n def get_connection(self):\n return self.connection\n","repo_name":"AntonisMous/flask_mongo_docker_template","sub_path":"app/main/util/mongodb_handler.py","file_name":"mongodb_handler.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13551650362","text":"#生成榜单;首先从百度风云榜、微指数获得候选人;然后结合其他数据\n#首先获取候选人近20条微博的热度;问题是 不是所有人都开了微博;有些人微博数过少,粉丝不停转发和评论同一条微博。(这条暂时不考虑了)\n#获取候选人近一个月的百度指数\n#获取候选人近一个月的微指数\n#获取候选人微博话题\n#所有数据保存在以日期命名的文件夹中;把一些文件拿过来,生成一些文件\n#热度榜单\n\nimport re\nimport string\nimport sys\nimport os\nimport urllib\nfrom bs4 import BeautifulSoup\nimport requests\nfrom lxml import etree\nimport traceback\nfrom time import sleep\nimport random\nfrom xlrd import open_workbook\nimport xlwt\nfrom datetime import datetime,timedelta\n\n#从百度风云榜中抓人名\n\ndef candi():\n candidates = []\n headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/56.0.2924.87 Safari/537.36',\n 'Referer': 'http://www.baidu.com/',\n }\n\n url = \"http://top.baidu.com/buzz?b=618&c=9&fr=topbuzz_b17_c9\"\n html = requests.get(url, headers)\n html.encoding = \"gb2312\"\n selector = etree.HTML(html.content)\n #print(html.content)\n starName1 = selector.xpath(\"//a[@class='list-title']/text()\")\n #print(starName1)\n #candidates.append(starName)\n\n url = \"http://top.baidu.com/buzz?b=18&c=9&fr=topbuzz_b618_c9\"\n html = requests.get(url, headers)\n html.encoding = \"gb2312\"\n selector = etree.HTML(html.content)\n #print(html.content)\n starName = selector.xpath(\"//a[@class='list-title']/text()\")\n starName2 = re.findall(\"list-title.+?>(.+)<\",html.text)\n #print(starName)\n #candidates.append(starName)\n\n url = \"http://top.baidu.com/buzz?b=17&c=9&fr=topbuzz_b18_c9\"\n html = requests.get(url, headers)\n html.encoding = \"gb2312\"\n selector = etree.HTML(html.content)\n # print(html.content)\n starName = selector.xpath(\"//a[@class='list-title']/text()\")\n starName3 = re.findall(\"list-title.+?>(.+)<\", html.text)\n #print(starName)\n #candidates.append(starName)\n for name in starName1:\n if name not in candidates:\n candidates.append(name)\n\n for name in starName2:\n if name not in candidates:\n candidates.append(name)\n\n for name in starName3:\n if name not in candidates:\n candidates.append(name)\n print(candidates)\n aa = open(\"candi.txt\",\"w\",encoding=\"utf8\")\n aa.write(candidates)\n aa.close()\n return candidates\n\ndef wbdownloader(url):\n cookie3 = {\n \"Cookie\": \"_T_WM=7c5930258162fd8f073e9ecdd9190bb2; SUB=_2A250FV7VDeThGedG6FEX8C3Iyz6IHXVX9mKdrDV6PUJbkdBeLRntkW0WtcW-QY7RzMTWOCUAR11kW_sVEg..; SUHB=08JqqL49kwmUf3; SCF=AjJjBv-pUsOGtkW8JFo3haRdM9LVd-bosbMCu3nnzLc5gXJnEfzA0DemA9wmM7ja1xxtKJuHg76prQn-Cvj1maQ.; SSOLoginState=1494298245\"}\n\n cookie2 = {\n \"Cookie\": \"ALF=1499772633; SSOLoginState=1497180634; SCF=ApK1L_vhvYj6M87AX98Tz0BUGHkfFbzAa-6jG4QnKcUcEDYL2SRxXnXaCy8_LeiY0Di1YplQpVr00x8RgkmZW4I.; SUB=_2A250OVmKDeThGeNL4lsR9CbEzT-IHXVXwmfCrDV6PUNbktBeLWLXkW2AF-tC66ngua9TK_JXKdcBBSx2Mg..; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWCqVAQg2c2rMwAPfrsqYcc5JpX5KMhUgL.Fo-f1K.7ShnRSoe2dJLoIEBLxK-L1h-LB-zLxK-L12BLBKqLxKqLB-BLBKeLxK-LBK-LB.Bt; SUHB=0M2VuFAIWCRSg0; _T_WM=2d132a775ec8516db42e0357e80e4ed0\"}\n\n cookie1 = {\n \"Cookie\": \"ALF=1499744586; SCF=AvOZbZlloyrtrpmOsxChTRjnwm1oC1ew5W7EbJ_JNf9Kv9F9JQ8VpAh7QywIMC0nOIdEdt7pO-h7TGgl7wNU5E8.; SUB=_2A250OMwcDeThGeNJ6FsY8yvOwjuIHXVXwtRUrDV6PUJbktBeLWPfkW2heI6Uw_eBkNNS6qhmCzB1gZJL7g..; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhT742wYZB6Z1WeU48AjcD35JpX5o2p5NHD95QfS0e41Kefeo.NWs4Dqcjdi--RiK.pi-20i--ciK.4i-zXi--fi-2XiKLh; SUHB=0cS3YWVS2qyAlP; SSOLoginState=1497152588; _T_WM=642b31423dfb19dca6900a57fc3e96fa\"}\n t = random.uniform(0, 3)\n if t > 2:\n cookie = cookie2\n elif t > 1:\n cookie = cookie1\n else:\n cookie = cookie1\n\n headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/56.0.2924.87 Safari/537.36',\n 'Referer': 'https://weibo.cn/',\n }\n html = requests.get(url, headers, cookies= cookie)\n return html\n\n\n\ndef weiboinf(candidates): #需要考虑的点实在太多了,暂时不考虑\n weibopr = []\n bb = open(\"nouserid.txt\",\"a\",encoding=\"utf8\")\n for name in candidates:\n aa = open(\"userid.txt\", \"r\", encoding=\"utf8\")\n aa.readline()\n sumf = 0\n suma = 0\n sumc = 0\n print(name)\n for ln in aa:\n flag = 0 #有没有找到\n [nameid,id] = ln.split(\" \",maxsplit=1)\n id = id.strip()\n #print(ln)\n if name == nameid:\n flag = 1\n print(nameid,id)\n url = \"https://weibo.cn/u/\"+str(id)+\"?filter=1&page=1\"\n html = wbdownloader(url)\n html.encoding=\"utf8\"\n html = html.text\n #print(html)\n #forward = re.findall(\"W_ficon ficon_forward S_ficon.+?(\\d+)\",html)\n forward1 = re.findall(\"转发.(\\d+)\\W\",html)\n agree1 = re.findall(\"赞.(\\d+)\\W\",html)\n comment1 = re.findall(\"评论.(\\d+)\\W\",html)\n #print(url)\n sleep(5)\n url = \"https://weibo.cn/u/\" + str(id) + \"?filter=1&page=2\"\n #print(url)\n html = wbdownloader(url)\n html.encoding = \"utf8\"\n html = html.text\n #print(html)\n forward2 = re.findall(\"转发.(\\d+)\\W\", html)\n agree2 = re.findall(\"赞.(\\d+)\\W\", html)\n comment2 = re.findall(\"评论.(\\d+)\\W\", html)\n for i in range(0,len(forward1)):\n sumf += int(forward1[i])\n suma += int(agree1[i])\n sumc += int(comment1[i])\n for i in range(0, len(forward2)):\n sumf += int(forward2[i])\n suma += int(agree2[i])\n sumc += int(comment2[i])\n \"\"\"\n print(forward1)\n print(agree1)\n print(comment1)\n print(forward2)\n print(agree2)\n print(comment2)\n \"\"\"\n sumf = sumf/200000\n suma = suma/1000000\n sumc = sumc /200000\n #print(sumf)\n #print(suma)\n #print(sumc)\n sum = sumf + suma + sumc\n print(sum)\n sleep(5)\n weibopr.append(sum)\n break\n if flag == 0: #没有发现\n bb.write(name+\"\\n\")\n\n return(weibopr)\n\n\n\n\n\n\nif __name__ == '__main__':\n\n candidates = candi()\n #candidates = ['杨紫','黄子韬']\n #weibopr = weiboinf(candidates)\n #print(weibopr)\n","repo_name":"JoeyChui/privatecode","sub_path":"周三回去/冠影/Ranking/starhot.py","file_name":"starhot.py","file_ext":"py","file_size_in_byte":7103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1546616235","text":"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def sortedArrayToBST(self, nums):\n \"\"\"Return the root of a height-balanced binary search tree in O(n)\n time and O(n * log n) space given a sorted array of integers.\"\"\"\n root = TreeNode()\n stack = [(root, nums)]\n while stack:\n node, sublist = stack.pop()\n middle_index = len(sublist)//2\n left_sublist = sublist[:middle_index]\n right_sublist = sublist[middle_index + 1:]\n node.val = sublist[middle_index]\n if left_sublist:\n node.left = TreeNode()\n stack.append((node.left, left_sublist))\n if right_sublist:\n node.right = TreeNode()\n stack.append((node.right, right_sublist))\n return root\n","repo_name":"patricktsandin/leetcode","sub_path":"leetcode/easy/python/create_binary_search_tree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19421921695","text":"import numpy as np\r\nm,n = [int(i) for i in input().strip().split(' ')]\r\nx=[]\r\ny=[]\r\nfor i in range(n):\r\n data=list(map(float,input().split()))\r\n x.append(data[:m])\r\n y.append(data[m:])\r\nq=int(input().strip())\r\nx_new=[]\r\nfor i in range(q):\r\n data_new=list(map(float,input().split()))\r\n x_new.append(data_new)\r\nfeatures=[i.insert(0,1) for i in x]\r\nfeatures_new=[i.insert(0,1) for i in x_new]\r\nX=np.array(x,float)\r\nY=np.array(y,float)\r\nX_new=np.array(x_new,float)\r\nbeta=np.dot(np.linalg.inv(np.dot(X.T,X)),np.dot(X.T,Y))\r\n\r\nY_new=np.dot(X_new,beta)\r\npredict_y=Y_new.flatten()\r\nfor i in predict_y:\r\n print(round(i,2))\r\n\r\n\r\n","repo_name":"devendra45/10-Days-of-Statistics","sub_path":"Multiple_Linear_Regression.py","file_name":"Multiple_Linear_Regression.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"8360009548","text":"from __future__ import absolute_import\n\nfrom requests import Response\n\nfrom chart_studio.session import sign_in\nfrom chart_studio.tests.utils import PlotlyTestCase\n\nimport sys\n\n# import from mock\nif sys.version_info >= (3, 3):\n from unittest.mock import patch\nelse:\n from mock import patch\n\n\nclass PlotlyApiTestCase(PlotlyTestCase):\n def mock(self, path_string):\n patcher = patch(path_string)\n new_mock = patcher.start()\n self.addCleanup(patcher.stop)\n return new_mock\n\n def setUp(self):\n\n super(PlotlyApiTestCase, self).setUp()\n\n self.username = \"foo\"\n self.api_key = \"bar\"\n\n self.proxy_username = \"cnet\"\n self.proxy_password = \"hoopla\"\n self.stream_ids = [\"heyThere\"]\n\n self.plotly_api_domain = \"https://api.do.not.exist\"\n self.plotly_domain = \"https://who.am.i\"\n self.plotly_proxy_authorization = False\n self.plotly_streaming_domain = \"stream.does.not.exist\"\n self.plotly_ssl_verification = True\n\n sign_in(\n username=self.username,\n api_key=self.api_key,\n proxy_username=self.proxy_username,\n proxy_password=self.proxy_password,\n stream_ids=self.stream_ids,\n plotly_domain=self.plotly_domain,\n plotly_api_domain=self.plotly_api_domain,\n plotly_streaming_domain=self.plotly_streaming_domain,\n plotly_proxy_authorization=self.plotly_proxy_authorization,\n plotly_ssl_verification=self.plotly_ssl_verification,\n )\n\n def to_bytes(self, string):\n try:\n return string.encode(\"utf-8\")\n except AttributeError:\n return string\n\n def get_response(self, content=b\"\", status_code=200):\n response = Response()\n response.status_code = status_code\n response._content = content\n response.encoding = \"utf-8\"\n return response\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"} +{"seq_id":"72436389520","text":"import time\r\nimport os\r\nimport torch\r\nfrom collections import namedtuple\r\nfrom utils import logger\r\nfrom utils.statics import AverageMeter, evaluator\r\nimport scipy.io as io\r\n__all__ = ['Trainer', 'Tester']\r\n\r\n\r\nfield = ('nmse','epoch')\r\nResult = namedtuple('Result', field, defaults=(None,) * len(field))\r\n\r\n\r\nclass Trainer:\r\n r\"\"\" The training pipeline for encoder-decoder architecture\r\n \"\"\"\r\n def __init__(self, mode, model, device, optimizer, train_criterion, test_criterion, scheduler, save_path='./checkpoint', print_freq=20, val_freq=10, test_freq=10):\r\n \r\n # Basic arguments\r\n self.mode = mode\r\n self.model = model\r\n self.optimizer = optimizer\r\n self.train_criterion = train_criterion\r\n self.test_criterion = test_criterion\r\n self.scheduler = scheduler\r\n self.device = device\r\n \r\n # Verbose arguments\r\n self.save_path = save_path\r\n self.print_freq = print_freq\r\n self.val_freq = val_freq\r\n self.test_freq = test_freq\r\n\r\n # Pipeline arguments\r\n self.cur_epoch = 1\r\n self.all_epoch = None\r\n self.best_nmse = Result()\r\n \r\n self.validator = Tester(mode, model, device, test_criterion, print_freq)\r\n self.tester = Tester(mode, model, device, test_criterion, print_freq)\r\n \r\n self.test_loader = None\r\n self.train_loss = []\r\n self.val_loss = []\r\n self.val_nmse = []\r\n self.test_nmse = []\r\n \r\n def loop(self, epochs, train_loader, val_loader, test_loader, save_trend=False):\r\n r\"\"\" The main loop function which runs training and validation iteratively.\r\n\r\n Args:\r\n epochs (int): The total epoch for training\r\n train_loader (DataLoader): Data loader for training data.\r\n val_loader (DataLoader): Data loader for validation data.\r\n test_loader (DataLoader): Data loader for test data.\r\n save_trend (bool): whether to save training loss and nmse for visualization.\r\n \"\"\"\r\n\r\n self.all_epoch = epochs\r\n val_nmse = None\r\n for ep in range(self.cur_epoch, epochs + 1):\r\n self.cur_epoch = ep\r\n\r\n # conduct training, validation and test\r\n train_loss = self.train(train_loader)\r\n self.train_loss.append(train_loss)\r\n\r\n if ep % self.val_freq == 0:\r\n val_loss, val_nmse = self.val(val_loader)\r\n self.val_loss.append(val_loss)\r\n self.val_nmse.append(val_nmse)\r\n \r\n if ep % self.test_freq == 0:\r\n _, test_nmse = self.test(test_loader)\r\n self.test_nmse.append(test_nmse)\r\n \r\n # conduct saving \r\n self._loop_postprocessing(val_nmse)\r\n \r\n if save_trend == True: \r\n io.savemat(os.path.join(self.save_path, f'test_nmse.mat'),{'nmse':torch.tensor(self.test_nmse).numpy()})\r\n io.savemat(os.path.join(self.save_path, f'train_loss.mat'),{'loss':torch.tensor(self.train_loss).numpy()})\r\n io.savemat(os.path.join(self.save_path, f'val_loss.mat'),{'loss':torch.tensor(self.val_loss).numpy()})\r\n io.savemat(os.path.join(self.save_path, f'val_nmse.mat'),{'nmse':torch.tensor(self.val_nmse).numpy()})\r\n\r\n def train(self, train_loader):\r\n r\"\"\" train the model on the given data loader for one epoch.\r\n Args:\r\n train_loader (DataLoader): the training data loader\r\n \"\"\"\r\n self.model.train()\r\n with torch.enable_grad():\r\n return self._iteration(train_loader)\r\n\r\n def val(self, val_loader):\r\n r\"\"\" exam the model with validation set.\r\n Args:\r\n val_loader: (DataLoader): the validation data loader\r\n \"\"\"\r\n self.model.eval()\r\n with torch.no_grad():\r\n val_loss, val_nmse = self.validator(val_loader, verbose=False)\r\n logger.info(f'=> Val NMSE: {val_nmse:.3e}\\n')\r\n return val_loss, val_nmse\r\n\r\n def test(self, test_loader):\r\n r\"\"\" Truly test the model on the test dataset for one epoch.\r\n Args:\r\n test_loader (DataLoader): the test data loader\r\n \"\"\"\r\n self.model.eval()\r\n with torch.no_grad():\r\n test_loss, test_nmse = self.tester(test_loader, verbose=False)\r\n logger.info(f'=> Test NMSE: {test_nmse:.3e}\\n')\r\n return test_loss, test_nmse\r\n\r\n def _iteration(self, data_loader):\r\n iter_loss = AverageMeter('Iter loss')\r\n iter_time = AverageMeter('Iter time')\r\n time_tmp = time.time()\r\n\r\n for batch_idx, (hca, hcc, hcp, index, ) in enumerate(data_loader):\r\n if self.mode == 'FB':\r\n hcc = hcc.to(self.device)\r\n hcc_pred = self.model(hcc)\r\n loss = self.train_criterion(hcc_pred, hcc)\r\n elif self.mode == 'RE':\r\n hcp = hcp.to(self.device)\r\n hca_re = self.model(hcp)\r\n loss = self.train_criterion(hca_re-hcp, hca-hcp)\r\n elif self.mode == 'Joint':\r\n hcc = hcc.to(self.device)\r\n index = index.to(self.device)\r\n hca_pred = self.model(hcc, index)\r\n loss = self.train_criterion(hca_pred, hca)\r\n \r\n # Scheduler update, backward pass and optimization\r\n if self.model.training:\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step() \r\n self.scheduler.step()\r\n \r\n # Log and visdom update\r\n iter_loss.update(loss)\r\n iter_time.update(time.time() - time_tmp)\r\n time_tmp = time.time()\r\n\r\n # plot progress\r\n if (batch_idx + 1) % self.print_freq == 0:\r\n logger.info(f'Epoch: [{self.cur_epoch}/{self.all_epoch}]'\r\n f'[{batch_idx + 1}/{len(data_loader)}] '\r\n f'lr: {self.scheduler.get_lr()[0] :.2e} | '\r\n f'MSE loss: {iter_loss.avg:.3e} | '\r\n f'time: {iter_time.avg:.3f}')\r\n\r\n mode = 'Train' if self.model.training else 'Val'\r\n logger.info(f'=> {mode} Loss: {iter_loss.avg:.3e}\\n')\r\n return iter_loss.avg\r\n \r\n def _save(self, state, name):\r\n if self.save_path is None:\r\n logger.warning('No path to save checkpoints.')\r\n return\r\n\r\n os.makedirs(self.save_path, exist_ok=True)\r\n torch.save(state, os.path.join(self.save_path, name))\r\n\r\n def _loop_postprocessing(self, nmse):\r\n r\"\"\" private function which makes loop() function neater.\r\n \"\"\"\r\n # save state generate\r\n state = {\r\n 'epoch': self.cur_epoch,\r\n 'state_dict': self.model.state_dict(),\r\n 'best_nmse': self.best_nmse\r\n }\r\n\r\n if nmse is not None:\r\n if self.best_nmse.nmse is None or self.best_nmse.nmse > nmse:\r\n self.best_nmse = Result(nmse=nmse, epoch=self.cur_epoch)\r\n state['best_nmse'] = self.best_nmse \r\n self._save(state, name=f\"best_nmse.pth\")\r\n\r\n self._save(state, name='last.pth')\r\n\r\n # print current best results\r\n if self.best_nmse.nmse is not None:\r\n logger.info(f'\\n Best NMSE: {self.best_nmse.nmse:.3e} '\r\n f'\\n epoch: {self.best_nmse.epoch}\\n')\r\n\r\n\r\nclass Tester:\r\n r\"\"\" The testing interface for classification\r\n \"\"\"\r\n def __init__(self, mode, model, device, criterion, print_freq=20):\r\n self.mode = mode\r\n self.model = model\r\n self.device = device\r\n self.criterion = criterion\r\n self.print_freq = print_freq\r\n\r\n def __call__(self, test_data, verbose=True):\r\n r\"\"\" Runs the testing procedure.\r\n Args:\r\n test_data (DataLoader): Data loader for validation data.\r\n \"\"\"\r\n self.model.eval()\r\n with torch.no_grad():\r\n loss, nmse = self._iteration(test_data)\r\n if verbose:\r\n print(f'\\n=> Test result: \\nloss: {loss:.3e}'\r\n f' NMSE: {nmse:.3e}\\n')\r\n return loss, nmse\r\n\r\n def _iteration(self, data_loader):\r\n iter_loss = AverageMeter('Iter loss')\r\n iter_time = AverageMeter('Iter time')\r\n iter_nmse = AverageMeter('Iter NMSE')\r\n time_tmp = time.time()\r\n\r\n for batch_idx, (hca, hcc, hcp, index, ) in enumerate(data_loader):\r\n if self.mode == 'FB':\r\n hcc = hcc.to(self.device)\r\n hcc_pred = self.model(hcc)\r\n loss = self.criterion(hcc_pred, hcc)\r\n nmse = evaluator(hcc_pred, hcc)\r\n elif self.mode == 'RE':\r\n hcp = hcp.to(self.device)\r\n hca_re = self.model(hcp)\r\n loss = self.criterion(hca_re-hcp, hca-hcp)\r\n nmse = evaluator(hca_re, hca)\r\n elif self.mode == 'Joint':\r\n hcc = hcc.to(self.device)\r\n index = index.to(self.device)\r\n hca_pred = self.model(hcc, index)\r\n loss = self.criterion(hca_pred, hca)\r\n nmse = evaluator(hca_pred, hca)\r\n \r\n # Log and visdom update\r\n iter_loss.update(loss)\r\n iter_nmse.update(nmse)\r\n iter_time.update(time.time() - time_tmp)\r\n time_tmp = time.time()\r\n\r\n # plot progress\r\n if (batch_idx + 1) % self.print_freq == 0:\r\n logger.info(f'[{batch_idx + 1}/{len(data_loader)}] '\r\n f'loss: {iter_loss.avg:.3e} | '\r\n f'NMSE: {iter_nmse.avg:.3e} | time: {iter_time.avg:.3f}')\r\n return iter_loss.avg, iter_nmse.avg","repo_name":"zhang-xd18/safb","sub_path":"utils/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":9926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"21174214680","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('questions', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='question',\n name='exam',\n field=models.CharField(verbose_name='Prova', default='', max_length=100),\n preserve_default=False,\n ),\n ]\n","repo_name":"pet-informatica/geoquest","sub_path":"questions/migrations/0002_question_exam.py","file_name":"0002_question_exam.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"482653894","text":"from tkinter import *\nimport math\nimport subprocess\nroot = Tk()\nroot.geometry(\"320x150\")\n\n# Все переменные\nequa = \"\"\nexpression = \"\"\ntotal = \"\"\n\n# Все кнопки\nequation = StringVar()\nexpression = Entry(root, textvariable=equation)\nexpression.grid(columnspan=4, ipadx=70)\nexpression = Entry(root, textvariable=equation)\nexpression.grid(columnspan=8, ipadx=70)\n\n\ndef press(num):\n\tglobal expression\n\texpression = expression + str(num)\n\tequation.set(expression)\n\n# Функция для вычисления тригонометрических функций\ndef trigonometry(trig, expression):\n total = eval(trig + '(' + expression + ')')\n equation.set(total)\n expression = \"\"\n# Кнопка тангенс\ntg = Button(root, text='tg', fg='black', bg='red',\n\t\t\t\tcommand=lambda: trigonometry('math.tan', expression.get()), height=1, width=7)\ntg.grid(row=1, column='1')\n\n# Кнопка косинус\ncos = Button(root, text='cos', fg='black', bg='red',\n\t\t\t\tcommand=lambda: trigonometry('math.cos', expression.get()), height=1, width=7)\ncos.grid(row=1, column='2')\n\n# Кнопка синус\nsin = Button(root, text='sin', fg='black', bg='red',\n\t\t\t\tcommand=lambda: trigonometry('math.sin', expression.get()), height=1, width=7)\nsin.grid(row=1, column='3')\n# Кнопка плюс\nbutton1 = Button(root, text='+', fg='black', bg='red',\n\t\t\t\tcommand=lambda: trigonometry('+', expression.get()), height=1, width=7)\nbutton1.grid(row=2, column='1')\nsnake = Button(root, text='Я устал, я хочу поиграть в змейку!', fg='black', bg='red',\n\t\t\t\tcommand=lambda: subprocess.call([\"python\", \"C:\\\\Users\\\\User\\\\PycharmProjects\\\\CV2PPG\\\\CRR\\\\my_projects\\\\snake.py\"]), height=1, width=27)\nsnake.grid(row=3, column='2')\nroot.mainloop()","repo_name":"Werby213/pythonl","sub_path":"CV2PPG/CRR/my_projects/!tkinter_learn2.py","file_name":"!tkinter_learn2.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17107149795","text":"import sys\nimport os\nimport pickle\nfrom tensorflow.keras.preprocessing import sequence as s\nfrom tensorflow.keras import models\n\n# command line arguments accepted:\n# 1) path to .txt file with test sentences\n# 2) type of classifier to use (\"relu\", \"sigmoid\", \"tanh\")\npath_test = sys.argv[1]\nclfName = sys.argv[2]\n\n# setting up the file path for saved models assuming cwd directory is ...\\assignment-4\ntokenizer_path = os.getcwd() + \"\\\\data\\\\tokenizer.pkl\"\nmodel_path = os.getcwd() + \"\\\\data\\\\\" + \"nn_\" + clfName + \".model\"\n\n# open the test file with raw sentences\ntest_file = open(path_test, 'r')\n# read the test file lines into a list\ntest = test_file.read().splitlines()\n\n# load the tokenizer fitted on the training set\nwith open(tokenizer_path, 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n# integer encode the test sentences\ntest_encoded = tokenizer.texts_to_sequences(test)\n\n# standardize test sentence lengths\nmax_length = 11 # maximum sentence length defined during training\ntest_encoded = s.pad_sequences(test_encoded, maxlen=max_length, padding='post', truncating='post')\n\n# load the ffnn model\nmodel = models.load_model(model_path)\n\n# predict sentiment of test sentences (pos or neg)\nprediction = model.predict(test_encoded)\nprediction_round = prediction.round()\n\n# one hot vector mapping: 0 -> [1 0], 1 -> [0 1]\nfor i in range(len(test)):\n print(\"sentence:\\t\", test[i])\n if prediction_round[i][0] == 0:\n print(\"prediction: \", prediction[i], \"\\tsentiment: positive\\n\")\n else:\n print(\"prediction: \", prediction[i], \"\\tsentiment: negative\\n\")","repo_name":"S-Li/msci-nlp-w22","sub_path":"assignment-4/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19667448846","text":"# Q: we have to find the number of ways we can reach the destination(n-1 node) from the source.\n\n# main crux: we have to find the no of ways we can reach the detination in shortest path.\n# but idea here is: the same thing we have to check for node before it.\n# that's why we need to keep one more array than the distance i.e ways.\n\n# why exact simple dijkastra will not work?\n# Ans: Since we only update distance in Dijkastra when we find the optimal not even \"=\".\n# And here we need to keep track of equal to also for every node then only we will get ans for 'n-1'th node.\n\n# totally same as Dijkastra Algo. You can say 'Dijkastra with DP'.\n# since we have to repeat for every node to reach the destonation.\n\n# logic: 1) we will find optimal one for any node i.e 'distance[n2] > w1 + w2' then we will ways[n2]= ways[n1],\n# since we are getting optimal through 'n1' so then no of ways we can reach 'n2' in this optimal distance 'w1+w2' ,\n# will be equal to no of ways we reached 'n1' in optimal ways i.e ways[n1]\n\n# 2) if 'distance[n2] == w1 + w2' then, ways[n2]= ways[n2] + ways[n1] since 'n2' is already reachable in this much distance in ways[n2] before only\n# And now 'n2' is reachable through 'n1' in same distance so we will add the ways of both 'n1' and 'n2' to get total ways for 'n2'.\n\nimport heapq\nclass Solution:\n def countPaths(self, n: int, roads: List[List[int]]) -> int:\n adj= collections.defaultdict(list)\n for s, d, time in roads:\n adj[s].append((d, time))\n adj[d].append((s, time))\n distance= [float('inf')]*n # use float('inf') instead of any bigger number\n ways= [0]*n\n heap= [(0, 0)] # distance, source\n distance[0]= 0\n ways[0]= 1 # no of way to reach source will be deafult '1'.\n visited= set()\n while heap:\n w1, n1= heapq.heappop(heap)\n if n in visited:\n continue\n visited.add(n1)\n for n2, w2 in adj[n1]:\n if n2 not in visited: \n # we have found the better ans than before, so simly update the ways with the poped node.\n if distance[n2] > w1 + w2:\n distance[n2]= w1 + w2\n heapq.heappush(heap, (w1 + w2, n2))\n ways[n2]= ways[n1]\n # means we have found the path with same shortest path. so add the no of ways of 'n1' to 'n2'\n elif distance[n2]== w1 + w2:\n ways[n2]= (ways[n2] + ways[n1]) % (10**9 + 7)\n return ways[n-1] % (10**9 + 7)\n\n\n# my mistake: i was thinking to check the mintime when 'n1' got poped with time when it will get poped again by not marking 'n1' as visited but it won't work.\n# since we only update when we find optimal one.","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Graph/1976. Number of Ways to Arrive at Destination.py","file_name":"1976. Number of Ways to Arrive at Destination.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"41119424030","text":"from django.contrib import admin\nfrom .models import Produc\n\n# Register your models here.\nclass ProducAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"clasific\",\n \"name\",\n \"price\",\n \"image\",\n \"supplier\",\n )\nadmin.site.register(Produc, ProducAdmin)","repo_name":"CristianS25/Django-ApiVEn","sub_path":"Shop/produc/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40269496935","text":"# coding=utf-8\n#\n# Yu Wang (University of Yamanashi)\n# Mar,2020\n#\n# Licensed under the Apache License,Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Make HCLG graph\"\"\"\nimport os\nimport pickle\nimport copy\n\nfrom exkaldi.version import info as ExKaldiInfo\nfrom exkaldi.error import *\nfrom exkaldi.utils.utils import make_dependent_dirs,run_shell_command,type_name\nfrom exkaldi.utils.utils import FileHandleManager\nfrom exkaldi.utils import declare\nfrom exkaldi.core.archive import ListTable\nfrom exkaldi.core.load import load_list_table\n\nclass LexiconBank:\n\t'''\n\tThis class is designed to hold all lexicons which are going to be used when user want to make decoding graph.\n\t'''\n\n\tdef __init__(self,pronFile,silWords={\"\":\"\"},unkSymbol={\"unk\":\"unk\"},optionalSilPhone=\"\",extraQuestions=[],\n\t\t\t\t\tpositionDependent=False,shareSilPdf=False,extraDisambigPhoneNumbers=1,extraDisambigWords=[]\n\t\t\t\t):\n\t\t'''\n\t\tArgs:\n\t\t\t: should be a file path. We support to generate lexicon bank from 5 kinds of lexicon which are \"lexicon\",\"lexiconp(_disambig)\" and \"lexiconp_silprob(_disambig)\".\n\t\t\t\t\t\t\tIf it is not \"lexicon\" and silence words or unknown symbol did not exist,error will be raised.\n\t\t\t: should be a list object whose members are silence words or a dict of silence words and their corresponding proninciations. \n\t\t\t\t\t\t\t\t\tIf these words have not already existed in ,their proninciations will be replaced with new ones.\n\t\t\t: should be a list object whose only has one element oov symbol or a dict of a unksymbol and it's proninciation. \n\t\t\t\t\t\t\t\t\tIf the symbol has not already existed in ,Its proninciation will be replaced with new one.\n\t\t\t: should be a string. It will be used as the pronunciation of \"\".\n\t\t\t: extra questions to cluster phones when train decision tree.\n\t\t\t: If True,generate position-dependent lexicons.\n\t\t\t: If True,share the gaussion funtion of silence phones.\n\t\t\t: extra number of disambiguation phone.\n\t\t\t: extra disambiguation words.\n\t\t\n\t\tReturn:\n\t\t\tA lexicon bank object who holds all lexicons.\n\t\t'''\n\t\tdeclare.is_file(\"pronFile\",pronFile)\n\t\t# check silence\n\t\tdeclare.is_classes(\"silWords\",silWords,[list,dict])\n\t\tdeclare.not_void(\"silWords\",silWords)\n\t\tif isinstance(silWords,list):\n\t\t\tsilWords = dict( (s,s) for s in silWords )\n\t\t\tself.__retain_original_sil_pron = True\n\t\telse:\n\t\t\tself.__retain_original_sil_pron = False\n\t\t# check unkSymbol\n\t\tdeclare.is_classes(\"unkSymbol\",unkSymbol,[list,dict])\n\t\tdeclare.not_void(\"unkSymbol\",unkSymbol)\n\t\tassert len(unkSymbol) == 1,\"You can spicify only one unknown word (and its' pronunciation).\"\n\t\tif isinstance(unkSymbol,list):\n\t\t\tunkSymbol = dict( (s,s) for s in unkSymbol )\n\t\t\tself.__retain_original_unk_pron = True\n\t\telse:\n\t\t\tself.__retain_original_unk_pron = False\n\t\t# check other parameters\n\t\tdeclare.is_valid_string(\"optionalSilPhone\",optionalSilPhone)\n\t\tdeclare.is_classes(\"extraQuestions\",extraQuestions,list)\n\t\tdeclare.is_bool(\"positionDependent\",positionDependent)\n\t\tdeclare.is_bool(\"shareSilPdf\",shareSilPdf)\n\t\tdeclare.is_positive_int(\"extraDisambigPhoneNumbers\",extraDisambigPhoneNumbers)\n\t\tdeclare.is_classes(\"extraDisambigWords\",extraDisambigWords,list)\n\t\t# backup\n\t\tself.__parameters = {\"silWords\":silWords,\n\t\t\t\t\t\t\t \"unkSymbol\":unkSymbol,\n\t\t\t\t\t\t\t \"optionalSilPhone\":optionalSilPhone,\n\t\t\t\t\t\t\t \"extraQuestions\":extraQuestions,\n\t\t\t\t\t\t\t \"positionDependent\":positionDependent,\n\t\t\t\t\t\t\t \"shareSilPdf\":shareSilPdf,\n\t\t\t\t\t\t\t \"extraDisambigPhoneNumbers\":extraDisambigPhoneNumbers,\n\t\t\t\t\t\t\t \"extraDisambigWords\":extraDisambigWords,\n\t\t\t\t\t\t\t \"ndisambig\":0, # This value will be updated later\n\t\t\t\t\t\t\t}\n\t\t# Validate the extra disambig words\n\t\tself.__validate_extraDisambigWords()\n\t\t# Satrt to initialize all lexicons \n\t\tself.__dictionaries = {}\n\t\tself.__initialize_dictionaries(pronFile)\n\n\tdef __validate_extraDisambigWords(self):\n\t\t'''\n\t\tThis method is used to check whether extra disambiguation words provided have a right format.\n\t\t'''\n\t\tif len(self.__parameters[\"extraDisambigWords\"]) > 0:\n\t\t\twith FileHandleManager() as fhm:\n\t\t\t\textraDisambigWords = fhm.create(\"w+\",encoding='utf-8')\n\t\t\t\textraDisambigWords.write(\"\\n\".join(self.__parameters[\"extraDisambigWords\"]))\n\t\t\t\textraDisambigWords.seek(0)\n\t\t\t\tcmd = os.path.join(ExKaldiInfo.KALDI_ROOT,'egs','wsj','s5','utils','lang','validate_disambig_sym_file.pl') + f' --allow-numeric \"false\" {extraDisambigWords.name}'\n\t\t\t\tout,err,cod = run_shell_command(cmd,stdout=\"PIPE\",stderr=\"PIPE\")\n\t\t\t\tif cod != 0:\n\t\t\t\t\traise WrongDataFormat(\"Failed to validate extra disambig words.\",out.decode())\n\n\tdef __initialize_dictionaries(self,fileName):\n\t\t'''\n\t\tThis method is used to generate all lexicons step by step\n\t\t'''\n\t\t## Check file format. We support file with 5 types of formats: [lexicon],[lexiconp(_disambig)],[lexiconp_silprob(_disambig)].\n\t\tdictType,dataList = self.__check_lexicon_type(fileName)\n\n\t\t## Depending on the file format gained above,initialize key lexicon: [lexiconp].\n\t\tif dictType == \"lexicon\":\n\t\t\tself.__creat_lexiconp_from_lexicon(dataList)\n\t\telif dictType == \"silprob\":\n\t\t\traise WrongOperation('Cannot generate lexicon bank from silprob file.')\n\t\telse:\n\t\t\tself.__creat_lexiconp_from_lexiconp(dataList,dictType)\n\n\t\t## When arrived here,\n\t\t## if is \"lexicon\" or \"lexiconp(_disambig)\",three lexicons,[lexiconp],[lexiconp_disambig] and [diasmbig],have been generated.\n\t\t## Or if is \"lexiconp_silprob(_disambig)\",four lexicons,[lexiconp],[lexiconp_silprob],[lexiconp_silprob_disambig] and [disambig],have been generated.\n\n\t\tsilWords = self.__parameters[\"silWords\"]\n\t\tunkSymbol = self.__parameters[\"unkSymbol\"]\n\t\toptionalSilPhone = self.__parameters[\"optionalSilPhone\"]\n\t\textraQuestions = self.__parameters[\"extraQuestions\"]\n\t\textraDisambigWords = self.__parameters[\"extraDisambigWords\"]\n\n\t\t## Make lexicon: [silence_phones]\n\t\ttemp = []\n\t\tfor symbol in list(silWords.keys()) + list(unkSymbol.keys()):\n\t\t\tphone = self.__dictionaries[\"lexiconp\"][(symbol,0)][1].split(\"_\")[0]\n\t\t\ttemp.append(phone)\n\t\tself.__dictionaries[\"silence_phones\"] = list( set(temp) )\n\n\t\t## Make lexicon: [optional_silence]\n\t\tself.__dictionaries[\"optional_silence\"] = optionalSilPhone\n\n\t\t## Make lexicon: [nonsilence_phones]\n\t\ttemp = []\n\t\tfor word,pron in self.__dictionaries[\"lexiconp\"].items():\n\t\t\ttemp.extend( map(lambda x:x.split(\"_\")[0],pron[1:]) )\n\t\ttemp = sorted(list(set(temp)))\n\t\tself.__dictionaries[\"nonsilence_phones\"] = []\n\t\tfor phone in temp:\n\t\t\tif (phone not in self.__dictionaries[\"silence_phones\"]) and phone != optionalSilPhone :\n\t\t\t\tself.__dictionaries[\"nonsilence_phones\"].append(phone)\n\n\t\t## Make lexicons: [phone_map],[silence_phone_map],[nonsilence_phone_map]\n\t\tself.__dictionaries[\"phone_map\"] = {}\n\t\tself.__dictionaries[\"silence_phone_map\"] = {}\n\t\tself.__dictionaries[\"nonsilence_phone_map\"] = {}\n\t\tif self.__parameters[\"positionDependent\"]:\n\t\t\tfor phone in self.__dictionaries[\"silence_phones\"]:\n\t\t\t\tself.__dictionaries[\"phone_map\"][phone] = ( phone,phone+\"_S\",phone+\"_B\",phone+\"_E\",phone+\"_I\" )\n\t\t\t\tself.__dictionaries[\"silence_phone_map\"][phone] = ( phone,phone+\"_S\",phone+\"_B\",phone+\"_E\",phone+\"_I\" )\n\t\t\tfor phone in self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\tself.__dictionaries[\"phone_map\"][phone] = ( phone+\"_S\",phone+\"_B\",phone+\"_E\",phone+\"_I\")\n\t\t\t\tself.__dictionaries[\"nonsilence_phone_map\"][phone] = ( phone+\"_S\",phone+\"_B\",phone+\"_E\",phone+\"_I\")\n\t\telse:\n\t\t\tfor phone in self.__dictionaries[\"silence_phones\"]:\n\t\t\t\tself.__dictionaries[\"phone_map\"][phone] = ( phone,)\n\t\t\t\tself.__dictionaries[\"silence_phone_map\"][phone] = ( phone,)\n\t\t\tfor phone in self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\tself.__dictionaries[\"phone_map\"][phone] = ( phone,)\n\t\t\t\tself.__dictionaries[\"nonsilence_phone_map\"][phone] = ( phone,)\n\n\t\t## Make lexicon: [extraQuestions]\n\t\tif len(extraQuestions) == 0:\n\t\t\tself.__dictionaries[\"extra_questions\"] = []\n\t\telse:\n\t\t\tfor phone in extraQuestions:\n\t\t\t\tif not phone in self.__dictionaries[\"silence_phones\"] + self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\t\traise WrongDataFormat('Phoneme \"{}\" in extra questions is not existed in \"phones\".'.format(phone))\n\t\t\tself.__dictionaries[\"extra_questions\"] = [ tuple(extraQuestions) ]\n\t\t\n\t\tif self.__parameters[\"positionDependent\"]:\n\t\t\tfor suffix in [\"_B\",\"_E\",\"_I\",\"_S\"]:\n\t\t\t\tline = []\n\t\t\t\tfor phone in self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\t\tline.append( phone + suffix )\n\t\t\t\tself.__dictionaries[\"extra_questions\"].append(tuple(line))\n\t\t\tfor suffix in [\"\",\"_B\",\"_E\",\"_I\",\"_S\"]:\n\t\t\t\tline = []\n\t\t\t\tfor phone in self.__dictionaries[\"silence_phones\"]:\n\t\t\t\t\tline.append( phone + suffix )\n\t\t\t\tself.__dictionaries[\"extra_questions\"].append(tuple(line))\t\n\n\t\t## Make lexicons: [silence],[nonsilence]\n\t\tself.__dictionaries[\"silence\"] = []\n\t\tself.__dictionaries[\"nonsilence\"] = []\n\t\tfor phones in self.__dictionaries[\"silence_phone_map\"].values():\n\t\t\tself.__dictionaries[\"silence\"].extend(phones)\n\t\tfor phones in self.__dictionaries[\"nonsilence_phone_map\"].values():\n\t\t\tself.__dictionaries[\"nonsilence\"].extend(phones)\n\t\t\n\t\tself.__dictionaries[\"context_indep\"] = self.__dictionaries[\"silence\"]\n\t\t\n\t\t## Make lexicon: [word_boundary]\n\t\tif self.__parameters[\"positionDependent\"]:\n\t\t\tself.__dictionaries[\"word_boundary\"] = {}\n\t\t\tfor phone in self.__dictionaries[\"silence\"] + self.__dictionaries[\"nonsilence\"]:\n\t\t\t\tif phone.endswith(\"_S\"):\n\t\t\t\t\tself.__dictionaries[\"word_boundary\"][phone] = \"singleton\"\n\t\t\t\telif phone.endswith(\"_B\"):\n\t\t\t\t\tself.__dictionaries[\"word_boundary\"][phone] = \"begin\"\n\t\t\t\telif phone.endswith(\"_I\"):\n\t\t\t\t\tself.__dictionaries[\"word_boundary\"][phone] = \"internal\"\n\t\t\t\telif phone.endswith(\"_E\"):\n\t\t\t\t\tself.__dictionaries[\"word_boundary\"][phone] = \"end\"\n\t\t\t\telse:\n\t\t\t\t\tself.__dictionaries[\"word_boundary\"][phone] = \"nonword\"\n\n\t\t## Make lexicons: [wdisambig],[wdisambig_phones],[wdisambig_words]\n\t\tself.__dictionaries[\"wdisambig\"] = [\"#0\"]\n\t\tif len(extraDisambigWords) > 0:\n\t\t\tself.__dictionaries[\"wdisambig\"].extend(extraDisambigWords)\n\t\tself.__dictionaries[\"wdisambig_phones\"] = self.__dictionaries[\"wdisambig\"]\n\t\tself.__dictionaries[\"wdisambig_words\"] = self.__dictionaries[\"wdisambig\"]\n\n\t\t## Make lexicon: [align_lexicon]\n\t\tself.__dictionaries[\"align_lexicon\"] = {}\n\t\tself.__dictionaries[\"align_lexicon\"][(\"\",0)] = (\"\",optionalSilPhone,)\n\t\tfor word,pron in self.__dictionaries[\"lexiconp\"].items():\n\t\t\tself.__dictionaries[\"align_lexicon\"][word] = (word[0],) + pron[1:]\n\n\t\t## Make lexicon: [oov]\n\t\tself.__dictionaries[\"oov\"] = list(unkSymbol.keys())[0]\n\n\t\t## Make lexicon: [sets]\n\t\tself.__dictionaries[\"sets\"] = []\n\t\tfor phone in self.__dictionaries[\"silence_phones\"] + self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\tself.__dictionaries[\"sets\"].append(self.__dictionaries[\"phone_map\"][phone])\n\n\t\t## Make lexincon: [roots]\n\t\tself.__dictionaries[\"roots\"] = {}\n\t\ttemp1 = []\n\t\ttemp2 = []\n\t\tif self.__parameters[\"shareSilPdf\"]:\n\t\t\tfor phone in self.__dictionaries[\"silence_phones\"]:\n\t\t\t\ttemp1.extend(self.__dictionaries[\"phone_map\"][phone])\n\t\t\tfor phone in self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\ttemp2.append(self.__dictionaries[\"phone_map\"][phone])\n\t\telse:\n\t\t\tfor phone in self.__dictionaries[\"silence_phones\"] + self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\ttemp2.append(self.__dictionaries[\"phone_map\"][phone])\n\t\t\n\t\tself.__dictionaries[\"roots\"][\"not-shared not-split\"] = tuple(temp1)\n\t\tself.__dictionaries[\"roots\"][\"shared split\"] = tuple(temp2)\n\t\t\n\t\t## Make lexincon: [phones]\n\t\tself.__make_phone_int_table()\n\n\t\t## Make lexicon: [words]\n\t\tself.__make_word_int_table()\n\n\tdef __check_lexicon_type(self,lexiconFile):\n\t\t'''\n\t\tWhen given a lexicon file name,firstly discrimate its type.\n\t\tIf it does not belong to \"lexicon\",\"lexiconp(_disambig)\",\"lexiconp_silprob(_disambig)\" and \"silprob\",raise error.\n\t\t'''\n\n\t\twith open(lexiconFile,\"r\",encoding=\"utf-8\") as fr:\n\t\t\tlines = fr.readlines()\n\t\t\n\t\tdataList = []\n\t\t## Check if it is \"silprob\"\n\t\tif len(lines) >= 4:\n\t\t\tMayBeSilprob = True\n\t\t\tdef check_if_float(s):\n\t\t\t\ttry:\n\t\t\t\t\tfloat(s)\n\t\t\t\texcept ValueError:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\n\t\t\tfor line,prefix in zip(lines[0:4],[\"\",\"_s\",\"_n\",\"overall\"]):\n\t\t\t\tline = line.strip().split()\n\t\t\t\tif len(line) != 2 or line[0] != prefix or check_if_float(line[1]):\n\t\t\t\t\tMayBeSilprob = False\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tdataList.append( tuple(line) )\n\t\t\tif MayBeSilprob:\n\t\t\t\treturn \"silprob\",dataList\n\n\t\t## Check if it is \"lexicon\" or \"lexiconp\" or \"lexiconp_silprob\" \n\t\tdictType = None\n\t\tfor line in lines:\n\t\t\tline = line.strip().split()\n\t\t\tif len(line) == 0:\n\t\t\t\tcontinue\n\t\t\tif len(line) == 1:\n\t\t\t\traise WrongDataFormat(f\"Missing integrated word-(probability)-pronunciation information: {line[0]}.\")\n\t\t\tif dictType is None:\n\t\t\t\ttry:\n\t\t\t\t\tfloat(line[1])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tdictType = \"lexicon\"\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfor i in [2,3,4]:\n\t\t\t\t\t\t\tfloat(line[i])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\tif i == 2:\n\t\t\t\t\t\t\tdictType = \"lexiconp\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise WrongDataFormat('Expected \"lexicon\",\"lexiconp(_disambig)\",\"lexiconp_silprob(_disambig)\",\"silprob\" file but got a unknown format.')\n\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\tif i == 2:\n\t\t\t\t\t\t\tline = \" \".join(line)\n\t\t\t\t\t\t\traise WrongDataFormat(f\"Missing integrated word-(probability)-pronunciation information: {line}.\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise WrongDataFormat('Expected \"lexicon\",\"lexiconp(_disambig)\",\"lexiconp_silprob(_disambig)\",\"silprob\" file but got a unknown format.')\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfloat(line[5])\n\t\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\t\tline = \" \".join(line)\n\t\t\t\t\t\t\traise WrongDataFormat(f\"Missing integrated word-(probability)-pronunciation information: {line}.\")\t\t\t\t\t\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tdictType = \"lexiconp_silprob\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise WrongDataFormat('Expected \"lexicon\",\"lexiconp(_disambig)\",\"lexiconp_silprob(_disambig)\",\"silprob\" file but got a unknown format.')\n\n\t\t\tdataList.append( (line[0],tuple(line[1:])) )\n\n\t\tif len(dataList) == 0:\n\t\t\traise WrongOperation(f\"Void file: {lexiconFile}.\")\n\t\t\n\t\t## Check if it is a disambiguated lexicon\n\t\tif dictType != \"lexicon\":\n\t\t\tcmd = f'grep \"#1\" -m 1 < {lexiconFile}'\n\t\t\tout,err,cod = run_shell_command(cmd,stdout=\"PIPE\")\n\t\t\tif (isinstance(cod,int) and cod != 0):\n\t\t\t\traise ShellProcessError(\"Failed to vertify disambig symbol.\",err.decode())\n\t\t\telif len(out) > 0:\n\t\t\t\tdictType += \"_disambig\"\n\n\t\tdataList = sorted(dataList,key=lambda x:x[0])\n\n\t\treturn dictType,dataList\n\n\tdef __creat_lexiconp_from_lexicon(self,dataList):\n\t\t'''\n\t\tThis method accepts \"lexicon\" format data,then generate three lexicons: [lexiconp],[lexiconp_disambig] and [disambig]\n\t\t\"lexicon\" will be deprecated.\n\t\t'''\n\t\t\n\t\tsilWords = self.__parameters[\"silWords\"]\n\t\tunkSymbol = self.__parameters[\"unkSymbol\"]\n\n\t\tself.__dictionaries[\"lexiconp\"] = {}\n\n\t\t## Add silence words and their pronunciation\n\t\t## We will give every record a unique ID (but all of silence and unk words use 0) in order to protect disambiguating words.\n\t\tfor w,p in silWords.items():\n\t\t\tself.__dictionaries[\"lexiconp\"][(w,0)] = (\"1.0\",p,)\t\t\n\t\t## Add unknown symbol and its pronunciation\n\t\tfor w,p in unkSymbol.items():\n\t\t\tself.__dictionaries[\"lexiconp\"][(w,0)] = (\"1.0\",p,)\t\n\t\t## Add other words and their pronunciation\n\t\tdisambigFlg = 1\n\t\tfor word,pron in dataList:\n\t\t\tif word in silWords.keys():\n\t\t\t\tif self.__retain_original_sil_pron:\n\t\t\t\t\tprint(f'Warning: silence word \"{word}\" already existed in provided lexicon. Use it.')\n\t\t\t\t\tprint(f'If you want specify new pronunciation,give a dict object.')\n\t\t\t\t\tif len(pron) > 1:\n\t\t\t\t\t\traise WrongDataFormat(f'Expected only one phone but got {len(pron)}.')\n\t\t\t\t\tself.__dictionaries[\"lexiconp\"][(word,0)] = (\"1.0\",) + pron\n\t\t\t\telse:\n\t\t\t\t\tprint(f'Warning: silence word \"{word}\" already existed in provided lexicon. Replace it with new pronunciation.')\n\t\t\t\t\tprint(f'If you want retain orignal pronunciation,give a list object.')\n\t\t\t\t\tpass\n\n\t\t\telif word in unkSymbol.keys():\n\t\t\t\tif self.__retain_original_unk_pron:\n\t\t\t\t\tprint(f'Warning: unk symbol \"{word}\" already existed in provided lexicon. Use it.')\n\t\t\t\t\tprint(f'If you want specify new pronunciation,give a dict object.')\n\t\t\t\t\tif len(pron) > 1:\n\t\t\t\t\t\traise WrongDataFormat(f'Expected only one phone but got {len(pron)}.')\n\t\t\t\t\tself.__dictionaries[\"lexiconp\"][(word,0)] = (\"1.0\",) + pron\n\t\t\t\telse:\n\t\t\t\t\tprint(f'Warning: unk symbol \"{word}\" already existed in provided lexicon. Replace it with new pronunciation.')\n\t\t\t\t\tprint(f'If you want retain orignal pronunciation,give a list object.')\n\t\t\t\t\tpass\n\n\t\t\telif word == \"\":\n\t\t\t\tprint('Warning: symbol has already existed in provided lexicon. Remove it.')\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.__dictionaries[\"lexiconp\"][(word,disambigFlg)] = (\"1.0\",) + pron\n\t\t\t\tdisambigFlg += 1\n\t\t## Transform \"lexiconp\" to a position-dependent one\n\t\tif self.__parameters[\"positionDependent\"]:\n\t\t\tself.__apply_position_dependent_to_lexiconp(dictType=\"lexiconp\")\n\t\t## Apply disambig phones to lexiconp\n\t\tself.__add_disambig_to_lexiconp(dictType=\"lexiconp\")\n\n\t\t## When arrived here,\"lexiconp\",\"lexiconp_disambig\" and \"disambig\" have been generated\n\n\tdef __creat_lexiconp_from_lexiconp(self,dataList,dictType=\"lexiconp\"):\n\t\t'''\n\t\tIf accepted \"lexiconp(_disambig)\" format data,generate three lexicons: [lexiconp],[lexiconp_disambig] and [disambig]\n\t\tIf accepted \"lexiconp_silprob(_disambig)\" format data,generate four lexicons: [lexiconp],[lexiconp_silprob],[lexiconp_silprob_disambig] and [disambig]\n\t\t'''\n\t\t## has a format: [( word,( probability,*pronunciation ) ),...]\n\t\t## should be one of \"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\" and \"lexicon_silprob_disambig\" \n\n\t\tsilWords = self.__parameters[\"silWords\"]\n\t\tunkSymbol = self.__parameters[\"unkSymbol\"]\n\n\t\t## Check whether the data provided is position-dependent data\n\t\ttestPron = dataList[0][1][-1]\n\t\tif \"#\" in testPron:\n\t\t\ttestPron = dataList[0][1][-2]\n\t\tMayBePositionDependent = False\n\t\tif len(testPron) > 2 and (testPron[-2:] in [\"_S\",\"_B\",\"_I\",\"_E\"]):\n\t\t\tMayBePositionDependent = True\n\t\tif MayBePositionDependent and ( not self.__parameters[\"positionDependent\"]):\n\t\t\traise WrongOperation(\"Position-dependent is unavaliable but appeared in provided lexicon file.\")\n\n\t\t## Transform data to Python dict object as well as giving it the unique ID (but all of silence words and unk word use 0)\n\t\t## Add check whether silence words and unk word are existed already. If not,raise error.\n\t\ttemp = {}\n\t\texistedSilAndUnk = []\n\t\tdisambigID = 1\n\t\tfor word,pron in dataList:\n\t\t\tif word in silWords.keys():\n\n\t\t\t\tif \"silprob\" in dictType:\n\t\t\t\t\tassert len(pron) > 5,f'Silence word \"{word}\" existed but only one phone is allowed in provided lexicon file.'\n\t\t\t\t\tif self.__retain_original_sil_pron:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron[0:3] + [silWords[word],]\n\t\t\t\telse:\n\t\t\t\t\tassert len(pron) > 2,f'Silence word \"{word}\" existed but only one phone is allowed in provided lexicon file.'\n\t\t\t\t\tif self.__retain_original_sil_pron:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp[ (word,0) ] = [pron[0],silWords[word],]\n\t\t\t\t\n\t\t\t\texistedSilAndUnk.append(word)\n\n\t\t\telif word in unkSymbol.keys():\n\n\t\t\t\tif \"silprob\" in dictType:\n\t\t\t\t\tassert len(pron) > 5,f'Unk symbol \"{word}\" existed but only one phone is allowed in provided lexicon file.'\n\t\t\t\t\tif self.__retain_original_unk_pron:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron[0:3] + [unkSymbol[word],]\n\t\t\t\telse:\n\t\t\t\t\tassert len(pron) > 2,f'Unk symbol \"{word}\" existed but only one phone is allowed in provided lexicon file.'\n\t\t\t\t\tif self.__retain_original_unk_pron:\n\t\t\t\t\t\ttemp[ (word,0) ] = pron\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp[ (word,0) ] = [pron[0],unkSymbol[word],]\n\t\t\t\t\n\t\t\t\texistedSilAndUnk.append(word)\n\n\t\t\telse:\n\t\t\t\ttemp[ (word,disambigID) ] = pron\n\t\t\t\tdisambigID += 1\n\n\t\tfor symbol in silWords.keys():\n\t\t\tif not symbol in existedSilAndUnk:\n\t\t\t\traise WrongDataFormat(f'Sience word \"{word}\" not appeared in provided lexiconp file.')\n\t\tfor symbol in unkSymbol.keys():\n\t\t\tif not symbol in existedSilAndUnk:\n\t\t\t\traise WrongDataFormat(f'Unk symbol \"{word}\" not appeared in provided lexiconp file.')\n\t\t\n\t\t## If is \"lexiconp\",generate: [lexiconp] -> [lexiconp_disambig]&[disambig]\n\t\tif dictType == \"lexiconp\":\n\n\t\t\tself.__dictionaries[\"lexiconp\"] = temp\n\t\t\n\t\t\tif self.__parameters[\"positionDependent\"] and (not MayBePositionDependent):\n\t\t\t\tself.__apply_position_dependent_to_lexiconp(dictType=\"lexiconp\")\n\t\t\n\t\t\tself.__add_disambig_to_lexiconp(dictType=\"lexiconp\")\n\t\t\n\t\t## If is \"lexiconp_disambig\",generate: [lexiconp_disambig] -> [lexiconp]&[disambig]\n\t\telif dictType == \"lexiconp_disambig\":\n\n\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = temp\n\t\t\t\n\t\t\tif self.__parameters[\"positionDependent\"] and (not MayBePositionDependent):\n\t\t\t\tself.__apply_position_dependent_to_lexiconp(dictType=\"lexiconp_disambig\")\n\t\t\t\n\t\t\tself.__remove_disambig_from_lexiconp_disambig(dictType=\"lexiconp_disambig\")\n\n\t\t## If is \"lexiconp_silprob\",generate: [lexiconp_silprob] -> [lexiconp_silprob_disambig]&[disambig] -> [lexiconp]&[lexiconp_disambig]\n\t\telif dictType == \"lexiconp_silprob\":\n\n\t\t\tself.__dictionaries[\"lexiconp_silprob\"] = temp\n\n\t\t\tif self.__parameters[\"positionDependent\"] and (not MayBePositionDependent):\n\t\t\t\tself.__apply_position_dependent_to_lexiconp(dictType=\"lexiconp_silprob\")\n\t\t\t\n\t\t\tself.__add_disambig_to_lexiconp(dictType=\"lexiconp_silprob\")\n\n\t\t\tself.__dictionaries[\"lexiconp\"] = {}\n\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = {}\n\t\t\tfor word,pron in self.__dictionaries[\"lexiconp_silprob\"].items():\n\t\t\t\tself.__dictionaries[\"lexiconp\"][word] = (pron[0],) + pron[4:]\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"][word] = (pron[0],) + self.__dictionaries[\"lexiconp_silprob_disambig\"][word][4:]\n\n\t\t## If is \"lexiconp_silprob_disambig\",generate: [lexiconp_silprob_disambig] -> [lexiconp_silprob]&[disambig] -> [lexiconp_disambig]&[lexiconp_disambig]\t\n\t\telif dictType==\"lexiconp_silprob_disambig\":\n\n\t\t\tself.__dictionaries[\"lexiconp_silprob_disambig\"] = temp\n\n\t\t\tif self.__parameters[\"positionDependent\"] and (not MayBePositionDependent):\n\t\t\t\tself.__apply_position_dependent_to_lexiconp(dictType==\"lexiconp_silprob_disambig\")\n\t\t\t\n\t\t\tself.__remove_disambig_from_lexiconp_disambig(dictType=\"lexiconp_silprob_disambig\")\n\n\t\t\tself.__dictionaries[\"lexiconp\"] = {}\n\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = {}\n\t\t\tfor word,pron in self.__dictionaries[\"lexiconp_silprob\"].items():\n\t\t\t\tself.__dictionaries[\"lexiconp\"][word] = (pron[0],) + pron[4:]\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"][word] = (pron[0],) + self.__dictionaries[\"lexiconp_silprob_disambig\"][word][4:]\n\n\t\telse:\n\t\t\traise WrongOperation('Expected lexiconp type is \"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\" or \"lexiconp_silprob_disambig\".')\n\t\n\tdef __apply_position_dependent_to_lexiconp(self,dictType=\"lexiconp\"):\n\t\t'''\n\t\tThis method is used to transform position-independent lexicon to a postion-dependent one.\n\t\tPosition-independent lexicon can be \"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\",or \"lexiconp_silprob_disambig\"\n\t\t'''\n\t\tif dictType == \"lexiconp\":\n\t\t\tfor word,pron in self.__dictionaries[dictType].items():\n\t\t\t\tpron = list(pron)\n\t\t\t\tif len(pron) == 2:\n\t\t\t\t\tpron[1] += \"_S\"\n\t\t\t\telse:\n\t\t\t\t\tpron[1] += \"_B\"\n\t\t\t\t\tpron[-1] += \"_E\"\n\t\t\t\t\tfor i in range(2,len(pron)-1):\n\t\t\t\t\t\tpron[i] += \"_I\"\n\t\t\t\tself.__dictionaries[dictType][word] = tuple(pron)\n\n\t\telif dictType==\"lexiconp_silprob\":\n\t\t\tfor word,pron in self.__dictionaries[dictType].items():\n\t\t\t\tpron = list(pron)\n\t\t\t\tif len(pron) == 5:\n\t\t\t\t\tpron[-1] += \"_S\"\n\t\t\t\telse:\n\t\t\t\t\tpron[4] += \"_B\"\n\t\t\t\t\tpron[-1] += \"_E\"\n\t\t\t\t\tfor i in range(5,len(pron)-1):\n\t\t\t\t\t\tpron[i] += \"_I\"\n\t\t\t\tself.__dictionaries[dictType][word] = tuple(pron)\n\n\t\telif dictType==\"lexiconp_disambig\":\n\t\t\tfor word,pron in self.__dictionaries[dictType].items():\n\t\t\t\tpron = list(pron)\n\t\t\t\tif \"#\" in pron[-1]:\n\t\t\t\t\tdisambigSymbol = [pron[-1]]\n\t\t\t\t\tpron = pron[:-1]\n\t\t\t\telse:\n\t\t\t\t\tdisambigSymbol = []\n\t\t\t\tif len(pron) == 2:\n\t\t\t\t\tpron[1] += \"_S\"\n\t\t\t\telse:\n\t\t\t\t\tpron[1] += \"_B\"\n\t\t\t\t\tpron[-1] += \"_E\"\n\t\t\t\t\tfor i in range(2,len(pron)-1):\n\t\t\t\t\t\tpron[i] += \"_I\"\n\t\t\t\tself.__dictionaries[dictType][word] = tuple(pron + disambigSymbol)\n\n\t\telif dictType==\"lexiconp_silprob_disambig\":\n\t\t\tfor word,pron in self.__dictionaries[dictType].items():\n\t\t\t\tpron = list(pron)\n\t\t\t\tif \"#\" in pron[-1]:\n\t\t\t\t\tdisambigSymbol = [pron[-1]]\n\t\t\t\t\tpron = pron[:-1]\n\t\t\t\telse:\n\t\t\t\t\tdisambigSymbol = []\n\t\t\t\tif len(pron) == 5:\n\t\t\t\t\tpron[-1] += \"_S\"\n\t\t\t\telse:\n\t\t\t\t\tpron[4] += \"_B\"\n\t\t\t\t\tpron[-1] += \"_E\"\n\t\t\t\t\tfor i in range(5,len(pron)-1):\n\t\t\t\t\t\tpron[i] += \"_I\"\n\t\t\t\tself.__dictionaries[dictType][word] = tuple(pron + disambigSymbol)\n\t\telse:\n\t\t\traise WrongOperation('Expected lexiconp type is \"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\",or \"lexiconp_silprob_disambig\".')\n\n\tdef __add_disambig_to_lexiconp(self,dictType=\"lexiconp\"):\n\t\t'''\n\t\tThis method is used to add phone-level disambiguation to [lexiconp] or [lexiconp_silprob]\n\t\tLexicon,[disambig],will be gained and parameter,\"ndisambig\",will be updated selmeanwhile.\n\t\t'''\n\t\tdeclare.is_instances(\"dictType\",dictType,[\"lexiconp\",\"lexiconp_silprob\"])\n\n\t\t## is one of \"lexiconp\" and \"lexiconp_silprob\" \n\t\tif dictType == \"lexiconp\":\n\t\t\tlexiconpName = \"lexiconp\"\n\t\t\tdisambigLexiconpName = \"lexiconp_disambig\"\n\t\t\tcmdOption = \"\"\n\t\telse:\n\t\t\tlexiconpName = \"lexiconp_silprob\"\n\t\t\tdisambigLexiconpName = \"lexiconp_silprob_disambig\"\n\t\t\tcmdOption = \"--sil-probs \"\t\n\n\t\tself.__dictionaries[\"disambig\"] = []\n\n\t\twith FileHandleManager() as fhm:\n\n\t\t\tlexiconp = fhm.create(\"w+\",encoding='utf-8')\n\t\t\tlexiconpDisambig = fhm.create(\"w+\",encoding='utf-8')\n\n\t\t\tdisambigFlags = []\n\t\t\tfor word,pron in self.__dictionaries[lexiconpName].items():\n\t\t\t\tpron = \" \".join(pron)\n\t\t\t\tlexiconp.write(\"{} {}\\n\".format(word[0],pron))\n\t\t\t\tdisambigFlags.append(word[1])\n\t\t\t\n\t\t\tlexiconp.seek(0)\n\t\t\t\n\t\t\tcmd = os.path.join(ExKaldiInfo.KALDI_ROOT,\"egs\",\"wsj\",\"s5\",\"utils\",\"add_lex_disambig.pl\") + f\" --pron-probs {cmdOption}{lexiconp.name} {lexiconpDisambig.name}\"\n\t\t\tout,err,cod = run_shell_command(cmd,stdout=\"PIPE\",stderr=\"PIPE\")\n\t\t\tif (isinstance(cod,int) and cod!=0 ) or out == b\"\":\n\t\t\t\traise KaldiProcessError(\"Failed to add disambig phones to lexiconp.\",err.decode())\n\t\t\telse:\n\t\t\t\tself.__parameters[\"ndisambig\"] = int(out.decode().strip()) + self.__parameters[\"extraDisambigPhoneNumbers\"]\n\n\t\t\t\tfor i in range( self.__parameters[\"ndisambig\"] + 1 ):\n\t\t\t\t\tself.__dictionaries[\"disambig\"].append(\"#%d\"%i)\n\t\t\t\tself.__dictionaries[\"disambig\"].extend( self.__parameters[\"extraDisambigWords\"] )\n\t\t\t\t\n\t\t\t\tlexiconpDisambig.seek(0)\n\t\t\t\tlines = lexiconpDisambig.readlines()\n\n\t\t\t\tself.__dictionaries[disambigLexiconpName] = {}\n\t\t\t\tfor line,disambigFlg in zip(lines,disambigFlags):\n\t\t\t\t\tline = line.strip().split()\n\t\t\t\t\tself.__dictionaries[disambigLexiconpName][(line[0],disambigFlg)] = tuple(line[1:])\n\n\tdef __remove_disambig_from_lexiconp_disambig(self,dictType=\"lexiconp_disambig\"):\n\t\t'''\n\t\tThis method is used to remove phone-level disambiguation and generate a new lexicon,[lexiconp] if is \"lexiconp_disambig\",\n\t\tor [lexiconp_silprob] if is \"lexiconp_silprob_disambig\".\n\t\tLexicon,[disambig],will be gained and parameter,\"ndisambig\",will be updated selmeanwhile.\n\t\t'''\n\t\tdeclare.is_instances(\"dictType\",dictType,[\"lexiconp_disambig\",\"lexiconp_silprob_disambig\"])\n\n\t\ttempDisambig = []\n\t\tif dictType == \"lexiconp_disambig\":\n\t\t\tnewName = \"lexiconp\"\n\t\telse:\n\t\t\tnewName = \"lexiconp_silprob\"\t\t\n\n\t\tself.__dictionaries[newName] = {}\n\t\tfor word,pron in self.__dictionaries[dictType].items():\n\t\t\tif \"#\" in pron[-1]:\n\t\t\t\ttempDisambig.append( int(pron[-1][1:]) )\n\t\t\t\tpron = pron[:-1]\n\t\t\tself.__dictionaries[newName][word] = pron\n\n\t\ttempDisambig = sorted(list(set(tempDisambig)))[-1]\n\t\tself.__parameters[\"ndisambig\"] = tempDisambig + self.__parameters[\"extraDisambigPhoneNumbers\"]\n\t\tfor i in range( self.__parameters[\"ndisambig\"] + 1 ):\n\t\t\tself.__dictionaries[\"disambig\"].append(\"#%d\"%i)\n\t\tself.__dictionaries[\"disambig\"].extend( self.__parameters[\"extraDisambigWords\"] )\n\n\tdef __make_phone_int_table(self):\n\t\t'''\n\t\tThis method is used to generated a initialized phone-numberID lexicon: [phones].\n\t\t'''\n\n\t\tself.__dictionaries[\"phones\"] = {}\n\n\t\tallPhones = []\n\t\tfor lexName in [\"silence\",\"nonsilence\",\"disambig\"]:\n\t\t\tallPhones.extend( self.__dictionaries[lexName] )\n\n\t\tcount = 0\n\t\tif not \"\" in allPhones:\n\t\t\tself.__dictionaries[\"phones\"][\"\"] = 0\n\t\t\tcount += 1\n\t\tfor phone in allPhones:\n\t\t\tself.__dictionaries[\"phones\"][phone] = count\n\t\t\tcount += 1\n\n\tdef __make_word_int_table(self):\n\t\t'''\n\t\tThis method is used to generated a initialized word-numberID lexicon: [words].\n\t\t'''\n\n\t\tself.__dictionaries[\"words\"] = {}\n\t\tallWords = [ x for x,_ in self.__dictionaries[\"lexiconp\"].keys()]\n\t\tallWords = sorted(list(set(allWords)))\n\t\tcount = 0\n\t\tif not \"\" in allWords:\n\t\t\tself.__dictionaries[\"words\"][\"\"] = 0\n\t\t\tcount += 1\n\t\tfor word in allWords:\n\t\t\tself.__dictionaries[\"words\"][word] = count\n\t\t\tcount += 1\n\t\tfor word in self.__dictionaries[\"wdisambig\"]:\n\t\t\tself.__dictionaries[\"words\"][word] = count\n\t\t\tcount += 1\n\t\tif not \"\" in allWords:\n\t\t\tself.__dictionaries[\"words\"][\"\"] = count\n\t\t\tcount += 1\n\t\tif not \"\" in allWords:\n\t\t\tself.__dictionaries[\"words\"][\"\"] = count\n\t\t\tcount += 1\t\n\n\t#------------------------------------- Basic functions ------------------------------\n\n\tdef get_parameter(self,name=None):\n\t\t'''\n\t\tGet the initial parameters saved in LexiconBank object.\n\t\t\n\t\tArgs:\n\t\t\t: parameter name. If None,return all.\n\t\t\n\t\tReturn:\n\t\t\tthe paramater value.\n\t\t'''\n\t\tif name is None:\n\t\t\treturn copy.deepcopy(self.__parameters)\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn copy.deepcopy(self.__parameters[name])\n\t\t\texcept KeyError:\n\t\t\t\traise WrongOperation(f\"No such parameter:{name}.\")\n\n\t@property\n\tdef view(self):\n\t\t'''\n\t\tGet the lexicon names of all generated lexicons.\n\n\t\tReturn:\n\t\t\ta list.\n\t\t'''\n\t\treturn list(self.__dictionaries.keys())\n\n\tdef __call__(self,name,returnInt=False):\n\t\t'''\n\t\tGet a lexicon. \n\t\t\n\t\tArgs:\n\t\t\t: lexicons name. You can use .view to look names of all generated lexicons.\n\t\t\t: a bool value. If True,replace phones or words with ID number (but with str format).\n\n\t\t\tSome lexicons have not corresponding Int-ID table. So if you require them,a warning message will be printed and None will be returned.\n\t\t\n\t\tReturn:\n\t\t\tdict,ListTable,list,tuple or str object depending on which lexicon you selected.\n\t\t'''\n\t\tdeclare.is_valid_string(\"name\",name)\n\t\tname = name.strip()\n\t\t\n\t\ttry:\n\t\t\tself.__dictionaries[name]\n\t\texcept KeyError:\n\t\t\traise WrongOperation(f'No such lexicon: \"{name}\".')\n\n\t\tif returnInt is False:\n\t\t\tresult = self.__dictionaries[name]\n\t\t\tif name in [\"words\",\"phones\"]:\n\t\t\t\tresult = ListTable(result,name=name)\n\t\t\treturn result\n\n\t\telse:\n\t\t\tif name in [\"lexiconp\",\"lexiconp_disambig\"]:\n\t\t\t\ttemp = {}\n\t\t\t\tfor word,pron in self.__dictionaries[name].items():\n\t\t\t\t\tword = (str(self.__dictionaries[\"words\"][word[0]]),word[1])\n\t\t\t\t\tnew = [pron[0]]\n\t\t\t\t\tfor phone in pron[1:]:\n\t\t\t\t\t\tnew.append(str(self.__dictionaries[\"phones\"][phone]))\n\t\t\t\t\ttemp[word] = tuple(new)\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"lexiconp_silprob\",\"lexiconp_silprob_disambig\"]:\n\t\t\t\ttemp = {}\n\t\t\t\tfor word,pron in self.__dictionaries[name].items():\n\t\t\t\t\tword = (str(self.__dictionaries[\"words\"][word[0]]),word[1])\n\t\t\t\t\tnew = []\n\t\t\t\t\tfor phone in pron[4:]:\n\t\t\t\t\t\tnew.append(str(self.__dictionaries[\"phones\"][phone]))\n\t\t\t\t\ttemp[word] = pron[0:4] + tuple(new)\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"phones\",\"words\",\"phone_map\",\"silence_phone_map\",\"nonsilence_phone_map\",\"nonsilence_phones\",\"silence_phones\",\"silprob\"]:\n\t\t\t\tprint('Warning: \"{}\" is unsupported to generate corresponding int table.'.format(name))\n\t\t\t\treturn None\n\n\t\t\telif name in [\"align_lexicon\"]:\n\t\t\t\ttemp = {}\n\t\t\t\tfor word,wordPron in self.__dictionaries[name].items():\n\t\t\t\t\tword = (str(self.__dictionaries[\"words\"][word[0]]),word[1])\n\t\t\t\t\tnew = [word[0],]\n\t\t\t\t\tfor phone in wordPron[1:]:\n\t\t\t\t\t\tnew.append( str(self.__dictionaries[\"phones\"][phone]) )\n\t\t\t\t\ttemp[word] = tuple(new)\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"disambig\",\"silence\",\"nonsilence\",\"wdisambig_phones\",\"context_indep\"]:\n\t\t\t\ttemp = []\n\t\t\t\tfor phone in self.__dictionaries[name]:\n\t\t\t\t\ttemp.append( str(self.__dictionaries[\"phones\"][phone]) )\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"extra_questions\",\"sets\"]:\n\t\t\t\ttemp = []\n\t\t\t\tfor phones in self.__dictionaries[name]:\n\t\t\t\t\tnew = []\n\t\t\t\t\tfor phone in phones:\n\t\t\t\t\t\tnew.append( str(self.__dictionaries[\"phones\"][phone]) )\n\t\t\t\t\ttemp.append(tuple(new))\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"wdisambig\",\"wdisambig_words\"]:\n\t\t\t\ttemp = []\n\t\t\t\tfor word in self.__dictionaries[name]:\n\t\t\t\t\ttemp.append( str(self.__dictionaries[\"words\"][word]) )\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"word_boundary\"]:\n\t\t\t\ttemp = {}\n\t\t\t\tfor phone,flg in self.__dictionaries[name].items():\n\t\t\t\t\tphone = str(self.__dictionaries[\"phones\"][phone])\n\t\t\t\t\ttemp[phone] = flg\n\t\t\t\treturn temp\n\n\t\t\telif name in [\"oov\"]:\n\t\t\t\treturn str(self.__dictionaries[\"words\"][self.__dictionaries[name]])\n\t\t\t\n\t\t\telif name in [\"optional_silence\"]:\n\t\t\t\treturn str(self.__dictionaries[\"phones\"][self.__dictionaries[name]])\n\n\t\t\telif name in [\"roots\"]:\n\t\t\t\ttemp1 = []\n\t\t\t\ttemp2 = []\n\t\t\t\tfor phone in self.__dictionaries[name][\"not-shared not-split\"]:\n\t\t\t\t\ttemp1.append( str(self.__dictionaries[\"phones\"][phone]) )\n\t\t\t\tfor sharedPhones in self.__dictionaries[name][\"shared split\"]:\n\t\t\t\t\tnew = []\n\t\t\t\t\tfor phone in sharedPhones:\n\t\t\t\t\t\tnew.append( str(self.__dictionaries[\"phones\"][phone]) )\n\t\t\t\t\ttemp2.append(tuple(new))\n\n\t\t\t\treturn {\"not-shared not-split\": tuple(temp1),\"shared split\": tuple(temp2) }\n\t\t\t\n\t\t\telse:\n\t\t\t\traise WrongOperation(f'Failed to convert lexicon \"{name}\" to int-number format.')\n\n\tdef dump_dict(self,name,fileName=None,dumpInt=False):\n\t\t'''\n\t\tSave the lexicon to file with Kaldi format.\n\t\t\n\t\tArgs:\n\t\t\t: lexicon name. You can use .view to look names of all generated lexicons.\n\t\t\t: file name,opened file handle or None.\n\t\t\t: bool value. If True,replace phones or words with int ID.\n\n\t\t\tSome lexicons have not corresponding int table. \n\t\t\tSo if you require them,a warning message will be printed and nothing will be saved. \n\t\t\n\t\tReturn:\n\t\t\tfile name, file handle or a string.\n\t\t'''\n\t\tif fileName is not None:\n\t\t\tdeclare.is_valid_file_name_or_handle(\"fileName\",fileName)\n\t\t\tif isinstance(fileName,str):\n\t\t\t\tfileName = fileName.strip()\n\t\t\t\tif dumpInt is False:\n\t\t\t\t\tif not fileName.endswith(\".txt\"):\n\t\t\t\t\t\tfileName += \".txt\"\n\t\t\t\telse:\n\t\t\t\t\tif not fileName.endswith(\".int\"):\n\t\t\t\t\t\tfileName += \".int\"\n\t\t\t\tmake_dependent_dirs(fileName,pathIsFile=True)\t\n\n\t\tdef write_file(fileName,message):\n\t\t\tif fileName is None:\n\t\t\t\treturn message\n\t\t\telif isinstance(fileName,str):\n\t\t\t\twith open(fileName,\"w\",encoding='utf-8') as fw:\n\t\t\t\t\tfw.write(message)\n\t\t\t\treturn fileName\n\t\t\telse:\t\t\t\t\n\t\t\t\tfileName.truncate()\n\t\t\t\tfileName.write(message)\n\t\t\t\tfileName.seek(0)\n\t\t\t\treturn fileName\n\n\t\t## Different lexicon has different data format,So judge them before save\n\t\t## Type1: dict,{ str: tuple }\n\t\tif name in [\"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\",\"lexiconp_silprob_disambig\",\n\t\t\t\t\t\"phone_map\",\"silence_phone_map\",\"nonsilence_phone_map\",\"align_lexicon\"]:\n\t\t\tcontents = []\n\t\t\ttemp = self.__call__(name,dumpInt)\n\t\t\tif temp is not None:\n\t\t\t\tif name in [\"lexiconp\",\"lexiconp_disambig\",\"lexiconp_silprob\",\n\t\t\t\t\t\t\t\t\"lexiconp_silprob_disambig\",\"align_lexicon\"]:\n\t\t\t\t\tfor key,value in temp.items():\n\t\t\t\t\t\tvalue = \" \".join(value)\n\t\t\t\t\t\tcontents.append(\"{} {}\".format(key[0],value))\n\t\t\t\telse:\n\t\t\t\t\tfor key,value in temp.items():\n\t\t\t\t\t\tvalue = \" \".join(value)\n\t\t\t\t\t\tcontents.append(\"{} {}\".format(key,value))\n\n\t\t\t\treturn write_file(fileName,\"\\n\".join(contents))\n\t\t\t\t\t\n\t\t## Type2: tuple,()\n\t\telif name in [\"nonsilence_phones\",\"silence_phones\",\"disambig\",\"silence\",\"nonsilence\",\n\t\t\t\t\t \"wdisambig\",\"wdisambig_phones\",\"wdisambig_words\",\"context_indep\"]:\n\t\t\tcontents = self.__call__(name,dumpInt)\n\t\t\tif not contents is None:\n\t\t\t\treturn write_file(fileName,\"\\n\".join(contents))\n\n\t\t## Type3: tuple,(tuple,)\n\t\telif name in [\"sets\",\"extra_questions\"]:\n\t\t\tcontents = []\n\t\t\tfor value in self.__call__(name,dumpInt):\n\t\t\t\tcontents.append(\" \".join(value))\n\t\t\treturn write_file(fileName,\"\\n\".join(contents))\n\t\t\n\t\t## Type4: dict,{ str:int or str }\n\t\telif name in [\"phones\",\"words\",\"word_boundary\",\"silprob\"]:\n\t\t\tcontents = []\n\t\t\ttemp = self.__call__(name,dumpInt)\n\t\t\tif not temp is None:\n\t\t\t\tfor key,value in temp.items():\n\t\t\t\t\tcontents.append(\"{} {}\".format(key,value))\n\t\t\t\treturn write_file(fileName,\"\\n\".join(contents))\n\n\t\t## Type5: str\t\t\t\t\t\t\n\t\telif name in [\"oov\",\"optional_silence\"]:\n\t\t\tcontents = self.__call__(name,dumpInt)\n\t\t\treturn write_file(fileName,contents)\n\n\t\t## Type6: special format for roots\n\t\telif name == \"roots\":\n\t\t\tcontents = []\n\t\t\ttemp = self.__call__(name,dumpInt)\n\t\t\tif len(temp[\"not-shared not-split\"]) > 0: \n\t\t\t\tcontents.append(\"not-shared not-split {}\".format(\" \".join(temp[\"not-shared not-split\"])))\n\t\t\tfor phones in temp[\"shared split\"]:\n\t\t\t\tphones = \" \".join(phones)\n\t\t\t\tcontents.append(\"shared split {}\".format(phones))\n\t\t\treturn write_file(fileName,\"\\n\".join(contents))\n\n\t\telse:\n\t\t\traise WrongOperation(f\"Unsupported lexicon: {name} to dump.\")\n\n\tdef dump_all_dicts(self,outDir=\"./\",requireInt=False):\n\t\t'''\n\t\tSave all lexicons (and their corresponding int table ) to folder with their default lexicon name.\n\n\t\tArgs:\n\t\t\t: output directory path.\n\t\t\t: a bool value. If True,dump int format at the same time.\n\t\t\n\t\t'''\n\t\tdeclare.is_valid_dir_name(\"outDir\",outDir)\n\t\tdeclare.is_bool(\"requireInt\",requireInt)\n\n\t\tmake_dependent_dirs(outDir,pathIsFile=False)\n\t\tfor name in self.__dictionaries.keys():\n\t\t\tfileName = os.path.join(outDir,name)\n\n\t\t\tself.dump_dict(name,fileName+\".txt\",False)\n\n\t\t\tif requireInt:\n\t\t\t\tif not name in [\"phones\",\"words\",\"phone_map\",\"silence_phone_map\",\"nonsilence_phone_map\",\n\t\t\t\t\t\t\t\t\"nonsilence_phones\",\"silence_phones\",\"silprob\"]:\n\n\t\t\t\t\tself.dump_dict(name,fileName+\".int\",True)\n\n\tdef save(self,fileName):\n\t\t'''\n\t\tSave LexiconBank object to a binary file.\n\n\t\tArgs:\n\t\t\t: file name with suffix .lex.\n\n\t\tReturn:\n\t\t\tthe saved file name.\n\t\t'''\n\t\tdeclare.is_valid_string(\"fileName\",fileName)\n\t\tif not fileName.rstrip().endswith(\".lex\"):\n\t\t\tfileName += \".lex\"\n\t\tdeclare.is_valid_file_name(\"fileName\",fileName)\n\t\tmake_dependent_dirs(fileName,pathIsFile=True)\n\n\t\twith open(fileName,\"wb\") as fw:\n\t\t\tpickle.dump(self,fw)\n\n\t\treturn fileName\n\n\t#------------------------------------- Advance functions ------------------------------\n\n\tdef reset_phones(self,target):\n\t\t'''\n\t\tReset phone-int table with user's own lexicon.\n\t\t\n\t\tArgs:\n\t\t\t: a file,dict object or exkaldi ListTable object. \n\t\t'''\n\t\tif isinstance(target,str):\n\t\t\ttarget = load_list_table(target)\n\t\telif type_name(target) not in [\"dict\",\"ListTable\"]:\n\t\t\traise WrongOperation(f\" should be a file path,dict or ListTable object but got: {type_name(target)}.\")\n\t\t\n\t\tphone2id = {}\n\t\tid2phone = {}\n\t\tfor key,value in target.items():\n\t\t\tdeclare.is_valid_string(\"phone in target\",key)\n\t\t\tif isinstance(value,int):\n\t\t\t\tpass\n\t\t\telif isinstance(value,str):\n\t\t\t\tvalue = value.strip().split(maxsplit=1)[0]\n\t\t\t\ttry:\n\t\t\t\t\tvalue = int(value)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise WrongDataFormat(f\"The ID in phone-id table should be an int value but got: {value}.\")\n\t\t\telse:\n\t\t\t\traise WrongDataFormat(f\"The ID in phone-id table should be an int value but got: {value}.\")\n\t\t\t\n\t\t\ttry:\n\t\t\t\tid2phone[value]\n\t\t\texcept KeyError:\n\t\t\t\tphone2id[key] = value\n\t\t\t\tid2phone[value] = key\n\t\t\telse:\n\t\t\t\traise WrongDataFormat(f\"Phone ID appeared many times: {value}.\")\n\n\t\tdel id2phone\n\t\titems = sorted(phone2id.items(),key=lambda x:x[1])\n\t\tif items[-1][1] != len(items) - 1:\n\t\t\traise WrongDataFormat(f\"Phone-id table shoule be a compact ID sequences that the last ID should be {len(items)-1} but got: {items[-1][1]}.\")\n\t\t\n\t\tdependentFlg = False\n\t\tfor phone in phone2id.keys():\n\t\t\tif len(phone) > 2 and phone[-2:] in [\"_S\",\"_B\",\"_E\",\"_I\"]:\n\t\t\t\tdependentFlg = True\n\t\t\t\tif self.__parameters[\"positionDependent\"] is False:\n\t\t\t\t\traise WrongOperation(\"Position dependent phones not requested,but appear in the provided phone table.\")\n\t\t\t\tbreak\n\t\t\n\t\tif dependentFlg is False:\n\t\t\tif self.__parameters[\"positionDependent\"] is True:\n\t\t\t\traise WrongOperation(\"Position dependent phones requested,but not appear in the provided .\")\n\t\t\n\t\tfor phone in self.__dictionaries[\"silence\"] + self.__dictionaries[\"nonsilence\"]:\n\t\t\ttry:\n\t\t\t\tphone2id[phone]\n\t\t\texcept KeyError:\n\t\t\t\traise WrongOperation(f\"Phone appears in the lexicon but not in the provided :{phone}.\")\n\n\t\tcount = items[-1][1] + 1\n\t\tfor phone in [\"\"]+self.__dictionaries[\"disambig\"]:\n\t\t\ttry:\n\t\t\t\tphone2id[phone]\n\t\t\texcept KeyError:\n\t\t\t\tphone2id[phone] = count\n\t\t\t\tcount += 1\t\t\n\n\t\tdel self.__dictionaries[\"phones\"]\n\t\tself.__dictionaries[\"phones\"] = phone2id\n\t\t\t\n\tdef reset_words(self,target):\n\t\t'''\n\t\tReset word-int table with user's own lexicon.\n\t\t\n\t\tArgs:\n\t\t\t: a file,dict object or exkaldi ListTable object. \n\t\t'''\n\t\tif isinstance(target,str):\n\t\t\ttarget = load_list_table(target)\n\t\telif type_name(target) not in [\"dict\",\"ListTable\"]:\n\t\t\traise WrongOperation(f\" should be a file path,dict or ListTable object but got: {type_name(target)}.\")\n\t\t\n\t\tword2id = {}\n\t\tid2word = {}\n\t\tfor key,value in target.items():\n\t\t\tdeclare.is_valid_string(\"word in target\",key)\n\t\t\tif isinstance(value,int):\n\t\t\t\tpass\n\t\t\telif isinstance(value,str):\n\t\t\t\tvalue = value.strip().split(maxsplit=1)[0]\n\t\t\t\ttry:\n\t\t\t\t\tvalue = int(value)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise WrongDataFormat(f\"The ID in Word-id table should be an int value but got: {value}.\")\n\t\t\telse:\n\t\t\t\traise WrongDataFormat(f\"The ID in Word-id table should be an int value but got: {value}.\")\n\t\t\t\n\t\t\ttry:\n\t\t\t\tid2word[value]\n\t\t\texcept KeyError:\n\t\t\t\tword2id[key] = value\n\t\t\t\tid2word[value] = key\n\t\t\telse:\n\t\t\t\traise WrongDataFormat(f\"Word ID appeared many times: {value}.\")\n\n\t\tdel id2word\n\t\titems = sorted(word2id.items(),key=lambda x:x[1])\n\t\tif items[-1][1] != len(items) - 1:\n\t\t\traise WrongDataFormat(f\"Word-id table shoule be a compact ID sequences that the last ID should be {len(items)-1} but got: {items[-1][1]}.\")\n\n\t\tfor word,_ in self.__dictionaries[\"lexiconp\"].keys():\n\t\t\ttry:\n\t\t\t\tword2id[word]\n\t\t\texcept KeyError:\n\t\t\t\traise WrongOperation(f\"Word appears in the lexicon but not in the provided : {word}.\")\n\n\t\tcount = items[-1][1] + 1\n\t\tfor word in [\"\"]+self.__dictionaries[\"wdisambig\"]+[\"\",\"\"]:\n\t\t\ttry:\n\t\t\t\tword2id[word]\n\t\t\texcept KeyError:\n\t\t\t\tword2id[word] = count\n\t\t\t\tcount += 1\n\t\t\n\t\tdel self.__dictionaries[\"words\"]\n\t\tself.__dictionaries[\"words\"] = word2id\n\n\tdef add_extra_question(self,question):\n\t\t'''\n\t\tAdd one piece of extra question to extraQuestions lexicon.\n\n\t\tArgs:\n\t\t\t: a list or tuple of phones.\n\t\t'''\n\t\tdeclare.is_classes(\"question\",question,[list,tuple])\n\n\t\tfor phone in question:\n\t\t\tassert isinstance(phone,str),f\"Phone should be a string but got: {phone}.\"\n\t\t\tif not phone in self.__dictionaries[\"silence_phones\"] + self.__dictionaries[\"nonsilence_phones\"]:\n\t\t\t\traise WrongDataFormat('Phoneme \"{}\" in extra questions is not existed in \"phones\".'.format(phone))\n\t\tself.__dictionaries[\"extra_questions\"].append( tuple(question) )\n\n\tdef update_prob(self,targetFile):\n\t\t'''\n\t\tUpdate relative probability of all of lexicons including \"lexiconp\",\"lexiconp_silprob\",\"lexiconp_disambig\",\"lexiconp_silprob_disambig\",\"silprob\".\n\t\t\n\t\tArgs:\n\t\t\t: a file name. one of \"lexiconp\",\"lexiconp_silprob\",\"lexiconp_disambig\",\"lexiconp_silprob_disambig\",\"silprob\".\n\t\t'''\n\t\tdeclare.is_file(\"target probability file\",targetFile)\n\t\t\n\t\tdictType,dataList = self.__check_lexicon_type(targetFile)\n\n\t\t## If it is \"lexiconp\",update [lexiconp(_disambig)]. If [lexiconp_silprob(disambig)] are also existed,update them too.\n\t\tif dictType == \"lexiconp\":\n\n\t\t\ttemp = {}\n\t\t\tfor word,pron in dataList:\n\t\t\t\ttemp[ (word,* pron[1:]) ] = pron[0]\n\n\t\t\tnewLex = {}\n\t\t\tfor word,pronLex in self.__dictionaries[\"lexiconp\"].items():\n\t\t\t\t# \"word\": ( word,disambigID ); \"pronLex\": ( \"1,0\",*pronunciation )\n\t\t\t\tindex = (word[0],*pronLex[1:])\n\t\t\t\tif index in temp.keys():\n\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\tnewLex[word] = ( newP,) + pronLex[1:]\n\t\t\t\telse:\n\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\n\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\n\t\t\tfor name in [\"lexiconp_disambig\",\"lexiconp_silprob\",\"lexiconp_silprob_disambig\"]:\n\t\t\t\tif name in self.view:\n\t\t\t\t\tnew = {}\n\t\t\t\t\tfor word,pron in self.__dictionaries[name].items():\n\t\t\t\t\t\tnewP = self.__dictionaries[\"lexiconp\"][word][0]\n\t\t\t\t\t\tnew[word] = ( newP,) + pron[1:]\n\t\t\t\t\tself.__dictionaries[name] = new\n\t\t\n\t\t## If it is \"lexiconp_disambig\",update [lexiconp(_disambig)]. If [lexiconp_silprob(disambig)] are also existed,update them too.\n\t\telif dictType == \"lexiconp_disambig\":\n\n\t\t\ttemp = {}\n\t\t\tfor word,pron in dataList:\n\t\t\t\ttemp[ (word,* pron[1:]) ] = pron[0]\n\n\t\t\t# If [lexiconp_disambig] existed\n\t\t\tif \"lexiconp_disambig\" in self.view:\n\n\t\t\t\tnewLexDis = {}\n\t\t\t\tnewLex = {}\n\t\t\t\tfor word,pronLexDis in self.__dictionaries[\"lexiconp_disambig\"].items():\n\t\t\t\t\t# \"word\": ( word,disambigID ); \"pron\": ( \"1,0\",*pronunciationWithDisambig )\n\t\t\t\t\tindex = (word[0],*pronLexDis[1:])\n\t\t\t\t\tpronLex = self.__dictionaries[\"lexiconp\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLexDis[word] = ( newP,) + pronLexDis[1:]\n\t\t\t\t\t\tnewLex[word] = ( newP,) + pronLex[1:]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = newLexDis\n\t\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\n\t\t\t\tfor name in [\"lexiconp_silprob\",\"lexiconp_silprob_disambig\"]:\n\t\t\t\t\tif name in self.view:\n\t\t\t\t\t\tnew = {}\n\t\t\t\t\t\tfor word,pron in self.__dictionaries[name].items():\n\t\t\t\t\t\t\tnewP = self.__dictionaries[\"lexiconp\"][word][0]\n\t\t\t\t\t\t\tnew[word] = ( newP,) + pron[1:]\n\t\t\t\t\t\tself.__dictionaries[name] = new\n\n\t\t\telse:\n\n\t\t\t\tnewLexSilDis = {}\n\t\t\t\tnewLexSil = {}\n\t\t\t\tnewLexDis = {}\n\t\t\t\tnewLex = {}\n\n\t\t\t\tfor word,pronLexSilDis in self.__dictionaries[\"lexiconp_silprob_disambig\"].items():\n\t\t\t\t\t# \"word\": ( word,disambigID ); \"pron\": ( \"1,0\",\"p1\",\"p2\",\"p3\",*pronunciationWithDisambig )\n\t\t\t\t\tindex = (word[0],*pronLexSilDis[4:])\n\t\t\t\t\tpronLexSil = self.__dictionaries[\"lexiconp_silprob\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLexSilDis[word] = ( newP,) + pronLexSilDis[1:]\n\t\t\t\t\t\tnewLexSil[word] = ( newP,) + pronLexSil[1:]\n\t\t\t\t\t\tnewLexDis[word] = ( newP,) + pronLexSilDis[4:]\n\t\t\t\t\t\tnewLex[word] = ( newP,) + pronLexSil[4:]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\n\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob_disambig\"] = newLexSilDis\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob\"] = newLexSil\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = newLexDis\n\t\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\n\t\t## If it is \"lexiconp_silprob\",update [lexiconp_silprob(_disambig)] and [lexiconp]. If [lexiconp_disambig] are also existed,update it too.\n\t\telif dictType == \"lexiconp_silprob\":\n\n\t\t\ttemp = {}\n\t\t\tfor word,pron in dataList:\n\t\t\t\ttemp[ (word,* pron[4:]) ] = pron[0:4]\n\n\t\t\t# If [lexiconp_silprob] existed\n\t\t\tif \"lexiconp_silprob\" in self.view:\n\n\t\t\t\tnewLex = {}\n\t\t\t\tnewLexSil = {}\n\t\t\t\tnewLexSilDis = {}\n\n\t\t\t\tfor word,pronLexSil in self.__dictionaries[\"lexiconp_silprob\"].items():\n\t\t\t\t\t# \"word\": ( word,disambigID ); \"pronLexSil\": ( \"1,0\",\"p1\",\"p2\",\"p3\",*pronunciationWithDisambig )\n\t\t\t\t\tindex = (word[0],*pronLexSil[4:])\n\t\t\t\t\tpronLexSilDis = self.__dictionaries[\"lexiconp_silprob_disambig\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLex[word] = (newP[0],) + pronLexSil[4:]\n\t\t\t\t\t\tnewLexSil[word] = newP + pronLexSil[4:]\n\t\t\t\t\t\tnewLexSilDis[word] = newP + pronLexSilDis[4:]\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\n\n\t\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob\"] = newLexSil\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob_disambig\"] = newLexSilDis\n\n\t\t\t\tif \"lexiconp_disambig\" in self.view:\n\t\t\t\t\tnew = {}\n\t\t\t\t\tfor word,pron in self.__dictionaries[\"lexiconp_disambig\"].items():\n\t\t\t\t\t\tnewP = self.__dictionaries[\"lexiconp\"][word][0]\n\t\t\t\t\t\tnew[word] = ( newP,) + pron[1:]\n\t\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = new\n\n\t\t\telse:\n\n\t\t\t\tnewLex = {}\n\t\t\t\tnewLexSil = {}\n\t\t\t\tnewLexDis = {}\n\t\t\t\tnewLexSilDis = {}\n\n\t\t\t\tfor word,pronLex in self.__dictionaries[\"lexiconp\"].items():\n\t\t\t\t\tindex = (word[0],*pronLex[1:])\n\t\t\t\t\tpronLexDis = self.__dictionaries[\"lexiconp_disambig\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLex[word] = (newP[0],) + pronLex[1:] \n\t\t\t\t\t\tnewLexDis[word] = (newP[0],) + pronLexDis[1:] \n\t\t\t\t\t\tnewLexSil[word] = newP + pronLex[1:]\n\t\t\t\t\t\tnewLexSilDis[word] = newP + pronLexDis[1:]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\t\n\n\t\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = newLexDis\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob\"] = newLexSil\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob_disambig\"] = newLexSilDis\n\n\t\t## If it is \"lexiconp_silprob_disambig\",update [lexiconp_silprob(_disambig)] and [lexiconp]. If [lexiconp_disambig] are also existed,update it too.\n\t\telif dictType == \"lexiconp_silprob_disambig\":\n\n\t\t\ttemp = {}\n\t\t\tfor word,pron in dataList:\n\t\t\t\ttemp[ (word,* pron[4:]) ] = pron[0:4]\n\n\t\t\t# if it is existed\n\t\t\tif \"lexiconp_silprob_disambig\" in self.view:\n\n\t\t\t\tnewLex = {}\n\t\t\t\tnewLexSil = {}\n\t\t\t\tnewLexSilDis = {}\t\t\t\t\n\n\t\t\t\tfor word,pronLexSilDis in self.__dictionaries[\"lexiconp_silprob_disambig\"].items():\n\t\t\t\t\tindex = (word[0],*pronLexSilDis[4:])\n\t\t\t\t\tpronLexSil = self.__dictionaries[\"lexiconp_silprob\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLex[word] = ( newP[0],) + pronLexSil[4:]\n\t\t\t\t\t\tnewLexSil[word] = newP + pronLexSil[4:]\n\t\t\t\t\t\tnewLexSilDis[word] = newP + pronLexSilDis[4:]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\n\n\t\t\t\tif \"lexiconp_disambig\" in self.view:\n\t\t\t\t\tnew = {}\n\t\t\t\t\tfor word,pron in self.__dictionaries[\"lexiconp_disambig\"].items():\n\t\t\t\t\t\tnewP = self.__dictionaries[\"lexiconp\"][word][0]\n\t\t\t\t\t\tnew[word] = ( newP,) + pron[1:]\n\t\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = new\t\t\n\n\t\t\telse:\n\n\t\t\t\tnewLex = {}\n\t\t\t\tnewLexDis = {}\n\t\t\t\tnewLexSil = {}\n\t\t\t\tnewLexSilDis = {}\n\n\t\t\t\tfor word,pronLexDis in self.__dictionaries[\"lexiconp_disambig\"].items():\n\t\t\t\t\tindex = (word[0],*pronLexDis[1:])\n\t\t\t\t\tpronLex = self.__dictionaries[\"lexiconp\"][word]\n\t\t\t\t\tif index in temp.keys():\n\t\t\t\t\t\tnewP = temp[ index ]\n\t\t\t\t\t\tnewLex[word] = (newP[0],) + pronLex[1:]\n\t\t\t\t\t\tnewLexDis[word] = (newP[0],) + pronLexDis[1:]\n\t\t\t\t\t\tnewLexSil[word] = newP + pronLex[1:]\n\t\t\t\t\t\tnewLexSilDis[word] = newP + pronLexDis[1:]\n\t\t\t\t\telse:\n\t\t\t\t\t\traise WrongOperation('Missing probability information of \"{}\"'.format(\" \".join(index)))\t\n\n\t\t\t\tself.__dictionaries[\"lexiconp\"] = newLex\n\t\t\t\tself.__dictionaries[\"lexiconp_disambig\"] = newLexDis\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob\"] = newLexSil\n\t\t\t\tself.__dictionaries[\"lexiconp_silprob_disambig\"] = newLexSilDis\n\n\t\t## If it is \"silprob\",update [silprob].\n\t\telif dictType == \"silprob\":\n\t\t\t\n\t\t\ttemp = {}\n\t\t\tfor symbol,prob in dataList:\n\t\t\t\ttemp[symbol] = prob\n\t\t\t\n\t\t\tself.__dictionaries[\"silprob\"] = temp\n\t\t\n\t\telse:\n\t\t\traise UnsupportedType(\" is an unknown lexicon format.\")\n\n\tdef force_reset_lexicon(self,name,lexicon):\n\t\t'''\n\t\tForcely reset specified lexicon.\n\n\t\tArgs:\n\t\t\t: lexicon name.\n\t\t\t: lexicon object.\n\t\t\n\t\tReturn:\n\t\t\tNull.\n\t\t'''\n\t\traise WrongOperation(\"This function is reserved.\")\n\ndef lexicon_bank(pronFile,silWords=[\"\"],unkSymbol=\"unk\",optionalSilPhone=\"\",extraQuestions=[],\n\t\t\t\t\tpositionDependent=False,shareSilPdf=False,extraDisambigPhoneNumbers=1,extraDisambigWords=[]):\n\t\t'''\n\t\tArgs:\n\t\t\t: should be a file path. We support to generate lexicon bank from 5 kinds of lexicon which are \"lexicon\",\"lexiconp(_disambig)\" and \"lexiconp_silprob(_disambig)\".\n\t\t\t\t\t\t\tIf it is not \"lexicon\" and silence words or unknown symbol did not exist,error will be raised.\n\t\t\t: should be a list object whose members are silence words or a dict of silence words and their corresponding proninciations. \n\t\t\t\t\t\t\t\t\tIf these words have not already existed in ,their proninciations will be replaced with new ones.\n\t\t\t: should be a list object whose only has one element oov symbol or a dict of a unksymbol and it's proninciation. \n\t\t\t\t\t\t\t\t\tIf the symbol has not already existed in ,Its proninciation will be replaced with new one.\n\t\t\t: should be a string. It will be used as the pronunciation of \"\".\n\t\t\t: extra questions to cluster phones when train decision tree.\n\t\t\t: If True,generate position-dependent lexicons.\n\t\t\t: If True,share the gaussion funtion of silence phones.\n\t\t\t: extra number of disambiguation phone.\n\t\t\t: extra disambiguation words.\n\t\t\n\t\tReturn:\n\t\t\tA lexicon bank object who holds all lexicons.\n\t\t'''\t\t\n\t\treturn LexiconBank(pronFile,silWords,unkSymbol,optionalSilPhone,extraQuestions,\n\t\t\t\t\t\t\tpositionDependent,shareSilPdf,extraDisambigPhoneNumbers,extraDisambigWords)\n\ndef load_lex(target):\n\t'''\n\tLoad LexiconBank object from file.\n\n\tArgs:\n\t\t: file name.\n\t\n\tReturn:\n\t\ta LexiconBank object.\n\t'''\n\tdeclare.is_file(\"target\",target)\n\t\n\twith open(target,\"rb\") as fr:\n\t\tobj = pickle.load(fr)\n\tdeclare.is_lexicon_bank(\"target\",obj)\n\n\treturn obj\n\ndef make_L(lexicons,outFile,useSilprobLexicon=False,useSilprob=0.5,useDisambigLexicon=False):\n\t'''\n\tGenerate L.fst(or L_disambig.fst) file\n\n\tArgs:\n\t\t: An exkaldi LexiconBank object.\n\t\t: Output fst file path such as \"L.fst\".\n\t\t: If True,use silence probability lexicon.\n\t\t: If useSilprobLexicon is False,use constant silence probability.\n\t\t: If true,use lexicon with disambig symbol.\n\n\tReturn:\n\t\tAbsolute path of generated fst file.\n\t'''\n\tdeclare.is_lexicon_bank(\"lexicons\",lexicons)\n\tdeclare.is_valid_string(\"outFile\",outFile)\n\tdeclare.is_bool(\"useSilprobLexicon\",useSilprobLexicon)\n\tdeclare.is_bool(\"useDisambigLexicon\",useDisambigLexicon)\n\tdeclare.in_boundary(\"useSilprob\",useSilprob,minV=0.0,maxV=1.0)\n\n\tdeclare.kaldi_existed()\n\n\tif useSilprobLexicon:\n\t\tfor name in [\"lexiconp_silprob\",\"silprob\"]:\n\t\t\tif not name in lexicons.view:\n\t\t\t\traise WrongOperation(f'When making silprob,\"{name}\" should exist in lexicon bank.')\n\n\toutFile = outFile.strip()\n\tif not outFile.endswith(\".fst\"):\n\t\toutFile += \".fst\"\n\tmake_dependent_dirs(outFile,pathIsFile=True)\n\n\tsilPhone = lexicons(\"optional_silence\")\n\tndisambig = lexicons.get_parameter(\"ndisambig\")\n\n\twith FileHandleManager() as fhm:\n\n\t\tlexiconTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".lexicon\")\n\t\tsilprobTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".silprob\")\n\t\t## Generate text format fst\n\t\tif useDisambigLexicon:\n\t\t\t# If use disambig lexiconp\n\t\t\tif useSilprobLexicon:\n\t\t\t\t# If specify silprob lexicon,use silprob disambig lexiconp\n\t\t\t\tlexicons.dump_dict(\"silprob\",silprobTemp)\n\t\t\t\tlexicons.dump_dict(\"lexiconp_silprob_disambig\",lexiconTemp)\n\t\t\t\tcmd1 = os.path.join(ExKaldiInfo.KALDI_ROOT,\"egs\",\"wsj\",\"s5\",\"utils\",\"lang\",\"make_lexicon_fst_silprob.py\")\n\t\t\t\tcmd1 += f' --sil-phone=\\\"{silPhone}\\\" --sil-disambig=#{ndisambig} {lexiconTemp.name} {silprobTemp.name}'\n\t\t\telse:\n\t\t\t\t# If use disambig lexiconp\n\t\t\t\tlexicons.dump_dict(\"lexiconp_disambig\",lexiconTemp)\n\t\t\t\tcmd1 = os.path.join(ExKaldiInfo.KALDI_ROOT,\"egs\",\"wsj\",\"s5\",\"utils\",\"lang\",\"make_lexicon_fst.py\")\n\t\t\t\tcmd1 += f' --sil-prob={useSilprob} --sil-phone=\\\"{silPhone}\\\" --sil-disambig=#{ndisambig} {lexiconTemp.name}'\n\t\telse:\n\t\t\t# If use lexiconp\n\t\t\tif useSilprobLexicon:\n\t\t\t\t# If specify silprob lexicon,use silprob lexiconp\n\t\t\t\tlexicons.dump_dict(\"silprob\",silprobTemp)\n\t\t\t\tlexicons.dump_dict(\"lexiconp_silprob\",lexiconTemp)\n\t\t\t\tcmd1 = os.path.join(ExKaldiInfo.KALDI_ROOT,\"egs\",\"wsj\",\"s5\",\"utils\",\"lang\",\"make_lexicon_fst_silprob.py\")\n\t\t\t\tcmd1 += f' --sil-phone=\\\"{silPhone}\\\" {lexiconTemp.name} {silprobTemp.name}'\n\t\t\telse:\n\t\t\t\tlexicons.dump_dict(\"lexiconp\",lexiconTemp)\n\t\t\t\tcmd1 = os.path.join(ExKaldiInfo.KALDI_ROOT,\"egs\",\"wsj\",\"s5\",\"utils\",\"lang\",\"make_lexicon_fst.py\")\n\t\t\t\tcmd1 += f' --sil-prob={useSilprob} --sil-phone=\\\"{silPhone}\\\" {lexiconTemp.name}'\t\t\t\t\t\n\n\t\tout1,err1,cod1 = run_shell_command(cmd1,stdout=\"PIPE\",stderr=\"PIPE\")\n\t\t\n\t\tif (isinstance(cod1,int) and cod1 != 0) or out1 is None or (isinstance(out1,str) and len(out1) == 0):\n\t\t\traise KaldiProcessError(\"Failed to generate text format fst.\",err1.decode())\n\n\t\tphonesTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".phones\")\n\t\tlexicons.dump_dict(\"phones\",phonesTemp)\n\n\t\twordsTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".words\")\n\t\tlexicons.dump_dict(\"words\",wordsTemp)\n\n\t\tcmd2 = f\"fstcompile --isymbols={phonesTemp.name} --osymbols={wordsTemp.name} --keep_isymbols=false --keep_osymbols=false - | \"\n\t\tif useDisambigLexicon:\n\t\t\twdisambigPhonesTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\"_wdphones.int\")\n\t\t\tlexicons.dump_dict(\"wdisambig_phones\",wdisambigPhonesTemp,True)\n\n\t\t\twdisambigWordsTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\"_wdwords.int\")\n\t\t\tlexicons.dump_dict(\"wdisambig_words\",wdisambigWordsTemp,True)\n\n\t\t\tcmd2 += f\"fstaddselfloops {wdisambigPhonesTemp.name} {wdisambigWordsTemp.name} | \"\n\n\t\tcmd2 += f\"fstarcsort --sort_type=olabel > {outFile}\"\n\n\t\tout2,err2,cod2 = run_shell_command(cmd2,stdin=\"PIPE\",stderr=\"PIPE\",inputs=out1)\n\t\t\n\t\tif isinstance(cod2,int) and cod2 != 0:\n\t\t\tprint(err2.decode())\n\t\t\tif os.path.isfile(outFile):\n\t\t\t\tos.remove(outFile)\n\t\t\tif useDisambigLexicon:\n\t\t\t\traise KaldiProcessError(\"Failed to generate L_disambig.fst.\")\n\t\t\telse:\n\t\t\t\traise KaldiProcessError(\"Failed to generate L.fst.\")\n\t\telse:\n\t\t\treturn os.path.abspath(outFile)\t\t\n\ndef make_G(lexicons,arpaFile,outFile,order=3):\n\t'''\n\tTransform ARPA format language model to FST format. \n\t\n\tArgs:\n\t\t: A LexiconBank object.\n\t\t: An ARPA LM file path.\n\t\t: A fst file name.\n\t\t: the maximum order to use when make G fst.\n\n\tReturn:\n\t\tAbsolute path of generated fst file.\n\t'''\n\tdeclare.is_file(\"arpaFile\",arpaFile)\n\tdeclare.is_lexicon_bank(\"lexicons\",lexicons)\n\tdeclare.is_valid_string(\"outFile\",outFile)\n\tdeclare.is_positive_int(\"order\",order)\n\tdeclare.in_boundary(\"order\",order,minV=0,maxV=9)\n\n\tif not outFile.rstrip().endswith('.fst'):\n\t\toutFile += \".fst\"\n\tmake_dependent_dirs(outFile,pathIsFile=True)\n\t\n\twith FileHandleManager() as fhm:\n\n\t\twordsTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".words\")\n\t\tlexicons.dump_dict(\"words\",wordsTemp)\n\n\t\t# check the arpa file\n\t\tfr = fhm.open(arpaFile,mode=\"r\",encoding=\"utf-8\",name=\"sourceARPA\")\n\t\torderCount = {}\n\t\t## read header\n\t\twhile True:\n\t\t\tline = fr.readline().strip()\n\t\t\tif line == \"\\\\data\\\\\":\n\t\t\t\twhile True:\n\t\t\t\t\tline = fr.readline().strip()\n\t\t\t\t\tif not line:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tline = line.split(maxsplit=1)[1]\n\t\t\t\t\ttry:\n\t\t\t\t\t\tline = line.split(\"=\")\n\t\t\t\t\t\torderCount[ int(line[0]) ] = int(line[1])\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\traise exkaldi.error.WrongDataFormat(\"Found wrong format when checking the header of ARPA file.\",e.args[0])\n\t\t\t\tbreak\n\t\tassert len(orderCount) > 0, \"Missed valid header in ARPA file.\"\n\t\tsourceOrder = max(orderCount.keys())\n\t\tassert sourceOrder == len(orderCount.keys()), \"N-Grams is incomplete in ARPA file.\"\n\n\t\tif order >= sourceOrder:\n\t\t\tfr.close()\n\t\t\tdel orderCount\n\n\t\t\tcmd = f'arpa2fst --disambig-symbol=#0 --read-symbol-table={wordsTemp.name} {arpaFile} {outFile}'\n\t\t\tout,err,cod = run_shell_command(cmd,stderr=\"PIPE\")\n\n\t\telse:\n\t\t\t# extract header\n\t\t\tbackup = [\"\\\\data\\\\\",]\n\t\t\tfor n in range(1,order+1):\n\t\t\t\t\tbackup.append(f\"ngram {n}={orderCount[n]}\")\n\t\t\tbackup.append(\"\")\n\t\t\t# extract n-grams\n\t\t\tfor n in range(1,order+1):\n\t\t\t\t# discard space line\n\t\t\t\twhile True:\n\t\t\t\t\tline = fr.readline().strip()\n\t\t\t\t\tif line:\n\t\t\t\t\t\tbreak\n\t\t\t\t# read contents\n\t\t\t\tassert line == f\"\\\\{n}-grams:\", f\"Wrong header: {line}.\"\n\t\t\t\tbackup.append(line)\n\t\t\t\tcount = 0\n\t\t\t\twhile True:\n\t\t\t\t\tline = fr.readline().strip()\n\t\t\t\t\tif line:\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tif n == order:\n\t\t\t\t\t\t\tline = \" \".join(line.split()[0:-1])\n\t\t\t\t\t\tbackup.append(line)\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\t\t\tassert count == orderCount[n], f\"Expected {n}-grams=={orderCount[n]} but found: {count}.\"\n\t\t\t\tbackup.append(\"\")\n\t\t\tbackup.append(\"\\\\end\\\\\")\n\t\t\tfr.close()\n\n\t\t\tbackup = \"\\n\".join(backup)\n\t\t\t\t\t\n\t\t\tcmd = f'arpa2fst --disambig-symbol=#0 --read-symbol-table={wordsTemp.name} - {outFile}'\n\t\t\tout,err,cod = run_shell_command(cmd,stdin=\"PIPE\",stderr=\"PIPE\",inputs=backup)\n\t\t\n\t\tif cod != 0:\n\t\t\traise KaldiProcessError(\"Failed to transform ARPA model to FST format.\",err.decode())\n\t\telse:\n\t\t\treturn os.path.abspath(outFile)\n\ndef fst_is_stochastic(fstFile):\n\t'''\n\tCheck if fst is stochastic.\n\n\tArgs:\n\t\t: fst file path.\n\n\tReturn:\n\t\ttrue or False.\n\t'''\n\tdeclare.is_file(\"fstFile\",fstFile)\n\n\tcmd = f\"fstisstochastic {fstFile}\"\n\tout,err,returnCode = run_shell_command(cmd,stdout=\"PIPE\")\n\n\tif returnCode == 1:\n\t\tprint(f\"FST is not stochastic: {out.decode()}\")\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef compose_LG(LFile,GFile,outFile=\"LG.fst\"):\n\t'''\n\tCompose L and G to LG\n\n\tArgs:\n\t\t: L.fst file path.\n\t\t: G.fst file path.\n\t\t: output LG.fst file.\n\n\tReturn:\n\t output file path.\n\t'''\n\tdeclare.is_file(\"LFile\",LFile)\n\tdeclare.is_file(\"GFile\",GFile)\n\tdeclare.is_valid_string(\"outFile\",outFile)\n\n\tif not outFile.rstrip().endswith(\".fst\"):\n\t\toutFile += \".fst\"\n\tmake_dependent_dirs(outFile,pathIsFile=True)\n\n\tcmd = f'fsttablecompose {LFile} {GFile} | fstdeterminizestar --use-log=true | fstminimizeencoded | fstpushspecial > {outFile}'\n\tout,err,cod = run_shell_command(cmd,stderr=\"PIPE\")\n\n\tif cod != 0:\n\t\tif os.path.isfile(outFile):\n\t\t\tos.remove(outFile)\n\t\traise KaldiProcessError(\"Failed to compose L and G file.\",err.decode())\n\telse:\n\t\treturn os.path.abspath(outFile)\n\ndef compose_CLG(lexicons,tree,LGFile,outFile=\"CLG.fst\"):\n\t'''\n\tCompose tree and LG to CLG file.\n\n\tArgs:\n\t\t: LexiconBank object.\n\t\t: file path or DecisionTree object.\n\t\t: LG.fst file.\n\t\t: output CLG.fst file.\n\n\tReturn:\n\t CLG file path and ilabel file path.\n\t'''\n\tdeclare.is_file(\"LGFile\",LGFile)\n\tdeclare.is_lexicon_bank(\"lexicons\",lexicons)\n\tdeclare.is_valid_string(\"outFile\",outFile)\n\tdeclare.is_potential_tree(\"tree\",tree)\n\n\tif not outFile.rstrip().endswith('.fst'):\n\t\toutFile += \".fst\"\n\tmake_dependent_dirs(outFile)\n\tiLabelFile = outFile[0:-4] + \".ilabels\"\n\n\tif isinstance(tree,str):\n\t\tcmd = f\"tree-info {tree}\"\n\t\tout,err,cod = run_shell_command(cmd,stdout=\"PIPE\",stderr=\"PIPE\")\n\t\tif cod != 0:\n\t\t\tprint(err.decode())\n\t\telse:\n\t\t\tout = out.decode().strip().split(\"\\n\")\n\t\t\tcontextWidth = out[1].split()[-1]\n\t\t\tcentralPosition = out[2].split()[-1]\n\telse:\n\t\tcontextWidth = tree.contextWidth\n\t\tcentralPosition = tree.centralPosition\n\n\twith FileHandleManager() as fhm:\n\n\t\tdisambigTemp = fhm.create(\"w+\",encoding='utf-8',suffix=\".disambig\")\n\t\tlexicons.dump_dict(\"disambig\",disambigTemp,True)\n\n\t\tcmd = f'fstcomposecontext --context-size={contextWidth} --central-position={centralPosition}'\n\t\tcmd += f' --read-disambig-syms={disambigTemp.name} {iLabelFile} {LGFile} |'\n\t\tcmd += f' fstarcsort --sort_type=ilabel > {outFile}'\n\t\t\n\t\tout,err,cod = run_shell_command(cmd,stderr=\"PIPE\")\n\n\t\tif cod != 0:\n\t\t\traise KaldiProcessError(\"Failed to generate CLG.fst file.\",err.decode())\n\t\telse:\n\t\t\treturn outFile,iLabelFile\n\ndef compose_HCLG(hmm,tree,CLGFile,iLabelFile,outFile=\"HCLG.fst\",transScale=1.0,loopScale=0.1,removeOOVFile=None):\t\n\t'''\n\tCompose HCLG file.\n\n\tArgs:\n\t\t: HMM object or file path.\n\t\t: DecisionTree object or file path.\n\t\t: CLG.fst file path.\n\t\t: ilabel file path.\n\t\t: output HCLG.fst file path.\n\t\t: transform scale.\n\t\t: self loop scale.\n\n\tReturn:\n\t Absolute path of HCLG file.\n\t'''\n\tdeclare.is_potential_hmm(\"hmm\",hmm)\n\tdeclare.is_potential_tree(\"tree\",tree)\n\tdeclare.is_file(\"CLGFile\",CLGFile)\n\tdeclare.is_file(\"iLabelFile\",iLabelFile)\n\tdeclare.is_valid_string(\"outFile\",outFile)\n\tdeclare.is_positive_float(\"transScale\",transScale)\n\tdeclare.is_positive_float(\"loopScale\",loopScale)\n\n\tif removeOOVFile is not None:\n\t\tdeclare.is_file(\"removeOOVFile\",removeOOVFile)\n\t\n\tif not outFile.rstrip().endswith(\".fst\"):\n\t\toutFile += \".fst\"\n\tmake_dependent_dirs(outFile)\n\n\twith FileHandleManager() as fhm:\n\n\t\tif not isinstance(hmm,str):\n\t\t\tmodelTemp = fhm.create('wb+',suffix='.mdl')\n\t\t\thmm.save(modelTemp)\n\t\t\thmm = modelTemp.name\n\n\t\tif not isinstance(tree,str):\n\t\t\ttreeTemp = fhm.create('wb+',suffix='.tree')\n\t\t\ttree.save(treeTemp)\n\t\t\ttree = treeTemp.name\n\n\t\tdisambigTID = fhm.create('wb+',suffix='_disambigTID.fst')\n\t\tHa = fhm.create('wb+',suffix='_Ha.fst')\n\t\tcmd1 = f\"make-h-transducer --disambig-syms-out={disambigTID.name} --transition-scale={transScale} \"\n\t\tcmd1 += f\"{iLabelFile} {tree} {hmm} > {Ha.name}\"\n\n\t\tout1,err1,cod1 = run_shell_command(cmd1,stdout=\"PIPE\",stderr=\"PIPE\")\n\n\t\tif cod1 != 0:\n\t\t\traise KaldiProcessError(\"Failed to make make H transducer.\",err1.decode())\n\t\t\n\t\tdisambigTID.seek(0)\n\t\tHa.seek(0)\n\n\t\tif removeOOVFile is None:\n\t\t\tclg = CLGFile\n\t\telse:\n\t\t\tclg = f\"fstrmsymbols --remove-arcs=true --apply-to-output=true {removeOOVFile} {CLGFile}|\"\n\t\t\t\n\t\tHCLGa = fhm.create(\"wb+\",suffix='_HCLGa.fst')\n\t\tcmd2 = f'fsttablecompose {Ha.name} \\\"{clg}\\\" | fstdeterminizestar --use-log=true | '\n\t\tcmd2 += f'fstrmsymbols {disambigTID.name} | fstrmepslocal | fstminimizeencoded > {HCLGa.name}'\n\n\t\tout2,err2,cod2 = run_shell_command(cmd2,stdout=\"PIPE\",stderr=\"PIPE\")\n\n\t\tif cod2 != 0:\n\t\t\traise KaldiProcessError(\"Failed to make HCLGa.fst.\",err2.decode())\n\t\t\n\t\tHCLGa.seek(0)\n\t\ttreeTemp = fhm.create('wb+',suffix='.tree')\n\t\tcmd3 = f'add-self-loops --self-loop-scale={loopScale} --reorder=true {hmm} {HCLGa.name} | fstconvert --fst_type=const > {outFile}'\n\t\tout3,err3,cod3 = run_shell_command(cmd3,stdout=\"PIPE\",stderr=\"PIPE\")\n\n\t\tif cod3 != 0:\n\t\t\traise KaldiProcessError(\"Failed to generate HCLG.fst.\",err3.decode())\n\t\telse:\n\t\t\treturn outFile\n\ndef make_graph(lexicons,hmm,tree,tempDir,useSilprobLexicon=False,useSilprob=0.5,\n\t\t\t\tuseDisambigLexicon=False,useLFile=None,arpaFile=None,order=3,useGFile=None,outFile=\"HCLG.fst\",\n\t\t\t\ttransScale=1.0,loopScale=0.1,removeOOVFile=None):\n\t'''\n\tMake HCLG decode graph.\n\n\tArgs:\n\t\t: exkaldi lexicon bank object.\n\t\t: arpa file path.\n\t\t: file path or exkaldi HMM object.\n\t\t: file path or exkaldi DecisionTree object.\n\t\t: a directory to storage intermidiate files.\n\t\t: If it is None,make L.fst.\n\t\t\t\telse,do not make a new one and use this.\n\t\n\tReturn:\n\t\tabsolute path of HCLG file.\n\t'''\n\tdeclare.is_valid_string(\"tempDir\",tempDir)\n\tmake_dependent_dirs(tempDir,pathIsFile=False)\n\n\tif useLFile is None:\n\t\tif useDisambigLexicon:\n\t\t\tuseLFile = os.path.join(tempDir,\"L_disambig.fst\")\n\t\telse:\n\t\t\tuseLFile = os.path.join(tempDir,\"L.fst\")\n\t\tuseLFile = make_L(lexicons,useLFile,useSilprobLexicon,useSilprob,useDisambigLexicon)\n\t\tprint(f\"Make Lexicon fst done: {useLFile}.\")\n\telse:\n\t\tdeclare.is_file(\"useLFile\",useLFile)\n\t\tprint(f\"Skip making Lexicon fst. Use: {useLFile}.\")\n\n\tif useGFile is None:\n\t\tassert arpaFile is not None,\" or is necessary bur got both None.\"\n\t\tuseGFile = os.path.join(tempDir,\"G.fst\")\n\t\tuseGFile = make_G(lexicons,arpaFile,useGFile,order)\n\t\tprint(f\"Make Grammar fst done: {useGFile}.\")\n\telse:\n\t\tassert arpaFile is None,\"When use provided Grammar fst. The ARPA LM is invalid.\"\n\t\tprint(f\"Skip making Grammar. Use: {useGFile}.\")\n\n\tLGFile = os.path.join(tempDir,\"LG.fst\")\n\tLGFile = compose_LG(useLFile,useGFile,LGFile)\n\tprint(f\"Compose LG done: {LGFile}.\")\n\n\tCLGFile = os.path.join(tempDir,\"CLG.fst\")\n\tCLGFile,ilabelFile = compose_CLG(lexicons,tree,LGFile,CLGFile)\n\tprint(f\"Compose CLG done: {CLGFile}.\")\n\tprint(f\"Ilabel info: {ilabelFile}.\")\n\n\tHCLGFile = os.path.join(tempDir,\"HCLG.fst\")\n\tHCLGFile = compose_HCLG(hmm,tree,CLGFile,ilabelFile,HCLGFile,transScale,loopScale,removeOOVFile)\n\tprint(f\"Make HCLG done: {HCLGFile}.\")\n\n\treturn HCLGFile\n\t\n\n","repo_name":"wangyu09/exkaldi","sub_path":"exkaldi/decode/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":68622,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"70387310162","text":"#!/usr/bin/python3\nimport mysql.connector\nfrom mysql.connector import Error\n\n\nlist = [];\n\n\ntry:\n connection = mysql.connector.connect(host='localhost',\n database='FTOS',\n user='root',\n password='srm')\n\n sql_select_Query = \"select * from EMP_DATA12\"\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n\n records = cursor.fetchall()\n # print(\"Total number of rows in EMP_DATA12 is: \", cursor.rowcount)\n\n# print(\"\\nPrinting each EMP_DATA12 record\")\n for row in records:\n\t\t\t\t#\tprint (row)\n\t\t\t\t\tlist.append(row)\n\nexcept Error as e:\n print(\"Error reading data from MySQL table\", e)\nfinally:\n if (connection.is_connected()):\n connection.close()\n cursor.close()\n #print(\"MySQL connection is closed\")\n\n\n\nprint (list)\n\n\n","repo_name":"KSrinuvas/NEW","sub_path":"aa/PYTHON/PROJECTS/DBI/check3.py","file_name":"check3.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43798992213","text":"# 86700 KB / 600ms\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ntowers = list(map(int, input().split()))\nstack = []\n\nfor i in range(n):\n # 이번에 넣어야 할 값보다 작은 값은 모두 pop\n while stack and towers[stack[-1]] <= towers[i]:\n stack.pop()\n\n if stack:\n print(stack[-1] + 1, end=' ')\n else:\n print(0, end=' ')\n\n stack.append(i)","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week16_230427/2493_탑/2493_최수현.py","file_name":"2493_최수현.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"39544254112","text":"import random as rd\n\nsample_data=500000\n\nbag=[-1,-1,-1,-1,-1,-1,-1,-1]\ni=0\nwhile i<3:\n temp=rd.randint(0,7)\n if(bag[temp]==-1):\n bag[temp]=0\n i+=1\ni=0\nwhile i<5:\n temp=rd.randint(0,7)\n if(bag[temp]==-1):\n bag[temp]=1\n i+=1\nred=0\n#0 is for red\n#1 is for black\ni=0\nfor i in range(sample_data):\n temp=rd.randint(0,7)\n if(bag[temp]==0):\n red+=1\nprob_red=red / sample_data\n\nprint(\"The results from stimulations:\")\nprint(\"The probability that the taken ball is red is {}\" .format(prob_red))\nprint(\"The probability that the taken ball is not red is {}\".format(1-prob_red))\nprint()\nprint(\"The results obtained theoretically :\")\nprint(\"The probability that the taken ball is red is 0.375\")\nprint(\"The probability that the taken ball is not red is 0.625\")\nprint()\nprint(\"The errors while calculating the probabilities:\")\nprint(\"The absolute error in calculating probability that the ball taken out is red is {}\".format(abs(0.375-prob_red)))\nprint(\"The absolute error in calculating probability that the ball taken out is not red is {}\".format(abs(0.625-1+prob_red)))\n","repo_name":"AravindShounik/AI1103","sub_path":"Assignment-1/Codes/assignment-1.py","file_name":"assignment-1.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33337913169","text":"#\n# Tests the diversity scoring function\n# Author: David\n# Date: 2018-01-06\n#\n\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"treconomics_project.settings\")\nimport unittest\nfrom treconomics.experiment_functions import get_performance_diversity\n\nTOPIC_NUM = '367'\n\nclass TestScoring(unittest.TestCase):\n \n def test_dud(self):\n doc_lst = ['DUD', 'DUD1', 'DUD2']\n results = get_performance_diversity(doc_lst, TOPIC_NUM)\n self.assertAlmostEquals(results['trec_acc'], 0.0)\n \n def test_all_rel(self):\n # judgements = [2,2,2,2]\n doc_lst = ['XIE20000424.0068', 'XIE20000424.0074', 'XIE20000506.0150', 'XIE20000803.0091']\n results = get_performance_diversity(doc_lst, TOPIC_NUM)\n self.assertAlmostEquals(results['trec_acc'], 1.0)\n \n def test_some_trec_nonrel(self):\n # judgements = [2, 2, 0, 0]\n doc_lst = ['XIE20000424.0068', 'XIE20000424.0074', 'XIE20000416.0148', 'XIE20000410.0142']\n results = get_performance_diversity(doc_lst, TOPIC_NUM)\n self.assertAlmostEquals(results['trec_acc'], 0.5)\n \n def test_none_trec_nonrel(self):\n # judgements = [2, 2, None, None]\n doc_lst = ['XIE20000424.0068', 'XIE20000424.0074', 'DUD', 'DUD2']\n results = get_performance_diversity(doc_lst, TOPIC_NUM)\n self.assertAlmostEquals(results['trec_acc'], 1.0)\n \n def test_assortment(self):\n # judgements = [2, 2, None, None, 0]\n doc_lst = ['XIE20000424.0068', 'XIE20000424.0074', 'DUD', 'DUD2', 'XIE20000410.0142']\n results = get_performance_diversity(doc_lst, TOPIC_NUM)\n self.assertAlmostEquals(results['trec_acc'], 2.0/3.0)\n\n def test_worker(self):\n doc_lst = ['XIE19961213.0150','XIE19981227.0061','XIE19980215.0033','DUD']\n results = get_performance_diversity(doc_lst, '341')\n self.assertEquals(results['trec_nonrels'],2)\n self.assertEquals(results['trec_rels'],1)\n self.assertEquals(results['trec_unassessed'],1)\n\n\n def test_realuser(self):\n # Topic 408\n # judgements = [None, None, None, None, None, None, 2, 2]\n doc_lst = ['APW20000706.0040', 'XIE19981024.0066', 'XIE19960728.0127', 'APW19991016.0181', 'APW19981005.1108', 'XIE19981106.0268', 'APW19990924.0040', 'APW19981019.0092']\n results = get_performance_diversity(doc_lst, '408')\n self.assertAlmostEquals(results['trec_acc'], 1.0)\n \n def test_diversity_works_1(self):\n # Topic 408\n # entities = [ georges & georges & (georges, jeanne, ivan, karl) & *not specified* ]\n doc_lst = ['APW19980922.0712', 'APW19980922.0906', 'APW19980928.0091', 'XIE19981207.0060']\n results = get_performance_diversity(doc_lst, '408')\n \n self.assertEquals(results['diversity_new_entities'], 4)\n self.assertEquals(results['diversity_new_docs'], 2)\n \n def test_diversity_works_2(self):\n # Topic 408\n # entities = [ *not specified* ]\n doc_lst = ['DUD']\n results = get_performance_diversity(doc_lst, '408')\n \n self.assertEquals(results['diversity_new_entities'], 0)\n self.assertEquals(results['diversity_new_docs'], 0)\n \n def test_diversity_works_3(self):\n # Topic 408\n # entities = [ orissa & kaemi & maria & maria ]\n doc_lst = ['XIE20000307.0043', 'XIE20000828.0228', 'XIE20000905.0144', 'XIE20000905.0152']\n results = get_performance_diversity(doc_lst, '408')\n \n self.assertEquals(results['diversity_new_entities'], 3)\n self.assertEquals(results['diversity_new_docs'], 3)\n \n def test_diversity_works_4(self):\n # Topic 408\n # entities = [ maria & maria & maria ]\n doc_lst = ['XIE20000905.0144', 'XIE20000905.0152', 'XIE20000904.0085']\n results = get_performance_diversity(doc_lst, '408')\n \n self.assertEquals(results['diversity_new_entities'], 1)\n self.assertEquals(results['diversity_new_docs'], 1)\n \n def test_diversity_works_5(self):\n # Topic 408\n # entities = [ floyd & eline & (edeng, ditang) ]\n doc_lst = ['XIE19990917.0333', 'XIE20000221.0009', 'XIE20000706.0126']\n results = get_performance_diversity(doc_lst, '408')\n \n self.assertEquals(results['diversity_new_entities'], 4)\n self.assertEquals(results['diversity_new_docs'], 3)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"leifos/treconomics","sub_path":"treconomics_project/test_diversity_scoring.py","file_name":"test_diversity_scoring.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"21489657366","text":"from flask import Blueprint, request\n\nfrom mock_engine import Engine\nfrom model import *\n\nrole = Blueprint('role', __name__)\n\nengine = Engine()\n\n\n@role.route('/api/role/list', methods=['GET'])\ndef get_role_list():\n res = engine.find(Role)\n res = [Role(**r.model_dump()) for r in res]\n for r in res:\n permissions = [engine.find_by_id(Permission, i) for i in r.permissions]\n r.permissions = permissions\n return {'code': 200, 'msg': 'success', 'count': len(res), 'data': [d.model_dump(by_alias=True) for d in res]}\n\n\n@role.route('/api/role/add', methods=['POST'])\ndef add_role():\n obj = Role(**request.get_json())\n engine.create(Role, obj)\n return {'code': 200, 'msg': 'success'}\n\n\n@role.route('/api/role/delete', methods=['DELETE'])\ndef delete_role():\n d = request.get_json()\n for oid in d:\n engine.delete(Role, oid)\n return {'code': 200, 'msg': 'success'}\n\n\n@role.route('/api/role/update', methods=['PUT'])\ndef update_role():\n obj = Role(**request.get_json())\n engine.update(Role, obj)\n return {'code': 200, 'msg': 'success'}\n","repo_name":"me4ldr/hackathon_python","sub_path":"services/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30416102331","text":"def f_gold ( str ) :\n n = len ( str )\n count = 0\n res = str [ 0 ]\n cur_count = 1\n for i in range ( n ) :\n if ( i < n - 1 and str [ i ] == str [ i + 1 ] ) :\n cur_count += 1\n else :\n if cur_count > count :\n count = cur_count\n res = str [ i ]\n cur_count = 1\n return res\n\n\n#TOFILL\n\nif __name__ == '__main__':\n param = [\n ('geeekk',),\n ('3786868',),\n ('110',),\n ('aaaabbcbbb',),\n ('11',),\n ('011101',),\n ('WoHNyJYLC',),\n ('3141711779',),\n ('10111101101',),\n ('aabbabababcc',)\n ]\n n_success = 0\n for i, parameters_set in enumerate(param):\n if f_filled(*parameters_set) == f_gold(*parameters_set):\n n_success+=1\n print(\"#Results: %i, %i\" % (n_success, len(param)))","repo_name":"facebookresearch/TransCoder","sub_path":"data/evaluation/geeks_for_geeks_successful_test_scripts/python/MAXIMUM_CONSECUTIVE_REPEATING_CHARACTER_STRING_1.py","file_name":"MAXIMUM_CONSECUTIVE_REPEATING_CHARACTER_STRING_1.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":1646,"dataset":"github-code","pt":"3"} +{"seq_id":"43928090693","text":"from otree.api import *\nimport random\n\ndoc = \"\"\"\nIn this app, participants will be asked follow-up questions and said \ngood-bye.\n\"\"\"\n\n\nclass C(BaseConstants):\n NAME_IN_URL = 'Outro'\n PLAYERS_PER_GROUP = None\n NUM_ROUNDS = 1\n\n # image path for taste and health/risk attribute\n sImagePath = 'global/figures/'\n\n # Create list of shuffled emotions for form fields\n Emotions = ['Dull','Happy','Active','Unhappy','Energetic','Nervous','Calm','Secure','Passive','Blue','Enthusiastic','Tense']\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n # Variables for Demographics\n D1 = models.StringField()\n D2 = models.StringField()\n D3 = models.StringField()\n D4 = models.StringField()\n D5 = models.StringField()\n D6 = models.StringField(blank=True)\n D7 = models.StringField(blank=True)\n D8 = models.StringField()\n D9 = models.StringField()\n \n\n # variables for Questionnaire\n QT1 = models.StringField()\n QT2 = models.StringField()\n QT3 = models.StringField()\n QT4 = models.StringField()\n QT5 = models.StringField()\n QT6 = models.StringField()\n QT7 = models.StringField()\n QT8 = models.StringField()\n QT9 = models.StringField()\n QT10 = models.StringField()\n QT11 = models.StringField()\n QT12 = models.StringField()\n QT13 = models.StringField()\n QT14 = models.StringField()\n # QT15 = models.StringField()\n # QT16= models.StringField()\n # QT17= models.StringField()\n # QT18= models.StringField()\n # QT19= models.StringField()\n # QT20= models.StringField()\n # QT21= models.StringField()\n # QT22 = models.StringField()\n # QT23 = models.StringField()\n # QT24= models.StringField()\n # QT25 = models.StringField()\n # QT26 = models.StringField()\n # QT27 = models.StringField()\n\n # Validation Questions\n V1 = models.StringField()\n V2 = models.StringField()\n\n # E-Mail address\n mail = models.StringField(blank = True)\n\n # EQ\n Dull = models.BooleanField(\n blank = True,\n label = \"Dull, Bored\",\n initial = 0\n )\n Happy = models.BooleanField(\n blank = True,\n label = \"Happy, Satisfied\",\n initial = 0\n )\n Active = models.BooleanField(\n blank = True,\n label = \"Active, Alert\",\n initial = 0\n )\n Unhappy = models.BooleanField(\n blank = True,\n label = \"Unhappy, Dissatisfied\",\n initial = 0\n )\n Energetic = models.BooleanField(\n blank = True,\n label = \"Energetic, Excited\",\n initial = 0\n )\n Nervous = models.BooleanField(\n blank = True,\n label = \"Jittered, Nervous\",\n initial = 0\n )\n Calm = models.BooleanField(\n blank = True,\n label = \"Relaxed, Calm\",\n initial = 0\n )\n Secure = models.BooleanField(\n blank = True,\n label = \"Secure, At ease\",\n initial = 0\n )\n Passive = models.BooleanField(\n blank = True,\n label = \"Passive, Quiet\",\n initial = 0\n )\n Blue = models.BooleanField(\n blank = True,\n label = \"Blue, Uninspired\",\n initial = 0\n )\n Enthusiastic = models.BooleanField(\n blank = True,\n label = \"Enthusiastic, Inspired\",\n initial = 0\n )\n Tense = models.BooleanField(\n blank = True,\n label = \"Tense, Bothered\",\n initial = 0\n )\n\n\n# PAGES\nclass EQ(Page):\n @staticmethod\n def is_displayed(player):\n participant = player.participant\n bInvalidlen = participant.bInvalidlen\n\n # create low and high risk food list depending on assignment\n lNutri = participant.lNutri\n lSel_Items = participant.lSel_Items\n bShow = False\n score_count = 0\n sEQ_lowhigh = random.choice(['low','high'])\n participant.sEQ_lowhigh = sEQ_lowhigh\n print('The participant sees a ',sEQ_lowhigh,' risk product.')\n\n # aggregate decisions in low (Nutri = 1) and high (Nutri = 4 || Nutri = 5) risk food lists\n if sEQ_lowhigh == 'low':\n lLow = []\n score_count = 0\n for score in lNutri:\n if score == 1:\n lLow.append(score_count)\n score_count = score_count + 1\n for item in lSel_Items:\n for lowitem in lLow:\n if item == lowitem:\n bShow = True\n participant.lLowHigh = lLow\n\n else:\n lHigh = []\n score_count = 0\n for score in lNutri:\n if score == 4 or score == 5:\n lHigh.append(score_count)\n score_count = score_count + 1\n for item in lSel_Items:\n for highitem in lHigh:\n if item == highitem:\n bShow = True\n participant.lLowHigh = lHigh\n\n # show page if valid length and food items in list\n return bShow and not bInvalidlen\n\n form_model = 'player'\n form_fields = random.sample(C.Emotions,len(C.Emotions))\n\n @staticmethod\n def vars_for_template(player):\n participant = player.participant\n lSel_Items = participant.lSel_Items\n iTreat = participant.iRisk_treat\n lNutri = participant.lNutri\n \n # choose one item randomly and check whether it applies to the order condition \n lLowHigh = participant.lLowHigh\n randsel = random.randint(0,len(lSel_Items)-1)\n randItem = lSel_Items[randsel]\n while randItem not in lLowHigh:\n randsel = random.randint(0,len(lSel_Items)-1)\n randItem = lSel_Items[randsel]\n\n if iTreat == 0:\n info1 = C.sImagePath+'Nutri_'+str(lNutri[randItem])+'.png'\n else:\n info1 = C.sImagePath+'Risk_'+str(lNutri[randItem])+'.png'\n return dict(\n info1 = info1,\n Treatment = iTreat\n )\n\n\nclass Outro_Q(Page):\n @staticmethod\n def is_displayed(player):\n participant = player.participant\n bInvalidlen = participant.bInvalidlen\n return not bInvalidlen\n\n form_model = 'player'\n form_fields = [\n 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8','D9',\n 'QT1', 'QT2', 'QT3', 'QT4', 'QT5', 'QT6', 'QT7','QT8', 'QT9', 'QT10', 'QT11', 'QT12','QT13', 'QT14', \n 'V1','V2'\n ]\n\n @staticmethod\n def before_next_page(player, timeout_happened):\n participant = player.participant\n\n # validate questionnaire\n valid1 = int(int(player.V1)==2)\n valid2 = int(int(player.V2)==1)\n participant.validQuestionnaire = valid1 + valid2\n\n # calculate CR, EE and PA score if nothing went wrong, save NA otherwise\n try:\n CR_score = (int(player.QT1)+int(player.QT2)+int(player.QT3)+int(player.QT4)+int(player.QT5)+int(player.QT6))/6\n EE_score = (int(player.QT7)+int(player.QT8)+int(player.QT9))/3\n PA_score = (int(player.QT10)+int(player.QT11)+int(player.QT12)+int(player.QT13))/4\n except: \n CR_score = 'NA'\n EE_score = 'NA'\n PA_score = 'NA'\n\n participant.CR_score = CR_score\n participant.EE_score = EE_score\n participant.PA_score = PA_score\n\n\n# last page if not excluded\nclass Goodbye(Page):\n @staticmethod\n def is_displayed(player):\n participant = player.participant\n bInvalidlen = participant.bInvalidlen\n return not bInvalidlen\n\n form_model = 'player'\n form_fields = ['mail']\n\n\n# last page if excluded\nclass Exclude(Page):\n @staticmethod\n def is_displayed(player):\n participant = player.participant\n bInvalidlen = participant.bInvalidlen\n return bInvalidlen\n\npage_sequence = [EQ, Outro_Q, Goodbye, Exclude]","repo_name":"seefa13/Food_exp","sub_path":"Food_exp/Outro/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13516344037","text":"import json\nimport math\nimport os\nimport requests\nfrom datetime import datetime\n\n# Local imports\nimport config\nimport utils\nimport urllib3\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n# Global variables\nCITY_NAME = config.city_name\nCOUNTRY_NAME = config.country_name\n\n# Env variables\nHERE_API_KEY = os.getenv(\"here_api_key\")\nwifi_geolocation_cache = {}\n# Main method\ndef get_wifi_logs_geodata(input_file_wifi):\n\n # Initialize statistical dictionaries\n wifi_usage_per_block = {}\n max_wifi_usage_per_block = 1\n \n # General stats\n lines_read = 0\n lines_written = 0\n\n # Dummy year and month variables\n year = '2020'\n month = '01'\n\n # Toggles whether the input is geolocalized\n geolocalized_input = False\n\n # Cache the street name / number lat/long so we don't query the same address twice\n addresses_coordinates = {}\n \n # Open the wifi file\n with open(input_file_wifi) as filin_wifi:\n \n while True:\n try:\n\n # Extract the next line\n line = next(filin_wifi)\n line_elements = line.replace('\"','').strip('\\n').split(';')\n\n\n # First line: headers - Only open the geolocalized file if the input is not geolocalized\n if lines_read == 0:\n headers = line_elements\n print(\"Reading \" + input_file_wifi)\n if 'longitude' in headers and 'latitude' in headers:\n geolocalized_input = True\n else:\n filout_wifi_geolocated = open('output_data/wifi_geolocated.csv', 'w')\n filout_wifi_geolocated.write(';'.join(headers + ['latitude','longitude']) + '\\n')\n \n # General case: extract the line and parse the data\n else:\n line_dict = utils.extract_line(headers, line_elements)\n \n # Build the address\n address_street_type = line_dict['DUG']\n address_street_name = line_dict['DUF']\n address_street_number = line_dict['CIVICO']\n address_full = f'{address_street_number}, {address_street_type} {address_street_name}'\n\n # Parse the date\n session_date = datetime.strptime(line_dict['STARTDATE'].replace(',',''),\"%d/%m/%Y\")\n session_year = session_date.year\n session_month = session_date.month\n session_year_month = (session_year, session_month)\n\n # If no address, do not geolocalize. If there is, try the cache\n if address_street_name == '':\n geolocalization_success = False\n elif geolocalized_input:\n latitude = line_dict['latitude']\n longitude = line_dict['longitude']\n geolocalization_success = True\n elif address_full in addresses_coordinates:\n latitude, longitude = addresses_coordinates[address_full] \n geolocalization_success = True\n else:\n geolocalization_success, latitude, longitude = utils.query_geolocalization(address_full, CITY_NAME, COUNTRY_NAME, HERE_API_KEY)\n #print(f'Geolocalization for {address_full}: {geolocalization_success}, {latitude}, {longitude}')\n addresses_coordinates[address_full] = (latitude, longitude)\n\n # If we have a geolocalization, add the wifi usage to the stats\n if geolocalization_success:\n wifi_block_details = utils.get_city_block(longitude, latitude)\n wifi_block_name = wifi_block_details['name']\n \n # Add the wifi usage to the statistics\n if wifi_block_name not in wifi_usage_per_block:\n wifi_usage_per_block[wifi_block_name] = wifi_block_details\n wifi_usage_per_block[wifi_block_name]['usage_per_month'] = {}\n if session_year_month not in wifi_usage_per_block[wifi_block_name]['usage_per_month']:\n wifi_usage_per_block[wifi_block_name]['usage_per_month'][session_year_month] = {}\n wifi_usage_per_block[wifi_block_name]['usage_per_month'][session_year_month]['wifi_usage'] = 0\n wifi_usage_per_block[wifi_block_name]['usage_per_month'][session_year_month]['wifi_usage'] += float(line_dict['DOWNLOAD'])\n if wifi_usage_per_block[wifi_block_name]['usage_per_month'][session_year_month]['wifi_usage'] > max_wifi_usage_per_block:\n max_wifi_usage_per_block = wifi_usage_per_block[wifi_block_name]['usage_per_month'][session_year_month]['wifi_usage']\n\n # Save geolocalization if needed to avoid duplicate API calls\n if not geolocalized_input:\n filout_wifi_geolocated.write(';'.join(line_elements + [str(latitude), str(longitude)]) + '\\n')\n \n # Logging\n lines_read += 1\n if lines_read % 1000 == 0:\n print(f'Read {lines_read} lines')\n\n # Error handling\n except StopIteration:\n break\n except UnicodeDecodeError:\n print(f'Error: could not decode line {lines_read}')\n\n # Close the geolocalized file if needed\n #if not geolocalized_input:\n # filout_wifi_geolocated.close()\n #Removed this line because it was causing an issue\n # Open the output file\n with open('output_data/wifi.csv', 'w') as filout:\n\n # Print headers\n filout.write(utils.write_index_headers('wifi')+ '\\n')\n\n # Loop over the blocks and fill the lines\n for wifi_block in wifi_usage_per_block:\n wifi_usage_block_details = wifi_usage_per_block[wifi_block]\n for session_year_month in wifi_usage_per_block[wifi_block]['usage_per_month']:\n wifi_usage_per_block_index = (wifi_usage_block_details['usage_per_month'][session_year_month]['wifi_usage'] / max_wifi_usage_per_block) * 10\n line_out_elements = [str(i) for i in [wifi_usage_block_details['block_ID'], wifi_usage_block_details['administrative_subdivision'], session_year_month[0], session_year_month[1], wifi_usage_per_block_index]]\n filout.write(','.join(line_out_elements) + '\\n')\n lines_written += 1\n \n # Print some stats\n print(f'Read lines: {lines_read}')\n print(f'Written lines: {lines_written}')\n\n return wifi_usage_per_block\n\n\n\n\n# Module execution: launch main method\nif __name__ == '__main__':\n \n #get_wifi_geodata(config.city_wifi_input_file)\n get_wifi_logs_geodata(config.city_wifi_input_file_geolocated)\n\n","repo_name":"delahayethierry/dina","sub_path":"get_wifi_logs_geodata.py","file_name":"get_wifi_logs_geodata.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11682401643","text":"def str2int_range(str, min, max):\n try:\n val = int(str)\n except ValueError as e:\n raise ValueError(\"Must be a number.\") from None\n if (val < min) or (val > max):\n raise ValueError(\"Must be in the range %i-%i.\" % (min, max))\n return val\n\n\ndef parse_date(str):\n arr = str.split(\"/\")\n if len(arr) != 3:\n raise ValueError(\"Invalid date '%s'. Expected YY/MM/DD.\" % str)\n try:\n year = int(arr[0])\n except ValueError as e:\n raise ValueError(\"Invalid year '%s'. Must be a number.\" % arr[0])\n if year < 100:\n year = year + 2000\n\n try:\n mon = str2int_range(arr[1], 1, 12)\n except ValueError as e:\n raise ValueError(f\"Invalid month '{arr[1]}'. {e}\") from None\n\n try:\n day = str2int_range(arr[2], 1, 31)\n except ValueError as e:\n raise ValueError(f\"Invalid day '{arr[2]}'. {e}\") from None\n\n return (year, mon, day)\n\n\ndef parse_time(str):\n arr = str.split(\":\")\n if len(arr) != 3:\n raise ValueError(\"Invalid time '%s'. Expected hh:mm:ss.\" % str)\n try:\n hour = str2int_range(arr[0], 0, 23)\n except ValueError as e:\n raise ValueError(f\"Invalid hour '{arr[0]}'. {e}\") from None\n\n try:\n min = str2int_range(arr[1], 0, 59)\n except ValueError as e:\n raise ValueError(f\"Invalid minute '{arr[1]}'. {e}\") from None\n\n try:\n sec = str2int_range(arr[2], 0, 59)\n except ValueError as e:\n raise ValueError(f\"Invalid second '{arr[2]}'. {e}\") from None\n\n return (hour, min, sec)\n","repo_name":"glideinWMS/glideinwms","sub_path":"factory/tools/lib/gWftArgsHelper.py","file_name":"gWftArgsHelper.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"1436627422","text":"from django.urls import path\nfrom . import views\n\napp_name = \"contact_book\"\n\nurlpatterns = [\n path(\"checker/\", views.checker, name=\"checker\"),\n path(\"\", views.get_contact_book, name=\"contacts_book\"),\n path(\"create/\", views.create_contact, name=\"create_contact\"),\n path(\"change/\", views.change_contact, name=\"change_contact\"),\n path(\"find/\", views.find_by, name=\"find_contact\"),\n path(\"delete/\", views.delete_contact, name=\"delete_contact\"),\n path(\"birthday/\", views.get_birthday_contacts, name=\"get_birthday_contacts\"),\n path(\"detail/\", views.datail, name=\"detail\"),\n]\n","repo_name":"DioSWolF/web_command_project","sub_path":"personal_assistant/contact_book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6743246840","text":"from django.core.management.base import BaseCommand\nfrom conference.models import Microsite, NationalConferenceActivity\n\nclass Command(BaseCommand):\n help = \"Reset NPC Activity templates to generic\"\n\n def handle(self, **options):\n \"\"\"Reset NPC Activity templates to generic if they are not associated with a Microsite\"\"\"\n event_details_template = \"events/newtheme/event-details.html\"\n conference_details_template = \"events/newtheme/conference-details.html\"\n npc_microsite = Microsite.objects.get(is_npc=True)\n npc_activities = NationalConferenceActivity.objects.exclude(\n parent=npc_microsite.event_master,\n ).filter(\n publish_status=\"DRAFT\",\n template=conference_details_template\n )\n self.stdout.write(\"npc microsite is %s\" % npc_microsite)\n self.stdout.write(\"number of non-microsite npc activities is %s\" % npc_activities.count())\n self.stdout.write(\"\\n\")\n total = npc_activities.count()\n for i,npca in enumerate(npc_activities):\n npca.template = event_details_template\n npca.save()\n published_activity = npca.publish()\n published_activity.solr_publish()\n self.stdout.write(\"%s of %s Done.\" % (i,total))\n self.stdout.write(\"All Done!\")\n","repo_name":"furmanczyk5/Django-Enterprise-App","sub_path":"conference/management/commands/reset_npc_templates.py","file_name":"reset_npc_templates.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30117488127","text":"from __future__ import annotations\n\nfrom pygame import Vector2\nfrom typing import TYPE_CHECKING, Optional\n\nfrom ..facilityroom import DMFacilityRoom\nfrom utilities import UnlockPack, Effect\n\nif TYPE_CHECKING:\n from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"Foundry\",)\n\n################################################################################\nclass Foundry(DMFacilityRoom):\n\n def __init__(self, game: DMGame, position: Optional[Vector2] = None, level: int = 1):\n\n super().__init__(\n game, position,\n _id=\"ROOM-192\",\n name=\"Foundry\",\n description=(\n \"Increases DEF of monsters in adjacent rooms by {value}%.\"\n ),\n level=level,\n rank=5,\n unlock=UnlockPack.Advanced,\n effects=[\n Effect(name=\"def\", base=30, per_lv=2),\n ]\n )\n\n################################################################################\n def stat_adjust(self) -> None:\n \"\"\"Called automatically when a stat refresh is initiated.\"\"\"\n\n for room in self.adjacent_rooms + [self]:\n for monster in room.monsters:\n monster.increase_stat_pct(\"def\", self.effects[\"def\"] / 100) # Convert to percentage\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/rooms/FiveStar/Foundry.py","file_name":"Foundry.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14386798615","text":"from datetime import datetime\n\nimport requests\n\nAPI_ENDPOINT = \"https://tequila-api.kiwi.com\"\nAPI_KEY = \"7NGXLV5ie1F7XXLL7QsgHTKL3yyLrjx4\"\n\n\nclass FlightData:\n\n def __init__(self):\n self.data = {}\n\n\n\n\n def serachFlight(self, fly_from, fly_to):\n time1 = datetime(2022, 12, 28)\n time2 = datetime(2023, 1, 6)\n location_url = f\"{API_ENDPOINT}/v2/search\"\n headers = {\"apikey\": API_KEY, }\n params = {\n \"fly_from\": fly_from,\n \"fly_to\": fly_to,\n \"date_from\": time1.strftime(\"%d/%m/%Y\"),\n \"date_to\": time2.strftime(\"%d/%m/%Y\"),\n \"nights_in_dst_from\": 6,\n \"nights_in_dst_to\": 7,\n \"flight_type\": \"round\",\n \"one_for_city\": 1,\n \"max_stopovers\": 0,\n \"curr\": \"CAD\"\n }\n\n # response = requests.get(url=location_url, headers=headers, params=params)\n stringTemp = \"https://tequila-api.kiwi.com/v2/search?fly_from=YYZ&fly_to=LIR&dateFrom=28/12/2022&dateTo=06/01/2023\"\n response= requests.get(url=stringTemp,headers=headers)\n response.raise_for_status()\n # data = response.json()[\"data\"][0]\n # print(data[\"local_departure\"])\n # flight_data = {\n # \"price\":data[\"price\"],\n # \"origin_city\":data[\"route\"][0][\"cityFrom\"],\n # \"origin_airport\":data[\"route\"][0][\"flyFrom\"],\n # \"destination_city\":data[\"route\"][0][\"cityTo\"],\n # \"destination_airport\":data[\"route\"][0][\"flyTo\"],\n # \"link\": data[\"deep_link\"]\n # }\n # return flight_data\n print(response.json())\n","repo_name":"DhruvPatel-183/Cheap_Flight_Tracker","sub_path":"flight_data.py","file_name":"flight_data.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17510367349","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Exploratory Data Analysis (EDA)\n\n# All the X-ray images are stored in the 'COVID', 'Lung_Opacity', 'Viral Pneumonia', and 'Normal' directories, correspond to their X-ray images of their class. \n# \n# First we carry out the Exploratory Discovery Analysis (EDA) for our dataset. \n\n# In[1]:\n\n\n# Lung types with their corresponding directories\nlungTypes = [\n 'COVID', \n 'Lung_Opacity', \n 'Viral Pneumonia',\n 'Normal'\n]\n\n\n# In[2]:\n\n\nimport cv2\n\n# example image in each class\nimg1 = 'Images/COVID/COVID-1.png'\nimg2 = 'Images/Lung_Opacity/Lung_Opacity-1.png'\nimg3 = 'Images/Viral Pneumonia/Viral Pneumonia-1.png'\nimg4 = 'Images/Normal/Normal-1.png'\nsample_images_png = [img1, img2, img3, img4]\n\n# convert the images into numpy array, read in grayscale\nsample_images_read = [cv2.imread(img, 0) for img in sample_images_png]\n\n\n# In[3]:\n\n\n# Readimg example lung X-ray image from Covid, Lung Opacity, Viral Penumonia, and Normal patient\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 2*2 subplots \nfig, ax = plt.subplots(2, 2, figsize=(15, 10)) \n\nn = 0\nfor row in ax:\n for img in row:\n img.imshow(sample_images_read[n], 'gray') #show individual image\n img.set_title(lungTypes[n]) # set title for image\n n += 1\n\n\n# We can see there are similarities and differences between different lung types\n\n# In[4]:\n\n\n# The original pixel of the image is 299 * 299\nsample_images_read[0].shape\n\n\n# # Machine Learning\n\n# ## Preprocessing\n\n# In[5]:\n\n\nfrom pathlib import Path\n\n#empty lists\nlabels = [] \nimage_samples = []\n\npath = Path()\nfor lungtype in lungTypes: \n img_dir = path / 'Images' / lungtype # image directory\n for img in img_dir.iterdir(): # read each image in the image directory\n data = cv2.imread(str(img), 0).reshape(1, -1) # convert the 2D 299*299 to 1 dimension array of 89401\n image_samples.append(data) # append the images to list\n labels.append(lungtype) # append the class labels to list\n\n\n# In[6]:\n\n\n# covert the list to numpy array\nimage_samples = np.concatenate(image_samples)\nlabels = np.array(labels)\n\nprint(f\"Final shape of samples: {image_samples.shape}\")\n\n\n# In[7]:\n\n\n# there are total of 21165 samples of 299 * 299 pixels image in the samples\nprint(f\"No of image samples: {len(image_samples)}\")\n\n\n# ## Binary classification\n\n# As our objective is only to perform binary classification between \"COVID\" class and \"Others\" (Non-covid) class, the non-covid labels are preprocessed.\n\n# In[8]:\n\n\n# use certain samples only\nnp.random.seed(10)\nchoices = np.random.randint(len(labels), size=8000)\n\nX = image_samples[choices]\ny = labels[choices]\n\n\n# In[9]:\n\n\ndef convert_binaryclass(x):\n if x == 'COVID':\n return 1\n else:\n return 0\ny_binary = np.array(list(map(convert_binaryclass, y)))\n\n\n# In[10]:\n\n\n# example of y_binary\ny_binary\n\n\n# In[11]:\n\n\n# Test traing split\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y_binary,\n test_size=0.2,\n random_state=10,\n stratify=y_binary)\n\n\n# In[12]:\n\n\n# The y_labels distribution after the preprocessing\nimport pandas as pd\n\ndistribution = pd.DataFrame(y_train).value_counts(normalize=True)\ndistribution\n\n\n# In[13]:\n\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\npca_700 = PCA(n_components=700)\npca_700.fit(X)\n\nplt.grid()\nplt.plot(np.cumsum(pca_700.explained_variance_ratio_ * 100))\nplt.xlabel('Number of components')\nplt.ylabel('Explained variance')\nplt.show()\n\n\n# In[14]:\n\n\nexplained = np.cumsum(pca_700.explained_variance_ratio_ * 100)\nnp.min(np.where(explained > 90))\n\n\n# ## Transformation Pipeline\n\n# In[17]:\n\n\n# model pipeline \nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import IncrementalPCA, TruncatedSVD\nfrom sklearn.pipeline import Pipeline\n\npipeline = Pipeline([\n ('scaler', StandardScaler()), #standard scaling\n ('pca', IncrementalPCA(n_components=65, batch_size=100, copy=False)), # PCA Dimensionality reduction\n ])\n\n\n# In[18]:\n\n\npipeline.fit(X_train)\nX_train = pipeline.transform(X_train)\n\n\n# In[19]:\n\n\nX_train.shape\n\n\n# ## Model Training\n\n# ### Random Forest\n\n# In[20]:\n\n\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n\n\n# In[21]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier()\nforest.fit(X_train, y_train)\ny_train_pred = forest.predict(X_train) #prediciton based on X_train\n\n\n# In[22]:\n\n\ndef score_check(ytrue, ypred, name):\n print(f\"============= {name} Set Score ==========\")\n print(\"Accuracy:\", accuracy_score(ytrue, ypred))\n print(\"Precision:\", precision_score(ytrue, ypred, pos_label=1))\n print(\"Recall:\", recall_score(ytrue, ypred, pos_label=1))\n print(\"F1 Score:\", f1_score(ytrue, ypred, pos_label=1))\n\n\n# In[23]:\n\n\nscore_check(y_train, y_train_pred, 'Training')\n\n\n# In[24]:\n\n\n# for stratified cross validation\nfrom sklearn.model_selection import StratifiedKFold\n\nkf = StratifiedKFold(n_splits=3, shuffle=True, random_state=10)\n\n\n# In[25]:\n\n\n# Collections of all the cross validation scores\nfinal_scores = {}\n\n\n# In[26]:\n\n\nfrom sklearn.model_selection import cross_val_score\n\ndef cv_score_check(model, X, y, cv, model_name=None, scores_dict=None, use=False):\n scores = {}\n cross_val_accuracy = cross_val_score(model, X, y, cv=cv, scoring=\"accuracy\")\n cross_val_precision = cross_val_score(model, X, y, cv=cv, scoring=\"precision\")\n cross_val_recall = cross_val_score(model, X, y, cv=cv, scoring=\"recall\")\n cross_val_f1 = cross_val_score(model, X, y, cv=cv, scoring=\"f1\")\n\n print(\"Accuracy:\", cross_val_accuracy.mean())\n print(\"Precision:\", cross_val_precision.mean())\n print(\"Recall:\", cross_val_recall.mean())\n print(\"F1 Score:\", cross_val_f1.mean())\n \n if use: #for recording the scores\n scores['Accuracy'] = cross_val_accuracy.mean()\n scores['Precision'] = cross_val_precision.mean()\n scores['Recall'] = cross_val_recall.mean()\n scores['F1_Score'] = cross_val_f1.mean()\n scores_dict[model_name] = scores\n\n\n# In[27]:\n\n\ncv_score_check(forest, X_train, y_train, kf)\n\n\n# The model appears to be overfitting when using training set.\n# When using cross validation set, scoring slightly reduced.\n\n# #### Fine tuning hyperparameters\n\n# In[28]:\n\n\n# Define hyperparameters to be used in the randomized search cross validation \n\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom pprint import pprint\n\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 10, stop = 100, num = 10)]\n\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 60, num=10)]\nmax_depth.append(None)\n\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\npprint(random_grid)\n\n\n# In[29]:\n\n\n# Random search of parameters, using 3 fold cross validation, \n# search across 100 different combinations, and use all available cores\nrf_random = RandomizedSearchCV(estimator=forest,\n param_distributions=random_grid,\n n_iter=100,\n cv=kf,\n verbose=2,\n random_state=10,\n n_jobs = -1,\n scoring='f1')\n# Fit the random search model\nrf_random.fit(X_train, y_train)\n\n\n# In[30]:\n\n\n# best model of the random forest model\nforest_best = rf_random.best_estimator_\nprint(forest_best)\n\n\n# In[31]:\n\n\ncv_score_check(forest_best, X_train, y_train, kf, model_name='Random Forest', \n scores_dict=final_scores, use=True)\n\n\n# The fine-tuned model perform slightly better than the original model\n\n# ### Logistic Regression\n\n# In[32]:\n\n\nfrom sklearn.linear_model import LogisticRegression\n\nlogistic = LogisticRegression()\nlogistic.fit(X_train, y_train)\n\ny_train_pred = logistic.predict(X_train)\n\n\n# In[33]:\n\n\nscore_check(y_train, y_train_pred, 'Training')\n\n\n# In[34]:\n\n\n# CV score before fine-tuning\ncv_score_check(logistic, X_train, y_train, kf)\n\n\n# In[35]:\n\n\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.model_selection import GridSearchCV\n\nsolvers = ['newton-cg', 'lbfgs', 'liblinear']\npenalty = ['l2']\nc_values = [100, 10, 1.0, 0.1, 0.01]\n\n# define grid search\ngrid = dict(solver=solvers,penalty=penalty,C=c_values)\ngrid_search = GridSearchCV(estimator=logistic, \n param_grid=grid,\n n_jobs=-1,\n cv=kf,\n scoring='f1',\n error_score=0)\n\nlogistic_grid = grid_search.fit(X_train, y_train)\n\n# summarize results\nprint(\"Best: %f using %s\" % (logistic_grid.best_score_, logistic_grid.best_params_))\nmeans = logistic_grid.cv_results_['mean_test_score']\nstds = logistic_grid.cv_results_['std_test_score']\nparams = logistic_grid.cv_results_['params']\nfor mean, stdev, param in zip(means, stds, params):\n print(\"%f (%f) with: %r\" % (mean, stdev, param))\n\n\n# In[36]:\n\n\nlogistic_best = logistic_grid.best_estimator_\nprint(logistic_best)\n\n\n# In[37]:\n\n\n# CV score after fine-tuning\ncv_score_check(logistic_best, X_train, y_train, kf, model_name='Logistic Regression', \n scores_dict=final_scores, use=True)\n\n\n# ### SVM\n\n# In[38]:\n\n\n# SVM\nfrom sklearn.svm import SVC\n\nsvm = SVC()\nsvm.fit(X_train, y_train)\n\ny_train_pred = svm.predict(X_train)\nscore_check(y_train, y_train_pred, 'Training')\n\n\n# In[39]:\n\n\n# CV score before fine-tuning\ncv_score_check(svm, X_train, y_train, kf)\n\n\n# In[40]:\n\n\n# # SVM\nfrom sklearn.svm import SVC\n\nparam_grid = {'C':[0.1,1, 10, 100],'gamma':[0.0001,0.001,'scale'],'kernel':['rbf']}\nsvc = SVC(probability=True)\nsvm_grid = GridSearchCV(svc, param_grid, scoring='f1', cv=kf)\nsvm_grid.fit(X_train, y_train)\n\n\n# In[41]:\n\n\nsvm_best = svm_grid.best_estimator_\nprint(svm_best)\n\n\n# In[42]:\n\n\n# CV score after fine-tuning\ncv_score_check(svm_best, X_train, y_train, kf, model_name='SVM', \n scores_dict=final_scores, use=True)\n\n\n# ## Cross Validation Results Compare\n\n# In[43]:\n\n\nresult = pd.DataFrame(final_scores)\nresult\n\n\n# We can see that SVM has the best accuracy, precision, recall, and F1 score in the cross validation set\n\n# # ROC Curve\n\n# In[44]:\n\n\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.model_selection import cross_val_predict\n\n# Random Forest\ny_probas_forest = cross_val_predict(forest_best, X_train, y_train, cv=kf, method=\"predict_proba\")\ny_scores_forest = y_probas_forest[:, 1] # score = proba of positive class\nfpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train, y_scores_forest)\n\n# Logistic Regression\ny_probas_logistic = cross_val_predict(logistic_best, X_train, y_train, cv=kf, method=\"predict_proba\")\ny_scores_logistic = y_probas_logistic[:, 1] # score = proba of positive class\nfpr_logistic, tpr_logistic, thresholds_logistic = roc_curve(y_train, y_scores_logistic)\n\n# SVM\ny_scores_svm = svm_best.decision_function(X_train)\nfpr_svm, tpr_svm, thresholds_svm = roc_curve(y_train, y_scores_svm)\n\n\ndef plot_roc_curve(fpr, tpr, label=None): \n plt.plot(fpr, tpr, linewidth=1, label=label)\n plt.plot([0, 1], [0, 1], 'k--') # dashed diagonal\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n\n \nplot_roc_curve(fpr_forest, tpr_forest, \"Random Forest\") # Random Forest Plot\nplot_roc_curve(fpr_logistic, tpr_logistic, \"Logistic Regression\") # Random Forest Plot\nplt.plot(fpr_svm, tpr_svm, \"b:\", label=\"SVM\") #SVM Plot\nplt.title(\"ROC Curve\")\nplt.legend(loc=\"lower right\")\nplt.show()\n\n\n# ## ROC Scores\n\n# In[45]:\n\n\n# Random Forest\nroc_auc_forest = roc_auc_score(y_train, y_scores_forest)\n\n# Logistic Regression\nroc_auc_logistic = roc_auc_score(y_train, y_scores_logistic)\n\n# SVM\nroc_auc_svm = roc_auc_score(y_train, y_scores_svm)\n\nprint(f\"Random Forest ROC AUC: {roc_auc_forest}\")\nprint(f\"Logistic Regression ROC AUC: {roc_auc_logistic}\")\nprint(f\"SVM ROC AUC: {roc_auc_svm}\")\n\n\n# ## Test\n\n# In[46]:\n\n\nX_test = pipeline.transform(X_test)\n\n\n# In[47]:\n\n\n# Random Forest\nrf_y_test_pred = forest_best.predict(X_test)\nscore_check(y_test, rf_y_test_pred, 'Test')\n\n\n# In[48]:\n\n\n# Logistic regression\nlogistic_y_test_pred = logistic_best.predict(X_test)\nscore_check(y_test, logistic_y_test_pred, 'Test')\n\n\n# In[49]:\n\n\n# SVM\nsvm_y_test_pred = svm_best.predict(X_test)\nscore_check(y_test, svm_y_test_pred, 'Test')\n\n\n# We observed SVM model has the best performance.\n\n# ## Confusion Matrix\n\n# In[50]:\n\n\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns;\n\n\n# In[54]:\n\n\n# Random Forest\n\ncf_matrix = confusion_matrix(y_test, rf_y_test_pred)\nsns.heatmap(cf_matrix, annot=True, fmt='.4g');\n\n\n# In[55]:\n\n\n# Logistic Regression\n\ncf_matrix = confusion_matrix(y_test, logistic_y_test_pred)\nsns.heatmap(cf_matrix, annot=True, fmt='.4g');\n\n\n# In[56]:\n\n\n# SVM\n\ncf_matrix = confusion_matrix(y_test, svm_y_test_pred)\nsns.heatmap(cf_matrix, annot=True, fmt='.4g');\n\n\n# We can see the true negative (971) and true positive (136) are relatively high compared to the false positive (26) and false negative (67). \n\n# In[ ]:\n\n\n\n\n","repo_name":"ckng0221/COVID-19_Lungs_Classification","sub_path":"Covid_Classification.py","file_name":"Covid_Classification.py","file_ext":"py","file_size_in_byte":13823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16060828488","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('customers', '0003_customer_card_details'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='feedback',\n field=models.CharField(blank=True, max_length=16, null=True, choices=[(b'UP', b'Happy'), (b'DOWN', b'Unhappy')]),\n ),\n migrations.AlterField(\n model_name='order',\n name='status',\n field=models.CharField(default=b'AC', max_length=16, verbose_name=b'Status', choices=[(b'AC', b'Active'), (b'SH', b'Shipped'), (b'PA', b'Paused'), (b'CA', b'Canceled'), (b'ER', b'Failed'), (b'DE', b'Declined')]),\n ),\n migrations.AlterField(\n model_name='preferences',\n name='package',\n field=models.CharField(default=b'WB', max_length=16, verbose_name=b'Packaging method', choices=[(b'GR', b'Grounded (250g)'), (b'WB', b'Wholebeans (250g)'), (b'DR', b'Drip bags (x10)')]),\n ),\n ]\n","repo_name":"webexpert0727/ReactJS_Python","sub_path":"customers/migrations/0004_auto_20160204_0743.py","file_name":"0004_auto_20160204_0743.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72040747920","text":"#!/usr/bin/python3\n'''\nCreated on Mar 7, 2017\n\n@author: famez\n\n\nThis file is part of Intrusion Dectection System Project.\n\nIntrusion Dectection System Project is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nIntrusion Dectection System Project is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with Intrusion Dectection System Project. If not, see .\n\n\n'''\nimport sys\nimport re\nimport os\nimport smtplib\nimport time,datetime\nfrom scapy.all import *\nfrom optparse import OptionError\nfrom optparse import OptionGroup\nfrom optparse import OptionParser\nimport Ifaddr\nfrom socket import AF_INET\nimport struct\n\n\nBROADCAST_ADDR = \"10.1.1.255\"\nBROADCAST_MAC=\"ff:ff:ff:ff:ff:ff\"\n\nFIFO_NAME = \"/tmp/notifications\"\n\n\n#Overwritten IP packet so as to get answers from broadcast addresses\nclass IPModified(IP):\n name = \"IPMOD\"\n def answers(self, other):\n val = super(IP, self).answers(other)\n if val:\n return val\n if(hasattr(self, \"brdcastAddr\")):\n if (other.dst == self.brdcastAddr):\n return self.payload.answers(other.payload)\n return 0\n def setBrcastAddress(self, addr):\n self.brdcastAddr = addr\n\nclass IDS():\n \n def fakeIPPacket(self):\n conf.checkIPaddr = False;\n \n #Used customize ip packet which gets broadcast responses\n split_layers(Ether, IP, type=2048)\n bind_layers( Ether, IPModified, type=2048)\n \n #Same for ICMP\n split_layers( IP, ICMP, frag=0, proto=1)\n bind_layers( IPModified, ICMP, frag=0, proto=1)\n \n def unFakeIPPacket(self):\n conf.checkIPaddr = True;\n \n #Used customize ip packet which gets broadcast responses\n split_layers(Ether, IPModified, type=2048)\n bind_layers( Ether, IP, type=2048)\n \n #Same for ICMP\n split_layers( IPModified, ICMP, frag=0, proto=1)\n bind_layers( IP, ICMP, frag=0, proto=1)\n \n def getBrcastAddr(self, iface):\n for i in Ifaddr.get_network_interfaces():\n if i.name == iface:\n return i.brdcastAddress\n return \"\"\n def getLocalAddr(self, iface):\n for i in Ifaddr.get_network_interfaces():\n if i.name == iface:\n return i.addresses[AF_INET][0]\n return \"\"\n def getNetmask(self, iface):\n for i in Ifaddr.get_network_interfaces():\n if i.name == iface:\n return i.netmask\n return \"\"\n \n\n def __init__(self):\n self.version = \"0.1\"\n self.whitelist_file = \"\"\n self.log = False\n self.verbose = False\n \n def detectMachines(self):\n (opts, args) = self.__handleArguments()\n if opts.macadd:\n macs = opts.macadd.split(\",\")\n for x in macs:\n self.__writeWhitelist(x)\n elif opts.macremove:\n macs = opts.macremove.split(\",\")\n for x in macs:\n self.__removeWhitelist(x)\n else:\n self.__detectMachinesNetwork()\n \n def __startARPScan(self):\n results, unanswered = srp(Ether(dst=BROADCAST_MAC)/ARP(op=ARP.who_has, pdst=self.opts.ip), iface=\"eth0\", timeout=self.opts.timeout);\n machines={}\n for result in results:\n answer = result[1]\n machines[answer.psrc] = answer.src\n \n return machines\n \n def __startNMAPScan(self):\n srcPort = random.randint(1025,65534)\n dstPort = random.randint(0,1024)\n results, unanswered = srp(Ether(dst=getmacbyip(self.opts.ip))/IP(dst=self.opts.ip)/TCP(sport=srcPort,dport=dstPort,flags=\"S\"), iface=\"eth0\",timeout=self.opts.timeout)\n machines={}\n for result in results:\n answer = result[1]\n machines[answer[IP].src] = answer[Ether].src\n return machines\n \n def __startPINGScan(self):\n if self.opts.bcast: #If we want to make a broadcast ping. Good for local networks, although some equipments may not response \n \n self.fakeIPPacket()\n brcastAddr = self.getBrcastAddr(\"eth0\")\n ip = IPModified(dst=brcastAddr)\n ip.setBrcastAddress(brcastAddr)\n results, unanswered = srp(Ether(dst=BROADCAST_MAC)/ip/ICMP(type='echo-request'), iface=\"eth0\", timeout=self.opts.timeout)\n machines={}\n for result in results:\n result[0].show()\n answer = result[1]\n answer.show()\n machines[answer[IPModified].src] = answer[Ether].src\n self.unFakeIPPacket()\n return machines\n else: #If we don´t know the MAC, it is a problem as the framework will make an ARP request for every unknown MAC\n results, unanswered = srp(Ether()/IP(dst=self.opts.ip)/ICMP(type='echo-request'), iface=\"eth0\", timeout=self.opts.timeout)\n machines={}\n for result in results:\n result[0].show()\n answer = result[1]\n answer.show()\n machines[answer[IP].src] = answer[Ether].src\n return machines\n \n scanMethods = {'ARP' : __startARPScan, 'NMAP' : __startNMAPScan, 'PING' : __startPINGScan}\n\n def __scanNetwork(self):\n machines={}\n if self.opts.scan in self.scanMethods:\n machines = self.scanMethods[self.opts.scan](self)\n else:\n machines = self.__startARPScan()\n \n return machines\n\n def __detectMachinesNetwork(self):\n machines = self.__scanNetwork()\n whitelist = self.__read_file()\n email_msg = \"\"\n macs_detected = []\n malicious_macs = []\n for ip,mac in machines.items(): \n if self.log:\n self.__writeLog(\"-------\") \n if not mac in whitelist: \n msg = \"Mac \" + mac +\" is not in the whilelist!! IP: \" + ip\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg)\n if self.notify:\n self.__notify_to_session(msg)\n malicious_macs.append(mac)\n if mac in macs_detected:\n msg = \"Mac \" + mac +\" duplicated!! IP: \" + ip\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg)\n if self.notify:\n self.__notify_to_session(msg)\n malicious_macs.append(mac)\n macs_detected.append(mac)\n if self.opts.emailto:\n self.__sendEmail(malicious_macs, self.opts)\n\n\n\n def __handleArguments(self,argv=None):\n \"\"\"\n This function parses the command line parameters and arguments\n \"\"\"\n\n parser = OptionParser()\n if not argv:\n argv = sys.argv\n\n mac = OptionGroup(parser, \"Mac\", \"At least one of these \"\n \"options has to be provided to define the machines\")\n\n mac.add_option('--ma','--macadd', action='store', dest='macadd', help='Add mac to whitelist')\n mac.add_option('--mr','--macremove', action='store', dest='macremove', help='Remove mac from whitelist')\n\n\n email = OptionGroup(parser, \"Email\", \"You need user,password,server and destination\"\n \"options has to be provided to define the server send mail\")\n\n email.add_option('-u','--user', action='store', dest='user', help='User mail server')\n email.add_option('--pwd','--password', action='store', dest='password', help='Password mail server')\n email.add_option('-s','--server', action='store', dest='server', help='mail server')\n email.add_option('-p','--port', action='store', default='25', dest='port', help='Port mail server')\n email.add_option('--et','--emailto', action='store', dest='emailto', help='Destination E-mail')\n \n scan = OptionGroup(parser, \"Scan Parameters\", \"Scan type can be ARP method, NMAP method or PING method\")\n scan.add_option('--sc','--scan', action='store', dest='scan', help='Scan type. Can be ARP, NMAP or PING')\n scan.add_option('-t','--timeout', action='store', default=2, dest='timeout', help='Scan timeout')\n scan.add_option('-b','--broadcast', action='store_true', default=False, dest='bcast', help='Broadcast IP Address')\n\n parser.add_option('-r','--range', action='store', dest='ip', help='Secure network range ')\n parser.add_option('--wl','--whitelist', action='store', default='whitelist.txt' , dest='whitelist_file', help='File have Mac whitelist ')\n parser.add_option('-l','--log', action='store_true', default=False, dest='log', help='Log actions script')\n parser.add_option('-v','--verbose', action='store_true', default=False, dest='verbose', help='Verbose actions script')\n parser.add_option('-n','--notify', action='store_true', default=False, dest='notify', help='Notify to user')\n\n\n parser.add_option_group(mac)\n parser.add_option_group(email)\n parser.add_option_group(scan)\n\n (opts, args) = parser.parse_args()\n\n self.log = opts.log\n self.verbose = opts.verbose\n self.notify = opts.notify\n self.whitelist_file = opts.whitelist_file\n \n if not opts.ip: #Detect local address\n localAddr = self.getLocalAddr(\"eth0\")\n netmask = self.getNetmask(\"eth0\")\n \n localAddr_byte_array = inet_pton(AF_INET, localAddr)\n netmask_byte_array = inet_pton(AF_INET, netmask)\n \n localAddrInt = struct.unpack('!I', localAddr_byte_array)[0]\n netmaskInt = struct.unpack('!I', netmask_byte_array)[0]\n \n networkAddrInt = localAddrInt & netmaskInt\n \n networkAddr_byte_array = struct.pack('!I', networkAddrInt)\n \n networkAddr = inet_ntop(AF_INET, networkAddr_byte_array) #Network address calculated\n \n prefix = 0\n while (((netmaskInt >> prefix) & 1) == 0 ):\n prefix += 1 \n prefix = 32 - prefix #Prefix calculated\n \n netAddrStr = networkAddr + \"/\" + str(prefix)\n print (netAddrStr)\n opts.ip = netAddrStr\n\n if opts.user or opts.password or opts.server or opts.emailto:\n if not all([opts.user, opts.password,opts.server,opts.emailto]):\n errMsg = \"missing some email option (-u, --pwd, -s, --et), use -h for help\" \n parser.error(errMsg)\n self.__writeLog(errMsg)\n sys.exit(-1)\n \n self.opts = opts;\n \n return opts, args\n\n\n def __sendEmail(self,alert_mac,opts):\n \"\"\"\n This function send mail with the report\n \"\"\"\n header = 'From: %s\\n' % opts.user\n header += 'To: %s\\n' % opts.emailto\n if alert_mac:\n header += 'Subject: New machines connected\\n\\n'\n message = header + 'List macs: \\n '+str(alert_mac)\n else:\n header += 'Subject: No intruders - All machines known \\n\\n'\n message = header + 'No intruders'\n\n server = smtplib.SMTP(opts.server+\":\"+opts.port)\n server.starttls()\n server.login(opts.user,opts.password)\n if self.verbose or self.log:\n debugemail = server.set_debuglevel(1)\n if self.verbose:\n self.__consoleMessage(debugemail)\n problems = server.sendmail(opts.user, opts.emailto, message)\n print (problems)\n server.quit()\n\n\n def __consoleMessage(self,message):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n print ('['+st+'] '+str(message))\n\n\n def __writeLog(self,log):\n \"\"\"\n This function write log\n \"\"\"\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n try:\n file_read = open('/var/log/ids.txt', 'a+')\n file_read.write('['+st+'] '+log+\"\\n\")\n file_read.close()\n except IOError:\n msg = 'ERROR: Cannot open'+ self.whitelist_file\n if self.verbose:\n self.__consoleMessage(msg)\n sys.exit(-1)\n\n\n def __writeWhitelist(self,mac):\n \"\"\"\n This function add newmac to whitelist\n \"\"\"\n if re.match(\"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", mac.lower()):\n if os.path.isfile(self.whitelist_file):\n try:\n file_read = open(self.whitelist_file, 'a')\n file_read.write(mac+\"\\n\")\n file_read.close()\n msg = \"Mac: \"+ mac + \" add correctly\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n except IOError:\n print \n msg = 'ERROR: Cannot open'+ self.whitelist_file\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n else:\n print (self.whitelist_file)\n msg = \"ERROR: The Whitelist file \"+ self.whitelist_file+ \" doesn't exist!\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n else:\n msg = \"ERROR: The Mac \"+ mac +\" not valid!\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n\n def __removeWhitelist(self,mac):\n \"\"\"\n This function remove newmac from whitelist\n \"\"\"\n if re.match(\"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", mac.lower()):\n if os.path.isfile(self.whitelist_file):\n try:\n file_read = open(self.whitelist_file, 'r')\n lines = file_read.readlines()\n file_read.close()\n file_read = open(self.whitelist_file, 'w')\n for line in lines:\n if line.strip() != mac:\n file_read.write(line)\n file_read.close()\n msg = \"Mac \"+mac+\" remove correctly\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n except IOError:\n msg = 'ERROR: Cannot open '+ self.whitelist_file\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n else:\n msg = \"ERROR: The Whitelist file \"+ self.whitelist_file+ \" doesn't exist!\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n else:\n msg = \"ERROR: The Mac \"+ mac + \" doesn't exist!\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n\n def __read_file(self):\n \"\"\"\n This function read the whitelist\n \"\"\"\n whitelist = []\n if os.path.isfile(self.whitelist_file):\n try:\n file_read = open(self.whitelist_file, 'r')\n for line in file_read:\n if re.match(\"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", line.strip().lower()):\n whitelist.append(line.strip())\n return whitelist\n except IOError:\n msg = 'ERROR: Cannot open '+ self.whitelist_file\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n else:\n print (self.whitelist_file)\n msg = \"ERROR: The Whitelist file \"+ self.whitelist_file+ \" doesn't exist!\"\n if self.verbose:\n self.__consoleMessage(msg)\n if self.log:\n self.__writeLog(msg) \n sys.exit(-1)\n \n def __notify_to_session(self, text):\n try:\n fifo = open(FIFO_NAME, \"w\")\n fifo.write(text)\n except OSError as err:\n print (\"Error: \" + err.errno)\n\nif __name__ == '__main__':\n ids = IDS()\n ids.detectMachines()\n","repo_name":"famez/Intrusion-Detection-System-ESII-","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":17448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28317988343","text":"import pygame\nfrom src.config.settings import *\nfrom src.functions.helper import import_folder\nfrom src.debug.debugging_tool import debug\n\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, pos, groups, obstacle_sprites, create_attack,\n destroy_attack, create_spell, destroy_spell):\n super().__init__(groups)\n self.image = pygame.image.load(\n 'src/graphics/test/player.png').convert_alpha()\n self.rect = self.image.get_rect(topleft=pos)\n self.hitbox = self.rect.inflate(0, -26)\n\n # graphics setup\n self.import_player_assets()\n self.status = 'down'\n self.frame_index = 0\n self.animation_speed = 0.1\n\n # movement\n self.direction = pygame.math.Vector2()\n self.attacking = False\n self.attack_cd = 250\n self.attack_time = 0\n self.obstacle_sprites = obstacle_sprites\n\n # weapon\n self.create_attack = create_attack\n self.destroy_attack = destroy_attack\n self.weapon_index = 0\n self.weapon = list(WEAPON_DATA.keys())[self.weapon_index]\n self.can_switch_weapon = True\n self.weapon_switch_duration = None\n self.weapon_switch_cd = 200\n\n # spells\n self.create_spell = create_spell\n self.destroy_spell = destroy_spell\n self.spell_index = 0\n self.spell = list(SPELL_DATA.keys())[self.spell_index]\n self.can_switch_spell = True\n self.spell_switch_duration = None\n self.spell_switch_cd = 0\n\n # stats\n self.base_stats = PLAYER_DATA\n self.hp = self.base_stats['hp']\n self.sta = self.base_stats['sta']\n self.mp = self.base_stats['mp']\n self.exp = 10\n self.spd = self.base_stats['dex'] * 0.8\n\n def import_player_assets(self):\n self.animations = {\n 'up': [],\n 'down': [],\n 'left': [],\n 'right': [],\n 'right_idle': [],\n 'left_idle': [],\n 'up_idle': [],\n 'down_idle': [],\n 'right_attack': [],\n 'left_attack': [],\n 'up_attack': [],\n 'down_attack': [],\n }\n\n for animation in self.animations.keys():\n self.animations[animation] = import_folder(\n PLAYER_DATA['graphics'] + animation)\n\n def input(self):\n if not self.attacking:\n keys = pygame.key.get_pressed()\n\n # move up and down\n if keys[pygame.K_w]:\n self.direction.y = -1\n self.status = 'up'\n elif keys[pygame.K_s]:\n self.direction.y = +1\n self.status = 'down'\n else:\n self.direction.y = 0\n\n # move left and right\n if keys[pygame.K_a]:\n self.direction.x = -1\n self.status = 'left'\n elif keys[pygame.K_d]:\n self.direction.x = +1\n self.status = 'right'\n else:\n self.direction.x = 0\n\n # atack\n if keys[pygame.K_e] and not self.attacking:\n self.attacking = True\n self.attack_time = pygame.time.get_ticks()\n self.create_attack()\n\n # spell\n if keys[pygame.K_r] and not self.attacking:\n self.attacking = True\n self.attack_time = pygame.time.get_ticks()\n self.create_spell(pow=SPELL_DATA[self.spell]['pow'] *\n self.base_stats['int'],\n mp=SPELL_DATA[self.spell]['mp'],\n style=self.spell)\n\n # switch weapons\n if self.can_switch_weapon:\n for weapon_index in range(len(WEAPON_DATA)):\n if keys[pygame.K_1 + weapon_index]:\n self.can_switch_weapon = False\n self.weapon_switch_duration = pygame.time.get_ticks()\n self.weapon_index = weapon_index\n self.weapon = list(\n WEAPON_DATA.keys())[self.weapon_index]\n\n # switch spells\n if self.can_switch_spell:\n for spell_index in range(len(SPELL_DATA)):\n if keys[pygame.K_6 + spell_index]:\n self.can_switch_spell = False\n self.spell_switch_duration = pygame.time.get_ticks()\n self.spell_index = spell_index\n self.spell = list(SPELL_DATA.keys())[self.spell_index]\n\n def get_status(self):\n # idle status\n if self.direction.x == 0 and self.direction.y == 0:\n if not 'idle' in self.status and not 'attack' in self.status:\n self.status = self.status + '_idle'\n\n # attack staus\n if self.attacking:\n self.direction.x = 0\n self.direction.y = 0\n if not 'attack' in self.status:\n if 'idle' in self.status:\n self.status = self.status.replace('_idle', '_attack')\n else:\n self.status = self.status + '_attack'\n else:\n if 'attack' in self.status:\n self.status = self.status.replace('_attack', '')\n\n def move(self, speed):\n if self.direction.magnitude() != 0:\n self.direction = self.direction.normalize()\n\n self.hitbox.x += self.direction.x * speed * (\n 1 + self.base_stats['dex'] / 10)\n self.collision('horizontal')\n self.hitbox.y += self.direction.y * speed * (\n 1 + self.base_stats['dex'] / 10)\n self.collision('vertical')\n self.rect.center = self.hitbox.center\n\n def collision(self, direction):\n if direction == 'horizontal':\n for sprite in self.obstacle_sprites:\n # if obstacle collides with player horizontally\n if sprite.hitbox.colliderect(self.hitbox):\n if self.direction.x > 0: # mov right\n self.hitbox.right = sprite.hitbox.left\n\n if self.direction.x < 0: # mov left\n self.hitbox.left = sprite.hitbox.right\n\n if direction == 'vertical':\n for sprite in self.obstacle_sprites:\n # if obstacle collides with player vertically\n if sprite.hitbox.colliderect(self.hitbox):\n if self.direction.y > 0: # mov down\n self.hitbox.bottom = sprite.hitbox.top\n\n if self.direction.y < 0: # mov up\n self.hitbox.top = sprite.hitbox.bottom\n\n def cooldowns(self):\n current_time = pygame.time.get_ticks()\n\n if self.attacking:\n if current_time - self.attack_time >= self.attack_cd:\n self.attacking = False\n self.destroy_attack()\n\n if not self.can_switch_weapon:\n if current_time - self.weapon_switch_duration >= self.weapon_switch_cd:\n self.can_switch_weapon = True\n\n if not self.can_switch_spell:\n if current_time - self.spell_switch_duration >= self.spell_switch_cd:\n self.can_switch_spell = True\n\n def animate(self):\n animation = self.animations[self.status]\n\n # loop over the frame index\n self.frame_index += self.animation_speed\n if self.frame_index >= len(animation): self.frame_index = 0\n\n # set image\n self.image = animation[int(self.frame_index)]\n self.rect = self.image.get_rect(center=self.hitbox.center)\n\n def update(self):\n self.input()\n self.move(self.spd)\n self.cooldowns()\n self.get_status()\n self.animate()","repo_name":"ricardossiqueira/2d_game","sub_path":"src/classes/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"47338784658","text":"from cms.models.pluginmodel import CMSPlugin\n\nfrom django.db import models\n\nfrom .defaults import user_name as default_name\nfrom .utils import has_proper_name, get_proper_name\n\nclass TaccsiteSample(CMSPlugin):\n # Overwrites\n\n def get_short_description(self):\n return 'Hello, […]'\n\n # Fields\n\n \"\"\"\n Components > \"Sample (Greet User)\" Model\n https://url.to/docs/components/sample/\n \"\"\"\n guest_name = models.CharField(\n max_length=50,\n default=default_name,\n help_text=f'If user is logged in they are greeted by their name. If not logged in, they are greeted as this value. If this value is blank, they are greeted as \"{default_name}\".',\n # To change the widget, a new Form class is required\n # FAQ: Wesley B searched for hours to find this important information\n # SEE: http://disq.us/p/210zgp2\n # SEE: [`TaccsiteSamplePlugin.form`](./cms_plugins.py)\n # widget=forms.TextInput(attrs={'placeholder': 'Search'}),\n blank=True\n )\n\n # Custom\n\n def get_name(self, user=None):\n \"\"\"Get name by which to greet the user.\n\n :param user: Django user object\n\n :rtype: str\n :returns: Name of authenticated user or the name for any guest\n \"\"\"\n if has_proper_name(user):\n name = get_proper_name(user)\n elif user.is_authenticated:\n name = user.username\n elif bool(self.guest_name):\n name = self.guest_name\n else:\n name = default_name\n\n return name\n","repo_name":"TACC/Core-CMS","sub_path":"taccsite_cms/contrib/taccsite_sample/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"25794362223","text":"import uuid\nimport click\n\n\n@click.command()\n@click.argument('filename', type=str, default='ml_experiment.yaml')\ndef main(filename):\n with open(filename, 'w') as file:\n file.write(f'id: {uuid.uuid4()}\\n')\n file.write('algorithms:\\n')\n file.write(' -\\n')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ArishSultan/rna_experiment_1","sub_path":"src/models/create_train_config.py","file_name":"create_train_config.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13638136646","text":"#!/usr/bin/python3\n\"\"\"script displays body of response(in utf-8) of POST request to url\nw email as param\"\"\"\nimport urllib.request\nimport urllib.parse\nimport sys\n\n\nif __name__ == \"__main__\":\n data = urllib.parse.urlencode({\"email\": sys.argv[2]})\n data = data.encode('ascii')\n req = urllib.request.Request(sys.argv[1], data)\n\n with urllib.request.urlopen(req) as response:\n response_email = response.read().decode('utf-8')\n print(response_email)\n","repo_name":"Serinajefthas/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22419730245","text":"\"\"\"\nGiven an array of strings strs, group the anagrams together. You can return the answer in any order.\n\nAn Anagram is a word or phrase formed by rearranging the letters of a different word or phrase,\ntypically using all the original letters exactly once.\n\"\"\"\nfrom collections import defaultdict\nimport unittest\n\n\nclass Solution:\n def groupAnagrams(self, strs: str):\n res = defaultdict(list)\n for s in strs:\n temp = list(s)\n temp.sort()\n res[tuple(temp)].append(s)\n return list(res.values())\n\n # Time complexity: O(NKlogK), N is the length of strs K is the\n # maximum length of a string in strs. The outer loop has complexity\n # O(N) as we iterate through each string. sort each string in O(KlogK) time.)\n # Space complexity: O(NK), the total information stored in ans.\n\n def groupAnagrams1(self, strs: str):\n ans = defaultdict(list)\n for s in strs:\n count = [0]*26\n for c in s:\n count[ord(c) - ord('a')] +=1\n ans[tuple(count)].append(s)\n return list(ans.values())\n\n # time complexity: O(NK), N is the length of strs K is the\n # maximum length of a string in strs. Counting each string is linear in\n # the size of the string, and we count every string.\n # Space complexity: O(NK)\n\n\nclass testSolution(unittest.TestCase):\n def test0(self):\n strs = [\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"]\n output = [[\"eat\",\"tea\",\"ate\"],[\"tan\",\"nat\"],[\"bat\"]]\n self.assertEqual(Solution().groupAnagrams(strs), output)\n self.assertEqual(Solution().groupAnagrams1(strs), output)\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"Movahe/Leetcode-problems-sovled","sub_path":"49 Group Anagrams.py","file_name":"49 Group Anagrams.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17012634039","text":"import cv2\nfrom PIL import Image\nimport pytesseract\nimport os\n\ndirectory = \"./images\"\n\n\ndef loadImagesFromFolder(folder):\n print(\"Nacitani souboru ve slozce\" + folder)\n images = []\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder, filename))\n if img is not None:\n images.append(img)\n print(\"Dokonceno, startuji generaci textu pomoci openCV.\")\n return images\n\n\ndef generateTextFromImage(image):\n f = open(\"output.txt\", \"a\")\n content = \"\\n\" + pytesseract.image_to_string(image)\n f.write(content)\n f.close\n print(\"Strana vygenerovana.\")\n\n\nif __name__ == \"__main__\":\n images = loadImagesFromFolder(directory)\n\n for image in images:\n generateTextFromImage(image)\n","repo_name":"valajczech/getext","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36026568087","text":"from math import sin\n\ndef is_power_of_two(n):\n # replace the pass statement with your code\n if n == 1:\n return True\n elif n % 2 == 0: \n return is_power_of_two(n / 2)\n else: \n return False\n\n\ndef fib(n):\n # replace the pass statement with your code\n if n == 0 or n == 1:\n return 1\n elif n >= 2: \n return fib(n - 1) + fib(n - 2)\n\ndef find_root_sqrt2(epsilon, a, b):\n # replace the pass statement with your code\n start_pt = (a) ** 2 - 2\n end_pt = (b) ** 2 - 2\n if start_pt < epsilon and start_pt > -1 * epsilon:\n return start_pt\n elif end_pt < epsilon and end_pt > -1 * epsilon:\n return end_pt \n elif start_pt <= 0 and end_pt >= 0: \n c = (a + b) / 2\n mid_pt = c ** 2 - 2\n if mid_pt < epsilon and mid_pt > -1 * epsilon:\n return c\n elif start_pt < 0 and mid_pt > 0:\n return find_root_sqrt2(epsilon, a, c)\n elif mid_pt < 0 and end_pt > 0: \n return find_root_sqrt2(epsilon, c, b)\n else: \n print(\"Invalid Inputs\")\n\ndef find_root(func, epsilon, a, b):\n # replace the pass statement with your code\n start_pt = func(a)\n end_pt = func(b)\n if start_pt < epsilon and start_pt > -1 * epsilon:\n return start_pt\n elif end_pt < epsilon and end_pt > -1 * epsilon:\n return end_pt \n elif start_pt <= 0 and end_pt >= 0: \n c = (a + b) / 2\n mid_pt = func(c)\n if mid_pt < epsilon and mid_pt > -1 * epsilon:\n return c\n elif start_pt < 0 and mid_pt > 0:\n return find_root(func, epsilon, a, c)\n elif mid_pt < 0 and end_pt > 0: \n return find_root(func, epsilon, c, b)\n else: \n print(\"Invalid Inputs\")\n\ndef sinpoint5(x):\n return sin(x) - 0.5\n\ndef root2(x): \n return x ** 2 - 2 \n\n\nt0 = {\"key\":\"node0\",\n \"val\":27,\n \"children\":[]}\n\nt1 = {\"key\":\"node0\",\n \"val\":1,\n \"children\":[{\"key\":\"node0\",\n \"val\":2,\n \"children\":[{\"key\":\"node0\",\n \"val\":3,\n \"children\":[]}]},\n {\"key\":\"node0\",\n \"val\":4,\n \"children\":[]},\n {\"key\":\"node0\",\n \"val\":5,\n \"children\":[]}]}\n\n\ndef count_leaves(t):\n '''\n Count the number of leaves in the tree rooted at t\n \n Inputs: (dictionary) a tree\n \n Returns: (integer) number of leaves in t\n '''\n assert t is not None\n\n if not t[\"children\"]:\n return 1\n\n num_leaves = 0\n for kid in t[\"children\"]:\n num_leaves += count_leaves(kid)\n\n return num_leaves\n\n\ndef add_values(t):\n # replace the pass statement with your code\n assert t is not None\n\n if not t[\"children\"]:\n return t[\"val\"]\n\n values = 0\n values += t[\"val\"]\n for kid in t[\"children\"]:\n values += add_values(kid)\n\n return values \n","repo_name":"lbvalcke/Classwork","sub_path":"lbvalcke-master-122/lbvalcke-master-a3fa9d442dc280c08a0ff3fd54751bb1bdff6871/lab8/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22605439945","text":"from django.shortcuts import redirect, render\nfrom . models import Category,Photo \nfrom django.views.generic.edit import DeleteView\nfrom django.urls import reverse_lazy\n\n\n\ndef galler(request):\n category = request.GET.get('category')\n if category == None:\n photos = Photo.objects.all()\n else:\n photos = Photo.objects.filter(category__name=category)\n\n\n\n categories = Category.objects.all()\n \n context = {'categories': categories, 'photos':photos}\n return render(request, 'galapp/gallery.html', context)\n\ndef add(request):\n categories = Category.objects.all()\n if request.method == 'POST':\n data = request.POST\n image = request.FILES.get('image')\n\n print('date:', data)\n print('image:', image)\n\n if data['category'] != 'none':\n category = Category.objects.get(id=data['category'])\n elif data['category_new'] != '':\n category, created = Category.objects.get_or_create(name=data['category_new'])\n else:\n category = None\n \n photo = Photo.objects.create(\n category=category,\n description = data['description'],\n images = image,\n )\n\n return redirect('gallery')\n\n\n\n context = {'categories': categories}\n return render(request, 'galapp/add.html', context)\n\n\ndef photo(request, pk):\n photo = Photo.objects.get(id=pk)\n return render(request, 'galapp/photo.html', {'photo': photo})\n\nclass viewdelete(DeleteView):\n model = Photo\n template_name = 'galapp/delete.html'\n success_url = reverse_lazy('gallery')\n context_object_name = 'photo'\n\n","repo_name":"arunkumar27-ank-tech/Django_Gallery_App","sub_path":"galapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71453026962","text":"TC = int(input())\nA = [i for i in range(1,13)]\nfor case_num in range(1, TC+1):\n N, K = list(map(int, input().split()))\n n = len(A)\n c = []\n for i in range(1< None:\n dut.expect_exact('Press ENTER to see the list of tests')\n dut.write('')\n dut.expect_exact('Enter test for running.')\n dut.write('*')\n dut.expect_unity_test_output()\n","repo_name":"espressif/esp-idf","sub_path":"components/fatfs/test_apps/flash_ro/pytest_fatfs_flash_ro.py","file_name":"pytest_fatfs_flash_ro.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":11541,"dataset":"github-code","pt":"3"} +{"seq_id":"34539428405","text":"import healpy as hp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import fits\n\n# Center of the sky region\nlongitude = 300\nlatitude = -20\n# Resolution (in arcmin)\nreso = 2.35\n# Map size (in pixel)\nN = 768\nnside = 2048\n\nfreq = 100\n\nn_noise_maps = 50\nnoise_folder = '../data/IQU_Planck_data/Planck_noise_fits/'\n\nTEB_Noise_maps = np.zeros((3,n_noise_maps,N,N))\nfor i in range(n_noise_maps):\n noise_map = fits.open(noise_folder+'product-action?SIMULATED_MAP.FILE_ID=ffp10_noise_'+str(freq)+'_full_map_mc_000'+str(1000+i+50)[-2:]+'.fits')\n IQU_map = [hp.read_map(noise_map,field=0),hp.read_map(noise_map,field=1),hp.read_map(noise_map,field=2)]\n print(\"Map \"+str(i+1)+\" loaded !\")\n TEB_Noise_alm = hp.map2alm(IQU_map)\n print(\"Alm computed !\")\n TEB_Noise_map = [hp.alm2map(TEB_Noise_alm[0],2048),hp.alm2map(TEB_Noise_alm[1],2048),hp.alm2map(TEB_Noise_alm[2],2048)]\n TEB_Noise_maps[0,i] = np.array(hp.gnomview(TEB_Noise_map[0],coord='G',rot=[longitude,latitude],reso=reso,xsize=N,ysize=N,return_projected_map=True,cmap='inferno'))\n plt.close()\n TEB_Noise_maps[1,i] = np.array(hp.gnomview(TEB_Noise_map[1],coord='G',rot=[longitude,latitude],reso=reso,xsize=N,ysize=N,return_projected_map=True,cmap='inferno'))\n plt.close()\n TEB_Noise_maps[2,i] = np.array(hp.gnomview(TEB_Noise_map[2],coord='G',rot=[longitude,latitude],reso=reso,xsize=N,ysize=N,return_projected_map=True,cmap='inferno'))\n plt.close()\n print(\"Map \"+str(i+1)+\" done !\")\n\nnp.save(\"../data/IQU_Planck_data/TE correlation data/Noise_TEB_\"+str(freq)+\"_768_la_suite.npy\",TEB_Noise_maps)","repo_name":"constantauclair/CompSepDustCMB","sub_path":"test_biais_std/generate_TEB_flat_noise_Planck_maps.py","file_name":"generate_TEB_flat_noise_Planck_maps.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40620891806","text":"from colorama import init, Fore\n\nimport menu_text\n\ninit()\n\n\ndef display_menu():\n \"\"\" This function displays the menu and gets the user's choice \"\"\"\n menu = menu_text.menu\n title = menu_text.title\n\n print(Fore.BLUE + title + Fore.RESET)\n print(Fore.BLUE + menu + Fore.RESET)\n\n while True:\n try:\n choice = int(input(Fore.YELLOW + 'Enter choice (0-10): ' + Fore.RESET))\n if choice not in range(0, 11):\n print(f'Error: Choice {choice} not supported! Try again.')\n else:\n return choice\n except ValueError:\n print('Error: Invalid input! Enter an integer between 1 and 10.')\n\n\n","repo_name":"inesfolha/movie_app","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2270981957","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\"\"\"\nReads a list of reviews and decide if each review is positive or negative,\nbased on the occurences of positive and negative words.\nWrites the results in a file.\n\"\"\"\n\nPOS_FILE, NEG_FILE = 'positive-words.txt', 'negative-words.txt'\n\ndef loadLexicon(wordFile):\n \"\"\"Returns a set of words from a file\"\"\"\n return set(line.strip() for line in open(wordFile))\n\n# Let's load the default lexicons at the time of module loading itself\nPOS_LEX, NEG_LEX = loadLexicon(POS_FILE), loadLexicon(NEG_FILE)\n\"\"\"Source:http://www.saltycrane.com/blog/2008/01/how-to-find-intersection-and-union-of/\nTo find intersection of two Lists\"\"\"\n\ndef to_multiset(x):\n result = set()\n max_rep = len(x)\n for elt in x:\n for n in xrange(max_rep):\n n_elt = (elt,n)\n if n_elt not in result:\n result.add(n_elt)\n break\n return result\n\ndef from_multiset(x):\n return sorted([elt for elt,n in x])\n\ndef multi_intersect(a, b):\n aa = to_multiset(a)\n bb = to_multiset(b)\n return from_multiset(aa & bb)\n \ndef get_sentiment(line, posLex=POS_LEX, negLex=NEG_LEX):\n \"\"\"Gets sentiment for a line based on the lexicons.\n returns line, sentiment, positive_words, and negative_words\n \"\"\"\n words = set(line.split(' '))\n posList = multi_intersect(words, posLex) # list intersection\n negList = multi_intersect(words, negLex)\n diff = len(posList) - len(negList)\n sentiment = 'Netural' if diff == 0 else 'Positive' if diff > 0 else 'Negative'\n return (sentiment, posList, negList)\n\n\n\ndef process_reviews(review_file='nokia.txt', outfile='results.txt', \n posLex=POS_LEX, negLex=NEG_LEX):\n \"\"\"Processes the reviews based on lexicons; we can pass custom lexicons too\"\"\"\n lines = ( line.strip() for line in open(review_file) )\n outFmt = '{}\\n{}\\n{}\\n{}\\n\\n' # line\\nPosList\\nNegList\\nSenti\\n\\n\n with open('results.txt', 'w') as outfile:\n for line in lines:\n senti, pws, negws = get_sentiment(line, posLex, negLex)\n outfile.write(outFmt.format(line, list(pws), list(negws), senti)) \n\ndef main(review_file='nokia.txt', outfile='results.txt', posFile=None, negFile=None):\n \"\"\"Processes reviews with optional lexicons for positive and negative reviews\"\"\"\n posLex = POS_LEX if not posFile else loadLexicon(posFile)\n negLex = NEG_LEX if not negFile else loadLexicon(negFile)\n process_reviews(review_file, outfile, posLex, negLex)\n\ndef print_sentiment(line, **kwargs):\n # you can pass options such as posLex=loadLexicon('new_pos_file') here\n res = get_sentiment(line, **kwargs) \n fmt = 'Sentiment:{0}; Positive_words:[{1}]; Negative_words:[{2}]; Given_line:[{3}]'\n print(fmt.format(res[0], ','.join(res[1]), ','.join(res[2]), line))\n \ndef run_tests():\n # Let's randomly sample a set of 20 words from the lexicons\n import random\n Nsample = 20\n pos_words = random.sample(POS_LEX, Nsample)\n neg_words = random.sample(NEG_LEX, Nsample) \n # careful printing out \"negative\" words in the NEG_LEX into a notebook\n # there are a lot of \"curse words\" in there that may make things awkward\n\n for j in range(Nsample):\n assert get_sentiment(pos_words[j])[0] == 'Positive'\n assert get_sentiment(neg_words[j])[0] == 'Negative'\n # The next one 'sometimes fails' because we have an overlap of the words\n assert not get_sentiment(pos_words[j] + ' ' + neg_words[j])[0] == 'Neutral'\n\n assert not POS_LEX & NEG_LEX is None\n # This fails because we have a set of overlaps...\n assert get_sentiment('This is good .')[0]=='Positive'\n assert get_sentiment('good nice')[0] == 'Positive'\n assert get_sentiment('worthless')[0] == 'Negative'\n assert not get_sentiment('abcdefghijklmnop')[0] == 'Neutral'\n assert not get_sentiment('')[0] == 'Neutral'\n assert not get_sentiment('good ' * 100 + ' bad')[0] == 'Positive'\n assert not get_sentiment('bad ' * 100 + ' good')[0] == 'Negative'\n \nif __name__ == \"__main__\": \n #import sys\n #if len(sys.argv) == 2 and sys.argv[1] == \"--test\": # valid if we run as python prog.py --test\n run_tests()\n #elif len(sys.argv) == 1: # no arguments were passed\n # main()\n #else:\n # get_sentiment(' '.join(sys.argv[1:]))\n # This is helpful to test one sentence at a time on command-line\n","repo_name":"himanshunagdev/Sentimental-Analysis","sub_path":"Sentimental-Analysis.py","file_name":"Sentimental-Analysis.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3786476846","text":"# coding: utf-8\n\n# Demonstrator / keine Fehlerbehandlung\n\nimport cherrypy\n\nfrom p3.app.database import Database_cl\nfrom p3.app.view import View_cl\n\n# Method-Dispatching!\n\n# Übersicht Anforderungen / Methoden\n\n\"\"\"\nAnforderung GET PUT POST DELETE\n-----------------------------------------------------------------------------\nkomponente/ Alle - Ein neue -\n Komponenten Komponenten \n liefern anlegen \n-----------------------------------------------------------------------------\nkomponente/id Eine Eine - Eine\nkomponente/?id=id Komponente Komponente Komponente\n liefern updaten loeschen\n\"\"\"\n\n\n# ----------------------------------------------------------\nclass Component_Cl(object):\n exposed = True\n\n def __init__(self, path):\n self.db = Database_cl(path)\n self.view_o = View_cl()\n\n @cherrypy.tools.json_out()\n def GET(self, id=None):\n if id is None:\n return {'data': self.db.readFile('component.json')['data'],\n 'projects': self.db.readFile('project.json')['data']};\n\n data = self.db.findId(\"component.json\", id)\n if not data is None:\n data['projects'] = self.db.readFile('project.json')['data']\n return data\n return self.view_o.createAlert(\"Komponenten ID ist nicht vorhanden.\")\n\n @cherrypy.tools.json_out()\n def POST(self, title, desc, projects):\n id = self.createComponent(title, desc, projects)\n if not id is None:\n return {\n \"id\": id\n }\n return self.view_o.createAlert(\"Nicht alle angebene Projekte sind vorhanden.\", 400)\n\n @cherrypy.tools.json_out()\n def PUT(self, id, title, desc, projects):\n code = self.updateComponent(id=id, name=title, desc=desc, projectids=projects)\n if code == 0:\n return self.view_o.createFeedbackMessage(\"Komponente erfolgreich bearbeitet.\", 200)\n elif code == 1:\n return self.view_o.createAlert(\"Komponenten ID ist nicht vorhanden.\", 404)\n else:\n return self.view_o.createAlert(\"Nicht alle angebene Projekte sind vorhanden.\", 400)\n\n @cherrypy.tools.json_out()\n def DELETE(self, id):\n if self.deleteComponent(id=id):\n return self.view_o.createFeedbackMessage(\"Komponente erflogreich gelöscht.\", 200)\n return self.view_o.createAlert(\"Komponenten ID ist nicht vorhanden.\", 404)\n\n def createComponent(self, name, desc, projectids):\n newId = self.db.getMaxId('component.json') + 1\n data = self.db.readFile('component.json')\n\n newEntry = {\n \"id\": newId,\n \"name\": name,\n \"desc\": desc,\n \"project\": int(projectids)\n }\n\n data['data'].append(newEntry)\n\n success = True\n # Add the component to the project component array\n data_projects = self.db.readFile('project.json')\n\n exists = False\n for entry in data_projects['data']:\n if int(projectids) == int(entry['id']):\n entry['component'].append(int(newId))\n exists = True\n break\n if not exists:\n success = False\n\n if not success:\n return None\n\n self.db.writeFile('project.json', data_projects)\n self.db.writeFile('component.json', data)\n return newId\n\n def updateComponent(self, id, name, desc, projectids):\n if not self.db.isNumber(id):\n return 1\n\n if self.db.findId(\"component.json\", id) is None:\n return 1\n\n data = self.db.readFile('component.json')\n # Test if project ids is an int value or an array\n\n projects = projectids\n\n for entry in data['data']:\n if int(entry['id']) == int(id):\n entry['name'] = name\n entry['desc'] = desc\n oldProject = entry['project']\n entry['project'] = projects\n break\n\n # Delete the component from all projects\n data_projects = self.db.readFile('project.json')\n for entry in data_projects['data']:\n if int(oldProject) == int(entry['id']):\n try:\n entry['component'].remove(int(id))\n except:\n print('Components update error found.')\n break\n\n success = True\n for projectId in projects:\n exists = False\n for entry in data_projects['data']:\n if int(projectId) == int(entry['id']):\n entry['component'].append(int(id))\n exists = True\n break\n if not exists:\n success = False\n break\n\n if not success:\n return 2\n\n self.db.writeFile('project.json', data_projects)\n self.db.writeFile('component.json', data)\n return 0\n\n def deleteComponent(self, id):\n if not self.db.isNumber(id):\n return False\n\n if self.db.findId(\"component.json\", id) is None:\n return False\n\n # Get the current file\n data = self.db.readFile('component.json')\n data_bugs = self.db.readFile('bug.json')\n\n # Remove the component from the components array\n components = []\n for entry in data['data']:\n if not entry['id'] == int(id):\n components.append(entry)\n else:\n projects = entry['project']\n data['data'] = components\n\n bugs = []\n for entry in data_bugs['data']:\n if entry['component'] != int(id):\n bugs.append(entry)\n data_bugs['data'] = bugs\n\n # Remove the component id from the projects\n data_projects = self.db.readFile('project.json')\n for projectEntry in data_projects['data']:\n for value in projects:\n if projectEntry['id'] == int(value):\n projectEntry['component'].remove(int(id))\n\n self.db.writeFile('bug.json', data_bugs)\n self.db.writeFile('project.json', data_projects)\n self.db.writeFile('component.json', data)\n return True\n# EOF\n","repo_name":"Pixeldweller/TaskThatIsSupposedToBeDone","sub_path":"p4/app/objects/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6904749060","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .Block import MLPBlock, ResidualBlock\nfrom .PatchEmbedding import PatchEmbedding\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, emb_dim=768, num_heads=12, drop_rate=0.) -> None:\n super().__init__()\n self.multiHead = nn.MultiheadAttention(\n embed_dim=emb_dim, num_heads=num_heads, dropout=drop_rate)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.multiHead(x, x, x)[0]\n\n\nclass VisionTransformer(nn.Module):\n def __init__(self, in_channels: int, patch_size: int, num_heads: int, expansion: int, drop_p: float, feed_forward_drop_p: float, depth: int = 12) -> None:\n super().__init__()\n # patches\n emb_dim = in_channels * (patch_size ** 2) # 32 * 16 * 16 \n self.patch_embedding = PatchEmbedding(\n in_channels=in_channels, patch_size=patch_size, emb_dim=emb_dim, img_size=224)\n \n self.transformer_blocks = nn.Sequential(\n ResidualBlock(\n nn.Sequential(\n nn.LayerNorm(emb_dim),\n MultiHeadAttention(emb_dim=emb_dim, num_heads=num_heads),\n nn.Dropout(drop_p),\n )),\n ResidualBlock(\n nn.Sequential(\n nn.LayerNorm(emb_dim),\n nn.Linear(emb_dim, emb_dim),\n nn.GELU(),\n nn.Dropout(feed_forward_drop_p),\n ))\n\n )\n self.transformer_encoder = nn.Sequential(\n *[self.transformer_blocks for _ in range(depth)]\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embedding(x)\n x = self.transformer_encoder(x)\n return x\n\n\nif __name__ == \"__main__\":\n x = torch.randn(5, 32, 224, 224).to(\"cuda\")\n model = VisionTransformer(in_channels=32, patch_size=16, num_heads=128 * 7, expansion=4, drop_p=0.1, feed_forward_drop_p=0.1, depth=4)\n model.to(\"cuda\")\n print(model(x).shape)\n","repo_name":"ngtuan092/recaptured-image-detector","sub_path":"modules/VisionTransformer.py","file_name":"VisionTransformer.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"3846044212","text":"filename='demo.txt'\r\nWRITE='w'\r\nREAD='r'\r\nAPPEND='a'\r\nWRITEREAD='w+'\r\nfile=open(filename,WRITEREAD)\r\nfile.write('this is python file making in sublime text\\n')\r\nfile.write('this is a second line in which we write the code')\r\nfile.close()\r\nprint('thid is correct in w plus mode we can write and read both')","repo_name":"rahulworld/PyPY","sub_path":"file_write.py","file_name":"file_write.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12612329193","text":"\"\"\"Mixins to help test catalog integration.\"\"\"\n\n\nfrom openedx.core.djangoapps.catalog.models import CatalogIntegration\n\n\nclass CatalogIntegrationMixin:\n \"\"\"Utility for working with the catalog service during testing.\"\"\"\n\n catalog_integration_defaults = {\n 'enabled': True,\n 'internal_api_url': 'https://catalog-internal.example.com/api/v1/',\n 'cache_ttl': 0,\n 'service_username': 'lms_catalog_service_user',\n 'page_size': 20,\n }\n\n def create_catalog_integration(self, **kwargs):\n \"\"\"\n Creates a new CatalogIntegration with catalog_integration_defaults,\n updated with any provided overrides.\n \"\"\"\n fields = dict(self.catalog_integration_defaults, **kwargs)\n CatalogIntegration(**fields).save()\n\n return CatalogIntegration.current()\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/catalog/tests/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"42293851135","text":"from optparse import Values\r\nfrom unittest.loader import VALID_MODULE_NAME\r\nimport PySimpleGUI as sg\r\nimport pyperclip as pc\r\nimport keyboard as kb\r\nimport textwrap\r\nfrom pynput.keyboard import Key, Controller\r\n\r\nkeyboard = Controller()\r\n\r\nglobal BButton\r\n\r\nsg.theme(\"None\")\r\n\r\n\r\ndef fadeaway():\r\n USE_FADE_IN = True\r\n WIN_MARGIN = 60\r\n\r\n # colors\r\n WIN_COLOR = \"#282828\"\r\n TEXT_COLOR = \"#ffffff\"\r\n\r\n DEFAULT_DISPLAY_DURATION_IN_MILLISECONDS = 300\r\n\r\n # Base64 Images to use as icons in the window\r\n img_error = b'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAMAAABEpIrGAAAAA3NCSVQICAjb4U/gAAAACXBIWXMAAADlAAAA5QGP5Zs8AAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAAIpQTFRF////20lt30Bg30pg4FJc409g4FBe4E9f4U9f4U9g4U9f4E9g31Bf4E9f4E9f4E9f4E9f4E9f4FFh4Vdm4lhn42Bv5GNx5W575nJ/6HqH6HyI6YCM6YGM6YGN6oaR8Kev9MPI9cbM9snO9s3R+Nfb+dzg+d/i++vt/O7v/fb3/vj5//z8//7+////KofnuQAAABF0Uk5TAAcIGBktSYSXmMHI2uPy8/XVqDFbAAAA8UlEQVQ4y4VT15LCMBBTQkgPYem9d9D//x4P2I7vILN68kj2WtsAhyDO8rKuyzyLA3wjSnvi0Eujf3KY9OUP+kno651CvlB0Gr1byQ9UXff+py5SmRhhIS0oPj4SaUUCAJHxP9+tLb/ezU0uEYDUsCc+l5/T8smTIVMgsPXZkvepiMj0Tm5txQLENu7gSF7HIuMreRxYNkbmHI0u5Hk4PJOXkSMz5I3nyY08HMjbpOFylF5WswdJPmYeVaL28968yNfGZ2r9gvqFalJNUy2UWmq1Wa7di/3Kxl3tF1671YHRR04dWn3s9cXRV09f3vb1fwPD7z9j1WgeRgAAAABJRU5ErkJggg=='\r\n img_success = b'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAMAAABEpIrGAAAAA3NCSVQICAjb4U/gAAAACXBIWXMAAAEKAAABCgEWpLzLAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAAHJQTFRF////ZsxmbbZJYL9gZrtVar9VZsJcbMRYaMZVasFYaL9XbMFbasRZaMFZacRXa8NYasFaasJaasFZasJaasNZasNYasJYasJZasJZasJZasJZasJZasJYasJZasJZasJZasJZasJaasJZasJZasJZasJZ2IAizQAAACV0Uk5TAAUHCA8YGRobHSwtPEJJUVtghJeYrbDByNjZ2tvj6vLz9fb3/CyrN0oAAADnSURBVDjLjZPbWoUgFIQnbNPBIgNKiwwo5v1fsQvMvUXI5oqPf4DFOgCrhLKjC8GNVgnsJY3nKm9kgTsduVHU3SU/TdxpOp15P7OiuV/PVzk5L3d0ExuachyaTWkAkLFtiBKAqZHPh/yuAYSv8R7XE0l6AVXnwBNJUsE2+GMOzWL8k3OEW7a/q5wOIS9e7t5qnGExvF5Bvlc4w/LEM4Abt+d0S5BpAHD7seMcf7+ZHfclp10TlYZc2y2nOqc6OwruxUWx0rDjNJtyp6HkUW4bJn0VWdf/a7nDpj1u++PBOR694+Ftj/8PKNdnDLn/V8YAAAAASUVORK5CYII='\r\n\r\n # -------------------------------------------------------------------\r\n\r\n def display_notification(title, message, icon, display_duration_in_ms=DEFAULT_DISPLAY_DURATION_IN_MILLISECONDS, use_fade_in=True, alpha=0.9, location=None):\r\n\r\n # Compute location and size of the window\r\n message = textwrap.fill(message, 50)\r\n win_msg_lines = message.count(\"\\n\") + 1\r\n\r\n screen_res_x, screen_res_y = sg.Window.get_screen_size()\r\n win_margin = WIN_MARGIN # distance from screen edges\r\n win_width, win_height = 364, 66 + (14.8 * win_msg_lines)\r\n win_location = location if location is not None else (screen_res_x - win_width - win_margin, screen_res_y - win_height - win_margin)\r\n\r\n layout = [[sg.Graph(canvas_size=(win_width, win_height), graph_bottom_left=(0, win_height), graph_top_right=(win_width, 0), key=\"-GRAPH-\",\r\n background_color=WIN_COLOR, enable_events=True)]]\r\n\r\n window = sg.Window(title, layout, background_color=WIN_COLOR, no_titlebar=True,\r\n location=win_location, keep_on_top=True, alpha_channel=0, margins=(0, 0), element_padding=(0, 0),\r\n finalize=True)\r\n\r\n window[\"-GRAPH-\"].draw_rectangle((win_width, win_height), (-win_width, -win_height), fill_color=WIN_COLOR, line_color=WIN_COLOR)\r\n window[\"-GRAPH-\"].draw_image(data=icon, location=(20, 20))\r\n window[\"-GRAPH-\"].draw_text(title, location=(64, 20), color=TEXT_COLOR, font=(\"Arial\", 12, \"bold\"), text_location=sg.TEXT_LOCATION_TOP_LEFT)\r\n window[\"-GRAPH-\"].draw_text(message, location=(64, 44), color=TEXT_COLOR, font=(\"Arial\", 9), text_location=sg.TEXT_LOCATION_TOP_LEFT)\r\n\r\n # change the cursor into a \"hand\" when hovering over the window to give user hint that clicking does something\r\n window['-GRAPH-'].set_cursor('hand2')\r\n\r\n if use_fade_in == True:\r\n for i in range(1,int(alpha*100)): # fade in\r\n window.set_alpha(i/100)\r\n event, values = window.read(timeout=1)\r\n if event != sg.TIMEOUT_KEY:\r\n window.set_alpha(1)\r\n break\r\n event, values = window(timeout=display_duration_in_ms)\r\n if event == sg.TIMEOUT_KEY:\r\n for i in range(int(alpha*100),1,-1): # fade out\r\n window.set_alpha(i/100)\r\n event, values = window.read(timeout=1)\r\n if event != sg.TIMEOUT_KEY:\r\n break\r\n else:\r\n window.set_alpha(alpha)\r\n event, values = window(timeout=display_duration_in_ms)\r\n\r\n window.close()\r\n\r\n if __name__ == '__main__':\r\n title = \"Multiclipduck\"\r\n message = c \r\n display_notification(title, message, img_success, 100, use_fade_in=True)\r\n\r\ndef ende():\r\n ende_layout = [[sg.Text(\"Danke fürs Benutzen meiner kleinen App\")],\r\n [sg.Button(\"Beenden\")]\r\n ]\r\n\r\n ende_window = sg.Window(\"Ende im Gelände\", ende_layout)\r\n\r\n while True:\r\n event, values = ende_window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n break\r\n if event == \"Beenden\":\r\n break\r\n\r\ndef mcd(ersterhotkey, zweiterhotkey, BButton):\r\n \r\n mcd_layout = [[sg.Text(\"Speicher für: \" + ersterhotkey), sg.InputText()]\r\n ]\r\n\r\n if BButton == True:\r\n mcd_layout.append([sg.Text(\"Speicher für \" + zweiterhotkey), sg.InputText()]) \r\n \r\n mcd_layout.append([sg.Push(),sg.Button(\"Neue Hotkeys belegen\"),sg.Button(\"Beenden\"), sg.Push()])\r\n\r\n mcd_window = sg.Window(\"Multiclipboard\", mcd_layout)\r\n\r\n while True:\r\n event, values = mcd_window.read(timeout=100)\r\n if event == \"Beenden\" or sg.WIN_CLOSED:\r\n ende()\r\n break\r\n if event == \"Neue Hotkeys belegen\":\r\n mcd_window.close()\r\n greetw()\r\n if not values[0] == \"\":\r\n if BButton == True:\r\n if kb.is_pressed(zweiterhotkey):\r\n global c\r\n if values[1] == \"\":\r\n c = \"Keine Daten vorhanden\"\r\n fadeaway()\r\n else:\r\n pc.copy(values[1])\r\n c = \"Es wurde \\\"\" + values[1] + \"\\\" in den Cache geladen.\"\r\n fadeaway() \r\n\r\n if kb.is_pressed(ersterhotkey):\r\n pc.copy(values[0])\r\n c = values[0]\r\n c = \"Es wurde \\\"\" + values[0] + \"\\\" in den Cache geladen.\"\r\n fadeaway()\r\n\r\ndef zweiterButtonP(ersterhotkey, zweiterhotkey, BButton):\r\n zweiterButtonP_layout = [[sg.Text(\"Du hast \" + zweiterhotkey + \" gedrückt! Stimmt das?\")],\r\n [sg.Push(), sg.Button(\"Ja\"), sg.Button(\"Nein\"),sg.Push()]\r\n ]\r\n zweiterButtonP_window = sg.Window(\"Multiclipboardduck\", zweiterButtonP_layout)\r\n\r\n while True:\r\n event, values = zweiterButtonP_window.read()\r\n if event == \"Ja\": \r\n zweiterButtonP_window.close()\r\n mcd(ersterhotkey, zweiterhotkey, BButton)\r\n break\r\n if event == \"Nein\":\r\n sg.Popup(\"Versuchen wirs nochmal!\")\r\n zweiterButtonP_window.close()\r\n zweiterButton(ersterhotkey)\r\n if event == sg.WIN_CLOSED:\r\n zweiterButtonP_window.close()\r\n break\r\n\r\ndef zweiterButton(ersterhotkey):\r\n zweiterButton_layout = [[sg.Text(\"Welcher soll dein zweiter Hotkey sein?\")],\r\n [sg.Push(),sg.Button(\"Zweiten Hotkey festlegen\"),sg.Button(\"Ich möchte nur einen Hotkey\"),sg.Push()]\r\n ]\r\n zweiterButton_window = sg.Window(\"Multiclipboardduck\", zweiterButton_layout)\r\n\r\n BButton = True\r\n while True: \r\n event, values = zweiterButton_window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n zweiterButton_window.close\r\n break\r\n if event == \"Zweiten Hotkey festlegen\":\r\n zweiterhotkey = kb.read_hotkey()\r\n zweiterButton_window.close()\r\n zweiterButtonP(ersterhotkey, zweiterhotkey, BButton)\r\n break\r\n if event == \"Ich möchte nur einen Hotkey\":\r\n BButton = False\r\n zweiterhotkey = 0\r\n zweiterButton_window.close()\r\n mcd(ersterhotkey, zweiterhotkey, BButton)\r\n break\r\n\r\n\r\ndef ersterButton(ersterhotkey):\r\n zweiButtons_layout = [[sg.Text(\"Du hast \" + ersterhotkey + \" gedrückt! Stimmt das?\")],\r\n [sg.Push(),sg.Button(\"Ja\"), sg.Button(\"Nein\"), sg.Push()]\r\n ]\r\n zweiButtons_window = sg.Window(\"Multiclipboardduck\", zweiButtons_layout)\r\n\r\n while True:\r\n event, values = zweiButtons_window.read()\r\n\r\n if event == \"Ja\":\r\n zweiButtons_window.close()\r\n zweiterButton(ersterhotkey)\r\n break\r\n if event == \"Nein\":\r\n sg.Popup(\"Versuchen wirs nochmal!\")\r\n zweiButtons_window.close()\r\n greetw()\r\n if event == sg.WIN_CLOSED:\r\n zweiButtons_window.close()\r\n break\r\n\r\n\r\ndef greetw():\r\n\r\n global zweiterhotkey\r\n zweiterhotkey=\"\"\r\n greetw_layout = [[sg.Text(\"Willkommen beim Multiclipboardduck!\")],\r\n [sg.Text(\"Welche Shortcuts/Hotkeys möchtest du verwenden? Bitte drücke den Button und danach die Hotkeys!\")],\r\n [sg.Text(\"Es wird empfohlen die Hotkeys \\\"Strg+Shift+E\\\" und \\\"Strg+Shift+W\\\" zu verwenden\")],\r\n [sg.Push(),sg.Button(\"Ersten Hotkey belegen\", key = \"-Button1-\"),sg.Button(\"Empfohlene verwenden\"),sg.Push()]\r\n ]\r\n\r\n greetw_window = sg.Window(\"Multiclipboardduck\", greetw_layout)\r\n\r\n while True:\r\n event, values = greetw_window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n break\r\n\r\n if event == \"-Button1-\":\r\n ersterhotkey = kb.read_hotkey()\r\n\r\n greetw_window.close()\r\n ersterButton(ersterhotkey)\r\n break\r\n if event == \"Empfohlene verwenden\":\r\n ersterhotkey = \"Strg+Umschalt+E\"\r\n BButton = True\r\n zweiterhotkey = \"Strg+Umschalt+W\"\r\n greetw_window.close()\r\n mcd(ersterhotkey, zweiterhotkey, BButton)\r\n break\r\ngreetw()\r\n","repo_name":"Stempelente/Multiclipboard","sub_path":"MultiCLipboard/Multiclipboard_v_1.py","file_name":"Multiclipboard_v_1.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14971034167","text":"# hash table\nclass hash_table:\n\n # constructor\n # inputs: size (defaults to 8 if no arguments are provided)\n def __init__(self, size=8):\n # self.table: empty hash table of indicated size\n self.table = (None,) * size\n # self.size: number of positions in table\n self.size = size\n\n # Already completed function!\n # INSERTS value INTO HASHTABLE AT index\n # example: insert(5, 10) will place 5 into index#10\n def insert(self, value, index):\n temp = list(self.table)\n temp[index] = value\n self.table = tuple(temp)\n\n # function name: linear_probe\n # input: value- value to be inserted\n # start_index- where linear probing starts\n # output: returns the index of the hash_table that the value should be\n #\tinserted after linear probing\n # assumptions: value will always be an integer\n #\tyour table will always be big enough\n def linear_probe(self, value, start_index):\n\n # TODO\n # hint: empty spots in tuples are labeled as None\n #only need to use one of the values such as either the value being inserted or the start index\n #np need to use both\n if start_index == self.size - 1:\n start_index = 0\n else:\n start_index += 1\n return start_index #simple linear probing that jsut returns the starting index\n\n # function name: insert\n # input: value- value to be inserted\n # output: Do not return anything. Just insert value into the proper position\n #\tin self.table. Utilize linear_probe and insert in this function\n # assumptions: value will always be an integer\n #\tyour table will always be big enough\n def hash(self, value):\n\n #TODO\n # hint: empty spots in tuples are labeled as None\n\n # mod each value to figure out which position they should be in\n position =value%self.size\n self.table=list(self.table) #need to change the tuple into a list because tuples cannot be edited\n while self.table[position] != None: #figure out where to put the things\n position = self.linear_probe(value,position)\n self.insert(value,position) #insert into the list\n self.table = tuple(self.table) #turn it back into a tupple\n\n\n\n\n # Already completed function!\n def get_table(self):\n return self.table\n\n # Already completed function!\n def __str__(self):\n return str(self.table)\n\n\n\"\"\"**********************************************************************\"\"\"\n# test cases\n# Everything below MUST be commented out or deleted in your submission\n# otherwise the grading script will pick it up! You WILL lose points!\n# please note that these are not the only test cases that will be run\n\"\"\"**********************************************************************\"\"\"\n\n\ndef checker(expected, actual):\n if expected == actual:\n print(\"CORRECT!\")\n else:\n print(\"expected \" + str(expected) + \", but got \" + str(actual))\n\n\n\"\"\"**********************************************************************\"\"\"\n\ntest1 = hash_table(5)\ntest1.hash(9)\ntest1.hash(25)\ntest1.hash(10)\ntest1.hash(14)\nexpected1 = (25, 10, 14, None, 9)\n\nchecker(expected1, test1.get_table())\n\n\"\"\"**********************************************************************\"\"\"\n\ntest2 = hash_table(8)\ntest2.hash(5)\ntest2.hash(30)\ntest2.hash(52)\ntest2.hash(95)\ntest2.hash(45)\nexpected2 = (45, None, None, None, 52, 5, 30, 95)\n\nchecker(expected2, test2.get_table())","repo_name":"NightToki/CECS229Lab3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30683221951","text":"import scipy.sparse as sp\n\nclass ItemSVD(object):\n\n def __init__(self, u):\n self.S = None\n self.URM = None\n self.u = u\n\n def fit(self, URM, k=3000, knn=150, evaluate=False):\n self.URM = URM\n\n if evaluate:\n self.S = self.u.get_itemsim_SVD(k, knn)\n sp.save_npz(\"./s_itemsvd_new.npz\", self.S)\n else:\n self.S = sp.load_npz(\"similarities/s_itemsvd_current.npz\")\n\n def recommend(self, target_playlist):\n row = self.URM[target_playlist].dot(self.S)\n return self.u.get_top_10(self.URM, target_playlist, row.toarray().ravel())\n","repo_name":"iPhra/RecommenderSystems","sub_path":"Progetto/recommenders/Basic/ICM_SVD.py","file_name":"ICM_SVD.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22139717469","text":"#!/usr/bin/python\nimport sys\nimport os\nimport npyscreen\nimport curses\nimport time\n\n\nclass ListButton(npyscreen.ButtonPress):\n \"\"\"\n override the __init__ and update functions to change apperance.\n override the whenPressed function to control behavior.\n \"\"\"\n\n def __init__(self, screen, name='Button', memo_path=None, cursor_color=None, *args, **keywords):\n super(ListButton, self).__init__(screen, name=name, cursor_color=None, *args, **keywords)\n self.cursor_color = \"CURSOR_INVERSE\"\n self.color = \"CURSOR_COLOR\"\n self.date_color = \"LABEL\"\n self.cursor_date = \"CURSOR_INVERSE\"\n self.memo_path = memo_path\n\n def update(self, clear=True):\n if clear: self.clear()\n if self.hidden:\n self.clear()\n return False\n\n if self.editing:\n button_state = curses.A_STANDOUT\n else:\n button_state = curses.A_NORMAL\n\n button_name = self.name\n if isinstance(button_name, bytes):\n button_name = button_name.decode(self.encoding, 'replace')\n button_name = button_name.center(self.label_width)\n\n if self.do_colors():\n if self.cursor_color:\n if self.editing:\n button_attributes = self.parent.theme_manager.findPair(self, self.cursor_color)\n else:\n button_attributes = self.parent.theme_manager.findPair(self, self.color)\n else:\n button_attributes = self.parent.theme_manager.findPair(self, self.color) | button_state\n else:\n button_attributes = button_state\n\n if self.do_colors():\n if self.editing:\n date_attributes = self.parent.theme_manager.findPair(self, self.cursor_date)\n else:\n date_attributes = self.parent.theme_manager.findPair(self, self.date_color)\n else:\n button_attributes = button_state\n\n date = button_name.split('\\t')[0] + '\\t'\n file_name = button_name.split('\\t')[1]\n\n self.add_line(self.rely, self.relx+1,\n date,\n self.make_attributes_list(date, date_attributes),\n len(date)\n )\n self.add_line(self.rely, self.relx+1+len(date),\n file_name,\n self.make_attributes_list(file_name, button_attributes),\n len(file_name)\n )\n\n def whenPressed(self):\n f = self.name.split('\\t')[1]\n m_path = os.path.join(self.memo_path, f)\n os.system(\"vim %s\" % m_path)\n sys.exit(0)\n\n\nclass ListForm(npyscreen.FormMultiPage):\n \"\"\"\n List form page\n \"\"\"\n\n def create(self):\n self.file_list = self.parentApp.file_list\n self.memo_path = self.parentApp.memo_path\n\n for f, d in self.file_list:\n display = str(d) + '\\t' + f\n self.add_widget_intelligent(ListButton, name=display, memo_path=self.memo_path, value=display)\n\n self.add_handlers({\n \"q\": self.exit_editing,\n \"Q\": self.exit_editing\n })\n\n def afterEditing(self):\n self.parentApp.setNextForm(None)\n\n\nclass ListUI(npyscreen.NPSAppManaged):\n \"\"\"\n UI for memo list\n \"\"\"\n def __init__(self, config):\n super(ListUI, self).__init__()\n self.memo_path = config.get('memo', 'memo_path', fallback=os.path.join(sys.prefix, 'memo', 'data', 'local'))\n memo_files = os.listdir(self.memo_path)\n file_list = []\n for memo_file in memo_files:\n mdate = os.path.getmtime(os.path.join(self.memo_path, memo_file))\n t = time.localtime(mdate)\n file_list.append((memo_file, \"%02d-%02d-%02d %02d:%02d:%02d\" % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)))\n file_list.sort(key=lambda x: x[1], reverse=True)\n\n self.file_list = file_list\n\n def onStart(self):\n self.addForm(\"MAIN\", ListForm, name=\"Memo List\")\n","repo_name":"wilighthasaki/shell_memo","sub_path":"memo/list_ui.py","file_name":"list_ui.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19533494825","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 22 14:35:25 2022\r\n\r\n@author: nolan\r\n\"\"\"\r\n\r\n# n = 7\r\n# f = 4\r\n# m = [1,2,4,4,5,7,9]\r\n\r\nn, f = map(int, input().split())\r\nlst = list(map(int, input().split()))\r\n\r\nl = 0\r\nr = len(lst)-1\r\nresult = 0\r\n\r\nwhile l<=r:\r\n mid = (l+r)//2\r\n if lst[mid] < f:\r\n l = mid+1\r\n elif lst[mid] > f:\r\n r = mid-1\r\n else: \r\n result = lst[mid]\r\n \r\nprint(result, mid,l,r)\r\nif result != mid:\r\n l, r = r, l\r\n if lst[r]-f >= f-lst[l]:\r\n result = lst[l]\r\n else:\r\n result = lst[r]\r\n \r\nprint(result)\r\n","repo_name":"nolan11701/cs102","sub_path":"week12/3.find_the_closest_element.py","file_name":"3.find_the_closest_element.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7705215210","text":"#-*- coding:utf-8 _*-\n\"\"\"\n@author:charlesXu\n@file: utils.py\n@desc: 信息抽取程序主入口\n@time: 2018/08/08\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport os, argparse, time, random\nimport elasticsearch\nimport redis\nimport pymysql\nimport split_sentence # 分句\n# import Aip_config\nimport re\n\nfrom elasticsearch import Elasticsearch\nfrom datetime import datetime\n\nfrom numba import jit\n\n# import pdb\n\n# os.environ['CUDA_VISIBLE_DEVICES']='0' # 设置只用一块显卡\n\nfrom Entity_Extraction import proprecess_money\nfrom Entity_Extraction.Enext_model import BiLSTM_CRF\nfrom Entity_Extraction.utils import str2bool, get_logger, get_entity, get_MON_entity\nfrom Entity_Extraction.data import read_corpus, read_dictionary, tag2label, random_embedding\nfrom Entity_Extraction.get_data import get_datas, get_MONGO_data, del_MONGO_data\n\nfrom Chatbot_Model.utils.WriteToCSV import NER2CSV\n\nfrom Entity_Extraction.get_location import get_add, cut_addr\n\n\n## Session configuration\nos.environ['CUDA_VISIBLE_DEVICES'] = '0' # 设置只用一块显卡\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # default: 0\nconfig = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.2 # need ~700MB GPU memory\n\n# 数据库操作\n# db = pymysql.Connect(\"localhost\", \"root\", \"Aa123456\", \"zhizhuxia\")\n# print('Connect successful')\n# cursor = db.cursor()\n# redis = redis.Redis(host='127.0.0.1', port=6379)\n\n# 连接ES\nes = Elasticsearch(\n # ['test.npacn.com'], # 192.168.11.251\n # http_auth=('admin', 'u1PJTXyzjVqT'),\n ['192.168.11.211'],\n port=9200,\n timeout= 30,\n)\n# 创建索引\n# es.indices.create(index='zhizhixia_ner')\nprint('索引创建成功。')\n\ntext_list = [] # 创建一个tuple,用来装分句后的数据\n\n\n## hyperparameters\nparser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')\nparser.add_argument('--train_data', type=str, default='D:\\project\\Chatbot_CN\\Chatbot_Data\\Info_Extraction', help='train data source')\nparser.add_argument('--test_data', type=str, default='D:\\project\\Chatbot_CN\\Chatbot_Data\\Info_Extraction', help='test data source')\nparser.add_argument('--batch_size', type=int, default=4, help='#sample of each minibatch')\nparser.add_argument('--epoch', type=int, default=100, help='#epoch of training')\nparser.add_argument('--hidden_dim', type=int, default=300, help='#dim of hidden state')\nparser.add_argument('--optimizer', type=str, default='Adam', help='Adam/Adadelta/Adagrad/RMSProp/Momentum/SGD')\nparser.add_argument('--CRF', type=str2bool, default=True, help='use CRF at the top layer. if False, use Softmax')\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate')\nparser.add_argument('--clip', type=float, default=5.0, help='gradient clipping')\nparser.add_argument('--dropout', type=float, default=0.5, help='dropout keep_prob')\nparser.add_argument('--update_embedding', type=str2bool, default=True, help='update embedding during training')\nparser.add_argument('--pretrain_embedding', type=str, default='random', help='use pretrained char embedding or init it randomly')\nparser.add_argument('--embedding_dim', type=int, default=300, help='random init char embedding_dim')\nparser.add_argument('--shuffle', type=str2bool, default=True, help='shuffle training data before each epoch')\nparser.add_argument('--mode', type=str, default='demo', help='train/test/demo')\nparser.add_argument('--demo_model', type=str, default='1535444492', help='model for test and demo')\nargs = parser.parse_args()\n\n\n## get char embeddings\nword2id = read_dictionary(os.path.join('.', args.train_data, 'word2id.pkl'))\nif args.pretrain_embedding == 'random':\n embeddings = random_embedding(word2id, args.embedding_dim)\nelse:\n embedding_path = 'pretrain_embedding.npy'\n embeddings = np.array(np.load(embedding_path), dtype='float32')\n\n\n## read corpus and get training data\nif args.mode != 'demo':\n train_path = os.path.join('.', args.train_data, 'train_data') # 训练数据\n test_path = os.path.join('.', args.test_data, 'test_data')\n train_data = read_corpus(train_path)\n test_data = read_corpus(test_path);\n test_size = len(test_data)\n\n\n## paths setting\npaths = {}\ntimestamp = str(int(time.time())) if args.mode == 'train' else args.demo_model\noutput_path = os.path.join('.', args.train_data+\"_save\", timestamp)\nif not os.path.exists(output_path): os.makedirs(output_path)\nsummary_path = os.path.join(output_path, \"summaries\")\npaths['summary_path'] = summary_path\nif not os.path.exists(summary_path): os.makedirs(summary_path)\nmodel_path = os.path.join(output_path, \"checkpoints/\")\nif not os.path.exists(model_path): os.makedirs(model_path)\nckpt_prefix = os.path.join(model_path, \"model\")\npaths['model_path'] = ckpt_prefix\nresult_path = os.path.join(output_path, \"results\")\npaths['result_path'] = result_path\nif not os.path.exists(result_path): os.makedirs(result_path)\nlog_path = os.path.join(result_path, \"log.txt\")\npaths['log_path'] = log_path\nget_logger(log_path).info(str(args))\n\n\n## training Entity Extraction model\nif args.mode == 'train':\n model = BiLSTM_CRF(args, embeddings, tag2label, word2id, paths, config=config)\n model.build_graph()\n\n ## hyperparameters-tuning, split train/dev\n # dev_data = train_data[:5000]; dev_size = len(dev_data)\n # train_data = train_data[5000:]; train_size = len(train_data)\n # print(\"train data: {0}\\ndev data: {1}\".format(train_size, dev_size))\n # model.train(train=train_data, dev=dev_data)\n\n ## train model on the whole training data\n print(\"train data: {}\".format(len(train_data)))\n model.train(train=train_data, dev=test_data) # use test_data as the dev_data to see overfitting phenomena\n\n## testing model\nelif args.mode == 'test':\n ckpt_file = tf.train.latest_checkpoint(model_path)\n print(ckpt_file)\n paths['model_path'] = ckpt_file\n model = BiLSTM_CRF(args, embeddings, tag2label, word2id, paths, config=config)\n model.build_graph()\n print(\"test data: {}\".format(test_size))\n model.test(test_data)\n\n## demo\nelif args.mode == 'demo':\n\n # 这里指定了模型路径\n model_path = 'D:\\project\\Chatbot_CN\\Chatbot_Data\\Info_Extraction_save\\\\1535444492\\checkpoints'\n ckpt_file = tf.train.latest_checkpoint(model_path)\n # print('>>>>>>>>>>>',ckpt_file)\n paths['model_path'] = ckpt_file\n model = BiLSTM_CRF(args, embeddings, tag2label, word2id, paths, config=config)\n model.build_graph()\n saver = tf.train.Saver()\n with tf.Session(config=config) as sess:\n print('============= demo =============')\n saver.restore(sess, ckpt_file)\n # saver.restore(sess, tf.train.latest_checkpoint(\"Chatbot_Data/Info_Extraction_save\"))\n # while(1):\n # print('Please input your sentence:')\n # demo_sent = input()\n\n # all_texts = get_datas() # 数据库返回的按“一、二、三、四、”切割返回的文本 mysql数据库\n all_texts = get_MONGO_data()\n try:\n for i, one_text in enumerate(all_texts):\n # mongodb的数据格式\n addr = one_text['addr'] # 归属地\n charge = one_text['charge'] # 犯罪原因\n judgementId = one_text['judgementId'] # 判决Id,唯一标示\n keywords = one_text['keywords'] # 关键词\n court = one_text['court'] # 法院信息\n judge_text = one_text['judge_text'] # 判决结果,是一个列表,继续循环\n proponents = one_text['proponents'] # 原告\n opponents = one_text['opponents'] # 被告\n\n for text in judge_text: # 处理判决结果\n text = re.sub(\"\",'', text)\n print('judge_text: ', text)\n demo_data = [(text, ['O'] * len(text))]\n tag = model.demo_one(sess, demo_data)\n PER, LOC, ORG = get_entity(tag, text)\n MON = get_MON_entity(text)\n print('PER: {}\\nLOC: {}\\nORG: {}\\nMON: {}\\n'.format(PER, LOC, ORG, MON))\n\n # 将数据写入es\n es.index(index='zhizhuxia', doc_type='ner_type',\n body={'addr': addr,\n 'charge': charge,\n 'judgementId': judgementId,\n 'keywords': keywords,\n 'court': court,\n 'judge_text': text,\n 'PER': PER,\n 'LOC': LOC,\n 'ORG': ORG,\n 'MON': MON,\n 'proponents': proponents,\n 'opponents': opponents,\n 'timestamp': datetime.now()})\n # 将数据写入csv\n # NER2CSV(judgementId,addr,charge, keywords, court, PER, LOC, ORG, MON, proponents, opponents, judge_text, timestamp)\n\n\n # 根据judgement_id删除数据\n # del_ = del_MONGO_data(judgementId)\n # print('Del succeed')\n\n # mysql数据格式\n # uuid = one_text[0] # 获取每条数据的uuid\n # obligors = one_text[1] # 原告\n # creditors = one_text[2] # 被告\n # texts = one_text[3]\n # text_sent = split_sentence.split_sentence_thr(texts) # 分句\n\n # 这里的逻辑是处理提取出来的判决文本数据\n # for text in text_sent:\n # print(text)\n # text_strip = list(text.strip()) #\n # demo_data = [(text, ['O'] * len(text))]\n # tag = model.demo_one(sess, demo_data)\n # PER, LOC, ORG = get_entity(tag, text)\n # LOC_RE = get_add(text)\n # 调用money处理方法,获取金额实体\n # MON = get_MON_entity(text)\n #\n # print('PER: {}\\nLOC: {}\\nORG: {}\\nMON: {}\\n'.format(PER, LOC, ORG, MON))\n # print('LOC_RE :{}'.format(LOC_RE))\n\n # 将数据写入ES\n # es.index(index='chatbot', doc_type='test_type',\n # body={'uuid': uuid,\n # 'text': text, # 原文\n # 'PER': PER,\n # 'LOC': LOC,\n # 'ORG': ORG,\n # 'MON': MON,\n # # 'LOC_RE': LOC_RE,\n # 'obligors': obligors,\n # 'creditors': creditors,\n # 'timestamp': datetime.now()})\n\n\n # 调用关系抽取\n except Exception as e:\n print('Error is', e)","repo_name":"kingzhengguang/XuShengQuan-NLP-","sub_path":"Chatbot_Model/Info_Extraction/Info_Ext_main.py","file_name":"Info_Ext_main.py","file_ext":"py","file_size_in_byte":11096,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"31121657146","text":"import streamlit as st\nimport pandas as pd\nimport altair as alt\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef ct_hist(col, df):\n hist = alt.Chart(df, width=600).mark_bar().encode(\n alt.X(col, bin=True),\n y='count()', tooltip=[col, 'count()']\n ).interactive()\n return hist\n\ndef ct_correlation(df, cols):\n cor_data = (df[cols]).corr().stack().reset_index().rename(\n columns={0: 'correlation', 'level_0': 'variable', 'level_1': 'variable2'})\n cor_data['correlation_label'] = cor_data['correlation'].map('{:.2f}'.format) # Round to 2 decimal\n base = alt.Chart(cor_data, width=500, height=500).encode(x='variable2:O', y='variable:O')\n text = base.mark_text().encode(text='correlation_label',\n color=alt.condition(alt.datum.correlation > 0.5, alt.value('white'),\n alt.value('black')))\n\n # The correlation heatmap itself\n cor_plot = base.mark_rect().encode(\n color='correlation:Q')\n\n return cor_plot + text\n\n\ndef main():\n st.image('logo.png', width=200)\n st.title('Exploratory Data Analysis')\n st.subheader('Example of data analysis and visualization\\n'\n '* Source: http://mlr.cs.umass.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv')\n st.sidebar.image('cn.png', width=200)\n st.sidebar.title('Challenge by:')\n st.sidebar.markdown('* AceleraDev Data Science')\n st.sidebar.title('Author:')\n st.sidebar.markdown('* Marcel Rocha Nascimento')\n st.sidebar.title('Contact me:')\n st.sidebar.markdown('* marcel.nanoufrj@gmail.com')\n st.sidebar.title('Find out more:')\n st.sidebar.markdown('* [LinkedIn](https://www.linkedin.com/in/marcel-rocha-nascimento-8185a6148/) - '\n '[GitHub](https://github.com/MarcelRocha)')\n #dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'\n df = pd.read_csv('winequality-red.csv')\n data_col = list(df.columns)\n st.markdown('**Dataframe visualization**')\n number = st.slider('Choose the number of rows you want to see', min_value=1, max_value=20)\n st.dataframe(df.head(number))\n st.title('Statistics')\n col = st.selectbox('Select the feature:', data_col)\n if col is not None:\n st.markdown('Select analysis:')\n mean = st.checkbox('Mean')\n if mean:\n st.markdown(df[col].mean())\n median = st.checkbox('Median')\n if median:\n st.markdown(df[col].median())\n data_std = st.checkbox('Standard Deviation')\n if data_std:\n st.markdown(df[col].std())\n describe = st.checkbox('Statistics (summary)')\n if describe:\n st.dataframe(df[col].describe())\n st.title('Data Visualization')\n st.markdown('Select your visualization')\n histogram = st.checkbox('Histogram')\n if histogram:\n col_num = st.selectbox('Select a column: ', data_col, key='unique')\n st.markdown('Histogram')\n st.write(ct_hist(col_num, df))\n distribution = st.checkbox('Distribution Chart')\n if distribution:\n col_dist = st.selectbox('Select a column: ', data_col, key='unique')\n sns.distplot(df[col_dist])\n plt.xlabel('')\n plt.title(col_dist, {'fontsize': 20})\n st.pyplot()\n correlation = st.checkbox('Correlation')\n if correlation:\n st.markdown('Correlation Heatmap')\n st.write(ct_correlation(df, data_col))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MarcelRocha/streamlit-heroku","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32905420191","text":"import numpy as np\n\n# cd audio at 44,100 hz and 16 bits per sample\nSAMPLES_S = 44_100\nBITS_SAMPLE = 16\n\n# wave header constants\nCHUNK_ID = b'RIFF'\nFORMAT = b'WAVE'\nSUBCHUNK_1_ID = b'fmt '\nSUBCHUNK_2_ID = b'data'\n\n# PCM constants\nSUBCHUNK_1_SIZE = (16).to_bytes(4, byteorder = 'little')\nAUDIO_FORMAT = (1).to_bytes(2, byteorder = 'little')\n\ndef create_pcm(frequency):\n ang_freq = 2 * np.pi * frequency\n x_vals = np.arange(SAMPLES_S)\n y_vals = 32767 * .3 * np.sin(ang_freq * x_vals/SAMPLES_S)\n return np.int16(y_vals)\n\ndef wav_wav(channels, filename, *args):\n seconds = len(args)/2\n\n chunk_size = (int(36 + (seconds * SAMPLES_S * BITS_SAMPLE/8))).to_bytes(4, 'little')\n num_channels = (channels).to_bytes(2, byteorder = 'little')\n sample_rate = (SAMPLES_S).to_bytes(4, byteorder = 'little')\n byte_rate = (int(SAMPLES_S * channels * BITS_SAMPLE/8)).to_bytes(4, byteorder = 'little')\n block_align = (int(channels * BITS_SAMPLE/8)).to_bytes(2, byteorder = 'little')\n bits_per_sample = (BITS_SAMPLE).to_bytes(2, byteorder = 'little')\n subchunk_2_size = (int(seconds * SAMPLES_S * BITS_SAMPLE/8)).to_bytes(4, byteorder = 'little')\n\n my_pcm = []\n my_pcm2 = []\n\n #TAKE 'for arg' AND USE TO ADD FREQ TO SONG\n #MAKE PCM OUTSIDE OF FUNC DEF AND REPLACE *ARG WITH IT IN FUNCTION CALL\n for arg in args:\n my_pcm.append(create_pcm(arg))\n\n mat1 = np.array(my_pcm)\n\n for arg in args:\n my_pcm2.append(create_pcm(arg/3))\n mat2 = np.array(my_pcm)\n\n my_final = (mat1 + mat2)/2\n\n with open(f'{filename}.wav', 'wb') as fo:\n fo.write(\n CHUNK_ID +\n chunk_size +\n FORMAT +\n SUBCHUNK_1_ID +\n SUBCHUNK_1_SIZE +\n AUDIO_FORMAT +\n num_channels +\n sample_rate +\n byte_rate +\n block_align +\n bits_per_sample +\n SUBCHUNK_2_ID +\n subchunk_2_size +\n my_final.tobytes()\n )\n","repo_name":"Boboston98/test","sub_path":"test_test.py","file_name":"test_test.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37433717934","text":"import pyautogui as pt #pip install pyautogui\r\nimport pyperclip as pc #pip install pyperclip\r\nfrom pynput.mouse import Button,Controller\r\nfrom time import sleep\r\nimport keyboard # pip install keyboard\r\nimport wikipedia as wiki\r\nimport requests\r\nimport random\r\n\r\npt.FAILSAFE=True\r\nmouse=Controller()\r\n\r\n# Navigate to any image\r\n\r\ndef nav_to_img(image,clicks,off_x=0,off_y=0):\r\n position=pt.locateCenterOnScreen(image,confidence=.8) #.8 is for checking image is matching 80% than it will procced\r\n if position is None:\r\n print(f\"Image not found...\",clicks)\r\n return 0\r\n else:\r\n pt.moveTo(position,duration=.3) #.5 sec.\r\n pt.moveRel(off_x,off_y,duration=.1)\r\n pt.click(clicks=clicks,interval=.1)\r\n \r\n \r\ndef get_message():\r\n nav_to_img('WhatsApp_bot_img\\media.png',0,off_y=-80) #-80 is my pixle from media icon to go up our cursor \r\n mouse.click(Button.left,3) #3 mean number of left-mouse click\r\n pt.rightClick()\r\n copy = nav_to_img('WhatsApp_bot_img\\copy.png',1)#1 mean number of mouse click\r\n sleep(.5)\r\n return pc.paste() if copy !=0 else 0\r\n\r\ndef send_message(msg,*args):\r\n nav_to_img('WhatsApp_bot_img\\media.png',2,off_x=150) #150 is my pixle from media icon to go up our cursor \r\n pt.typewrite(msg,interval=.1)\r\n for i in range(len(args)):\r\n if \"keyboard.press_and_release('shift+enter')\" in i:\r\n keyboard.press_and_release('shift+enter')\r\n else:\r\n # pt.typewrite(f\"{msg}\",interval=.1)\r\n pt.typewrite(args,interval=.1)\r\n \r\n pt.typewrite('\\n') #used as enter to send msg \r\n\r\ndef close_reply_box():\r\n nav_to_img('WhatsApp_bot_img\\cancel.png',2) #2 mean number of mouse click\r\n \r\ndef process_message(msg):\r\n raw_msg=msg.lower()\r\n if raw_msg=='hello' or raw_msg=='hii' or raw_msg=='hi':\r\n x=\"keyboard.press_and_release('shift+enter')\"\r\n y=\"How can I help you\"\r\n # return \"hello there how are you. {keyboard.press_and_release('shift+enter')} How can I help you.\"\r\n print(\"hello there how are you\",x,y)\r\n return \"hello there how are you\",x,y\r\n elif raw_msg==\"yes\":\r\n return \"Bot says you wrote yes.\"\r\n elif 'how are you' in raw_msg or 'fine and you' in raw_msg or 'good and you' in raw_msg or 'what about you' in raw_msg:\r\n return f\"I'm also fine, Thankyou for asking :) {keyboard.press_and_release('shift+enter')} How can I help you.\"\r\n elif ('what' in raw_msg or 'who' in raw_msg or 'when' in raw_msg or 'how' in raw_msg or 'where' in raw_msg or 'why' in raw_msg):\r\n info =wiki.summary(msg,2) #2 mean number of line\r\n return f\"according to wikipedia {info}\"\r\n elif 'thankyou' in raw_msg or 'thank you' in raw_msg:\r\n return \"It's my pleasure that I am able to help you\"\r\n # elif ''\r\n else:\r\n return \"I did not understand what you wrote.\"\r\ndef open_wp():\r\n keyboard.press_and_release('win')\r\n sleep(1)\r\n nav_to_img(r'WhatsApp_bot_img\\search.png',1)\r\n pt.typewrite('Whatsapp',interval=.1)\r\n sleep(1)\r\n nav_to_img(r\"WhatsApp_bot_img\\whatsapp_logo.png\",2,off_y=50)\r\n # sleep(10)\r\n \r\n\r\nlast_msg=''\r\n\r\n\r\n#opening whatsapp\r\nsleep(2)\r\nopen_wp()\r\nsleep(12)\r\nwhile True:\r\n \r\n # check for new messages\r\n nav_to_img(r\"WhatsApp_bot_img\\unread.png\",2,off_x=-150)\r\n # close_reply_box()\r\n messgae=get_message()\r\n sleep(.2)\r\n if messgae!=0 and messgae!=last_msg:\r\n last_msg=messgae\r\n send_message(process_message(messgae))\r\n else:\r\n print('There are no new messages')\r\n close_reply_box()\r\n sleep(5) #sleep for 10 sec than again looping start\r\n \r\n\r\n\r\n\r\n\r\n \r\n# sleep(3)\r\n # checking all the function is working or not\r\n \r\n# nav_to_img(r\"PYTHON\\Projects\\AI\\WhatsApp_Bot\\WhatsApp_bot_img\\unread.png\",0)\r\n# nav_to_img('WhatsApp_bot_img\\cancel.png',0)\r\n# nav_to_img('WhatsApp_bot_img\\media.png',0)\r\n# nav_to_img('WhatsApp_bot_img\\media_1.png',0) #backup_img for media icon\r\n# nav_to_img('WhatsApp_bot_img\\copy.png',0)\r\n\r\n# nav_to_img(r\"WhatsApp_bot_img\\unread_light.png\",0)\r\n# nav_to_img('WhatsApp_bot_img\\media_light.png',0)\r\n# nav_to_img('WhatsApp_bot_img\\media_1__light.png',0)\r\n# nav_to_img('WhatsApp_bot_img\\cancel_light.png',0)","repo_name":"subh-sk/WhatsApp-Bot","sub_path":"WhatsApp_Bot.py","file_name":"WhatsApp_Bot.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14475108535","text":"\"\"\"\nScript for generating and saving shape geometries, stiffness distributions, and strain fields.\n\nThis script creates shape geometries based on given node coordinates, calculates stiffness\ndistributions around various gripper positions, and simulates strain fields due to material\nproperties and external forces.\n\"\"\"\n\nimport numpy as np\nimport os\nimport json\nimport matplotlib.image as mpli\nimport scipy.interpolate as spi\nimport pandas as pd\nfrom tqdm import tqdm\n\npath = os.path.dirname(\"__file__\")\ndata_path = os.path.join(path, \"data\")\n\ndata_dics = {\n \"doubledome\": {\n \"folder_path\": os.path.join(path, \"Austausch_IPD_Optimierung\"),\n \"shape_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"gen_greyscale_image_tool\"),\n \"spring_coords_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"spring_coords\"),\n \"u_spring_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"u_spring\"),\n \"el_coords_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"gen_greyscale_shear\"),\n \"train_data\": {\n \"x_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"XY-Simulationsdaten_train\", \"x_vals.npy\"),\n \"y_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"XY-Simulationsdaten_train\", \"y_vals.npy\"),\n },\n \"val_data\": {\n \"x_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"XY-Simulationsdaten_val\", \"x_vals.npy\"),\n \"y_data_path\": os.path.join(path, \"Austausch_IPD_Optimierung\", \"XY-Simulationsdaten_val\", \"y_vals.npy\"),\n }\n },\n \"L_shape\": {\n \"folder_path\": os.path.join(path, \"2023_06\"),\n \"shape_data_path\": os.path.join(path, \"2023_06\", \"01_Generate_Tool_Greyscale_Image\"),\n \"u_spring_data_path\": os.path.join(path, \"2023_06\", \"04_Spring_Elongation\"),\n \"el_coords_path\": os.path.join(path, \"2023_06\", \"02_Generate_Shear_Greyscale_Image\"),\n \"train_data\": {\n \"x_data_path\": os.path.join(path, \"2023_06\", \"03_Spring_Stiffnesses\", \"X-Vals_-_Spring_stiffnesses.npy\"),\n \"y_data_path\": os.path.join(path, \"2023_06\", \"02_Generate_Shear_Greyscale_Image\",\n \"Y-Vals_-_ShearAngles_L-Winkel.npy\"),\n }\n }\n}\n\n\ndef create_shape(shape_name, dic_paths):\n \"\"\"\n Create a shape image using greyscale parameters.\n \"\"\"\n shape_data_path = os.path.join(data_path, shape_name, \"shape\")\n os.makedirs(shape_data_path, exist_ok=True)\n\n image_tool_path = dic_paths[\"shape_data_path\"]\n abs_path_gs_json = os.path.join(image_tool_path, \"tool_greyscale.json\")\n abs_path_nodes = os.path.join(image_tool_path, f\"nodes_{shape_name}.npy\")\n abs_path_gs_img = os.path.join(shape_data_path, \"shape.png\")\n abs_path_gs_matrix = os.path.join(shape_data_path, \"shape.npy\")\n\n # load greyscale parameters:\n with open(abs_path_gs_json, \"r\") as gs_json:\n gs_param = json.load(gs_json)\n\n # load nodes of geometry:\n nodes_xyz = np.load(abs_path_nodes)\n\n x_min = gs_param[\"bb_x_min\"]\n x_max = gs_param[\"bb_x_max\"]\n y_min = gs_param[\"bb_y_min\"]\n y_max = gs_param[\"bb_y_max\"]\n num_pxl_x = gs_param[\"num_pxl_x\"]\n num_pxl_y = gs_param[\"num_pxl_y\"]\n h_min = gs_param[\"h_min\"]\n h_max = gs_param[\"h_max\"]\n\n x_grid = np.linspace(x_min, x_max, num_pxl_x)\n y_grid = np.linspace(y_min, y_max, num_pxl_y)\n pix_x, pix_y = np.meshgrid(x_grid, y_grid)\n\n if np.any(nodes_xyz): # Check if any non-zero values in array (i.e. valid mesh):\n nodes_xyz = np.unique(nodes_xyz, axis=0)\n nodes_xy = nodes_xyz[:, 0:2]\n nodes_z = nodes_xyz[:, 2]\n\n pix_gs = spi.griddata(nodes_xy, nodes_z, (pix_x, pix_y), method=\"linear\")\n else:\n pix_gs = np.zeros((32, 32))\n\n mask = pix_gs < 0\n pix_gs[mask] = 0\n\n mpli.imsave(abs_path_gs_img, pix_gs, cmap=\"gray\", vmin=h_min, vmax=h_max)\n np.save(abs_path_gs_matrix, pix_gs)\n\n\nfor shape_name, dic_paths in data_dics.items():\n create_shape(shape_name, dic_paths)\n\ngrippers_path = data_dics[\"doubledome\"][\"spring_coords_path\"]\ngrippers_directory_coordinates_xy = np.load(grippers_path + '/spring_dir_of_attack_coords.npy')[:2]\ngrippers_point_coordinates_xy = np.load(grippers_path + '/spring_pnt_of_attack_coords.npy')[:2]\n\nencoding_data_path = os.path.join(data_path, \"encoding\")\nos.makedirs(encoding_data_path, exist_ok=True)\n\n\ndef stiffness_distribution(x_0, y_0, x_dir, y_dir):\n \"\"\"\n Calculate the stiffness distribution for given spring parameters.\n \"\"\"\n x_max = 300\n y_max = 460\n\n E_1 = 140.0\n E_2 = 10.0\n nu_12 = 0.3\n G_12 = 0.1\n\n Q_11 = E_1 / (1 - nu_12 ** 2 * E_2 / E_1)\n Q_12 = nu_12 * E_2 / (1 - nu_12 ** 2 * E_2 / E_1)\n Q_22 = E_2 / (1 - nu_12 ** 2 * E_2 / E_1)\n Q_33 = G_12\n\n def calculate_q11(x, y, _x_0, _y_0):\n eps = 0.1 # prevent division by zero\n if x_dir == -10 and y_dir == 0 and _x_0 == 0: # spring on left\n alpha_rad = np.arctan((y - _y_0) / (x + eps))\n elif x_dir == 10 and y_dir == 0 and _x_0 == x_max: # spring on right\n alpha_rad = np.arctan((y - _y_0) / (x - _x_0 + eps)) + np.pi\n elif x_dir == 0 and y_dir == -10 and _y_0 == 0: # spring on bottom\n x, y = y, x\n _x_0, _y_0 = _y_0, _x_0\n alpha_rad = np.arctan((y - _y_0) / (x + eps))\n elif x_dir == 0 and y_dir == 10 and _y_0 == y_max: # spring on top\n x, y = y, x\n _x_0, _y_0 = _y_0, _x_0\n alpha_rad = np.arctan((y - _y_0) / (x - _x_0 + eps)) + np.pi\n else:\n raise ValueError('Invalid spring configuration!')\n q_vec = np.array([Q_11, Q_12, Q_22, Q_33])\n cs_vec = np.array([\n np.cos(alpha_rad) ** 4,\n 2 * np.cos(alpha_rad) ** 2 * np.sin(alpha_rad) ** 2,\n np.sin(alpha_rad) ** 4,\n 4 * np.cos(alpha_rad) ** 2 * np.sin(alpha_rad) ** 2\n ])\n q_11s = np.inner(cs_vec, q_vec)\n r = np.sqrt((x - _x_0) ** 2 + (y - _y_0) ** 2)\n len_aff_inf = 30\n len_aff_0 = 10\n r_ref = 200\n len_aff = (1 - np.exp(-(r / r_ref))) * (len_aff_inf - len_aff_0) + len_aff_0\n kd = np.exp(-((y - _y_0) / len_aff) ** 2)\n\n q_11s_kd = kd * q_11s / Q_11\n return q_11s_kd\n\n _xx = np.arange(0, x_max)\n _yy = np.arange(0, y_max)\n xg, yg = np.meshgrid(_xx, _yy)\n\n q_rel = np.array([calculate_q11(x, y, x_0, y_0) for x, y in zip(xg.ravel(), yg.ravel())]).reshape(\n xg.shape)\n\n return q_rel\n\n\nfor i in tqdm(range(len(grippers_point_coordinates_xy[0]))):\n distrib = stiffness_distribution(grippers_point_coordinates_xy[0][i] + 150,\n grippers_point_coordinates_xy[1][i] + 230,\n grippers_directory_coordinates_xy[0][i],\n grippers_directory_coordinates_xy[1][i])\n abs_path_gs_img = os.path.join(encoding_data_path, f\"encoding_{i}.png\")\n abs_path_gs_matrix = os.path.join(encoding_data_path, f\"encoding_{i}.npy\")\n mpli.imsave(abs_path_gs_img, distrib, cmap=\"gray\", vmin=0, vmax=1)\n np.save(abs_path_gs_matrix, distrib)\n\n\ndef create_strain_field(shape_name, dic_paths, data_type, gamma, image_name):\n \"\"\"\n Create a strain field image using the provided data.\n \"\"\"\n strain_field_data_path = os.path.join(data_path, shape_name, \"strain_field\", data_type)\n os.makedirs(strain_field_data_path, exist_ok=True)\n\n shear_path = dic_paths[\"el_coords_path\"]\n el_coords_xyz = np.load(os.path.join(shear_path, 'el_coords_xyz.npy'))\n\n abs_path_out_image = os.path.join(strain_field_data_path, image_name + '.png')\n abs_path_out_matrix = os.path.join(strain_field_data_path, image_name + '.npy')\n\n x_min = -150.0\n x_max = 150.0\n num_pxl_x = 300\n\n y_min = -230.0\n y_max = 230.0\n num_pxl_y = 460\n\n h_min = 0\n h_max = 90.0\n\n x_grid = np.linspace(x_min, x_max, num_pxl_x)\n y_grid = np.linspace(y_min, y_max, num_pxl_y)\n pix_x, pix_y = np.meshgrid(x_grid, y_grid)\n\n nodes_xy = el_coords_xyz[:2, :].T\n pix_gs = spi.griddata(nodes_xy, gamma, (pix_x, pix_y), method='linear')\n\n # fill in NaN values\n x_ind, y_ind = np.indices(pix_gs.shape)\n missing = np.isnan(pix_gs)\n points = np.column_stack([x_ind[~missing], y_ind[~missing]])\n values = pix_gs[~missing]\n pix_gs[missing] = spi.griddata(points, values, (x_ind[missing], y_ind[missing]), method='nearest')\n\n mpli.imsave(abs_path_out_image, pix_gs, cmap='gray', vmin=h_min, vmax=h_max)\n np.save(abs_path_out_matrix, pix_gs)\n\n\n# Material properties\nE_1 = 140.0 # in N/mm² --> in direction of the springs --> x-direction here\nE_2 = 10.0 # in N/mm² --> transverse direction of the springs\nnu_12 = 0.3 # Poissons ratio\nG_12 = 0.1 # Shear modulus\n\n# Calculate stiffnesses before rotation\nQ_11 = E_1 / (1 - nu_12 ** 2 * E_2 / E_1)\nQ_12 = nu_12 * E_2 / (1 - nu_12 ** 2 * E_2 / E_1)\nQ_22 = E_2 / (1 - nu_12 ** 2 * E_2 / E_1)\nQ_33 = G_12\n\nfor shape_name, dic_paths in data_dics.items():\n for data_type in ['train', 'val']:\n if f'{data_type}_data' not in dic_paths.keys():\n continue\n\n simulated_forces = np.load(dic_paths[f\"{data_type}_data\"][\"x_data_path\"])\n simulated_angels = np.load(dic_paths[f\"{data_type}_data\"][\"y_data_path\"])\n u_springs_path = os.path.join(dic_paths[\"u_spring_data_path\"], f\"U_spring_{shape_name}.csv\")\n u_spring_data = pd.read_csv(u_springs_path)\n\n n_grippers = len(grippers_point_coordinates_xy[0])\n n_angels = len(simulated_angels[0])\n\n columns = ['stamp_shape_image_path', 'stamp_shape_matrix_path'] + \\\n [f'gripper_x_{i}' for i in range(n_grippers)] + \\\n [f'gripper_y_{i}' for i in range(n_grippers)] + \\\n [f'gripper_dir_x_{i}' for i in range(n_grippers)] + \\\n [f'gripper_dir_y_{i}' for i in range(n_grippers)] + \\\n [f'gripper_force_{i}' for i in range(n_grippers)] + \\\n [f'gripper_length_{i}' for i in range(n_grippers)] + \\\n [f'gripper_encoding_image_path_{i}' for i in range(n_grippers)] + \\\n [f'gripper_encoding_matrix_path_{i}' for i in range(n_grippers)] + \\\n ['characteristic_e_1', 'characteristic_e_2', 'characteristic_nu_12', 'characteristic_g_12'] + \\\n ['stiffness_q_11', 'stiffness_q_12', 'stiffness_q_22', 'stiffness_q_33'] + \\\n [f'angle_{i}' for i in range(n_angels)] + \\\n ['strain_field_image_path', 'strain_field_matrix_path']\n\n data = pd.DataFrame(columns=columns)\n\n for idx, forces in tqdm(enumerate(simulated_forces), total=len(simulated_forces)):\n angles = simulated_angels[idx]\n image_name = f'strain_field_{idx}'\n create_strain_field(shape_name, dic_paths, data_type, angles, image_name)\n data.loc[idx] = [f'data/{shape_name}/shape/shape.png', f'data/{shape_name}/shape/shape.npy'] + \\\n list(grippers_point_coordinates_xy[0] + 150) + \\\n list(grippers_point_coordinates_xy[1] + 230) + \\\n list(grippers_directory_coordinates_xy[0]) + \\\n list(grippers_directory_coordinates_xy[1]) + \\\n list(forces) + \\\n u_spring_data['u'].tolist() + \\\n [f'data/encoding/encoding_{i}.png' for i in range(n_grippers)] + \\\n [f'data/encoding/encoding_{i}.npy' for i in range(n_grippers)] + \\\n [E_1, E_2, nu_12, G_12] + \\\n [Q_11, Q_12, Q_22, Q_33] + \\\n list(angles) + \\\n [f'data/{shape_name}/strain_field/{data_type}/{image_name}.png',\n f'data/{shape_name}/strain_field/{data_type}/{image_name}.npy']\n\n data.to_csv(os.path.join(data_path, shape_name, f'{data_type}.csv'), index=False)\n data = data.filter(regex=\"stamp_shape_matrix_path|gripper_force|gripper_length|strain_field_matrix_path\")\n data.to_csv(os.path.join(data_path, shape_name, f'{data_type}_short.csv'), index=False)\n","repo_name":"eismont21/knowledge-surrogate-opt","sub_path":"create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":12274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39306627657","text":"# Gomoku Main Game\nfrom __future__ import print_function\nimport numpy as np\nimport argparse\n\n# parse the input parameters\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--width',type = int)\nparser.add_argument('--height',type = int)\n\nargs = parser.parse_args()\n\nWIDTH = args.width\nHEIGHT = args.height\n\nclass Board(object):\n \"\"\"\n board for the game\n like a x-y Cartesian coordinate system\n the (0,0) is 0 and coming to the right\n each row/column starts with 0(array-like)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.width = int(kwargs.get('width', 8))\n self.height = int(kwargs.get('height', 8))\n # kwargs is a parameter in the form of dictionary\n # key: move as location on the board,\n # value: player as pieces type\n self.states = {} # board states stored as a dict,\n self.n_in_row = int(kwargs.get('n_in_row', 5)) # need how many pieces in a row to win\n self.players = [1, 2] # player1 and player2\n\n def init_board(self, start_player=0):\n if self.width < self.n_in_row or self.height < self.n_in_row:\n raise Exception('board width and height can not be '\n 'less than {}'.format(self.n_in_row))\n\n self.current_player = self.players[start_player] # start player\n # keep available moves in a list\n self.availables = list(range(self.width * self.height)) # a list including all the MOVE in the board starting from 0\n self.states = {}\n self.last_move = -1\n\n # move is a single value marking the position of the current point, while location is a (x,y) location\n def move_to_location(self, move):\n \"\"\"\n 3*3 board's moves like:\n 6 7 8\n 3 4 5\n 0 1 2\n and move 5's location is (1,2)\n \"\"\"\n h = move // self.width #return the integer part of the result after the divide\n w = move % self.width\n return [h, w]\n\n def location_to_move(self, location):\n if len(location) != 2:\n return -1\n h = location[0]\n w = location[1]\n move = h * self.width + w\n if move not in range(self.width * self.height):\n return -1\n return move\n\n ######################\n def current_state(self):\n \"\"\"return the board state from the perspective of the current player.\n state shape: 4*width*height --- a 4 level matrix\n \"\"\"\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n # 0 stores cur move\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # 1 stores oppo move\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n # 2 indicate last move\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n # all 3 are 1\n return square_state[:, ::-1, :]\n\n def do_move(self, move):\n # since states is a dictionary, so each time we do a move\n # we select from the dict by the move as a key and insert current_player\n # to mark that the player has put a pawn here.\n self.states[move] = self.current_player\n self.availables.remove(move)\n self.current_player = (\n # switch players\n self.players[0] if self.current_player == self.players[1]\n else self.players[1]\n )\n self.last_move = move\n\n def has_a_winner(self):\n width = self.width\n height = self.height\n states = self.states\n n = self.n_in_row\n\n moved = list(set(range(width * height)) - set(self.availables))\n ## find the difference between two sets\n if len(moved) < self.n_in_row + 2:\n return False, -1\n # if there is some space left, game continue\n\n for m in moved:\n # for every used space\n h = m // width\n w = m % width\n player = states[m]\n\n #row\n if (w in range(width - n + 1) and\n len(set(states.get(i, -1) for i in range(m, m + n))) == 1):\n # here we use the length of the set, it means only unqiue value could appears here\n # so if len()==1 it means there are n pawns belongs to same player\n # note 3 states: empty(-1)/player1/player2\n return True, player\n #column\n if (h in range(height - n + 1) and\n len(set(states.get(i, -1) for i in range(m, m + n * width, width))) == 1):\n return True, player\n #x=y direction\n if (w in range(width - n + 1) and h in range(height - n + 1) and\n len(set(states.get(i, -1) for i in range(m, m + n * (width + 1), width + 1))) == 1):\n return True, player\n #x=-y direction\n if (w in range(n - 1, width) and h in range(height - n + 1) and\n len(set(states.get(i, -1) for i in range(m, m + n * (width - 1), width - 1))) == 1):\n return True, player\n\n return False, -1\n\n def game_end(self):\n \"\"\"Check whether the game is ended or not\"\"\"\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1\n\n def get_current_player(self):\n return self.current_player\n\n\nclass Game(object):\n \"\"\"game server\"\"\"\n\n def __init__(self, board, **kwargs):\n self.board = board\n\n def graphic(self, board, player1, player2):\n \"\"\"Draw the board and show game info\"\"\"\n width = board.width\n height = board.height\n\n print(\"Player\", player1, \"with X\".rjust(3))\n print(\"Player\", player2, \"with O\".rjust(3))\n print()\n for x in range(width):\n print(\"{0:8}\".format(x), end='')\n print('\\r\\n')\n for i in range(height - 1, -1, -1): # print from top to the bottom\n print(\"{0:4d}\".format(i), end='')\n for j in range(width):\n loc = i * width + j\n p = board.states.get(loc, -1)\n if p == player1:\n print('X'.center(8), end='')\n elif p == player2:\n print('O'.center(8), end='')\n else:\n print('_'.center(8), end='')\n print('\\r\\n\\r\\n')\n\n def start_play(self, player1, player2, start_player=0, is_shown=1):\n \"\"\"start a game between two players\"\"\"\n if start_player not in (0, 1):\n raise Exception('start_player should be either 0 (player1 first) '\n 'or 1 (player2 first)')\n self.board.init_board(start_player)\n p1, p2 = self.board.players\n player1.set_player_ind(p1)\n player2.set_player_ind(p2)\n players = {p1: player1, p2: player2}\n if is_shown:\n self.graphic(self.board, player1.player, player2.player)\n while True:\n current_player = self.board.get_current_player()\n player_in_turn = players[current_player]\n move = player_in_turn.get_action(self.board)\n self.board.do_move(move)\n if is_shown:\n self.graphic(self.board, player1.player, player2.player)\n end, winner = self.board.game_end()\n if end:\n if is_shown:\n if winner != -1:\n print(\"Game end. Winner is\", players[winner])\n else:\n print(\"Game end. Tie\")\n return winner\n\nclass player(object):\n \"\"\"\n human player\n \"\"\"\n\n def __init__(self):\n self.player = None\n\n def set_player_ind(self, p):\n self.player = p\n\n def get_action(self, board):\n try:\n location = input(\"Your move: \")\n if isinstance(location, str): # for python3\n location = [int(n, 10) for n in location.split(\",\")]\n move = board.location_to_move(location)\n except Exception as e:\n move = -1\n\n if move == -1 or move not in board.availables:\n print(\"invalid move\")\n move = self.get_action(board)\n return move\n\n def __str__(self):\n return \"Human {}\".format(self.player)\n\ndef run():\n n = 5\n width = WIDTH\n height = HEIGHT\n if not width:\n width = 9\n if not height:\n height = 9\n try:\n board = Board(width=width, height=height, n_in_row=n)\n game = Game(board)\n player1 = player()\n player2 = player()\n # set start_player=0 for human first\n game.start_play(player1, player2, start_player=1, is_shown=1)\n except KeyboardInterrupt:\n print('\\n\\rquit')\n\nif __name__ == '__main__':\n run()\n\n","repo_name":"CesareMJLi/terminalPVPGomoku","sub_path":"Gomoku.py","file_name":"Gomoku.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9483754359","text":"listOfTu = input().split()\r\n\r\n'''\r\nTính từ nam: -lios\r\nTính từ nữ: -liala\r\nDanh từ nam: -etr\r\nDanh từ nữ: -etra\r\nĐộng từ nam: -initis\r\nĐộng từ nữ: -inites\r\n\r\nTính từ: adj\r\nDanh từ: noun\r\nĐộng từ: verb\r\n\r\nNam: 0\r\nNữ: 1\r\n\r\nPhần tử trong list: (từ, độ dài, từ loại, giới tính)\r\n\r\nreturn (Từ loại, giới tính)\r\n'''\r\n\r\nlistOfTuLoai = [('lios', 4, 10, 0), ('liala', 5, 10, 1),\r\n\t\t\t\t('etr', 3, 20, 0), ('etra', 4, 20, 1),\r\n\t\t\t\t('initis', 6, 30, 0), ('inites', 6, 30, 1)]\r\n\r\ndef xacDinhTuLoai(word, listOfTuLoai):\r\n\tfor rule in listOfTuLoai:\r\n\t\ttemp = word\r\n\t\tlength = rule[1]\r\n\t\ttemp = temp[-length:]\r\n\r\n\t\tif temp == rule[0]:\r\n\t\t\treturn (rule[2], rule[3])\r\n\r\n\treturn None\r\n\r\ndef exitModified():\r\n\tprint('NO')\r\n\texit()\r\n\r\n\r\nif len(listOfTu) == 1:\r\n\tresult = xacDinhTuLoai(listOfTu[0], listOfTuLoai)\r\n\r\n\tif result != None:\r\n\t\tprint('YES')\r\n\telse:\r\n\t\tprint('NO')\r\n\r\nelse:\r\n\tlistOfResult = []\r\n\r\n\tfor word in listOfTu:\r\n\t\tresult = xacDinhTuLoai(word, listOfTuLoai)\r\n\r\n\t\tif result == None:\r\n\t\t\texitModified()\r\n\r\n\t\tlistOfResult.append(result)\r\n\r\n\tfoundNoun = False\r\n\tcurrentTuLoai = 10\r\n\tcurrentGioiTinh = 0\r\n\r\n\tfor ketQua in listOfResult:\r\n\t\tif ketQua[0] == 30 and foundNoun == False:\r\n\t\t\texitModified()\r\n\t\tif ketQua[0] > currentTuLoai:\r\n\t\t\tcurrentTuLoai = ketQua[0]\r\n\t\tif ketQua[0] < currentTuLoai:\r\n\t\t\texitModified()\r\n\t\tif ketQua[0] == 20:\r\n\t\t\tif foundNoun:\r\n\t\t\t\texitModified()\r\n\t\t\telse:\r\n\t\t\t\tcurrentTuLoai = ketQua[0]\r\n\t\t\t\tfoundNoun = True\r\n\r\n\t\tcurrentGioiTinh += ketQua[1]\r\n\r\n\tif currentGioiTinh == 0 or currentGioiTinh == len(listOfResult):\r\n\t\tprint('YES')\r\n\telse:\r\n\t\tprint('NO')\r\n\r\n\r\n\r\n","repo_name":"hhanhvu99/Personal-CS114.L21.KHCL","sub_path":"Tuan 1.2/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19778155632","text":"import random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\naX = 0\r\naY = 0\r\naZ = 0\r\nbX = 2*np.pi\r\nbY = 2*np.pi\r\nbZ = 2*np.pi\r\nN = 10000\r\nn = 1000\r\nintegrals = []\r\n\r\ndef f(x, y, z):\r\n return (np.exp(np.sin(x*y*z)))\r\n\r\nfor i in range(n):\r\n sumf = 0\r\n for i in range(N):\r\n xi = random.uniform(aX, bX)\r\n yi = random.uniform(aY, bY)\r\n zi = random.uniform(aZ, bZ)\r\n sumf += f(xi, yi, zi)\r\n integral = ((bX-aX)*(bY-aY)*(bZ-aZ)*sumf)/N\r\n integrals.append(integral)\r\n\r\nplt.title(\"Distribution of the integrals\")\r\nplt.hist(integrals,bins=40)\r\nplt.xlabel(\"Integrals\")\r\nplt.show()\r\n\r\navgIntegral = sum(integrals)/len(integrals)\r\nprint(\"The value of integration is \",avgIntegral)\r\n","repo_name":"theeemanuel/math","sub_path":"random variables/MonteCarloIntegration.py","file_name":"MonteCarloIntegration.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20627754633","text":"import numpy as np\nimport cv2\n\n\nvideo_capture = cv2.VideoCapture('video-0.avi')\n\n\nwidth = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\nret, frame = video_capture.read()\n\ngray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\nedges = cv2.Canny(gray, 50, 150, apertureSize=3)\nminLineLength = 100\nmaxLineGap = 10\nlines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength, maxLineGap)\n\nx1_min, y2_min, x2_max, y1_max = 50000, 50000, -1, -1\n\nfor line in lines:\n x11, y11, x22, y22 = line[0]\n if x11 < x1_min:\n x1_min = x11\n if y11 > y1_max:\n y1_max = y11\n if x22 > x2_max:\n x2_max = x22\n if y22 < y2_min:\n y2_min = y22\n\nwhile True:\n ret, frame = video_capture.read()\n\n cv2.line(frame, (x1_min, y1_max), (x2_max, y2_min), (0, 0, 255), thickness=2)\n if ret:\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, 'ZBIR: ', (width-150, height-20), font, 1, (255, 255, 255), 5, cv2.LINE_AA)\n cv2.imshow('Video', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n","repo_name":"JovanaJelisavcic/VideoAnalysis","sub_path":"loadVideos.py","file_name":"loadVideos.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22335686581","text":"from django.shortcuts import render\r\nfrom .models import Topic,Entry\r\nfrom .forms import TopicForm, EntryForm\r\nfrom django.shortcuts import redirect\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.http import Http404\r\n\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n \"\"\"home page for learning log\"\"\"\r\n return render(request,'learning_logs/index.html')\r\n\r\n@login_required\r\ndef topics(request):\r\n\r\n \"\"\" show all the topics here.\"\"\"\r\n topics = Topic.objects.filter(owner=request.user).order_by('date_added')\r\n context = {'topics':topics}\r\n return render(request, 'learning_logs/topics.html', context)\r\n\r\n@login_required\r\ndef topic(request,topic_id):\r\n\r\n \"\"\" details of al entries regarding a topics \"\"\"\r\n topic = Topic.objects.get(id=topic_id)\r\n entries = topic.entry_set.order_by('-date_added')\r\n context = {'topic':topic, 'entries': entries}\r\n return render(request,'learning_logs/topic.html',context)\r\n # to make sure request belongs to the valid user\r\n if topic.owner != request.user:\r\n raise Http404\r\n\r\n@login_required\r\ndef new_topic(request):\r\n \"\"\" details regarding the new topic form\"\"\"\r\n if request.method != 'POST':\r\n #no data is submitted, generate an empty form\r\n form = TopicForm()\r\n else:\r\n #Submit button is clicked hence we need to verify if the entered data is valid\r\n form = TopicForm(data=request.POST)\r\n if form.is_valid():\r\n new_topic = form.save(commit=False)\r\n new_topic.owner = request.user\r\n new_topic.save()\r\n return redirect('learning_logs:topics')\r\n\r\n \r\n #this means the entered form is invalid hence a display message\r\n context = {'form': form}\r\n return render(request, 'learning_logs/new_topic.html' , context)\r\n\r\n@login_required\r\ndef new_entry(request, topic_id):\r\n topic = Topic.objects.get(id=topic_id)\r\n \"\"\"details regarding the new entry related to the topic\"\"\"\r\n if request.method != 'POST':\r\n #no data is submitted , generate an empty form\r\n form = EntryForm()\r\n \r\n else:\r\n #submit button is clicked\r\n form = EntryForm(data=request.POST)\r\n if form.is_valid():\r\n new_entry = form.save(commit=False)\r\n new_entry.topic = topic\r\n new_entry.save()\r\n return redirect('learning_logs:topic', topic_id=topic.id)\r\n \r\n \r\n #if entry is invalid ence a display message\r\n context = {'topic': topic,'form': form}\r\n return render(request, 'learning_logs/new_entry.html', context)\r\n\r\n\r\n@login_required\r\ndef edit_entry(request,entry_id):\r\n\r\n entry = Entry.objects.get(id=entry_id)\r\n topic = entry.topic\r\n # to make sure request belongs to the valid user\r\n if topic.owner != request.user:\r\n raise Http404\r\n\r\n \"\"\"Detauls regarding editing an entry made\"\"\"\r\n if request.method != 'POST':\r\n #display the entry made here \r\n form = EntryForm(instance=entry)\r\n else:\r\n #edit entry is submitted we need to replace the old entry by a new one\r\n form = EntryForm(instance=entry, data=request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('learning_logs:topic',topic_id=topic.id)\r\n \r\n #if entry is invalid then we need to display message\r\n context = {'entry': entry, 'topic': topic, 'form': form}\r\n return render(request, 'learning_logs/edit_entry.html', context)\r\n","repo_name":"HariniBooravalli/Learning_Log","sub_path":"src/learning_logs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9337259736","text":"from setuptools import setup, find_packages\n\n\ndef get_dependencies(file):\n return [dep.strip() for dep in open(file).readlines()]\n\n\nsetup(\n name=\"simple_api\",\n version=\"0.1.0\",\n description=\"A template to make API's.\",\n packages=find_packages(exclude=\".env\"),\n include_package_data=True,\n install_requires=get_dependencies(\"requirements.txt\"),\n extras_require={\"dev\": get_dependencies(\"requirements-dev.txt\")},\n)","repo_name":"IsaiasDimitri/flask-simple-api-structure","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31030054413","text":"import torch\nimport torch.nn as nn\nfrom modules import Encoder, LayerNorm, Decoder, VariationalDropout\nimport math\nimport numpy as np\nimport random\nimport pdb\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch import nn\n\nclass PositionalEncoding(nn.Module):\n \n def __init__(self, d_model, max_len):\n \"\"\"\n sin, cos encoding 구현\n \n parameter\n - d_model : model의 차원\n - max_len : 최대 seaquence 길이\n - device : cuda or cpu\n \"\"\"\n \n super(PositionalEncoding, self).__init__() # nn.Module 초기화\n \n # input matrix(자연어 처리에선 임베딩 벡터)와 같은 size의 tensor 생성\n # 즉, (max_len, d_model) size\n self.encoding = torch.zeros(max_len, d_model)\n self.encoding.requires_grad = False # 인코딩의 그래디언트는 필요 없다. \n \n # 위치 indexing용 벡터\n # pos는 max_len의 index를 의미한다.\n pos = torch.arange(0, max_len).unsqueeze(dim=1)\n # 1D : (max_len, ) size -> 2D : (max_len, 1) size -> word의 위치를 반영하기 위해\n\n# pos = pos.float().unsqueeze(dim=1) # int64 -> float32 (없어도 되긴 함)\n \n # i는 d_model의 index를 의미한다. _2i : (d_model, ) size\n # 즉, embedding size가 512일 때, i = [0,512]\n _2i = torch.arange(0, d_model, step=2).float()\n # (max_len, 1) / (d_model/2 ) -> (max_len, d_model/2)\n\n self.encoding[:, ::2] = torch.sin(pos / 10000 ** (_2i / d_model))\n self.encoding[:, 1::2] = torch.cos(pos / 10000 ** (_2i / d_model))\n \n\n def forward(self, x):\n # self.encoding\n # [max_len = 512, d_model = 512]\n\n # batch_size = 128, seq_len = 30\n batch_size, seq_len = x.size()\n \n # [seq_len = 30, d_model = 512]\n # [128, 30, 512]의 size를 가지는 token embedding에 더해질 것이다. \n # \n return self.encoding[:seq_len, :]\n \n\nclass ContrastVAE(nn.Module):\n\n def __init__(self, args):\n super(ContrastVAE, self).__init__()\n self.mode = None\n self.item_embeddings = nn.Embedding(args.item_size, args.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(args.max_seq_length, args.hidden_size) # position vector 까지 더해줌 \n self.position_encoding = PositionalEncoding(args.hidden_size,args.max_seq_length)\n self.item_encoder_mu = Encoder(self.mode,args) # transformer encoder\n self.item_encoder_logvar = Encoder(self.mode,args)\n self.item_decoder = Decoder(self.mode,args)\n self.LayerNorm = LayerNorm(args.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(args.hidden_dropout_prob)\n self.args = args\n self.latent_dropout = nn.Dropout(args.reparam_dropout_rate)\n self.apply(self.init_weights)\n self.temperature = nn.Parameter(torch.zeros(1), requires_grad=True)\n\n def add_position_embedding(self, sequence):\n\n seq_length = sequence.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=sequence.device)\n position_ids = position_ids.unsqueeze(0).expand_as(sequence)\n item_embeddings = self.item_embeddings(sequence) # shape: b*max_Sq*d\n \n position_embeddings = self.position_embeddings(position_ids)\n position_encoding = self.position_encoding(sequence)\n if self.args.encoding :\n sequence_emb = item_embeddings.cuda() + position_encoding.cuda() \n else:\n sequence_emb = item_embeddings + position_embeddings\n sequence_emb = self.LayerNorm(sequence_emb)\n sequence_emb = self.dropout(sequence_emb)\n\n return sequence_emb # shape: b*max_Sq*d\n\n\n def extended_attention_mask(self, input_ids):\n attention_mask = (input_ids > 0).long()# used for mu, var\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # torch.int64 b*1*1*max_Sq\n max_len = attention_mask.size(-1)\n attn_shape = (1, max_len, max_len)\n subsequent_mask = torch.triu(torch.ones(attn_shape), diagonal=1) # torch.uint8 for causality\n subsequent_mask = (subsequent_mask == 0).unsqueeze(1) #1*1*max_Sq*max_Sq\n subsequent_mask = subsequent_mask.long()\n\n if self.args.cuda_condition:\n subsequent_mask = subsequent_mask.cuda()\n\n extended_attention_mask = extended_attention_mask * subsequent_mask #shape: b*1*max_Sq*max_Sq\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n return extended_attention_mask\n\n\n def eps_anneal_function(self, step):\n\n return min(1.0, (1.0*step)/self.args.total_annealing_step)\n\n def reparameterization(self, mu, logvar, step): # vanila reparam\n\n std = torch.exp(0.5 * logvar)\n if self.training:\n eps = torch.randn_like(std)\n res = mu + std * eps\n else: res = mu + std\n return res\n\n def reparameterization1(self, mu, logvar, step): # reparam without noise\n std = torch.exp(0.5*logvar)\n return mu+std\n\n\n def reparameterization2(self, mu, logvar, step): # use dropout\n\n if self.training:\n std = self.latent_dropout(torch.exp(0.5*logvar))\n else: std = torch.exp(0.5*logvar)\n res = mu + std\n return res\n\n def reparameterization3(self, mu, logvar,step): # apply classical dropout on whole result\n std = torch.exp(0.5*logvar)\n res = self.latent_dropout(mu + std)\n return res\n\n\n def init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.args.initializer_range)\n elif isinstance(module, LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n def encode(self, sequence_emb, extended_attention_mask,mode): # forward\n if mode : \n item_encoded_mu_layers = self.item_encoder_mu(sequence_emb,\n extended_attention_mask,\n output_all_encoded_layers=True,mode = True)\n item_encoded_logvar_layers = self.item_encoder_logvar(sequence_emb, extended_attention_mask,\n output_all_encoded_layers=True,mode = True)\n else:\n item_encoded_mu_layers = self.item_encoder_mu(sequence_emb,\n extended_attention_mask,\n output_all_encoded_layers=True, mode = False)\n\n item_encoded_logvar_layers = self.item_encoder_logvar(sequence_emb, extended_attention_mask,\n output_all_encoded_layers=True,mode = False)\n\n return item_encoded_mu_layers[-1], item_encoded_logvar_layers[-1]\n\n def decode(self, z, extended_attention_mask,mode,ed):\n if mode:\n item_decoder_layers = self.item_decoder(z,\n extended_attention_mask,\n output_all_encoded_layers = True,mode = True, ed = ed)\n sequence_output = item_decoder_layers[-1]\n else: \n item_decoder_layers = self.item_decoder(z,\n extended_attention_mask,\n output_all_encoded_layers = True,mode = False, ed = ed)\n sequence_output = item_decoder_layers[-1]\n return sequence_output\n\n\n\n def forward(self, input_ids, aug_input_ids, step,ed):\n\n sequence_emb = self.add_position_embedding(input_ids)# shape: b*max_Sq*d\n extended_attention_mask = self.extended_attention_mask(input_ids)\n\n if self.args.latent_contrastive_learning:\n if self.args.fft:\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask,mode = False)\n mu2, log_var2 = self.encode(sequence_emb, extended_attention_mask,mode = True)\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2 = self.reparameterization2(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask,mode = False,ed = ed)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask,mode = True,ed = ed)\n else:\n mode = False\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask,mode)\n mu2, log_var2 = self.encode(sequence_emb, extended_attention_mask,mode)\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2 = self.reparameterization2(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask,mode,ed = ed)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask,mode,ed = ed) \n return reconstructed_seq1, reconstructed_seq2, mu1, mu2, log_var1, log_var2, z1, z2\n\n elif self.args.latent_data_augmentation:\n aug_sequence_emb = self.add_position_embedding(aug_input_ids) # shape: b*max_Sq*d\n aug_extended_attention_mask = self.extended_attention_mask(aug_input_ids)\n if self.args.fft:\n mode = True\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask,mode = True)\n mu2, log_var2 = self.encode(aug_sequence_emb, aug_extended_attention_mask,mode = True)\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2 = self.reparameterization2(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask,mode = True,ed = ed)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask,mode = True,ed = ed)\n else:\n mode = False\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask,mode)\n mu2, log_var2 = self.encode(aug_sequence_emb, aug_extended_attention_mask,mode)\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2 = self.reparameterization2(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask,mode,ed = ed)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask,mode,ed = ed) \n return reconstructed_seq1, reconstructed_seq2, mu1, mu2, log_var1, log_var2, z1, z2\n\n else: # vanilla attentive VAE\n mu, log_var = self.encode(sequence_emb, extended_attention_mask)\n z = self.reparameterization(mu, log_var, step)\n reconstructed_seq1 = self.decode(z, extended_attention_mask)\n return reconstructed_seq1, mu, log_var\n\n \n\n\n\nclass ContrastVAE_VD(ContrastVAE):\n\n def __init__(self, args):\n super(ContrastVAE, self).__init__()\n\n self.item_embeddings = nn.Embedding(args.item_size, args.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(args.max_seq_length, args.hidden_size)\n\n self.item_encoder_mu = Encoder(args)\n self.item_encoder_logvar = Encoder(args)\n self.item_decoder = Decoder(args)\n \n self.dropout = nn.Dropout(args.hidden_dropout_prob)\n\n self.LayerNorm = LayerNorm(args.hidden_size, eps=1e-12)\n self.latent_dropout_VD = VariationalDropout(inputshape=[args.max_seq_length, args.hidden_size], adaptive='layerwise')\n self.latent_dropout = nn.Dropout(0.1)\n self.args = args\n self.apply(self.init_weights)\n\n self.drop_rate = nn.Parameter(torch.tensor(0.2), requires_grad=True)\n\n\n def reparameterization3(self, mu, logvar, step): # use drop out\n\n std, alpha = self.latent_dropout_VD(torch.exp(0.5*logvar))\n res = mu + std\n return res, alpha\n\n def forward(self, input_ids, augmented_input_ids, step):\n if self.args.variational_dropout:\n sequence_emb = self.add_position_embedding(input_ids) # shape: b*max_Sq*d\n extended_attention_mask = self.extended_attention_mask(input_ids)\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask)\n mu2, log_var2 = self.encode(sequence_emb, extended_attention_mask)\n pdb.set_trace()\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2, alpha = self.reparameterization3(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask)\n\n elif self.args.VAandDA:\n sequence_emb = self.add_position_embedding(input_ids) # shape: b*max_Sq*d\n extended_attention_mask = self.extended_attention_mask(input_ids)\n aug_sequence_emb = self.add_position_embedding(augmented_input_ids) # shape: b*max_Sq*d\n aug_extended_attention_mask = self.extended_attention_mask(augmented_input_ids)\n\n mu1, log_var1 = self.encode(sequence_emb, extended_attention_mask)\n mu2, log_var2 = self.encode(aug_sequence_emb, aug_extended_attention_mask)\n z1 = self.reparameterization1(mu1, log_var1, step)\n z2, alpha = self.reparameterization3(mu2, log_var2, step)\n reconstructed_seq1 = self.decode(z1, extended_attention_mask)\n reconstructed_seq2 = self.decode(z2, extended_attention_mask)\n\n\n return reconstructed_seq1, reconstructed_seq2, mu1, mu2, log_var1, log_var2, z1, z2, alpha\n","repo_name":"skdytpq/vae_to_cf","sub_path":"ContrastVAE/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30603899231","text":"# https://programmers.co.kr/learn/courses/30/lessons/42840\n\ndef solution(answers):\n answer = [0, 0, 0, 0]\n result = []\n student = [[1, 2, 3, 4, 5],\n [2, 1, 2, 3, 2, 4, 2, 5],\n [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]]\n maxscore = 0\n\n for s in range(3):\n score = 0\n j = 0\n for i in range(len(answers)):\n if(s == 0):\n j = i % 5\n elif(s == 1):\n j = i % 8\n else:\n j = i % 10\n if answers[i] == student[s][j]:\n score = score + 1\n if score >= maxscore:\n maxscore = score\n answer[s] = maxscore\n\n for i in range(3):\n print(i, \"=\", answer[i])\n if answer[i] == maxscore:\n result.append(i+1)\n\n return result\n","repo_name":"yonghun16/Study","sub_path":"Algorithm/Programers/programers_42840/programers_42840.py","file_name":"programers_42840.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21483943814","text":"import datetime\nimport random\n\nimport pytest\n\nfrom simulation.constants import (\n BOILERS,\n ENGLAND_WALES_HOUSEHOLD_COUNT_2020,\n HEAT_PUMPS,\n HeatingSystem,\n)\nfrom simulation.costs import (\n DECOMMISSIONING_COST_MAX,\n MEAN_COST_GBP_BOILER_GAS,\n estimate_boiler_upgrade_scheme_grant,\n estimate_rhi_annual_payment,\n get_heating_fuel_costs_net_present_value,\n get_unit_and_install_costs,\n)\nfrom simulation.tests.common import household_factory, model_factory\n\n\nclass TestCosts:\n @pytest.mark.parametrize(\"heating_system\", set(HeatingSystem))\n def test_cost_of_any_heating_system_is_cheaper_if_already_installed(\n self, heating_system\n ) -> None:\n household_sticking_same_system = household_factory(\n heating_system=heating_system\n )\n\n alternative_system = random.choice(list(set(HeatingSystem) - {heating_system}))\n household_switching_system = household_factory(\n heating_system=alternative_system\n )\n\n model = model_factory()\n\n assert get_unit_and_install_costs(\n household_sticking_same_system, heating_system, model\n ) < get_unit_and_install_costs(\n household_switching_system, heating_system, model\n )\n\n @pytest.mark.parametrize(\"heat_pump\", HEAT_PUMPS)\n def test_cost_of_heat_pump_increases_with_kw_capacity_required(\n self,\n heat_pump,\n ) -> None:\n\n household = household_factory(\n total_floor_area_m2=random.randint(20, 200), heating_system=heat_pump\n )\n larger_household = household_factory(\n total_floor_area_m2=household.total_floor_area_m2 * 1.2,\n heating_system=heat_pump,\n )\n\n model = model_factory()\n\n assert household.compute_heat_pump_capacity_kw(\n heat_pump\n ) <= larger_household.compute_heat_pump_capacity_kw(heat_pump)\n assert get_unit_and_install_costs(\n household, heat_pump, model\n ) <= get_unit_and_install_costs(larger_household, heat_pump, model)\n\n @pytest.mark.parametrize(\"boiler\", BOILERS)\n def test_cost_of_boiler_increases_with_property_size(\n self,\n boiler,\n ) -> None:\n household = household_factory(\n total_floor_area_m2=random.randint(20, 200), heating_system=boiler\n )\n larger_household = household_factory(\n total_floor_area_m2=household.total_floor_area_m2 * 1.5,\n heating_system=boiler,\n )\n model = model_factory()\n assert get_unit_and_install_costs(\n household, boiler, model\n ) <= get_unit_and_install_costs(larger_household, boiler, model)\n\n @pytest.mark.parametrize(\"heating_system\", set(HeatingSystem))\n def test_fuel_bills_net_present_value_decreases_as_discount_rate_increases(\n self,\n heating_system,\n ) -> None:\n\n household = household_factory(\n property_value_gbp=random.randint(50_000, 300_000)\n )\n wealthier_household = household_factory(\n property_value_gbp=household.property_value_gbp * 1.1\n )\n\n num_look_ahead_years = random.randint(2, 10)\n model = model_factory(household_num_lookahead_years=num_look_ahead_years)\n\n assert household.discount_rate > wealthier_household.discount_rate\n\n assert get_heating_fuel_costs_net_present_value(\n household, heating_system, model\n ) < get_heating_fuel_costs_net_present_value(\n wealthier_household, heating_system, model\n )\n\n @pytest.mark.parametrize(\"heat_pump\", set(HEAT_PUMPS))\n def test_heat_pumps_are_cheaper_to_reinstall_than_install_first_time(\n self,\n heat_pump,\n ) -> None:\n\n household = household_factory(heating_system=HeatingSystem.BOILER_GAS)\n model = model_factory()\n\n new_heat_pump_quote = get_unit_and_install_costs(household, heat_pump, model)\n\n household.heating_system = heat_pump\n reinstall_heat_pump_quote = get_unit_and_install_costs(\n household, heat_pump, model\n )\n\n assert reinstall_heat_pump_quote < new_heat_pump_quote\n\n @pytest.mark.parametrize(\"heat_pump\", set(HEAT_PUMPS))\n def test_rhi_annual_payments_are_non_zero_for_households_switching_to_heat_pumps(\n self, heat_pump\n ):\n\n household_with_boiler = household_factory(\n heating_system=random.choices(list(BOILERS))[0]\n )\n\n assert estimate_rhi_annual_payment(household_with_boiler, heat_pump) > 0\n\n @pytest.mark.parametrize(\"boiler\", set(BOILERS))\n def test_rhi_annual_payments_zero_for_households_switching_to_boilers(self, boiler):\n\n household = household_factory(\n heating_system=random.choices(list(HeatingSystem))[0]\n )\n\n assert estimate_rhi_annual_payment(household, boiler) == 0\n\n @pytest.mark.parametrize(\"heat_pump\", set(HEAT_PUMPS))\n def test_rhi_annual_payments_reach_cap_for_large_households(self, heat_pump):\n\n mansion = household_factory(\n heating_system=random.choices(list(BOILERS))[0],\n total_floor_area_m2=random.randint(500, 1_000),\n )\n\n larger_mansion = household_factory(\n heating_system=mansion.heating_system,\n total_floor_area_m2=mansion.total_floor_area_m2 * 1.1,\n )\n\n assert estimate_rhi_annual_payment(\n mansion, heat_pump\n ) == estimate_rhi_annual_payment(larger_mansion, heat_pump)\n\n def test_air_source_heat_pumps_unit_install_costs_are_adjusted_by_discount_factor_across_discount_schedule(\n self,\n ):\n\n discount_factor = 0.3\n household = household_factory(heating_system=HeatingSystem.HEAT_PUMP_AIR_SOURCE)\n model = model_factory(\n start_datetime=datetime.datetime(2022, 1, 1),\n step_interval=datetime.timedelta(minutes=1440),\n air_source_heat_pump_price_discount_schedule=[\n (datetime.datetime(2022, 1, 2), discount_factor),\n ],\n )\n first_quote = get_unit_and_install_costs(\n household, HeatingSystem.HEAT_PUMP_AIR_SOURCE, model\n )\n\n model.increment_timestep()\n later_quote = get_unit_and_install_costs(\n household, HeatingSystem.HEAT_PUMP_AIR_SOURCE, model\n )\n\n assert later_quote == int((1 - discount_factor) * first_quote)\n\n @pytest.mark.parametrize(\"boiler\", set(BOILERS))\n def test_boiler_upgrade_scheme_grant_is_zero_for_boilers_within_grant_window(\n self, boiler\n ):\n\n start_datetime = datetime.datetime(2022, 4, 1, 0, 0)\n end_datetime = datetime.datetime(2025, 4, 1, 0, 0)\n random_n_days = random.randrange((end_datetime - start_datetime).days)\n start_datetime = start_datetime + datetime.timedelta(days=random_n_days)\n\n model = model_factory(\n start_datetime=start_datetime,\n )\n\n assert estimate_boiler_upgrade_scheme_grant(boiler, model) == 0\n\n @pytest.mark.parametrize(\"heating_system\", set(HeatingSystem))\n def test_boiler_upgrade_scheme_grant_is_zero_when_outside_grant_window(\n self, heating_system\n ):\n\n model = model_factory(start_datetime=datetime.datetime(2026, 1, 1, 0, 0))\n model.add_agents([household_factory()])\n\n assert estimate_boiler_upgrade_scheme_grant(heating_system, model) == 0\n\n @pytest.mark.parametrize(\"heat_pump\", set(HEAT_PUMPS))\n def test_boiler_upgrade_scheme_grant_is_zero_when_grant_cap_exceeded(\n self, heat_pump\n ):\n\n model = model_factory(\n start_datetime=datetime.datetime(2023, 1, 1, 0, 0),\n )\n\n num_households = random.randint(1, 5)\n model.add_agents([household_factory()] * num_households)\n\n model_population_scale = (\n ENGLAND_WALES_HOUSEHOLD_COUNT_2020 / model.household_count\n )\n boiler_upgrade_scheme_budget_scaled = 450_000_000 / model_population_scale\n\n model.boiler_upgrade_scheme_cumulative_spend_gbp = (\n boiler_upgrade_scheme_budget_scaled * 0.8\n )\n assert estimate_boiler_upgrade_scheme_grant(heat_pump, model) > 0\n\n model.boiler_upgrade_scheme_cumulative_spend_gbp = (\n boiler_upgrade_scheme_budget_scaled\n )\n assert estimate_boiler_upgrade_scheme_grant(heat_pump, model) == 0\n\n def test_boiler_upgrade_scheme_grant_is_non_zero_for_heat_pumps_when_grant_is_active(\n self,\n ):\n\n model = model_factory(\n start_datetime=datetime.datetime(2023, 1, 1, 0, 0),\n )\n model.add_agents([household_factory()])\n\n assert (\n estimate_boiler_upgrade_scheme_grant(\n HeatingSystem.HEAT_PUMP_AIR_SOURCE, model\n )\n == 5_000\n )\n assert (\n estimate_boiler_upgrade_scheme_grant(\n HeatingSystem.HEAT_PUMP_GROUND_SOURCE, model\n )\n == 6_000\n )\n\n def test_air_source_heat_pump_unit_and_install_costs_floored_at_gas_boiler_cost_for_households_with_air_source_heat_pump(\n self,\n ):\n model = model_factory(\n start_datetime=datetime.datetime(2022, 1, 1),\n air_source_heat_pump_price_discount_schedule=[\n (datetime.datetime(2022, 1, 1), 1.0)\n ],\n )\n\n household = household_factory(heating_system=HeatingSystem.HEAT_PUMP_AIR_SOURCE)\n heat_pump_cost = get_unit_and_install_costs(\n household, HeatingSystem.HEAT_PUMP_AIR_SOURCE, model\n )\n ashp_price_min_cap = MEAN_COST_GBP_BOILER_GAS[household.property_size]\n\n assert heat_pump_cost == ashp_price_min_cap\n\n def test_air_source_heat_pump_unit_and_install_costs_floored_at_gas_boiler_plus_decommissioning_costs_for_households_switching_heating_system(\n self,\n ):\n\n model = model_factory(\n start_datetime=datetime.datetime(2022, 1, 1),\n air_source_heat_pump_price_discount_schedule=[\n (datetime.datetime(2022, 1, 1), 1.0)\n ],\n )\n household = household_factory(heating_system=HeatingSystem.BOILER_GAS)\n heat_pump_cost = get_unit_and_install_costs(\n household, HeatingSystem.HEAT_PUMP_AIR_SOURCE, model\n )\n ashp_price_min_cap = MEAN_COST_GBP_BOILER_GAS[household.property_size]\n\n assert heat_pump_cost <= ashp_price_min_cap + DECOMMISSIONING_COST_MAX\n","repo_name":"centrefornetzero/domestic-heating-abm","sub_path":"simulation/tests/test_costs.py","file_name":"test_costs.py","file_ext":"py","file_size_in_byte":10484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36317510414","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('firestation', '0037_auto_20170111_1000'),\n ('firecares_core', '0012_departmentinvitation_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PredeterminedUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('email', models.EmailField(max_length=254)),\n ('first_name', models.CharField(max_length=30)),\n ('last_name', models.CharField(max_length=30)),\n ('department', models.ForeignKey(to='firestation.FireDepartment')),\n ],\n ),\n ]\n","repo_name":"FireCARES/firecares","sub_path":"firecares/firecares_core/migrations/0013_predetermineduser.py","file_name":"0013_predetermineduser.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"8377131398","text":"import _plotly_utils.basevalidators\n\n\nclass SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):\n def __init__(\n self, plotly_name=\"side\", parent_name=\"layout.polar.radialaxis\", **kwargs\n ):\n super(SideValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n edit_type=kwargs.pop(\"edit_type\", \"plot\"),\n values=kwargs.pop(\"values\", [\"clockwise\", \"counterclockwise\"]),\n **kwargs,\n )\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/plotly/plotly/validators/layout/polar/radialaxis/_side.py","file_name":"_side.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"} +{"seq_id":"32769166384","text":"from __future__ import print_function\n\nimport click.testing as clt\nimport clodius.cli.convert as ccc\nimport clodius.tiles.multivec as ctv\nimport os.path as op\nimport tempfile\nimport h5py\n\ntestdir = op.realpath(op.dirname(__file__))\n\n\ndef test_bedfile_to_multivec():\n runner = clt.CliRunner()\n input_file = op.join(testdir, \"sample_data\", \"sample.multival.bed\")\n chromsizes_file = op.join(testdir, \"sample_data\", \"sample.multival.chrom.sizes\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_file = op.join(tmp_dir, \"out.multivec\")\n\n runner.invoke(\n ccc.bedfile_to_multivec,\n [\n input_file,\n \"--output-file\",\n out_file,\n \"--assembly\",\n \"hg38\",\n \"--num-rows\",\n 3,\n \"--chromsizes-filename\",\n chromsizes_file,\n \"--starting-resolution\",\n \"1000\",\n ],\n )\n\n # import traceback\n # a, b, tb = result.exc_info\n\n # print(\"exc_info:\", result.exc_info)\n # print(\"result:\", result)\n # print(\"result.output\", result.output)\n # print(\"result.error\", traceback.print_tb(tb))\n # print(\"Exception:\", a,b)\n\n tsinfo = ctv.tileset_info(out_file)\n # print(\"tsinfo:\", tsinfo)\n\n assert \"resolutions\" in tsinfo\n assert tsinfo[\"max_pos\"][0] == 18000\n tile = ctv.get_single_tile(out_file, (0, 0))\n\n # input_file:\n # chr1 0 1000 1.0 2.0 3.0\n # chr1 1000 2000\n # chr2 5000 6000 20.0 30.0 40.0\n #\n # # input chromsizes\n # chr1 10000\n # chr2 8000\n\n # first row, first chrom first value\n assert len(tile) == 3\n\n assert tile[0][0] == 1.0\n assert tile[0][15] == 20.0\n\n assert tile[1][0] == 2.0\n assert tile[2][0] == 3.0\n assert tile[2][15] == 40.0\n\n\ndef test_load_multivec_tiles():\n op.join(testdir, \"sample_data\", \"sample.bed.multires.mv5\")\n # TODO: Make assertions about result\n\n\ndef test_states_format_befile_to_multivec():\n runner = clt.CliRunner()\n input_file = op.join(testdir, \"sample_data\", \"states_format_input_testfile.bed.gz\")\n rows_info_file = op.join(testdir, \"sample_data\", \"states_format_test_row_infos.txt\")\n tempfile.NamedTemporaryFile(delete=False)\n # TODO: Make assertions about result\n # print(\"input_file\", input_file)\n\n result = runner.invoke(\n ccc.bedfile_to_multivec,\n [\n input_file,\n \"--format\",\n \"states\",\n \"--row-infos-filename\",\n rows_info_file,\n \"--assembly\",\n \"hg38\",\n \"--starting-resolution\",\n \"200\",\n \"--num-rows\",\n \"10\",\n ],\n )\n\n # import traceback\n a, b, tb = result.exc_info\n \"\"\"\n print(\"exc_info:\", result.exc_info)\n print(\"result:\", result)\n print(\"result.output\", result.output)\n print(\"result.error\", traceback.print_tb(tb))\n print(\"Exception:\", a,b)\n \"\"\"\n\n\ndef test_ignore_bedfile_headers():\n runner = clt.CliRunner()\n input_file = op.join(testdir, \"sample_data\", \"3_header_100_testfile.bed.gz\")\n rows_info_file = op.join(testdir, \"sample_data\", \"3_header_100_row_infos.txt\")\n tempfile.NamedTemporaryFile(delete=False)\n # TODO: Make assertions about result\n\n result = runner.invoke(\n ccc.bedfile_to_multivec,\n [\n input_file,\n \"--format\",\n \"states\",\n \"--row-infos-filename\",\n rows_info_file,\n \"--assembly\",\n \"hg19\",\n \"--starting-resolution\",\n \"200\",\n \"--num-rows\",\n \"15\",\n ],\n )\n\n # import traceback\n a, b, tb = result.exc_info\n\n\ndef test_retain_lines():\n runner = clt.CliRunner()\n input_file = op.join(testdir, \"sample_data\", \"sample2.multival.bed\")\n chromsizes_file = op.join(testdir, \"sample_data\", \"sample2.multival.chrom.sizes\")\n row_infos_file = op.join(\n testdir, \"sample_data\", \"states_format_test_row_infos_v2.txt\"\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n out_file = op.join(tmp_dir, \"out.multires.mv5\")\n\n result = runner.invoke(\n ccc.bedfile_to_multivec,\n [\n input_file,\n \"--output-file\",\n out_file,\n \"--format\",\n \"states\",\n \"--chromsizes-filename\",\n chromsizes_file,\n \"--starting-resolution\",\n \"1000\",\n \"--row-infos-filename\",\n row_infos_file,\n \"--num-rows\",\n \"3\",\n ],\n )\n\n # import traceback\n a, b, tb = result.exc_info\n\n # input_file:\n # chr1 0 1000 State1\n # chr1 1000 10111 State2\n # chr2 5000 8000 State3\n #\n # # input chromsizes\n # chr1 10111\n # chr2 8000\n #\n # # input row_infos file\n # State1\n # State2\n # State3\n\n f = h5py.File(out_file, \"r\")\n # The last bin of chromosome 1 should contain the State2 Vector [0,1,0]\n assert f[\"resolutions\"][\"1000\"][\"values\"][\"chr1\"][10][0] == 0.0\n assert f[\"resolutions\"][\"1000\"][\"values\"][\"chr1\"][10][1] == 1.0\n assert f[\"resolutions\"][\"1000\"][\"values\"][\"chr1\"][10][2] == 0.0\n\n\ndef test_chr_boundaries_states():\n\n data_file = op.join(testdir, \"sample_data\", \"chrm_boundaries_test.multires.mv5\")\n f = h5py.File(data_file, \"r\")\n\n chromsizes = list(zip(f[\"chroms\"][\"name\"], f[\"chroms\"][\"length\"]))\n\n # Tile that contains the boundary of chr1 and chr2 at highest resultion\n tile1 = ctv.get_tile(f, chromsizes, 200, 248934400, 248985600, [256, 4])\n\n assert tile1[110][0] == 1.0 and tile1[110][1] == 0.0\n assert tile1[111][0] == 0.0 and tile1[111][1] == 1.0\n\n # Tile that contains the boundary of chr2 and chr3 at highest resultion\n tile2 = ctv.get_tile(f, chromsizes, 200, 491110400, 491161600, [256, 4])\n\n assert tile2[197][0] == 0.0 and tile2[197][1] == 1.0\n assert tile2[198][0] == 1.0 and tile2[198][1] == 0.0\n\n # Tile that contains the boundary of chr5 and chr6 at highest resultion\n tile3 = ctv.get_tile(f, chromsizes, 200, 1061171200, 1061222400, [256, 4])\n\n assert tile3[135][0] == 1.0 and tile3[135][1] == 0.0\n assert tile3[136][0] == 0.0 and tile3[136][1] == 1.0\n","repo_name":"higlass/clodius","sub_path":"test/multivec_test.py","file_name":"multivec_test.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"3"} +{"seq_id":"35836519511","text":"# https://practice.geeksforgeeks.org/problems/minimum-cost-path/0\nimport sys\n\n\ntc = int(input().strip())\n\n\nclass PriorityQueue():\n def __init__(self, size):\n self.curr_size = 0\n self.array= [None] * size\n self.position = {} # stores position of vertex in array\n\n def is_empty(self):\n return self.curr_size == 0\n\n def min_heapify(self, idx):\n l_c = self.left_c_idx(idx)\n r_c = self.right_c_idx(idx)\n\n if l_c < self.curr_size and self.array[l_c][0] < self.array[idx][0]:\n smallest = l_c\n else:\n smallest = idx\n if r_c < self.curr_size and self.array[r_c][0] < self.array[smallest][0]:\n smallest = r_c\n if smallest != idx:\n self.swap(idx, smallest)\n self.min_heapify(smallest)\n\n def swap(self, i, j):\n self.position[self.array[i][1]] = j\n self.position[self.array[j][1]] = i\n self.array[i], self.array[j] = self.array[j], self.array[i]\n\n def left_c_idx(self, idx):\n return (2 * idx) + 1\n\n def right_c_idx(self, idx):\n return (2 * idx) + 2\n\n def par_idx(self, idx):\n return idx // 2\n\n def decrease_key(self, d_v, new_dist):\n idx = self.position[d_v[1]]\n self.array[idx] = (new_dist, d_v[1])\n while idx > 0 and self.array[self.par_idx(idx)][0] > self.array[idx][0]:\n self.swap(idx, self.par_idx(idx))\n idx = self.par_idx(idx)\n\n def insert(self, d_v):\n self.position[d_v[1]] = self.curr_size\n print('*' * 80)\n print('curr_size', self.curr_size)\n print('array', self.array)\n self.array[self.curr_size] = (sys.maxsize, d_v[1])\n self.curr_size += 1\n # self.array.append((sys.maxsize, d_v[1]))\n self.decrease_key((sys.maxsize, d_v[1]), d_v[0])\n\n def extract_min(self):\n print('*' * 80)\n print('self.array', self.array)\n print('self.position', self.position)\n min_node = self.array[0][1]\n self.array[0] = self.array[self.curr_size - 1]\n self.curr_size -= 1\n self.min_heapify(0)\n print('min_node', min_node)\n del self.position[min_node]\n return min_node\n\n\ndef shortest_path(grid2d, row_size, col_size):\n def is_inside_grid(x, y):\n if (x >= row_size or y >= col_size or x < 0 or y < 0):\n return False\n else:\n return True\n\n dist_matrix = []\n\n for a_row in range(row_size):\n dist_matrix.append([sys.maxsize] * col_size)\n dist_matrix[0][0] = grid2d[0][0]\n # print(dist_matrix)\n\n dr = [-1, 0, 1, 0]\n dc = [0, 1, 0, -1]\n\n p_q = PriorityQueue(row_size * col_size)\n p_q.insert((0, 0, 0))\n while path_cells:\n min_dist_ind = 0\n min_dist = sys.maxsize\n for trk3 in range(len(path_cells)):\n if min_dist > path_cells[trk3][2]:\n min_dist_ind = trk3\n\n closest_cell = path_cells.pop(trk3)\n # print('*' * 80)\n # print('closest_cell', closest_cell)\n\n\n for trk in range(4):\n x = closest_cell[0] + dr[trk]\n y = closest_cell[1] + dc[trk]\n\n if not is_inside_grid(x, y):\n continue\n # print('after continue')\n # print('x , y ', x, y)\n # print('sum', dist_matrix[closest_cell[0]][closest_cell[1]] + grid2d[x][y])\n if dist_matrix[x][y] > dist_matrix[closest_cell[0]][closest_cell[1]] + grid2d[x][y]:\n # print('inside distance comparison')\n if dist_matrix[x][y] != sys.maxsize:\n for trk2 in range(len(path_cells)):\n if (path_cells[trk2][0], path_cells[trk2][1]) == (x, y):\n path_cells[trk2][2] = dist_matrix[closest_cell[0]][closest_cell[1]] + grid2d[x][y]\n break\n else:\n path_cells.append([x, y, dist_matrix[x][y]])\n else:\n path_cells.append([x, y, dist_matrix[x][y]])\n\n dist_matrix[x][y] = dist_matrix[closest_cell[0]][closest_cell[1]] + grid2d[x][y]\n # print('dist_matrix')\n # print(dist_matrix)\n print(dist_matrix[row_size - 1][col_size - 1])\n\nfor a_tc in range(tc):\n size = int(input().strip())\n grid = input().strip().split()\n grid = [int(i) for i in grid]\n\n grid2d = []\n\n for trk in range(size):\n grid2d.append(grid[(trk* size):((trk + 1) * size)])\n # print(grid2d)\n\n shortest_path(grid2d, size, size)\n","repo_name":"s-surineni/atice","sub_path":"min_cost_path2.py","file_name":"min_cost_path2.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10239525919","text":"import docker\nfrom constellation import docker_util\n\nfrom src.montagu_deploy.config import MontaguConfig\nfrom src.montagu_deploy.montagu_constellation import MontaguConstellation\nfrom tests.utils import http_get\n\n\ndef test_start_and_stop():\n cfg = MontaguConfig(\"config/basic\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n\n cl = docker.client.from_env()\n\n assert docker_util.network_exists(cfg.network)\n assert docker_util.volume_exists(cfg.volumes[\"db\"])\n assert docker_util.volume_exists(cfg.volumes[\"burden_estimates\"])\n assert docker_util.volume_exists(cfg.volumes[\"emails\"])\n assert docker_util.volume_exists(cfg.volumes[\"mq\"])\n assert docker_util.volume_exists(cfg.volumes[\"templates\"])\n assert docker_util.volume_exists(cfg.volumes[\"guidance\"])\n\n assert docker_util.container_exists(\"montagu-api\")\n assert docker_util.container_exists(\"montagu-db\")\n assert docker_util.container_exists(\"montagu-proxy\")\n assert docker_util.container_exists(\"montagu-proxy-metrics\")\n assert docker_util.container_exists(\"montagu-admin\")\n assert docker_util.container_exists(\"montagu-contrib\")\n assert docker_util.container_exists(\"montagu-mq\")\n assert docker_util.container_exists(\"montagu-flower\")\n assert docker_util.container_exists(\"montagu-task-queue\")\n assert docker_util.container_exists(\"montagu-fake-smtp\")\n\n containers = cl.containers.list()\n assert len(containers) == 10\n\n obj.stop(kill=True, remove_volumes=True)\n\n\ndef test_api_configured():\n cfg = MontaguConfig(\"config/basic\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n\n api = get_container(cfg, \"api\")\n api_config = docker_util.string_from_container(api, \"/etc/montagu/api/config.properties\").split(\"\\n\")\n\n assert \"app.url=https://localhost/api\" in api_config\n assert \"db.host=db\" in api_config\n assert \"db.username=api\" in api_config\n assert \"db.password=apipassword\" in api_config\n assert \"allow.localhost=False\" in api_config\n assert \"upload.dir=/upload_dir\" in api_config\n assert \"email.mode=real\" not in api_config\n\n res = http_get(\"https://localhost/api/v1\")\n assert '\"status\": \"success\"' in res\n\n obj.stop(kill=True, remove_volumes=True)\n\n cfg = MontaguConfig(\"config/complete\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n api = get_container(cfg, \"api\")\n api_config = docker_util.string_from_container(api, \"/etc/montagu/api/config.properties\").split(\"\\n\")\n assert \"email.mode=real\" in api_config\n assert \"email.password=changeme\" in api_config\n assert \"flow.url=fakeurl\" in api_config\n\n obj.stop(kill=True, remove_volumes=True)\n\n\ndef test_proxy_configured_self_signed():\n cfg = MontaguConfig(\"config/basic\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n\n api = get_container(cfg, \"proxy\")\n cert = docker_util.string_from_container(api, \"/etc/montagu/proxy/certificate.pem\")\n key = docker_util.string_from_container(api, \"/etc/montagu/proxy/ssl_key.pem\")\n param = docker_util.string_from_container(api, \"/etc/montagu/proxy/dhparam.pem\")\n assert cert is not None\n assert key is not None\n assert param is not None\n\n res = http_get(\"https://localhost\")\n assert \"Montagu\" in res\n\n obj.stop(kill=True, remove_volumes=True)\n\n\ndef test_db_configured():\n cfg = MontaguConfig(\"config/complete\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n\n db = get_container(cfg, \"db\")\n res = docker_util.exec_safely(db, f'psql -U {cfg.db_root_user} -d postgres -c \"\\\\du\"')\n res = res.output.decode(\"UTF-8\")\n\n for u in cfg.db_users:\n assert u in res\n\n query = \"SELECT * FROM pg_replication_slots WHERE slot_name = 'barman'\"\n res = docker_util.exec_safely(db, f'psql -U {cfg.db_root_user} -d postgres -c \"{query}\"')\n res = res.output.decode(\"UTF-8\")\n\n assert \"barman\" in res\n\n obj.stop(kill=True, remove_volumes=True)\n\n\ndef test_proxy_configured_ssl():\n cfg = MontaguConfig(\"config/complete\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n\n api = get_container(cfg, \"proxy\")\n cert = docker_util.string_from_container(api, \"/etc/montagu/proxy/certificate.pem\")\n key = docker_util.string_from_container(api, \"/etc/montagu/proxy/ssl_key.pem\")\n param = docker_util.string_from_container(api, \"/etc/montagu/proxy/dhparam.pem\")\n assert cert == \"cert\"\n assert key == \"k3y\"\n assert param == \"param\"\n\n obj.stop(kill=True, remove_volumes=True)\n\n\ndef test_metrics():\n cfg = MontaguConfig(\"config/basic\")\n obj = MontaguConstellation(cfg)\n\n obj.start()\n http_get(\"http://localhost:9113/metrics\")\n\n obj.stop(kill=True)\n\n\ndef get_container(cfg, name):\n cl = docker.client.from_env()\n return cl.containers.get(f\"{cfg.container_prefix}-{cfg.containers[name]}\")\n","repo_name":"vimc/montagu-deploy","sub_path":"tests/test_constellation.py","file_name":"test_constellation.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30969865824","text":"# -*- coding: utf-8 -*-\n# Create your views here.\nfrom woistbier_rest.models import Kiosk, BeerPrice, Image, Beer, Comment\nfrom woistbier_rest.serializers import KioskSerializer, ImageSerializer, BeerSerializer, CommentSerializer, BeerPriceSerializer, KioskListItemSerializer, KioskDetailSerializer\nfrom django.http import Http404, HttpResponseBadRequest\nfrom django.shortcuts import render_to_response\nfrom rest_framework import generics, status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.throttling import ScopedRateThrottle\nfrom django.db.models import Avg\nimport math\nimport logging\nfrom django.conf import settings\nimport os\nlog = logging.getLogger(__name__)\n\n\ndef not_found_view(request):\n response = render_to_response('bier/404.html')\n response.status_code = 404\n return response\n\n\ndef index(request):\n num_kiosk = Kiosk.objects.all().count()\n num_beer = Beer.objects.all().count()\n return render_to_response('bier/index.html', {'kiosk_number': num_kiosk, 'beer_number': num_beer})\n\n\ndef beer_list(request):\n num_beer = Beer.objects.all().count()\n return render_to_response('bier/beer.html', {'beer_count': num_beer})\n\n\ndef impressum(request):\n return render_to_response('bier/impressum.html')\n\n\n#Here come the views for the rest api\n\n\ndef check_kiosk_args(kiosk_id):\n if kiosk_id is None:\n return False\n try:\n long(kiosk_id)\n except ValueError:\n return False\n return True\n\n\ndef check_if_kiosk_exists(kiosk_id):\n try:\n Kiosk.objects.get(pk = kiosk_id)\n except Kiosk.DoesNotExist:\n return False\n return True\n\n \n#views for images\nclass ImageList(generics.ListAPIView):\n model = Image\n serializer_class = ImageSerializer\n filter_fields = ['kiosk']\n\n def post(self, request):\n #curl -X POST -S -H 'Accept: application/json' -F \"image=@/home/mackaiver/Pictures/alf2.jpg; type=image/jpg\"\n # http://localhost:8000/bier/rest/image/68/\n print(str(request))\n serializer = ImageSerializer(data = request.DATA , files=request.FILES)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ImageDetail(generics.RetrieveAPIView):\n model = Image\n serializer_class = ImageSerializer \n\n\nclass CommentList(generics.ListAPIView):\n model = Comment\n serializer_class = CommentSerializer\n filter_fields = ('name', 'created', 'kiosk')\n # def get(self, request):\n # kiosk_id = self.request.QUERY_PARAMS.get('kiosk', None)\n # return getSetForKioskId(Comment, CommentSerializer, kiosk_id)\n \n def post(self, request):\n serializer = CommentSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass CommentDetail(generics.CreateAPIView):\n model = Comment\n serializer_class = CommentSerializer \n \n \nclass BeerPriceList(generics.ListCreateAPIView):\n model = BeerPrice\n serializer_class = BeerPriceSerializer\n filter_fields = ['kiosk']\n\n\nclass BeerPriceDetail(generics.RetrieveUpdateAPIView):\n model = BeerPrice\n serializer_class = BeerPriceSerializer\n\n\nclass BeerList(generics.ListAPIView):\n model = Beer\n serializer_class = BeerSerializer\n filter_fields = ['name', 'brand', 'location', 'brew']\n\n def get(self, request, *args, **kwargs):\n kiosk_id = self.request.QUERY_PARAMS.get('kiosk', None)\n if kiosk_id is not None:\n if not check_kiosk_args(kiosk_id):\n return HttpResponseBadRequest(\"Kiosk id arguments was malformed\")\n beer_set = Beer.objects.filter(related_beer__kiosk__id = kiosk_id)\n if beer_set.count() == 0:\n return Response(status = status.HTTP_204_NO_CONTENT)\n else:\n beer_set = Beer.objects.all()\n \n serializer = BeerSerializer(beer_set, many=True)\n return Response(serializer.data)\n \n \nclass BeerDetail(generics.RetrieveAPIView):\n model = Beer\n serializer_class = BeerSerializer\n \n\nclass SimpleKioskList(generics.ListCreateAPIView):\n throttle_classes = (ScopedRateThrottle,)\n throttle_scope = 'kiosk_uploads'\n model = Kiosk\n serializer_class = KioskSerializer\n filter_fields = ('id', 'name', 'owner', 'street', 'city', 'zip_code')\n\n def post(self, request, *args, **kwargs):\n serializer = KioskSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n log.warn(\"Serializer is invalid for kiosk put. : \" + str(serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n\nclass KioskDetail(APIView):\n\n def get_object(self, kiosk_id):\n try:\n return Kiosk.objects.get(pk=kiosk_id)\n except Kiosk.DoesNotExist:\n raise Http404\n\n def get(self, request, kiosk_id):\n k = self.get_object(kiosk_id)\n serializer = KioskSerializer(k)\n return Response(serializer.data)\n \n\n#An object to hold all the info the kiosk detail view on the client needs. this will be passed to the serializer\nclass KioskDetailContainer(object):\n def __init__(self, kiosk, beerPrice=None, images=None, comments = None, comment_count = 0, beer_count = 0, avg_price = 1):\n self.kiosk = kiosk\n self.beerPrice = beerPrice\n self.images = images\n self.comments = comments\n self.comment_count = comment_count\n self.beer_count = beer_count\n self.avg_price = avg_price\n \n \n# this will get the kiosk with the given id from the database and pulls all the necessary info from the connected tables\nclass KioskDetailView(APIView):\n\n def get_object(self, primaryKey):\n try:\n return Kiosk.objects.get(pk=primaryKey)\n except Kiosk.DoesNotExist:\n raise Http404\n\n def get(self, request, primaryKey):\n kiosk = self.get_object(primaryKey)\n imageSet = Image.objects.filter(kiosk__pk = kiosk.id)\n commentSet = Comment.objects.filter(kiosk__pk = kiosk.id)\n beerPriceSet = BeerPrice.objects.filter(kiosk__id = kiosk.id).order_by('score')\n beer_count = beerPriceSet.count()\n comment_count = commentSet.count()\n avg_price = beerPriceSet.aggregate(Avg('score'))\n l = KioskDetailContainer(kiosk, beerPrice=beerPriceSet, images=imageSet, comments=commentSet,\n comment_count=comment_count, beer_count=beer_count, avg_price=avg_price)\n serializer = KioskDetailSerializer(l)\n return Response(serializer.data)\n \n \n#angle to radians conversion\ndef radians(r):\n return r * math.pi / 180\n\n\n# simple pythagorean distance for flat surfaces. Returns distance in kilometers!\ndef calculate_distance(lat_1, lon_1, lat_2, lon_2):\n R = 6371\n lat_1 = radians(lat_1)\n lat_2 = radians(lat_2)\n lon_1 = radians(lon_1)\n lon_2 = radians(lon_2)\n x = (lon_2-lon_1) * math.cos((lat_1+lat_2)/2)\n y = (lat_2-lat_1)\n return math.sqrt(x*x + y*y) * R\n\n\n#An Object to hold all the stuff needed for a listItem. This Object will be provided to the Serializer\nclass ListItem(object):\n def __init__(self, kiosk, beer_price=None, image=None, distance=0.0):\n self.kiosk = kiosk\n self.beerPrice = beer_price\n self.image = image\n self.distance = distance\n \n \n#\n# returns a listitem object given a specific kiosk and the lat,long values of the client\n# so it can calculate the distance between kiosk and client to write into the listitem object\n# if a beer argument is supplied only kioskItems containg that beer will be returned\n#\ndef get_list_item_from_kiosk(kiosk, lat=None, lon=None, beer=None):\n img = None\n beerPrice = None\n imageSet = Image.objects.filter(kiosk__pk=kiosk.id)\n if imageSet.exists():\n img = imageSet[0]\n\n beerPriceSet = BeerPrice.objects.filter(kiosk__id = kiosk.id).order_by('score')\n if beerPriceSet.exists():\n if beer is not None:\n beerPriceSet = beerPriceSet.filter(beer__name__icontains = beer)\n if not beerPriceSet.exists():\n return None\n beerPrice = beerPriceSet[0]\n else:\n if beer is not None:\n return None\n if lat is not None and lon is not None:\n distance = calculate_distance(float(kiosk.geo_lat), float(kiosk.geo_long), lat, lon)\n return ListItem(kiosk=kiosk, beer_price=beerPrice, image=img, distance=distance)\n\n return ListItem(kiosk=kiosk, beer_price=beerPrice, thumb=img)\n \n\n#View to return a list of kioskItems where the kiosk is within a boundingbox with the length radius supplied in the url\n#in case the parameter contain bullshit values this will raise a ERROR400 bad request to the client\nclass KioskList(APIView):\n\n def get(self, request):\n #get parameters from httprequest\n #in case url contains bullshit for lat,long or radius this will throw a value exception\n #default values 5km radius \n g_lat = 51.52\n g_long = 7.46\n radius = 5.0\n beer = None\n #check url parameter \n try:\n if request.QUERY_PARAMS.get('geo_lat', None) is not None:\n g_lat = float(request.QUERY_PARAMS.get('geo_lat', None))\n if request.QUERY_PARAMS.get('geo_long', None) is not None:\n g_long = float(request.QUERY_PARAMS.get('geo_long', None))\n if request.QUERY_PARAMS.get('radius', None) is not None:\n radius = float(request.QUERY_PARAMS.get('radius', None))\n if request.QUERY_PARAMS.get('beer', None) is not None:\n beer = request.QUERY_PARAMS.get('beer', None)\n except :\n return HttpResponseBadRequest('bad parameter string')\n\n max_distance = radius;\n #radius from km to degrees\n R = 6371 # earth radius\n #from a length to radians. see definition of a radian\n radius = radius/R\n # now convert radians to degrees\n radius = radius*180/math.pi\n \n #get all kiosk within the bounding box\n queryResult = Kiosk.objects.filter(geo_lat__lte = g_lat + radius, geo_long__lte = g_long + radius, geo_lat__gte = g_lat - radius, geo_long__gte = g_long - radius)\n \n #build a kiosklistem for every kiosk we fetched and return it through the serializer\n l = list()\n for kiosk in queryResult:\n item = get_list_item_from_kiosk(kiosk, g_lat, g_long, beer)\n if item is not None and item.distance <= max_distance :\n l.append(item)\n \n serializer = KioskListItemSerializer(l, many=True)\n return Response(serializer.data)\n \n\n# View for a single kiosklistitem does not take any parameter\nclass KioskListItem(APIView):\n \n def get_object(self, primaryKey):\n try:\n return Kiosk.objects.get(pk=primaryKey)\n except Kiosk.DoesNotExist:\n raise Http404\n\n def get(self, request, primaryKey):\n kiosk = self.get_object(primaryKey)\n l = get_list_item_from_kiosk(kiosk)\n serializer = KioskListItemSerializer(l)\n return Response(serializer.data)","repo_name":"WoIstBier/bier-django","sub_path":"woistbier_rest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23114694602","text":"from html.parser import HTMLParser\nfrom urllib import parse\n\nclass LinkFinder(HTMLParser):\n def __init__(self, base_url, page_url):\n super().__init__()\n self.base_url = base_url\n self.page_url = page_url\n self.links = []\n self.content = []\n self.record = False\n\n\n # When we call HTMLParser feed(), this function is called it encounters an opening tag \n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n test = 0\n for (attribute, value) in attrs:\n if (attribute == 'href') and ('thread' in value):\n test = 1\n self.url = parse.urljoin(self.base_url, value)\n self.links.append(self.url)\n break\n for (attribute, value) in attrs:\n if (attribute == 'onclick') and ('atarget' in value):\n test += 1\n if test == 2:\n self.record = True\n test = 0\n break\n if test == 1:\n self.links.pop(-1)\n def handle_endtag(self, tag):\n if tag == 'a':\n self.record = False\n def handle_data(self, data):\n if self.record:\n self.content.append(data)\n def page_links(self):\n return [self.links, self.content]\n","repo_name":"gfdsa0788/Spider","sub_path":"link_finder.py","file_name":"link_finder.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37637905075","text":"import euclid3 as euclid\nfrom .model import Model\nimport os\nfrom PIL import Image\n\nfrom FbxCommon import InitializeSdkObjects, LoadScene, FbxNodeAttribute, FbxSurfacePhong, FbxAnimStack, FbxTime, FbxAMatrix, FbxTexture, FbxLayerElement\n\nimport logging\nlog = logging.getLogger()\n\ndef fbx_to_euclid(input_matrix):\n return euclid.Matrix4.new(\n input_matrix.Get(0,0),input_matrix.Get(1,0),input_matrix.Get(2,0),input_matrix.Get(3,0),\n input_matrix.Get(0,1),input_matrix.Get(1,1),input_matrix.Get(2,1),input_matrix.Get(3,1),\n input_matrix.Get(0,2),input_matrix.Get(1,2),input_matrix.Get(2,2),input_matrix.Get(3,2),\n input_matrix.Get(0,3),input_matrix.Get(1,3),input_matrix.Get(2,3),input_matrix.Get(3,3))\n\n\n\nclass Reader:\n def __init__(self):\n self.material_index = []\n self.bones = {}\n self.cluster_transforms = {}\n pass\n\n def process_clusters(self, object, mesh):\n # each mesh should contain a single deformer, containing\n # multiple clusters; roughly each cluster corresponds\n # to each bone in our models.\n if mesh.GetDeformerCount() > 0:\n deformer = mesh.GetDeformer(0)\n # loop over all the bones\n\n for i in range(deformer.GetClusterCount()):\n cluster = deformer.GetCluster(i)\n\n transform_link_matrix = FbxAMatrix()\n transform_matrix = FbxAMatrix()\n cluster.GetTransformLinkMatrix(transform_link_matrix) #if this even works\n cluster.GetTransformMatrix(transform_matrix) #if this even works\n self.cluster_transforms[cluster.GetLink().GetName()] = fbx_to_euclid(transform_matrix) * fbx_to_euclid(transform_link_matrix).inverse()\n #print(\"Cluster: \", cluster.GetLink().GetName())\n #print(self.cluster_transforms[cluster.GetLink().GetName()])\n #print(fbx_to_euclid(transform_matrix))\n\n #print(cluster.GetLink().GetName(), \": \", cluster.GetControlPointIndicesCount())\n # loop over every point this bone controlls\n for j in range(cluster.GetControlPointIndicesCount()):\n if object.vertecies[cluster.GetControlPointIndices()[j]].group != \"default\":\n log.warn(\"Oh no! Multiple bones affect the same vertex. Bad things!!\")\n object.vertecies[cluster.GetControlPointIndices()[j]].setGroup(cluster.GetLink().GetName())\n\n def process_materials(self, object, fbx_mesh):\n material_count = fbx_mesh.GetNode().GetMaterialCount()\n #print(\"Layer count: \", fbx_mesh.GetLayerCount())\n for l in range(fbx_mesh.GetLayerCount()):\n for i in range(material_count):\n material = fbx_mesh.GetNode().GetMaterial(i)\n #print(\"Material: \", material.GetName())\n if material.GetClassId().Is(FbxSurfacePhong.ClassId):\n #check for and process textures\n texture_name = None\n texture_width = 1\n texture_height = 1\n if material.Diffuse.GetSrcObjectCount(FbxTexture.ClassId) > 0:\n texture = material.Diffuse.GetSrcObject(FbxTexture.ClassId,0)\n log.debug(\"Texture original path/name: %s\", texture.GetFileName())\n texture_name = os.path.basename(texture.GetFileName())\n texture_name = os.path.splitext(texture_name)[0]\n log.debug(\"Found texture: %s\", texture_name)\n try:\n image = Image.open(texture.GetFileName())\n texture_width = image.size[0]\n texture_height = image.size[1]\n except:\n log.warn(\"Could not load texture file: %s\", texture.GetFileName())\n\n\n\n #print(\"Is phong!\")\n #this is a valid enough material to add, so do it!\n object.addMaterial(material.GetName(),\n {\"r\": material.Ambient.Get()[0],\n \"g\": material.Ambient.Get()[1],\n \"b\": material.Ambient.Get()[2]},\n\n {\"r\": material.Specular.Get()[0],\n \"g\": material.Specular.Get()[1],\n \"b\": material.Specular.Get()[2]},\n\n {\"r\": material.Diffuse.Get()[0],\n \"g\": material.Diffuse.Get()[1],\n \"b\": material.Diffuse.Get()[2]},\n texture_name, texture_width, texture_height)\n\n\n #TODO: More gracefully handle multiple meshes in a single file; we would need to\n #offset the vertex count somehow when coding in polygons.\n def process_mesh(self, object, mesh):\n # Import polygon and vertex data.\n log.debug(\"Polygons: %d\", mesh.GetPolygonCount())\n log.debug(\"Verticies: %d\", len(mesh.GetControlPoints()))\n\n #this list contains all the points in the model; polygons will\n #index into this list\n vertex_list = mesh.GetControlPoints()\n\n #add the verticies to the model\n for i in range(len(vertex_list)):\n object.addVertex(euclid.Vector3(vertex_list[i][0], vertex_list[i][1], vertex_list[i][2]))\n\n #do something about materials\n self.process_materials(object, mesh)\n material_map = mesh.GetLayer(0).GetMaterials().GetIndexArray()\n\n log.debug(\"Mesh Global Transform:\")\n log.debug(fbx_to_euclid(mesh.GetNode().EvaluateGlobalTransform()))\n #exit()\n #well ... that explains a lot.\n self.mesh_global = fbx_to_euclid(mesh.GetNode().EvaluateGlobalTransform())\n object.global_matrix = self.mesh_global\n\n for face in range(mesh.GetPolygonCount()):\n #this importer only supports triangles and\n #quads, so we need to throw out any weird\n #sizes here\n vertex_count = mesh.GetPolygonSize(face)\n if vertex_count >= 3 and vertex_count <= 4:\n points = []\n normals = []\n uvlist = []\n for v in range(vertex_count):\n points.append(mesh.GetPolygonVertex(face,v))\n\n #figure out if there's normal data?\n #TODO: Why does this need to loop over layer data? Investigate!\n for l in range(mesh.GetLayerCount()):\n normal_data = mesh.GetLayer(l).GetNormals()\n if normal_data:\n normal = normal_data.GetDirectArray().GetAt(v)\n #print(normal)\n normals.append((normal[0],normal[1],normal[2]))\n uv_data = mesh.GetLayer(l).GetUVs()\n if uv_data:\n if uv_data.GetMappingMode() == FbxLayerElement.eByControlPoint:\n log.warn(\"eByControlPoint not supported for UVs!\")\n elif uv_data.GetMappingMode() == FbxLayerElement.eByPolygonVertex:\n uv_index = mesh.GetTextureUVIndex(face, v)\n uv = uv_data.GetDirectArray().GetAt(uv_index)\n #print(\"UVs: \", uv)\n uvlist.append((uv[0], uv[1]))\n\n #todo: not discard UV coordinates here\n if len(uvlist) == 0:\n uvlist = None\n object.addPoly(points, uvlist, normals, mesh.GetNode().GetMaterial(material_map.GetAt(face)).GetName())\n\n self.process_clusters(object, mesh)\n\n def process_skeleton(self, object, skeleton):\n #TODO: This obviously.\n #print(\"SKELETON encountered!\")\n self.bones[skeleton.GetName()] = skeleton\n return\n\n def process_node(self, object, node):\n if node.GetNodeAttribute() == None:\n log.debug(\"NULL Node Attribute: %s\", node.GetName())\n else:\n attribute = node.GetNodeAttribute()\n attribute_type = attribute.GetAttributeType()\n\n if attribute_type == FbxNodeAttribute.eMesh:\n self.process_mesh(object, attribute)\n if attribute_type == FbxNodeAttribute.eSkeleton:\n self.process_skeleton(object, attribute)\n\n #regardless of emptiness, if this node has any children, we process those as well\n for i in range(node.GetChildCount()):\n #print(\"recursing into: \", node.GetName())\n self.process_node(object, node.GetChild(i))\n\n def calculate_transformation(self, bone, frame, last_step=True):\n timestamp = FbxTime()\n timestamp.SetFrame(frame)\n #animation_transform = bone.GetNode().EvaluateLocalTransform(timestamp)\n animation_transform = bone.GetNode().EvaluateGlobalTransform(timestamp)\n\n #make this euclid format please\n animation_transform = fbx_to_euclid(animation_transform)\n\n bind_pose_inverse = self.cluster_transforms[bone.GetNode().GetName()]\n\n #print(\"\\n\".join(sorted(dir(bone.GetNode().GetScene().GetRootNode()))))\n #exit()\n\n #animation_transform = self.cluster_transforms[bone.GetNode().GetName()].inverse() * animation_transform\n #animation_transform = animation_transform.identity()\n #return euclid.Matrix4()\n\n #print(bone.GetNode().GetName())\n return bind_pose_inverse * animation_transform\n\n #return self.mesh_global * animation_transform * bind_pose.inverse() * self.mesh_global.inverse()\n\n #return animation_transform.inverse()\n\n def process_animation(self, object, scene):\n #print(sorted(dir(scene)))\n #evaluator = scene.GetAnimationEvaluator()\n #print(sorted(dir(evaluator)))\n\n for i in range(scene.GetSrcObjectCount(FbxAnimStack.ClassId)):\n animation_stack = scene.GetSrcObject(FbxAnimStack.ClassId, i)\n log.debug(\"Animation: %s\", animation_stack.GetName())\n log.debug(\"Length: %d\", animation_stack.LocalStop.Get().GetFrameCount())\n\n #evaluator.SetContext(animation_stack)\n scene.SetCurrentAnimationStack(animation_stack)\n obj_animation = object.createAnimation(animation_stack.GetName())\n\n # Note here: animations in blender are 60FPS, but FBX forces it to\n # read as 30 FPS. It totally accepts half-frames for steps, so we're\n # fiddling with the numbers to convert it back to 60 FPS for export.\n obj_animation.length = animation_stack.LocalStop.Get().GetFrameCount() * 2\n\n #initialize our list of animation stuffs\n for k in self.bones:\n transform_list = []\n for frame in range(obj_animation.length * 2):\n transform_list.append(self.calculate_transformation(self.bones[k], frame / 2))\n\n obj_animation.addNode(self.bones[k].GetName(), transform_list)\n\n\n def read(self, filename):\n #first, make sure we can open the file\n SdkManager, scene = InitializeSdkObjects()\n if not LoadScene(SdkManager, scene, filename):\n log.error(\"Could not parse %s as .fbx, bailing.\", filename)\n return\n else:\n object = Model()\n\n #Process all nodes in the scene\n node_list = scene.GetRootNode()\n for i in range(node_list.GetChildCount()):\n self.process_node(object, node_list.GetChild(i))\n\n #animation is handled separately for some weird reason\n self.process_animation(object, scene)\n\n return object\n","repo_name":"zeta0134/dsgx-converter","sub_path":"model/fbx_importer.py","file_name":"fbx_importer.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10815543767","text":"'''\n동적프로그래밍 : 1번계산한 것은 또 계산하지 않음 -> 이를 메모이제이션이라함\n->점화식을 세워야함.\n'''\n'''\nimport sys\nn=int(sys.stdin.readline())\na,b = 1 , 2\nif n==1:\n print(a)\nelif n==2:\n print(b)\nelse:\n for _ in range(n-2):\n a,b = b, (a+b)%15746\n print(b)\n\n'''\n\nn=int(input())\ndp=[0]*1000001\ndp[1]=1\ndp[2]=2\n\nfor i in range(3,n+1):\n dp[i]=(dp[i-2]+dp[i-1])%15746\n\nprint(dp[n])","repo_name":"ngj1014/Baekjoon_problem","sub_path":"다이나믹프로그래밍,위상정렬/Algorithm_Baekjoon_1904.py","file_name":"Algorithm_Baekjoon_1904.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70504825362","text":"#\n# @lc app=leetcode id=815 lang=python3\n#\n# [815] Bus Routes\n#\nfrom typing import List\nimport collections\n# @lc code=start\n# Time: O(nlogn)\n# Space: O(n)\nclass Solution:\n def numBusesToDestination(self, routes: List[List[int]], S: int, T: int) -> int:\n if S == T:\n return 0\n stop_board = collections.defaultdict()\n for bus, stops in enumerate(routes):\n for stop in stops:\n if stop not in stop_board:\n stop_board[stop] = [bus]\n else:\n stop_board[stop].append(bus)\n queue = collections.deque([S])\n visited = set()\n\n res = 0\n while queue:\n res += 1\n size_stops_reach = len(queue)\n for _ in range(size_stops_reach):\n cur_stop = queue.popleft()\n for bus in stop_board[cur_stop]:\n if bus in visited:\n continue\n visited.add(bus)\n for stop in routes[bus]:\n if stop == T:\n return res\n queue.append(stop)\n return -1\n# 45/45 cases passed (392 ms)\n# Your runtime beats 92 % of python3 submissions\n# Your memory usage beats 100 % of python3 submissions (31.6 MB)\n\n# @lc code=end\n\n","repo_name":"binlidaily/Coding-Interviews","sub_path":"LeetCode/815.bus-routes.py","file_name":"815.bus-routes.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7590623817","text":"import os\nimport numpy as np\nimport torch.utils.data\nimport torchvision\nfrom PIL import Image\nfrom scipy.stats import entropy\nfrom torch.nn import functional as F\nfrom torchvision import transforms\n\nclass InceptionDataset(torch.utils.data.Dataset):\n def __init__(self, image_dir):\n self.image_dir = image_dir\n self.list_image_names = os.listdir(self.image_dir)\n\n def __len__(self):\n return len(self.list_image_names)\n\n def __getitem__(self, index):\n img_name = os.path.join(self.image_dir, self.list_image_names[index])\n img = np.array(Image.open(img_name))\n img = self.transform_image(img)\n return img, index\n\n @staticmethod\n def transform_image(image):\n # mean values for RGB\n t_ = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean=[0.407, 0.457, 0.485],\n std=[0.225, 0.224, 0.229])\n ])\n image = t_(image)\n image = F.interpolate(image.unsqueeze(0), size=(299, 299), mode='bilinear', align_corners=True).squeeze(0)\n return image\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint('device = %s' % device)\n\nsplits = 10\nbatch_size = 16\n# Create dataset and dataloader\nimages_dataset = InceptionDataset(image_dir='./true_light/')\nnum_images = len(images_dataset)\ndataloader = torch.utils.data.DataLoader(images_dataset, batch_size=batch_size)\n\n# Load inception-v3\nprint('Loading inception-v3 model')\ninception_model = torchvision.models.inception_v3(pretrained=True, transform_input=False, progress=True)\ninception_model = inception_model.to(device)\ninception_model.eval()\n\n# Evaluation loop\n# Create predictions vector of size (num_images, num_inception-v3_classes)\npredictions = np.zeros((num_images, 1000))\n\nprint('Evaluating images using inception-v3')\nfor i, batch in enumerate(dataloader, start=0):\n if i % 100 == 0:\n print('Batch [%g/%g]' % (i, num_images // batch_size))\n image_batch = batch[0].to(device)\n batch_size_i = image_batch.size(0)\n\n with torch.no_grad():\n image_batch = inception_model(image_batch)\n\n predictions[i * batch_size: i * batch_size + batch_size_i] = F.softmax(image_batch, dim=1).detach().cpu().numpy()\n\n# Calculate mean KullbackLeibler divergence\nprint('Calculating mean KullbackLeibler divergence')\nsplit_scores = []\nfor k in range(splits):\n part = predictions[k * (num_images // splits): (k + 1) * (num_images // splits), :]\n py = np.mean(part, axis=0)\n scores = []\n for i in range(part.shape[0]):\n pyx = part[i, :]\n scores.append(entropy(pyx, py))\n split_scores.append(np.exp(np.mean(scores)))\n\ninception_score_mean = np.mean(split_scores)\ninception_score_std = np.std(split_scores)\nprint('Inception Score: (mean, std) = (%g, %g)' % (inception_score_mean, inception_score_std))\n\n\n","repo_name":"BirkhoffG/crowd_project","sub_path":"metrics/is.py","file_name":"is.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21121775412","text":"r\"\"\"NOTE: This script is very data specific!\"\"\"\nimport os\nimport sys\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nfrom graphembed.utils import nnp1d2_to_n\n\nfrom .agg_grid_results import get_best_optim\nfrom .plot_angle_ratios import loss_fn_for_display\nfrom .utils import (build_manifold, fullpath_list,\n manifold_factors_from_path_label)\n\nmatplotlib.rcParams.update({'font.size': 22})\n\n\ndef main():\n # create one plot per dataset\n height = 9 if args.dim == 3 else 8\n fig, ax = plt.subplots(figsize=(20, height))\n\n ds_dirs = sorted(list(fullpath_list(args.root_dir)))\n n_dirs = len(args.datasets) if args.datasets else len(ds_dirs)\n ds_id = None\n offsets = 1.5 * np.arange(n_dirs) / n_dirs\n offsets = offsets - np.median(offsets)\n bplot_width = 0.5 if n_dirs == 1 else offsets[-1] - offsets[-2]\n x_step = 2\n\n colors = []\n labels = []\n\n for ds_dir in ds_dirs:\n ds_name = os.path.basename(ds_dir)\n if args.datasets and ds_name not in args.datasets:\n continue\n ds_id = 0 if ds_id is None else ds_id + 1\n labels.append(ds_name)\n color = plt.cm.tab10.colors[len(colors)]\n colors.append(color)\n\n fp_dir = os.path.join(ds_dir, 'flipp_0.0000')\n loss_fns = []\n loss_fn_dirs = sorted(\n list(fullpath_list(fp_dir)), key=loss_fns_key_sorted)\n for loss_fn_id, loss_fn_dir in enumerate(loss_fn_dirs):\n loss_fn_str = os.path.basename(loss_fn_dir)\n if args.loss_fns and loss_fn_str not in args.loss_fns:\n continue\n loss_fns.append(loss_fn_str)\n\n for man_dir in fullpath_list(loss_fn_dir):\n man_name = os.path.basename(man_dir)\n if not man_name.startswith('spd_'):\n continue\n factor_names = manifold_factors_from_path_label(man_name)\n man_factors = build_manifold(*factor_names)\n dim = sum([m.dim for m in man_factors])\n if dim != args.dim:\n continue\n\n # load the samples\n samples = load_seccurv_samples(man_dir)\n\n pos = x_step * loss_fn_id + offsets[ds_id]\n bplot = ax.boxplot([samples],\n sym='',\n whis=[10, 90],\n positions=[pos],\n labels=[ds_name],\n widths=bplot_width,\n notch=True,\n patch_artist=True,\n medianprops=dict(linewidth=0, color='gray'))\n for patch in bplot['boxes']:\n patch.set_facecolor(color)\n if args.dim == 3:\n add_annotation_dim3(ax, samples, pos, ds_name, loss_fn_str)\n elif args.dim == 6:\n add_annotation_dim6(ax, samples, pos, ds_name, loss_fn_str)\n\n ax.set_xticks(np.arange(0, x_step * len(loss_fns), x_step))\n ax.set_xticklabels([loss_fn_for_display(lfn) for lfn in loss_fns])\n ax.set_xlim(-1.0, x_step * len(loss_fns) - 0.75)\n for i in range(len(loss_fns)):\n ax.axvline(\n x_step * i + x_step // 2, color='lightblue', ls='--', lw=2)\n\n # save the figure\n plot_seccurvs(ax, fig, labels, colors)\n\n\ndef load_seccurv_samples(man_dir):\n return load_samples_for_best_embedding(os.path.join(man_dir, 'orig'))\n\n\ndef load_samples_for_best_embedding(exp_dir):\n best_optim, _ = get_best_optim(exp_dir)\n path = os.path.join(exp_dir, best_optim, 'spd-seccurvs_0.npy')\n return np.load(path)\n\n\ndef add_annotation_dim3(ax, samples, xpos, ds_name, loss_fn_str):\n ypos = -9\n arrowprops = dict(facecolor='k', arrowstyle='-|>')\n bbox = dict(boxstyle='round', facecolor='none')\n if loss_fn_str == 'sne-incl_10' and ds_name == 'facebook':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos - 0.05, ypos + 0.05),\n xytext=(xpos - 1.5, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sste-incl_10' and ds_name == 'facebook':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos - 0.05, ypos + 0.05),\n xytext=(xpos - 1.25, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sne-incl_10' and ds_name == 'power':\n p10, p25 = np.percentile(samples, [10, 25])\n ax.annotate(\n 'p25={}\\np10={}'.format(int(p25), int(p10)),\n xy=(xpos + 0.1, ypos + 0.1),\n xytext=(xpos + 0.5, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sste-incl_50' and ds_name == 'power':\n p10, p25 = np.percentile(samples, [10, 25])\n ax.annotate(\n 'p25={}\\np10={}'.format(int(p25), int(p10)),\n xy=(xpos + 0.1, ypos + 0.1),\n xytext=(xpos + 0.25, ypos + 2.0),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sne-excl_50' and ds_name == 'bio-diseasome':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos - 0.1, ypos + 0.05),\n xytext=(xpos - 1.25, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sne-excl_10' and ds_name == 'california':\n p10, p25 = np.percentile(samples, [10, 25])\n ax.annotate(\n 'p25={}\\np10={}'.format(int(p25), int(p10)),\n xy=(xpos - 0.1, ypos + 0.05),\n xytext=(xpos - 1.40, ypos + 1.0),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sne-excl_10' and ds_name == 'facebook':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos + 0.05, ypos + 0.1),\n xytext=(xpos + 0.8, ypos + 1.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'sne-excl_10' and ds_name == 'web-edu':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos + 0.05, ypos + 0.1),\n xytext=(xpos + 0.1, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'stress' and ds_name == 'facebook':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos + 0.05, ypos + 0.1),\n xytext=(xpos + 0.5, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n elif loss_fn_str == 'dist_1' and ds_name == 'web-edu':\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos + 0.05, ypos + 0.1),\n xytext=(xpos + 0.5, ypos + 0.5),\n fontsize=16,\n arrowprops=arrowprops,\n bbox=bbox)\n\n\ndef add_annotation_dim6(ax, samples, xpos, ds_name, loss_fn_str):\n ypos = -9\n arrowprops = dict(facecolor='k', arrowstyle='-|>')\n bbox = dict(boxstyle='round', facecolor='none')\n if (loss_fn_str in ('sne-incl_50', 'dist_1') and ds_name == 'web-edu') or \\\n (loss_fn_str == 'sste-incl_50' and ds_name == 'power'):\n p10 = np.percentile(samples, 10)\n ax.annotate(\n 'p10={}'.format(int(p10)),\n xy=(xpos - 0.05, ypos + 0.05),\n xytext=(xpos - 1.25, ypos + 0.5),\n fontsize=16,\n zorder=2,\n arrowprops=arrowprops,\n bbox=bbox)\n\n\ndef plot_seccurvs(ax, fig, labels, colors):\n ax.set_ylim(top=0.5, bottom=-9)\n ax.set_ylabel('SPD({})\\n\\nSectional Curvatures'.format(\n nnp1d2_to_n(args.dim)))\n if args.dim == 3:\n ax.xaxis.set_ticks([])\n ax.set_xticklabels([])\n ax.set_title(\n 'Distributions of SPD Sectional Curvatures as Sampled '\n 'Around the Learned Embeddings',\n y=1.26)\n # The legend.\n patches = [\n mpatches.Patch(color=color, label=label)\n for color, label in zip(colors, labels)\n ]\n ax.legend(\n handles=patches,\n bbox_to_anchor=(0, 1.02, 1, 0.2),\n loc='lower left',\n mode='expand',\n borderaxespad=0,\n ncol=3)\n elif args.dim == 6:\n ax.tick_params(axis='x', labelrotation=-15)\n\n plt.tight_layout()\n fig.savefig('spd-seccurvs{}.pdf'.format(args.dim), bbox_inches='tight')\n\n\ndef loss_fns_key_sorted(loss_fn_str):\n order = {\n 'sne-incl_50': 0,\n 'sne-incl_10': 1,\n 'sste-incl_50': 2,\n 'sste-incl_10': 3,\n 'sne-excl_50': 4,\n 'sne-excl_10': 5,\n 'stress': 6,\n 'dist_1': 7,\n 'dist_2': 8\n }\n return order[os.path.basename(loss_fn_str)]\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser(\n description='SPD sectional curvatures plot across datasets.')\n parser.add_argument('--root_dir', type=str, help='The root directory.')\n parser.add_argument(\n '--dim', type=int, required=True, help='The manifold dimension.')\n parser.add_argument('--datasets', nargs='*', help='The datasets.')\n parser.add_argument(\n '--loss_fns', nargs='+', type=str, help='The loss functions.')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n global args\n args = parse_args()\n sys.exit(main())\n","repo_name":"dalab/matrix-manifolds","sub_path":"graphembed/experiments/agg_spd_seccurvs.py","file_name":"agg_spd_seccurvs.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"22"} +{"seq_id":"27673905420","text":"\ndef minSubArraySum(nums):\n \"\"\"\n Given an array of integers, find the length of the longest subarray where the sum of the elements is greater than or equal to the integer k.\n \"\"\"\n if len(nums) == 0:\n return 0\n min_sum = nums[0]\n max_sum = nums[0]\n for i in range(1, len(nums)):\n min_sum = min(min_sum, nums[i])\n max_sum = max(max_sum, nums[i])\n return max_sum - min_sum\n","repo_name":"githubcopilot-22/Assessing-the-Quality-of-GitHub-Copilot-s-Code-Generation","sub_path":"using_only_function_names/code_generation_used_in_exp/114/prompt_114.py","file_name":"prompt_114.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"73629164857","text":"import numpy as np\nimport pytest\n\nfrom qutip import Qobj, qeye\nfrom qutip_qip.decompose._utility import (\n check_gate,\n)\n\n\n# Tests for check_gate\n@pytest.mark.parametrize(\n \"invalid_input\",\n [\n np.array([[1, 1], [1, 1]]),\n ([[1, 1], [1, 1]]),\n 1.5,\n 3,\n (1, 2, 3, 4),\n np.array([[], []]),\n ([[], []]),\n (),\n ],\n)\ndef test_check_gate_non_qobj(invalid_input):\n \"\"\"Checks if correct value is returned or not when the input is not a Qobj\n .\"\"\"\n with pytest.raises(TypeError, match=\"The input matrix is not a Qobj.\"):\n check_gate(invalid_input, num_qubits=1)\n\n\n@pytest.mark.parametrize(\"non_unitary\", [Qobj([[1, 1], [0, 1]])])\ndef test_check_gate_non_unitary(non_unitary):\n \"\"\"Checks if non-unitary input is correctly identified.\"\"\"\n with pytest.raises(ValueError, match=\"Input is not unitary.\"):\n check_gate(non_unitary, num_qubits=1)\n\n\n@pytest.mark.parametrize(\"non_1qubit_unitary\", [qeye(4)])\ndef test_check_gate_non_1qubit(non_1qubit_unitary):\n \"\"\"Checks if non-unitary input is correctly identified.\"\"\"\n num_qubits = 1\n with pytest.raises(ValueError, match=f\"Input is not a unitary on {num_qubits} qubits.\"):\n check_gate(non_1qubit_unitary, num_qubits)\n\n\n@pytest.mark.parametrize(\"unitary\", [Qobj([[1, 0], [0, -1]])])\ndef test_check_gate_unitary_input(unitary):\n \"\"\"Checks if shape of input is correctly identified.\"\"\"\n # No error raised if it passes.\n check_gate(unitary, num_qubits=1)\n","repo_name":"qutip/qutip-qip","sub_path":"tests/decomposition_functions/test_utility.py","file_name":"test_utility.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"22"} +{"seq_id":"36390578731","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"pyanote.utils\n\n(C) Lisa Baget et Matthieu Durand, 2018-2019\n\nCe module contient les fonctions permettant de lire des informations simples dans un fichier binaire Midi.\n\nDONE\n\"\"\"\n\ndef verifier(fichier, mot_clé, msg_erreur):\n ''' Verifie si mot_clé (binaire) est dans le fichier, sinon envoie une erreur avec msg_erreur.\n\n Dans tous les cas avance de len(mot_clé) dans le fichier. \n '''\n if fichier.read(len(mot_clé)) != mot_clé: ## lit le même nombre d'octets que dans mot_clé\n raise TypeError(msg_erreur) ## crée une erreur si c'est pas la même chose\n\ndef lire_entier(fichier, nb_octets):\n ''' Lit un entier codé sur nb_octets dans le fichier.\n '''\n entier = 0\n for i in range(nb_octets): #commence par poids fort\n entier = entier + ord(fichier.read(1)) * 256**(nb_octets-i-1) # ord transforme 1 octet binaire en entier (\n return entier\n\ndef lire_entier_variable(fichier):\n ''' Lit un entier codé sur un nombre variable d'octets dans le fichier (d'après le format MIDI).\n\n Continue à lire des octets dans que le premier bit est à 1.\n '''\n entier = 0\n octet = ord(fichier.read(1))\n while octet >= 128: ## si le bit de poids fort est 1 il faut continuer\n entier = entier * 128 + octet - 128\n octet = ord(fichier.read(1))\n return entier * 128 + octet\n\ndef avancer(fichier, nb_octets):\n '''Avance de nb_octets dans le fichier sans les lire.\n '''\n fichier.seek(nb_octets, 1) #1 a partir de la position ou l'on est / 0 debut fichier\n\ndef lire_chaine(fichier, taille, liste_codages=['utf-8']): # UTF-8 par défaut\n ''' Lit une chaine binaire de taille octets et essaie de la transformer en une chaine en\n utilisant un des codages de liste_codages.\n\n Les codages sont essayés dans l'ordre de la liste et la fonction renvoie une chaine binaire\n si aucun des codages n'a pu etre utilisé.\n '''\n chaine = fichier.read(taille)\n for codage in liste_codages: # pour chaque codage dans la liste en parametre\n try: # essayer de décoder\n return chaine.decode(codage)\n except UnicodeDecodeError: # siça marche pas\n pass # rien faire pour essayer un autre codage\n return chaine # si aucun codage a marché la chaine sera binaire\n\ndef lire_liste_octets(fichier, taille):\n ''' Lit taille entiers codés chacun sur un octet et les stocke dans une liste.\n '''\n liste = []\n for i in range(taille): \n liste.append(ord(fichier.read(1)))\n return liste\n\nif __name__ == \"__main__\":\n nom_fichier = 'fichiersMidi/Dave Brubeck - Take Five.mid'\n print(\"=========================================================\")\n print(\"Test du fichier\", nom_fichier)\n print(\"=========================================================\")\n fichier = open(nom_fichier, 'rb')\n print(\"Verification du MThd: \", end=\"\")\n try:\n verifier(fichier, b'MThd', \"\")\n print(\"OK\")\n except TypeError:\n print(\"Erreur\")\n taille_header = lire_entier(fichier, 4)\n print(\"Taille du header: \", taille_header, \"octets\")\n avancer(fichier, taille_header)\n print(\"Verification du premier MTrk: \", end=\"\")\n try:\n verifier(fichier, b'MTrk', \"\")\n print(\"OK\")\n except TypeError:\n print(\"Erreur\")\n taille_piste = lire_entier(fichier, 4)\n print(\"Taille de la première piste: \", taille_piste, \"octets\")\n delta_time = lire_entier_variable(fichier)\n print(\"Valeur du premier delta time:\", delta_time, \"ticks\")\n\n\n","repo_name":"Lisa-Baget/pyanote","sub_path":"pyanote/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74588104375","text":"from selenium.webdriver.common.by import By\r\nfrom model.user import User\r\nimport re\r\n\r\n\r\nclass UserHelper:\r\n\r\n def __init__(self, app):\r\n self.app = app\r\n\r\n def add_new_user(self):\r\n self.app.driver.find_element(By.LINK_TEXT, \"add new\").click()\r\n\r\n def create(self, user):\r\n self.add_new_user()\r\n self.fill_user_form(user)\r\n # save changes\r\n self.app.driver.find_element(By.XPATH, \"(//input[@name=\\'submit\\'])[2]\").click()\r\n self.app.navigation.go_to_home_page()\r\n self.user_cache = None\r\n\r\n def change_field_value(self, field_name, text):\r\n if text is not None:\r\n # check text field\r\n if field_name not in [\"bday\", \"bmonth\", \"aday\", \"amonth\"]:\r\n self.app.driver.find_element(By.NAME, field_name).click()\r\n self.app.driver.find_element(By.NAME, field_name).clear()\r\n self.app.driver.find_element(By.NAME, field_name).send_keys(text)\r\n # check dropdown list\r\n else:\r\n self.app.driver.find_element(By.NAME, field_name).click()\r\n dropdown = self.app.driver.find_element(By.NAME, field_name)\r\n dropdown.find_element(By.XPATH, \"//option[. = %s]\" % text).click()\r\n\r\n def fill_user_form(self, user):\r\n self.change_field_value(\"firstname\", user.firstname)\r\n self.change_field_value(\"middlename\", user.middlename)\r\n self.change_field_value(\"lastname\", user.lastname)\r\n self.change_field_value(\"nickname\", user.nickname)\r\n self.change_field_value(\"title\", user.title)\r\n self.change_field_value(\"company\", user.company)\r\n self.change_field_value(\"address\", user.address)\r\n self.change_field_value(\"home\", user.home)\r\n self.change_field_value(\"mobile\", user.mobile)\r\n self.change_field_value(\"work\", user.work)\r\n self.change_field_value(\"fax\", user.fax)\r\n self.change_field_value(\"email\", user.email)\r\n self.change_field_value(\"email2\", user.email2)\r\n self.change_field_value(\"email3\", user.email3)\r\n self.change_field_value(\"homepage\", user.homepage)\r\n self.change_field_value(\"bday\", user.bday)\r\n self.change_field_value(\"bmonth\", user.bmonth)\r\n self.change_field_value(\"byear\", user.byear)\r\n self.change_field_value(\"aday\", user.aday)\r\n self.change_field_value(\"amonth\", user.amonth)\r\n self.change_field_value(\"ayear\", user.ayear)\r\n self.change_field_value(\"address2\", user.address2)\r\n self.change_field_value(\"phone2\", user.phone2)\r\n self.change_field_value(\"notes\", user.notes)\r\n\r\n def delete(self):\r\n # delete first user\r\n self.delete_by_index(0)\r\n\r\n def delete_by_index(self, index):\r\n self.select_user_by_index(index)\r\n # delete user\r\n self.app.driver.find_element(By.XPATH, \"//input[@value='Delete']\").click()\r\n # confirm changes\r\n self.app.driver.switch_to.alert.accept()\r\n self.app.navigation.go_to_home_page()\r\n self.user_cache = None\r\n\r\n def delete_by_id(self, id):\r\n self.select_user_by_id(id)\r\n # delete user\r\n self.app.driver.find_element(By.XPATH, \"//input[@value='Delete']\").click()\r\n # confirm changes\r\n self.app.driver.switch_to.alert.accept()\r\n self.app.navigation.go_to_home_page()\r\n self.user_cache = None\r\n\r\n def select_user_by_index(self, index):\r\n self.app.driver.find_elements(By.NAME, \"selected[]\")[index].click()\r\n\r\n def select_user_by_id(self, id):\r\n self.app.driver.find_element(By.CSS_SELECTOR, f\"input[value='{id}']\").click()\r\n\r\n def modify(self, user):\r\n # modify first user\r\n self.modify_by_index(0, user)\r\n\r\n def modify_by_index(self, index, user):\r\n self.open_form_to_modify(index)\r\n self.fill_user_form(user)\r\n # save changes\r\n self.app.driver.find_element(By.NAME, \"update\").click()\r\n self.app.navigation.go_to_home_page()\r\n self.user_cache = None\r\n\r\n def modify_by_id(self, id, user):\r\n self.open_form_to_modify_by_id(id)\r\n self.fill_user_form(user)\r\n # save changes\r\n self.app.driver.find_element(By.NAME, \"update\").click()\r\n self.app.navigation.go_to_home_page()\r\n self.user_cache = None\r\n\r\n def open_form_to_modify(self, index):\r\n self.app.navigation.go_to_home_page()\r\n self.app.driver.find_elements(By.XPATH, \"//img[@alt='Edit']\")[index].click()\r\n\r\n def open_form_to_modify_by_id(self, id):\r\n self.app.navigation.go_to_home_page()\r\n self.app.driver.find_element(By.CSS_SELECTOR, f\"a[href='edit.php?id={id}']\").click()\r\n\r\n\r\n def open_form_by_view(self, index):\r\n self.app.navigation.go_to_home_page()\r\n self.app.driver.find_elements(By.XPATH, \"//img[@alt='Details']\")[index].click()\r\n\r\n def count(self):\r\n self.app.navigation.go_to_home_page()\r\n return len(self.app.driver.find_elements(By.NAME, \"selected[]\"))\r\n\r\n user_cache = None\r\n\r\n def get_contact_list(self):\r\n self.app.navigation.go_to_home_page()\r\n if self.user_cache is None:\r\n self.user_cache = []\r\n find_user_list = self.app.driver.find_element(By.ID, \"maintable\").find_elements(By.NAME, \"entry\")\r\n for user in find_user_list:\r\n user_params_list = user.find_elements(By.TAG_NAME, \"td\")\r\n first_name = user_params_list[2].text\r\n last_name = user_params_list[1].text\r\n user_id = user_params_list[0].find_element(By.NAME, \"selected[]\").get_attribute(\"value\")\r\n # get all phones in str\r\n all_phones = user_params_list[5].text\r\n # get all address in str\r\n address = user_params_list[3].text\r\n all_email = user_params_list[4].text\r\n self.user_cache.append(User(firstname=first_name, lastname=last_name, id=user_id,\r\n all_phones_from_page=all_phones, address=address,\r\n all_email_from_page=all_email))\r\n # return copy user_cache\r\n return list(self.user_cache)\r\n\r\n def get_contact_info_from_edit_page(self, index):\r\n self.open_form_to_modify(index)\r\n firstname = self.app.driver.find_element(By.NAME, \"firstname\").get_attribute(\"value\")\r\n lastname = self.app.driver.find_element(By.NAME, \"lastname\").get_attribute(\"value\")\r\n user_id = self.app.driver.find_element(By.NAME, \"id\").get_attribute(\"value\")\r\n homephone = self.app.driver.find_element(By.NAME, \"home\").get_attribute(\"value\")\r\n workphone = self.app.driver.find_element(By.NAME, \"work\").get_attribute(\"value\")\r\n mobilephone = self.app.driver.find_element(By.NAME, \"mobile\").get_attribute(\"value\")\r\n secondaryphone = self.app.driver.find_element(By.NAME, \"phone2\").get_attribute(\"value\")\r\n address = self.app.driver.find_element(By.NAME, \"address\").get_attribute(\"value\")\r\n email_1 = self.app.driver.find_element(By.NAME, \"email\").get_attribute(\"value\")\r\n email_2 = self.app.driver.find_element(By.NAME, \"email2\").get_attribute(\"value\")\r\n email_3 = self.app.driver.find_element(By.NAME, \"email3\").get_attribute(\"value\")\r\n return User(firstname=firstname, lastname=lastname, id=user_id, home=homephone, work=workphone,\r\n mobile=mobilephone, phone2=secondaryphone, address=address, email=email_1, email2=email_2,\r\n email3=email_3)\r\n\r\n def get_contact_from_view_page(self, index):\r\n self.open_form_by_view(index)\r\n # get all text on page\r\n text = self.app.driver.find_element(By.ID, \"content\").text\r\n # get all phones on page with Fax number\r\n phones_with_fax = \"\".join(text.split('\\n\\n')[1])\r\n # delete Fax number\r\n phones_without_fax = re.sub(\"F.*\", \"\", phones_with_fax)\r\n secondaryphone = text.split('\\n\\n')[5]\r\n all_phones = phones_without_fax + secondaryphone\r\n return User(all_phones_from_page=all_phones)\r\n\r\n def add_user_in_group(self, user, group):\r\n self.app.navigation.go_to_home_page()\r\n self.select_user_by_id(user.id)\r\n # select group to add contact\r\n dropdown = self.app.driver.find_element(By.NAME, \"to_group\")\r\n dropdown.find_element(By.CSS_SELECTOR, f\"option[value='{group.id}']\").click()\r\n # add contact\r\n self.app.driver.find_element(By.NAME, \"add\").click()\r\n self.show_all_groups()\r\n\r\n def show_contacts_in_group(self, group):\r\n dropdown = self.app.driver.find_element(By.NAME, \"group\")\r\n dropdown.find_element(By.CSS_SELECTOR, f\"option[value='{group.id}']\").click()\r\n\r\n def del_user_from_group(self, user, group):\r\n self.app.navigation.go_to_home_page()\r\n self.show_contacts_in_group(group)\r\n self.select_user_by_id(user.id)\r\n self.app.driver.find_element(By.NAME, \"remove\").click()\r\n self.show_all_groups()\r\n\r\n def show_all_groups(self):\r\n self.app.navigation.go_to_home_page()\r\n dropdown = self.app.driver.find_element(By.NAME, \"group\")\r\n dropdown.find_element(By.XPATH, \"//option[. = '[all]']\").click()\r\n\r\n\r\n\r\n","repo_name":"chernenko-art/test_python","sub_path":"fixture/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":9316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43445770680","text":"import pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\ndriver_path = '/Users/msaad/Downloads/chromedriver'\n\noptions = Options()\noptions.add_argument('--headless')\noptions.add_argument(\"--window-size=1280,700\")\noptions.add_argument('--ignore-certificate-errors')\n\ndriver = webdriver.Chrome(options= options, executable_path = driver_path)\n\ndriver.get('https://www.nofrills.ca/food/c/27985')\ndriver.implicitly_wait(40)\ndriver.find_element(By.XPATH, \"//button[@class='modal-dialog__content__close']\").click()\n\ntime.sleep(5)\ncookies = WebDriverWait(driver, 40).until(EC.element_to_be_clickable((By.XPATH, \"//button[@class='lds__privacy-policy__btnClose']\")))\ncookies.click()\n\ncount = 0\nwhile count < 100 and driver.find_elements(By.XPATH, \"//button[@class='primary-button primary-button--load-more-button']\"):\n load_more = WebDriverWait(driver, 40).until(EC.element_to_be_clickable((By.XPATH, \"//button[@class='primary-button primary-button--load-more-button']\")))\n #driver.find_element(By.XPATH, \"//button[@class='primary-button primary-button--load-more-button']\").click()\n load_more.click()\n count += 1\n print(count)\n \n\nfoods = driver.find_elements(By.XPATH, \"//span[@class='product-name__item product-name__item--name']\")\nprices = driver.find_elements(By.XPATH, \"//span[@class='price__value selling-price-list__item__price selling-price-list__item__price--now-price__value']\")\n#print(len(foods), len(prices))\nproducts_list = []\n\nfor i in range(len(foods)):\n products_list.append([foods[i].text, prices[i].text[1:]])\n\ndriver.quit()\n\nnofrills_df = pd.DataFrame(products_list)\nnofrills_df.columns = ['product', 'price']\nnofrills_df = nofrills_df.reset_index(drop = True)\nnofrills_df.to_csv('./nofrills_product_prices.csv', index = False)","repo_name":"msaad-a/cheapest-groceries","sub_path":"nofrills_scraper.py","file_name":"nofrills_scraper.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26773486928","text":"def knapSack(W, wt, val, n):\n arr = [[0 for x in range(W + 1)] for x in range(n + 1)]\n for i in range(n + 1):\n for w in range(W + 1):\n if i == 0 or w == 0:\n arr[i][w] = 0\n elif wt[i-1] <= w:\n arr[i][w] = max(val[i-1] + arr[i-1][w-wt[i-1]], arr[i-1][w])\n else:\n arr[i][w] = arr[i-1][w]\n for i in range(n, 0, -1):\n if arr[i][W] != arr[i-1][W]:\n print(\"Item \",i ,\"with weight \", wt[i-1], \"& value \", val[i-1], \"is selected\")\n print(\"Maximum possible value that can be accomodated in Knapsack is \", arr[n][W])\nval = [10,5,15,7,6,18,3]\nwt = [2,3,5,7,1,4,1]\nW = 15\nn = len(val)\nknapSack(W, wt, val, n)\n","repo_name":"V35HR4J/Your-First-Contribution","sub_path":"Python/knapsack_dp.py","file_name":"knapsack_dp.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"22"} +{"seq_id":"29550201939","text":"tutors = [\r\n 'Иван', 'Анастасия', 'Петр', 'Сергей',\r\n 'Дмитрий', 'Борис', 'Елена', 'Николай'\r\n]\r\nclasses = [\r\n '9А', '7В', '9Б', '9В', '8Б', '10А'\r\n]\r\n\r\n\r\ndef journal(name, rank):\r\n for i in range(len(name)):\r\n if i > len(rank) - 1:\r\n yield name[i], None\r\n else:\r\n yield name[i], rank[i]\r\n\r\n\r\nsort_journal = journal(tutors, classes)\r\nprint(type(sort_journal), *sort_journal, sep=\"\\n\")\r\n","repo_name":"spaIpeen/basic-python","sub_path":"Moroz_Vladislav_dz_5/task_5_3.py","file_name":"task_5_3.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1603743350","text":"####################################################\n####################################################\n# Object Oriented Programming Challenge - Solution\n####################################################\n####################################################\n#\n# For this challenge, create a bank account class that has two attributes:\n#\n# * owner\n# * balance\n#\n# and two methods:\n#\n# * deposit\n# * withdraw\n#\n# As an added requirement, withdrawals may not exceed the available balance.\n#\n# Instantiate your class, make several deposits and withdrawals, and test to make sure the account can't be overdrawn.\n\n\n\n\nclass Account():\n def __init__(self, owner, balance = 0):\n self.owner = owner\n self.balance = balance\n\n def __repr__(self):\n return f\"Account holder's name : {self.owner} \\n Balance : {self.balance}\"\n\n def deposit(self, amount_dep):\n self.balance = self.balance + amount_dep\n print(f\"Amount deposit accepted!! \\n Available balance : {self.balance}\")\n\n def withdraw(self, amaount_wdr):\n if self.balance > amaount_wdr:\n self.balance = self.balance - amaount_wdr\n print(f\"Withdrawal Successfull!! \\n Available balance : {self.balance}\")\n else:\n print(\"Funds unavailable!!\")\n\n\n\n# 1. Instantiate the class\nacct1 = Account('Jose',100)\n\n\n# 2. Print the object\nprint(acct1)\n\n\n\n\n# 3. Show the account owner attribute\nprint(acct1.owner)\n\n\n\n\n# 4. Show the account balance attribute\nprint(acct1.balance)\n\n\n\n\n# 5. Make a series of deposits and withdrawals\nacct1.deposit(50)\n\n\n\n\nacct1.withdraw(75)\n\n\n\n\n# 6. Make a withdrawal that exceeds the available balance\nacct1.withdraw(500)\n\n\n\n# ## Good job!\n","repo_name":"ParitoshChaudhary/PythonPractice","sub_path":"OOPS_Project.py","file_name":"OOPS_Project.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"74525087095","text":"import numpy as np\n\ndef line_search(f, x_c, c, search_dir, rho, grad_dir, tol=1e-3, max_iter=100):\n alpha = 1\n iter = 0\n while iter < max_iter and f(x_c + alpha * search_dir) > f(x_c) + (c * alpha * grad_dir.T @ search_dir) + tol:\n alpha = rho * alpha\n iter = iter + 1\n if iter % 20 == 0:\n \tprint(\"iter: \", iter)\n \tprint(\"lhs: \", f(x_c + alpha * search_dir))\n \tprint(\"rhs: \", f(x_c) + (c * alpha * grad_dir.T @ search_dir) + tol)\n \tprint(\"\\n\")\n return alpha, f(x_c + alpha * search_dir)\n","repo_name":"annh3/optimization_utils","sub_path":"bls.py","file_name":"bls.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34534325312","text":"# -*- coding: utf-8 -*-\nfrom twython import Twython, TwythonError, TwythonAuthError, TwythonRateLimitError\n\nfrom .config import unittest\n\nimport responses\nimport requests\n\nfrom twython.compat import is_py2\nif is_py2:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\ntry:\n import unittest.mock as mock\nexcept ImportError:\n import mock\n\n\nclass TwythonAPITestCase(unittest.TestCase):\n def setUp(self):\n self.api = Twython('', '', '', '')\n\n def get_url(self, endpoint):\n \"\"\"Convenience function for mapping from endpoint to URL\"\"\"\n return '%s/%s.json' % (self.api.api_url % self.api.api_version, endpoint)\n\n def register_response(self, method, url, body='{}', match_querystring=False,\n status=200, adding_headers=None, stream=False,\n content_type='application/json; charset=utf-8'):\n \"\"\"Wrapper function for responses for simpler unit tests\"\"\"\n\n # responses uses BytesIO to hold the body so it needs to be in bytes\n if not is_py2:\n body = bytes(body, 'UTF-8')\n\n responses.add(method, url, body, match_querystring,\n status, adding_headers, stream, content_type)\n\n @responses.activate\n def test_request_should_handle_full_endpoint(self):\n \"\"\"Test that request() accepts a full URL for the endpoint argument\"\"\"\n url = 'https://api.twitter.com/1.1/search/tweets.json'\n self.register_response(responses.GET, url)\n\n self.api.request(url)\n\n self.assertEqual(1, len(responses.calls))\n self.assertEqual(url, responses.calls[0].request.url)\n\n @responses.activate\n def test_request_should_handle_relative_endpoint(self):\n \"\"\"Test that request() accepts a twitter endpoint name for the endpoint argument\"\"\"\n url = 'https://api.twitter.com/1.1/search/tweets.json'\n self.register_response(responses.GET, url)\n\n self.api.request('search/tweets', version='1.1')\n\n self.assertEqual(1, len(responses.calls))\n self.assertEqual(url, responses.calls[0].request.url)\n\n @responses.activate\n def test_request_should_post_request_regardless_of_case(self):\n \"\"\"Test that request() accepts the HTTP method name regardless of case\"\"\"\n url = 'https://api.twitter.com/1.1/statuses/update.json'\n self.register_response(responses.POST, url)\n\n self.api.request(url, method='POST')\n self.api.request(url, method='post')\n\n self.assertEqual(2, len(responses.calls))\n self.assertEqual('POST', responses.calls[0].request.method)\n self.assertEqual('POST', responses.calls[1].request.method)\n\n @responses.activate\n def test_request_should_throw_exception_with_invalid_http_method(self):\n \"\"\"Test that request() throws an exception when an invalid HTTP method is passed\"\"\"\n # TODO(cash): should Twython catch the AttributeError and throw a TwythonError\n self.assertRaises(AttributeError, self.api.request, endpoint='search/tweets', method='INVALID')\n\n @responses.activate\n def test_request_should_encode_boolean_as_lowercase_string(self):\n \"\"\"Test that request() encodes a boolean parameter as a lowercase string\"\"\"\n endpoint = 'search/tweets'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url)\n\n self.api.request(endpoint, params={'include_entities': True})\n self.api.request(endpoint, params={'include_entities': False})\n\n self.assertEqual(url + '?include_entities=true', responses.calls[0].request.url)\n self.assertEqual(url + '?include_entities=false', responses.calls[1].request.url)\n\n @responses.activate\n def test_request_should_handle_string_or_number_parameter(self):\n \"\"\"Test that request() encodes a numeric or string parameter correctly\"\"\"\n endpoint = 'search/tweets'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url)\n\n self.api.request(endpoint, params={'lang': 'es'})\n self.api.request(endpoint, params={'count': 50})\n\n self.assertEqual(url + '?lang=es', responses.calls[0].request.url)\n self.assertEqual(url + '?count=50', responses.calls[1].request.url)\n\n @responses.activate\n def test_request_should_encode_list_of_strings_as_string(self):\n \"\"\"Test that request() encodes a list of strings as a comma-separated string\"\"\"\n endpoint = 'search/tweets'\n url = self.get_url(endpoint)\n location = ['37.781157', '-122.39872', '1mi']\n self.register_response(responses.GET, url)\n\n self.api.request(endpoint, params={'geocode': location})\n\n # requests url encodes the parameters so , is %2C\n self.assertEqual(url + '?geocode=37.781157%2C-122.39872%2C1mi', responses.calls[0].request.url)\n\n @responses.activate\n def test_request_should_encode_numeric_list_as_string(self):\n \"\"\"Test that request() encodes a list of numbers as a comma-separated string\"\"\"\n endpoint = 'search/tweets'\n url = self.get_url(endpoint)\n location = [37.781157, -122.39872, '1mi']\n self.register_response(responses.GET, url)\n\n self.api.request(endpoint, params={'geocode': location})\n\n self.assertEqual(url + '?geocode=37.781157%2C-122.39872%2C1mi', responses.calls[0].request.url)\n\n @responses.activate\n def test_request_should_ignore_bad_parameter(self):\n \"\"\"Test that request() ignores unexpected parameter types\"\"\"\n endpoint = 'search/tweets'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url)\n\n self.api.request(endpoint, params={'geocode': self})\n\n self.assertEqual(url, responses.calls[0].request.url)\n\n @responses.activate\n def test_request_should_handle_file_as_parameter(self):\n \"\"\"Test that request() pulls a file out of params for requests lib\"\"\"\n endpoint = 'account/update_profile_image'\n url = self.get_url(endpoint)\n self.register_response(responses.POST, url)\n\n mock_file = StringIO(\"Twython test image\")\n self.api.request(endpoint, method='POST', params={'image': mock_file})\n\n self.assertIn(b'filename=\"image\"', responses.calls[0].request.body)\n self.assertIn(b\"Twython test image\", responses.calls[0].request.body)\n\n @responses.activate\n def test_request_should_put_params_in_body_when_post(self):\n \"\"\"Test that request() passes params as data when the request is a POST\"\"\"\n endpoint = 'statuses/update'\n url = self.get_url(endpoint)\n self.register_response(responses.POST, url)\n\n self.api.request(endpoint, method='POST', params={'status': 'this is a test'})\n\n self.assertIn(b'status=this+is+a+test', responses.calls[0].request.body)\n self.assertNotIn('status=this+is+a+test', responses.calls[0].request.url)\n\n @responses.activate\n def test_get_uses_get_method(self):\n \"\"\"Test Twython generic GET request works\"\"\"\n endpoint = 'account/verify_credentials'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url)\n\n self.api.get(endpoint)\n\n self.assertEqual(1, len(responses.calls))\n self.assertEqual(url, responses.calls[0].request.url)\n\n @responses.activate\n def test_post_uses_post_method(self):\n \"\"\"Test Twython generic POST request works\"\"\"\n endpoint = 'statuses/update'\n url = self.get_url(endpoint)\n self.register_response(responses.POST, url)\n\n self.api.post(endpoint, params={'status': 'I love Twython!'})\n\n self.assertEqual(1, len(responses.calls))\n self.assertEqual(url, responses.calls[0].request.url)\n\n def test_raise_twython_error_on_request_exception(self):\n \"\"\"Test if TwythonError is raised by a RequestException\"\"\"\n with mock.patch.object(requests.Session, 'get') as get_mock:\n # mocking an ssl cert error\n get_mock.side_effect = requests.RequestException(\"hostname 'example.com' doesn't match ...\")\n self.assertRaises(TwythonError, self.api.get, 'https://example.com')\n\n @responses.activate\n def test_request_should_get_convert_json_to_data(self):\n \"\"\"Test that Twython converts JSON data to a Python object\"\"\"\n endpoint = 'statuses/show'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url, body='{\"id\": 210462857140252672}')\n\n data = self.api.request(endpoint, params={'id': 210462857140252672})\n\n self.assertEqual({'id': 210462857140252672}, data)\n\n @responses.activate\n def test_request_should_raise_exception_with_invalid_json(self):\n \"\"\"Test that Twython handles invalid JSON (though Twitter should not return it)\"\"\"\n endpoint = 'statuses/show'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url, body='{\"id: 210462857140252672}')\n\n self.assertRaises(TwythonError, self.api.request, endpoint, params={'id': 210462857140252672})\n\n @responses.activate\n def test_request_should_handle_401(self):\n \"\"\"Test that Twython raises an auth error on 401 error\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url, body='{\"errors\":[{\"message\":\"Error\"}]}', status=401)\n\n self.assertRaises(TwythonAuthError, self.api.request, endpoint)\n\n @responses.activate\n def test_request_should_handle_400_for_missing_auth_data(self):\n \"\"\"Test that Twython raises an auth error on 400 error when no oauth data sent\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url,\n body='{\"errors\":[{\"message\":\"Bad Authentication data\"}]}', status=400)\n\n self.assertRaises(TwythonAuthError, self.api.request, endpoint)\n\n @responses.activate\n def test_request_should_handle_400_that_is_not_auth_related(self):\n \"\"\"Test that Twython raises a normal error on 400 error when unrelated to authorization\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url,\n body='{\"errors\":[{\"message\":\"Bad request\"}]}', status=400)\n\n self.assertRaises(TwythonError, self.api.request, endpoint)\n\n @responses.activate\n def test_request_should_handle_rate_limit(self):\n \"\"\"Test that Twython raises an rate limit error on 429\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url,\n body='{\"errors\":[{\"message\":\"Rate Limit\"}]}', status=429)\n\n self.assertRaises(TwythonRateLimitError, self.api.request, endpoint)\n\n @responses.activate\n def test_get_lastfunction_header_should_return_header(self):\n \"\"\"Test getting last specific header of the last API call works\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url, adding_headers={'x-rate-limit-remaining': '37'})\n\n self.api.get(endpoint)\n\n value = self.api.get_lastfunction_header('x-rate-limit-remaining')\n self.assertEqual('37', value)\n value2 = self.api.get_lastfunction_header('does-not-exist')\n self.assertIsNone(value2)\n value3 = self.api.get_lastfunction_header('not-there-either', '96')\n self.assertEqual('96', value3)\n\n def test_get_lastfunction_header_should_raise_error_when_no_previous_call(self):\n \"\"\"Test attempting to get a header when no API call was made raises a TwythonError\"\"\"\n self.assertRaises(TwythonError, self.api.get_lastfunction_header, 'no-api-call-was-made')\n\n @responses.activate\n def test_sends_correct_accept_encoding_header(self):\n \"\"\"Test that Twython accepts compressed data.\"\"\"\n endpoint = 'statuses/home_timeline'\n url = self.get_url(endpoint)\n self.register_response(responses.GET, url)\n\n self.api.get(endpoint)\n\n self.assertEqual(b'gzip, deflate', responses.calls[0].request.headers['Accept-Encoding'])\n\n # Static methods\n def test_construct_api_url(self):\n \"\"\"Test constructing a Twitter API url works as we expect\"\"\"\n url = 'https://api.twitter.com/1.1/search/tweets.json'\n constructed_url = self.api.construct_api_url(url, q='#twitter')\n self.assertEqual(constructed_url, 'https://api.twitter.com/1.1/search/tweets.json?q=%23twitter')\n\n def test_encode(self):\n \"\"\"Test encoding UTF-8 works\"\"\"\n self.api.encode('Twython is awesome!')\n\n def test_cursor_requires_twython_function(self):\n \"\"\"Test that cursor() raises when called without a Twython function\"\"\"\n def init_and_iterate_cursor(*args, **kwargs):\n cursor = self.api.cursor(*args, **kwargs)\n return next(cursor)\n\n non_function = object()\n non_twython_function = lambda x: x\n\n self.assertRaises(TypeError, init_and_iterate_cursor, non_function)\n self.assertRaises(TwythonError, init_and_iterate_cursor, non_twython_function)\n\n","repo_name":"ryanmcgrath/twython","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":13244,"program_lang":"python","lang":"en","doc_type":"code","stars":1848,"dataset":"github-code","pt":"22"} +{"seq_id":"16425882847","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.models import User, Permission\nfrom django.contrib.sites.models import Site\nfrom django.core.cache import cache\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom vcboard.forms import PermissionMatrixForm\nfrom vcboard.models import Forum, UserGroup, ForumPermission, GroupPermission, UserPermission\nfrom vcboard.ranks.models import Rank, RankPermission\nfrom vcboard.utils import render\nimport re\n\n@staff_member_required\ndef permission_matrix(request, obj_type=None, id=None, template='admin/permission_matrix.html'):\n \"\"\"\n Allows the user to quickly adjust permissions for forums, groups, ranks\n and individual users\n \"\"\"\n\n site = Site.objects.get_current()\n default_perms = False\n klass, matrix_type = {\n 'forum': (Forum, ForumPermission),\n 'group': (UserGroup, GroupPermission),\n 'rank': (Rank, RankPermission),\n 'user': (User, UserPermission),\n }.get(obj_type, (None, None))\n\n if klass and id:\n obj = get_object_or_404(klass, pk=id)\n permissions = matrix_type.objects.filter(**{\n 'site': site,\n str(obj_type + '__id'): obj.id})\n else:\n obj = None\n default_perms = True\n permissions = ForumPermission.objects.filter(site=site)\n matrix_type = ForumPermission\n\n # make the forum permission matrix view slightly different\n forum = None\n if isinstance(obj, Forum):\n forum = obj\n \n if request.method == 'POST':\n form = PermissionMatrixForm(request.POST, forum=forum)\n if form.is_valid():\n forums = {}\n perms = {}\n\n for field_name in form.fields.keys():\n forum_id, permission_id = re.findall('f_(\\d+)_p_(\\d+)', field_name)[0]\n\n # retrieve the forum\n f = forums.get(forum_id, None)\n if not f:\n f = Forum.objects.get(pk=forum_id)\n forums[forum_id] = f\n\n # retrieve the permission\n p = perms.get(permission_id, None)\n if not p:\n p = Permission.objects.get(pk=permission_id)\n perms[permission_id] = p\n\n params = {'site': site, 'forum': f, 'permission': p}\n if not forum and not default_perms:\n params[str(obj_type)] = obj\n\n perm, c = matrix_type.objects.get_or_create(**params)\n value = form.cleaned_data[field_name]\n if perm.has_permission != value:\n perm.has_permission = value\n perm.save()\n\n # remove previously cached permissions for this forum. This makes\n # it so the permissions have to be refreshed\n perms = cache.get('perms_for_forums', {})\n cached_perm_dict = cache.get('vcboard_user_perms', {})\n for forum_id in forums.keys():\n if perms.has_key(forum_id):\n for key in perms[forum_id]:\n del cached_perm_dict[key]\n perms[str(forum_id)] = []\n cache.set('vcboard_user_perms', cached_perm_dict)\n cache.set('perms_for_forums', perms)\n\n request.user.message_set.create(message='Permissions have been saved.')\n else:\n form = PermissionMatrixForm(permissions=permissions, forum=forum)\n\n data = {\n 'object': obj,\n 'form': form,\n 'default_perms': default_perms\n }\n\n return render(request, template, data)\n","repo_name":"codekoala/django-vcboard","sub_path":"vcboard/admin_views.py","file_name":"admin_views.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"12368999691","text":"from rdflib import URIRef, BNode, Literal\nfrom rdflib.namespace import RDF, RDFS\nfrom pyfuseki import FusekiUpdate, FusekiQuery\nimport pysolr \n\nacervoUpdate = FusekiUpdate('http://localhost:3030', 'acervo')\nacervoQuery = FusekiQuery('http://localhost:3030', 'acervo')\n\nprefix = \"\"\"PREFIX work: \n PREFIX rdf: \n PREFIX bf: \n PREFIX rdfs: \"\"\"\n\ndef UpdateContribution(request, work_uri):\n\n thesaurusUpdate = FusekiUpdate('http://localhost:3030', 'authorities') \n \n up = \"\"\"PREFIX bflc: \n INSERT DATA\n { GRAPH <\"\"\"+request.primaryContributionUri+\"\"\"> { \n <\"\"\"+request.primaryContributionUri+\"\"\"> \n bflc:contributorTo \n <\"\"\"+str(work_uri)+\"\"\"> } }\"\"\"\n thesaurusUpdate.run_sparql(up)\n\n solr = pysolr.Solr('http://localhost:8983/solr/authorities/', timeout=10)\n contributionID = request.primaryContributionUri.split(\"/\")[-1]\n doc = {\n \"id\": contributionID,\n \"contributorTo\": {\"add\": [work_uri]}\n }\n solr.add([doc], commit=True)\n\ndef Contributor(g, request, uri, BF, BFLC):\n\n contribution = BNode() \n agent = URIRef(request.primaryContributionUri)\n role = URIRef(request.primaryContributionRoleUri)\n g.add((uri, BF.contribution, contribution))\n g.add((contribution, RDF.type, BF.Contribution))\n g.add((contribution, BF.agent, agent))\n g.add((contribution, RDFS.label, Literal(request.primaryContributionAgent)))\n g.add((contribution, BF.role, role))\n\n UpdateContribution(request, uri)\n\n return g\n\ndef EditContributor(request, bkID):\n\n askAgent = prefix+\"ASK { graph work:\"+bkID+\"\"\"\n {work:\"\"\"+bkID+\"\"\" bf:contribution ?o .\n ?o bf:agent <\"\"\"+request.primaryContributionUri+\"\"\"> }}\"\"\"\n\n responseAgent = acervoQuery.run_sparql(askAgent)\n responseAgent = responseAgent.convert()\n\n askRole = prefix+\"ASK { graph work:\"+bkID+\"\"\"\n {work:\"\"\"+bkID+\"\"\" bf:contribution ?o .\n ?o bf:role <\"\"\"+request.primaryContributionRoleUri+\"\"\"> }}\"\"\"\n responseRole = acervoQuery.run_sparql(askRole)\n responseRole = responseRole.convert()\n responses = [responseAgent['boolean'], responseRole['boolean']]\n\n if False in responses:\n up = prefix+\"WITH work:\"+bkID+\"\"\"\n DELETE {work:\"\"\"+bkID+\"\"\" bf:contribution ?o .\n ?o ?p ?agent }\n INSERT {work:\"\"\"+bkID+\"\"\" bf:contribution ?o .\n ?o rdf:type bf:Contribution .\n ?o rdfs:label '\"\"\"+request.primaryContributionAgent+\"\"\"' .\n ?o bf:role .\n ?o bf:agent <\"\"\"+request.primaryContributionUri+\"\"\"> }\n WHERE {work:\"\"\"+bkID+\"\"\" bf:contribution ?o .\n ?o ?p ?agent }\"\"\"\n acervoUpdate.run_sparql(up)\n\ndef GetContribution(bkID, bkDict):\n\n query = \"SELECT ?p ?o WHERE { graph work:\"+bkID+\"\"\" {\n work:\"\"\"+bkID+\"\"\" bf:contribution ?contribution .\n ?contribution ?p ?o \n } }\"\"\"\n\n queryContribution = prefix+query\n response = acervoQuery.run_sparql(queryContribution)\n response = response.convert()\n bindings = response['results']['bindings']\n\n contribution = {}\n for i in bindings:\n metadadoUri = i['p']['value']\n if metadadoUri == 'http://www.w3.org/2000/01/rdf-schema#label':\n contribution['label'] = i['o']['value']\n elif metadadoUri == 'http://id.loc.gov/ontologies/bibframe/role':\n value = i['o']['value'].split('/')[-1]\n contribution['role'] = value\n elif metadadoUri == 'http://id.loc.gov/ontologies/bibframe/agent':\n value = i['o']['value']\n contribution['uri'] = value\n\n bkDict['primaryContribution'] = contribution\n\n return bkDict\n ","repo_name":"inacioigne/BiblioKeia","sub_path":"api/src/function/bibframe/Work/contributor.py","file_name":"contributor.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"23506422837","text":"print('Super progressão aritmética')\nprint('=-' *20)\n\nPA = int(input('Digite o primeiro termo: '))\nrazao = int(input('Digite a razão: '))\ntermo = razao*10 + PA\nconta = 1\nfor conta in range(PA,termo,razao):\n print('{} -> '.format(conta),end=' ')\n conta += 1\n\nprint('PAUSA')\nPA2 = 1\ncont = PA2\nwhile PA2 != 0:\n PA2 = int(input('Quantos termos você quer mostrar a mais: '))\n if PA2 > 0:\n conta2 = 1\n while conta2 <= PA2:\n print('{} -> '.format(termo),end=' ')\n termo += razao\n conta2 += 1\n print('PAUSA')\n cont += PA2\nprint('Progressão finalizada com {} termos mostrados'.format(cont + 9))\n\n","repo_name":"Bethania-Freitas/Curso-de-Python","sub_path":"ex062.py","file_name":"ex062.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26502999446","text":"import plotly.express as px\nfrom numpy import average\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix\n\n\ndef count_metrics(y_true, y_pred):\n acc = accuracy_score(y_true, y_pred)\n prec = precision_score(y_true, y_pred, average='macro')\n rec = recall_score(y_true, y_pred, average='macro')\n f1 = f1_score(y_true, y_pred, average='macro')\n return acc, prec, rec, f1\n\ndef print_metrics(name, acc, prec, rec, f1):\n print(f'{name}')\n print('accuracy:\\t', acc)\n print('precision:\\t', prec)\n print('recall:\\t\\t', rec)\n print('f1 score:\\t', f1)\n\ndef confussion_matrix(y_true, y_pred, binary=False):\n cf_matrix = confusion_matrix(y_true, y_pred, normalize='pred')\n\n fig = px.imshow(cf_matrix,\n labels=dict(x=\"Ground truth\", y=\"Predictions\", color=\"F1 score\"),\n x=[\"Healthy\", \"Sick\"] if binary else [\"Blood donor\", \"Suspect blood donor\", \"Hepatitis\", \"Fibrosis\", \"Cirrhosis\"],\n y=[\"Healthy\", \"Sick\"] if binary else [\"Blood donor\", \"Suspect blood donor\", \"Hepatitis\", \"Fibrosis\", \"Cirrhosis\"])\n fig.show()\n","repo_name":"sates298/HCV-fuzzy-classifier","sub_path":"src/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6284859829","text":"from flask import Flask, render_template, request,redirect,url_for\nimport mysql.connector\nmydb = mysql.connector.connect(user='root', password='root123', host='127.0.0.1', database='ranidb')\nimport pyshorteners\nimport datetime\nimport socket\n\nlongurl=[]\nshorturl=[]\napp = Flask(__name__)\n@app.route('/')\ndef enterurl():\n return render_template('index.html')\n\n@app.route('/result', methods=['POST', 'GET'])\ndef result_app():\n if request.method == 'POST':\n a = request.form\n longurl = a['name']\n b=longurl\n s = pyshorteners.Shortener()\n result = s.tinyurl.short(longurl)\n ts = datetime.datetime.now()\n date_time = ts.strftime(\"%m/%d/%Y, %H:%M:%S\")\n hostname = socket.gethostname()\n IPAddr = socket.gethostbyname(hostname)\n mycursor = mydb.cursor()\n #mycursor.execute(\"CREATE TABLE Url_entry(id int NOT NULL AUTO_INCREMENT, longurl VARCHAR(255),shorturl VARCHAR(255),timestamp VARCHAR(255),ip_address VARCHAR(255),PRIMARY KEY (id))\")\n mycursor.execute(\"INSERT INTO Url_entry(longurl,shorturl,timestamp,ip_address)values(%s,%s,%s,%s)\",(b,result,date_time,IPAddr))\n mydb.commit()\n mycursor.execute(\"SELECT id,timestamp,ip_address,shorturl,longurl,count(longurl) as count FROM Url_entry GROUP BY longurl\")\n count=mycursor.fetchall()\n\n return render_template('result.html',output=count)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"RaniBorka/Assignment2","sub_path":"assignment2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22345400000","text":"import os\nimport re\n\nimport _import_wrapper as iw\n\n\nclass XsParser(object):\n\n def __init__(self, path, unit):\n self._path = path\n retargeted = os.path.join(unit.path(), os.path.basename(path))\n with open(path, 'rb') as f:\n includes, induced = XsParser.parse_includes(f.readlines())\n\n self._includes = unit.resolve_include([retargeted] + includes) if includes else []\n self._induced = unit.resolve_include([retargeted] + induced) if induced else []\n\n @staticmethod\n def parse_includes(lines):\n includes = []\n induced = []\n\n include_pattern = re.compile(r'INCLUDE\\s*:\\s*(?P\\S*)')\n induced_pattern = re.compile(r'\\#include\\s*[\"<](?P[^\">]*)')\n\n for line in lines:\n line = line.lstrip()\n\n comment_pos = line.find('//')\n\n if comment_pos != -1:\n line = line[:comment_pos] # assumes there are no cases like #include \"a//b/c.h\"\n\n if line.startswith('#include'):\n m = induced_pattern.match(line)\n\n if m:\n induced.append(m.group('path'))\n\n elif line.startswith('INCLUDE'):\n m = include_pattern.match(line)\n\n if m:\n includes.append(m.group('path'))\n\n return includes, induced\n\n def includes(self):\n return self._includes\n\n def induced_deps(self):\n return {'cpp': self._induced}\n\n\ndef init():\n if 1:\n iw.addparser('xs', XsParser, {'xs': 'use', 'xscpp': 'pass'})\n\n\n# ----------------Plugin test------------------ #\ndef test_include_parser():\n text = '''\naaaaa\n#include \n# include \n#include\nINCLUDE: included1\nINCLUDE : included2\nINCLUDE: included3 // asdasd\n//INCLUDE : not_included\n'''\n includes, induced = XsParser.parse_includes(text.split('\\n'))\n assert includes == ['included1', 'included2', 'included3', ]\n assert induced == ['induced1', 'induced2']\n","repo_name":"yandex/CMICOT","sub_path":"build/plugins/xs.py","file_name":"xs.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"22"} +{"seq_id":"25397083870","text":"import sys\nimport cv2\nimport time\nimport dlib\nimport threading\nimport face_detector\nimport numpy as np\nimport loadmodel\nfrom copy import deepcopy\nfrom facewindow import Ui_MainWindow\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QFileDialog, QMainWindow\nfrom PyQt5.QtCore import QThread, pyqtSignal, Qt\nfrom PyQt5.QtGui import QPixmap, QImage\nfrom PIL import Image, ImageDraw, ImageFont\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVE'] = '3'\n\n\nclass Window(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(Window, self).__init__()\n self.setupUi(self)\n self.th = None\n self.current_image = None\n self.label_name_dic = {1: '梁朝伟', 2: '刘德华', 3: '马云', 4: '郭冬临', 5: '暴走的小吉吉', 6: '陈豆豆',\n 7: '古天乐', 8: '赵丽颖', 9: '邓超', 10: '孙俪', 11: '岳云鹏', 12: '沈腾',\n 13: '何炅', 14: '邓紫棋', 15: '李荣浩', 16: '陈赫', 17: '钟汉良', 18: '刘涛',\n 19: '冯提莫', 20: '王力宏', 21: '吴亦凡', 22: '张杰', 23: '张家辉', 24: '佟丽娅',\n 25: '杨洋'}\n self.alliswellnamedic = {0: '石天冬', 1: '苏大强', 2: '苏明成', 3: '苏明玉', 4: '苏明哲', 5: '吴非', 6: '朱丽'}\n self.friendnamedic = {0: '李梦旋', 1: '蒋畅', 2: '钟侠骄', 3: '陈洪', 4: '李廷川', 5: '陈毅', 6: '李任宁', 7: '汪林', 8: '张城阳',\n 9: '冯松', 10: '胡稳', 11: '林雪梅', 12: '熊英英', 13: '高小霞', 14: '邓杰', 15: '叶港培', 16: '李博录',\n 17: '刘天龙', 18: '刘泽城',19: '柏现迪'}\n #打开视频,使用了线程来开启视频\n def openStream322(self):\n self.DispImage.setText('')\n self.th = Thread(self)\n self.th.changePixmap.connect(self.set_video_image322)\n self.th.start()\n\n #拍照\n def snap322(self):\n ret, frame = self.th.cap.read()\n if ret:\n rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # rgb_image = cv2.resize(rgb_image, (28, 28))\n self.current_image = rgb_image\n face_detector.detect_and_label(rgb_image)\n convert_to_QtFormat = QtGui.QImage(rgb_image.data, rgb_image.shape[1],\n rgb_image.shape[0],\n QImage.Format_RGB888)\n p = convert_to_QtFormat.scaled(320, 240, Qt.KeepAspectRatio)\n self.set_image322(self.ImgLabel2, p)\n\n #打开图片\n def openImage322(self):\n self.DispResult.setText('')\n self.DispImage.setText('')\n img_name, img_type = QFileDialog.getOpenFileName(self, \"选择图片\", \"\", \" *.bmp;;*.jpg;;*.png;;*.jpeg\")\n # print(img_name, img_type)\n image = cv2.imread(img_name)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = cv2.equalizeHist(image)\n image = image / 255.\n image = cv2.resize(image, (128, 128))\n\n self.current_image = np.reshape(image, (1, 128, 128, 1))\n # 利用ImgLabel1显示图片\n # 适应设计ImgLabel1时的大小\n png = QtGui.QPixmap(img_name).scaled(self.DispImage.width(), self.DispImage.height())\n self.DispImage.setPixmap(png)\n\n\n def ReadTestImage322(self):\n filename = 'D:\\Comprehensive3\\FaceRecognition\\DataSet2-FaceOfStar\\\\test\\\\17_333.bmp'\n image = cv2.imread(filename)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = image / 255.\n image = cv2.resize(image, (128, 128))\n image = np.reshape(image, (1, 128, 128, 1))\n return image\n\n #识别函数\n def recognize322(self):\n result = loadmodel.predict322(self.current_image)\n print(result)\n if result != None:\n print(self.friendnamedic[np.array(result)[0]])\n self.DispResult.setText('')\n self.DispResult.insertPlainText(self.friendnamedic[np.array(result)[0]])\n else:\n print('未知')\n self.DispResult.setText('')\n self.DispResult.insertPlainText('未知')\n\n #实时识别函数\n def RealTimeRecognition322(self, image):\n result = loadmodel.predictvideo322(image)\n return result\n\n\n def set_video_image322(self, image):\n self.set_image322(self.DispImage, image)\n\n def set_image322(self, label, image):\n label.setPixmap(QPixmap.fromImage(image))\n\n#播放视频线程\nclass Thread(QThread):\n def __init__(self, other):\n super(Thread, self).__init__()\n self.cap = None\n self.pause = False\n self.dlib_face_detector = dlib.get_frontal_face_detector()\n self.friendnamedic = {0: '李梦旋', 1: '蒋畅', 2: '钟侠骄', 3: '陈洪', 4: '李廷川', 5: '陈毅', 6: '李任宁', 7: '汪林', 8: '张城阳',\n 9: '冯松', 10: '胡稳', 11: '林雪梅', 12: '熊英英', 13: '高小霞', 14: '邓杰', 15: '叶港培', 16: '李博录',\n 17: '刘天龙', 18: '刘泽城', 19: '柏现迪'}\n\n changePixmap = pyqtSignal(QtGui.QImage)\n def deal322(self, image, rgb_image):\n dets = self.dlib_face_detector(image, 1)\n tempimg = deepcopy(image)\n if len(dets) != 0:\n for detection in dets:\n image = deepcopy(tempimg)\n image1 = image[detection.top():detection.bottom(), detection.left():detection.right()]\n x = detection.left()\n y = detection.top()\n image1 = cv2.equalizeHist(image1)\n image1 = image1 / 255.0\n image1 = cv2.resize(image1, (128, 128))\n image1 = np.reshape(image1, (1, 128, 128, 1))\n result = Window.RealTimeRecognition322(self, image1)\n\n if result != None and result <=19:\n # 向视频中添加字符串\n # cv2.putText(rgb_image, self.friendnamedic[result],\n # (x, y+10), # 坐标\n # cv2.FONT_HERSHEY_SIMPLEX, # 字体\n # 2, # 字号\n # (255, 0, 255), # 颜色\n # 3)#粗细\n # 向视频中添加中文名字\n rgb_image = Image.fromarray(cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB))\n ttfont = ImageFont.truetype(\"simhei.ttf\", 20)\n draw = ImageDraw.Draw(rgb_image)\n draw.text((x, y), self.friendnamedic[result], fill=(255, 0, 255), font=ttfont)\n rgb_image = cv2.cvtColor(np.array(rgb_image), cv2.COLOR_RGB2BGR)\n convert_to_QtFormat = QtGui.QImage(rgb_image.data, rgb_image.shape[1],\n rgb_image.shape[0],\n QImage.Format_RGB888)\n p = convert_to_QtFormat.scaled(411, 321, Qt.KeepAspectRatio)\n self.changePixmap.emit(p)\n else:\n rgb_image = Image.fromarray(cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB))\n ttfont = ImageFont.truetype(\"simhei.ttf\", 20)\n draw = ImageDraw.Draw(rgb_image)\n draw.text((x, y), '未知', fill=(255, 0, 255), font=ttfont)\n rgb_image = cv2.cvtColor(np.array(rgb_image), cv2.COLOR_RGB2BGR)\n convert_to_QtFormat = QtGui.QImage(rgb_image.data, rgb_image.shape[1],\n rgb_image.shape[0],\n QImage.Format_RGB888)\n p = convert_to_QtFormat.scaled(411, 321, Qt.KeepAspectRatio)\n self.changePixmap.emit(p)\n else:\n convert_to_QtFormat = QtGui.QImage(rgb_image.data, rgb_image.shape[1],\n rgb_image.shape[0],\n QImage.Format_RGB888)\n p = convert_to_QtFormat.scaled(411, 321, Qt.KeepAspectRatio)\n self.changePixmap.emit(p)\n\n def run(self):\n self.cap = cv2.VideoCapture(0)\n self.cap.set(3, 360) # 设置分辨率,分辨率太大界面会卡\n self.cap.set(4, 300)\n while self.cap.isOpened():\n if 1 - self.pause:\n ret, frame = self.cap.read()\n if ret:\n rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # 在这里可以对每帧图像进行处理\n image = deepcopy(rgb_image)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n face_detector.detect_and_label(rgb_image)\n self.deal322(image, rgb_image)\n\n def FaceDetecttionCut(self, img):\n dets = self.dlib_face_detector(img, 1)\n if len(dets) != 0:\n for detection in dets:\n # img = cv2.rectangle(img,\n # (detection.left(), detection.top()), # (x1,y1)\n # (detection.right(), detection.bottom()), # (x2,y2)\n # (255, 255, 255),\n # 2)\n img = img[detection.top():detection.bottom(), detection.left():detection.right()]\n return img\n else:\n return np.array([0])\n\n def threcognize(self):\n t = threading.Thread(target=Window.RealTimeRecognition, args=(self,))\n t.start()\n\n#主函数\ndef main():\n qtapp = QtWidgets.QApplication(sys.argv)\n window = Window()\n window.show()\n sys.exit(qtapp.exec_())\n\nif __name__ == '__main__':\n main()\n","repo_name":"Creaturepersonal/faceRecognitionDemo","sub_path":"faceRecognition/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"40136538061","text":"import logging\nimport sys\nimport re\nimport multiprocessing\nimport os\nimport time\nimport shutil\nfrom urllib2 import URLError\nimport tempfile\n\nfrom Bio.Alphabet import generic_protein\nfrom Bio.Seq import Seq\n\nfrom antismash import utils\n\n\nname = \"eficaz\"\nshort_description = name.capitalize()\npriority = 10000\n\nEFICAzBinary = \"eficaz2.5\"\n\ndef check_prereqs():\n \"Check if all required files and applications are around\"\n\n # Tuple is ( binary_name, optional)\n _required_binaries = [\n ('eficaz2.5', False) \n ]\n\n failure_messages = []\n\n for binary_name, optional in _required_binaries:\n if utils.locate_executable(binary_name) is None and not optional:\n failure_messages.append(\"Failed to locate executable: %r\" % binary_name)\n\n return failure_messages\n\nclass EFICAzECPrediction:\n \n def __init__(self, seq_record, options):\n \n # Assign variables\n self.seq_record = seq_record\n self.options = options\n \n # Variables to store EC prediction\n self.EC4Dict = {}\n self.EC3Dict = {}\n self.EC4InfoDict = {}\n self.EC3InfoDict = {}\n \n # Dictionary to store the fasta file per Chunkdir\n # self.ChunkFilenames['/path/to/Chunk1'] = '/path/to/fastafile'\n self.ChunkFilenames = {}\n \n self.basedirName = os.path.abspath(os.path.join(options.outputfoldername, \"EFICAz\"))\n try:\n os.mkdir(self.basedirName)\n except OSError:\n if os.path.exists(self.basedirName):\n # We are nearly safe\n logging.debug(\"Outputfolder %s already exists.\" % self.basedirName)\n else:\n logging.exception(\"Cannot create EFICAz output directory %s\" % self.basedirName)\n sys.exit(1)\n \n tempdir = tempfile.mkdtemp(prefix='antiSMASH_ECpred')\n self.tempdirname = tempdir\n \n def _getMultiFastaList(self):\n features = utils.get_cds_features(self.seq_record)\n allFastaList = []\n for feature in features:\n gene_id = utils.get_gene_id(feature)\n fasta_seq = feature.qualifiers['translation'][0]\n if \"-\" in str(fasta_seq):\n fasta_seq = Seq(str(fasta_seq).replace(\"-\",\"\"), generic_protein)\n \n # Never write empty fasta entries\n if len(fasta_seq) == 0:\n logging.debug(\"No translation for %s, skipping\" % gene_id)\n continue\n \n allFastaList.append(\">%s\\n%s\\n\" % (gene_id, fasta_seq))\n \n return allFastaList\n \n \n def _prepareInput(self):\n \"\"\"Generate $options.cpus chunks of multi-Fasta-files; each in it's own subdirectory named \"Chunk0000x\";\n returns: list of directorynames\"\"\"\n \n logging.debug(\"Preparing input files for EFICAz\")\n InputDirList = []\n allFastaList = self._getMultiFastaList()\n \n \n maxChunks = self.options.cpus\n \n \n if len(allFastaList) < maxChunks:\n maxChunks = len(allFastaList)\n \n if maxChunks == 0:\n logging.warn('No input files for %s', self.seq_record.id)\n return []\n equalpartsizes = int(len(allFastaList) / maxChunks)\n \n \n # Generate directory structure and write chunks;\n # for debug purposes use outputfolder; later on we should move to a temporary directory\n \n for i in range(maxChunks):\n if i == 0:\n fastaChunk = allFastaList[:equalpartsizes]\n elif i == (self.options.cpus-1):\n fastaChunk = allFastaList[(i*equalpartsizes):]\n else:\n fastaChunk = allFastaList[(i*equalpartsizes):((i+1)*equalpartsizes)] \n \n \n # setup separate directories for EFICAz\n chunkDirName = \"{basedir}{sep}Chunk{chunk_no:05d}\".format(basedir=self.tempdirname, sep=os.sep,chunk_no=i+1)\n # logging.debug(\"Trying to create folder: %s\" % chunkDirName)\n try:\n os.mkdir(chunkDirName)\n except OSError:\n if os.path.exists(chunkDirName):\n # We are nearly safe\n logging.debug(\"Outputfolder %s already exists.\" % chunkDirName)\n else:\n logging.exception(\"Cannot create directory %s.\" % chunkDirName)\n sys.exit(1)\n InputDirList.append(chunkDirName)\n \n chunkFileName = \"{dirname}{sep}input_{seqid}_{chunk_no:05d}.fasta\".format(dirname=chunkDirName, \\\n sep=os.sep, \\\n seqid=self.seq_record.id, \\\n chunk_no=i+1)\n try:\n f = open(chunkFileName, \"w\")\n except OSError:\n logging.exception(\"Cannot create fasta file %s\" % chunkFileName)\n sys.exit(1)\n \n \n self.ChunkFilenames[chunkDirName] = os.path.abspath(chunkFileName)\n for seq in fastaChunk:\n f.write(seq)\n f.close()\n self.InputDirList = InputDirList\n return InputDirList\n \n def _runEFICAz(self, chunkDir):\n cwd = os.getcwd()\n try:\n os.chdir(chunkDir)\n except OSError:\n logging.exception(\"Can't chdir to %s\" % chunkDir)\n sys.exit(1)\n\n fastafile = os.path.basename(self.ChunkFilenames[chunkDir])\n ecpredfile = fastafile+\".ecpred\"\n # Only perform calculations if result file does not already exist (from previous run)\n if not os.path.isfile(os.path.join(self.basedirName, ecpredfile)):\n EFICAzExecutable = utils.locate_executable(EFICAzBinary)\n if not EFICAzExecutable:\n logging.exception(\"EFICAz executable not found, bailing out, analysis not posible\")\n sys.exit(1)\n cmdline = [EFICAzExecutable, fastafile]\n \n logging.debug(\"executing %s in directory %s\" % (\" \".join(cmdline), chunkDir))\n try:\n utils.execute(cmdline)\n except:\n logging.exception('cannot execute EFICAz!')\n sys.exit(1)\n else:\n # As this method is executed in an own thread, it does not have the ability to change\n # the variables within th eobject;\n # As a workaround we just copy the \"old\" file to the tempdir...\n try:\n shutil.copy(os.path.abspath(os.path.join(self.basedirName, ecpredfile)), self.ChunkFilenames[chunkDir]+\".ecpred\")\n except:\n logging.exception(\"Could not copy existing eficaz result file %s to tempfile %s\", \\\n os.path.isfile(os.path.abspath(self.basedirName, ecpredfile)), \\\n self.ChunkFilenames[chunkDir]+\".ecpred\" )\n sys.exit(1)\n \n os.chdir(cwd)\n \n \n \n def _execute_EFICAz_processes(self, directorynames):\n \n processList = []\n \n for directoryname in directorynames:\n processList.append(multiprocessing.Process(target=self._runEFICAz, args = (directoryname, )))\n \n for process in processList:\n process.start()\n time.sleep(10)\n while True:\n processrunning = \"n\"\n for process in processList:\n if process.is_alive():\n processrunning = \"y\"\n if processrunning == \"y\":\n time.sleep(5)\n else:\n break\n for process in processList:\n process.join()\n \n \n logging.debug(\"After joining the processes EC4Dict has %s entries\" % len(self.EC4Dict.keys()))\n \n def _parseEFICAzResults(self, chunkDirs):\n \n for chunkDir in chunkDirs:\n \n # logging.debug(\"ChunkFilenames[%s]=%s\", chunkDir, self.ChunkFilenames[chunkDir])\n ecpredfile = self.ChunkFilenames[chunkDir]+\".ecpred\"\n try:\n f = open(ecpredfile,\"r\")\n except OSError as e:\n logging.error(\"No EFICAz outputfile %s found. Skipping Chunk...\\nOSError: %s\", ecpredfile, e)\n continue\n except IOError as e:\n logging.error(\"No EFICAz outputfile %s found. Skipping Chunk...\\nIOError: %s\", ecpredfile, e)\n continue\n EC4Pred = {}\n EC4Info = {}\n EC3Pred = {}\n EC3Info = {}\n \n for line in f.read().splitlines():\n # First get antiSMASH-ID\n (antiSMASH_ID, eficazResultString) = line.split(',', 1)\n eficazResultString = eficazResultString.strip()\n if eficazResultString == 'No EFICAz EC assignment':\n #logging.debug(\"No EC assignment found for %s\" % antiSMASH_ID)\n continue\n \n if eficazResultString.strip().startswith(\"3EC\"):\n #logging.debug(\"3EC: %s\" % eficazResultString)\n r = re.match('3EC: (\\d+\\.\\d+\\.\\d+), (.*)', eficazResultString)\n if r:\n EC = r.group(1) + \".-\"\n ECDesc = r.group(2)\n if not EC3Pred.has_key(antiSMASH_ID):\n EC3Pred[antiSMASH_ID] = []\n EC3Info[antiSMASH_ID] = []\n EC3Pred[antiSMASH_ID].append(EC)\n EC3Info[antiSMASH_ID].append(ECDesc)\n continue\n \n if eficazResultString.strip().startswith(\"4EC\"):\n r = re.match('4EC: (\\d+\\.\\d+\\.\\d+\\.\\d+), (.*)', eficazResultString)\n if r:\n EC = r.group(1)\n ECDesc = r.group(2)\n if not EC4Pred.has_key(antiSMASH_ID):\n EC4Pred[antiSMASH_ID] = []\n EC4Info[antiSMASH_ID] = []\n EC4Pred[antiSMASH_ID].append(EC)\n EC4Info[antiSMASH_ID].append(ECDesc)\n continue\n \n logging.warn(\"Could not parse line %s:\" % line)\n f.close()\n \n self.EC4Dict.update(EC4Pred)\n self.EC4InfoDict.update(EC4Info)\n self.EC3Dict.update(EC3Pred)\n self.EC3InfoDict.update(EC3Info)\n \n logging.debug(\"EC4Pred has %s entries for chunk\" % len(self.EC4Dict.keys()))\n \n def _copyFiles(self, chunkDirs):\n \"Copy the input and output files into outputfolder\"\n \n logging.debug(\"Copying the eficaz input/result files from tempdir %s to outputfolder %s\", \\\n self.tempdirname, self.basedirName)\n for chunkDir in chunkDirs:\n try:\n # logging.debug(\"Copying input fasta file from %s to outputfolder\", chunkDir)\n shutil.copy(self.ChunkFilenames[chunkDir], self.basedirName)\n except:\n logging.error(\"Could not copy eficaz input file %s to destination %s\", \\\n self.ChunkFilenames[chunkDir], self.basedirName)\n \n try:\n # logging.debug(\"Copying results file from %s to outputfolder\", chunkDir)\n shutil.copy(self.ChunkFilenames[chunkDir]+\".ecpred\", self.basedirName)\n except:\n logging.error(\"Could not copy eficaz result file %s to destination %s\", \\\n self.ChunkFilenames[chunkDir]+\".ecpred\", self.basedirName)\n # And finally remove temporary directory\n logging.debug(\"removing temp dir %s\", self.tempdirname)\n shutil.rmtree(self.tempdirname)\n \n \n def runECpred(self):\n \"Runs the EFICAz EC number predictions\"\n chunkDirs = self._prepareInput()\n if len(chunkDirs) > 0:\n logging.debug(\"split inputs to %s directories; first one is %s\" % (len(chunkDirs), chunkDirs[0]))\n self._execute_EFICAz_processes(chunkDirs)\n self._parseEFICAzResults(chunkDirs)\n self._copyFiles(chunkDirs)\n else:\n logging.warn(\"ECpredictor: No protein coding sequences found for in record: %s\" % self.seq_record.id)\n \n def getEC3(self, antiSMASH_ID):\n \"\"\"Return list of EC3 numbers for antiSMASH_ID\"\"\"\n \n if self.EC3Dict.has_key(antiSMASH_ID):\n return self.EC3Dict[antiSMASH_ID]\n else:\n return None\n\n def getEC3Info(self, antiSMASH_ID):\n \"\"\"Return list of infos for EC3 number prediction for antiSMASH_ID\"\"\"\n \n if self.EC3InfoDict.has_key(antiSMASH_ID):\n return self.EC3InfoDict[antiSMASH_ID]\n else:\n return None\n \n def getEC4(self, antiSMASH_ID):\n \"\"\"Return list of EC4 numbers for antiSMASH_ID\"\"\"\n \n if self.EC4Dict.has_key(antiSMASH_ID):\n return self.EC4Dict[antiSMASH_ID]\n else:\n return None\n \n def getEC4Info(self, antiSMASH_ID):\n \"\"\"Return list of infos for EC4 number prediction for antiSMASH_ID\"\"\"\n \n if self.EC4InfoDict.has_key(antiSMASH_ID):\n return self.EC4InfoDict[antiSMASH_ID]\n else:\n return None\n \n def getEC4Dict(self):\n \"\"\"Return dictionary of list for 4-digit EC numbers\n \n Example:\n a = EFICAzObject.getEC4Dict\n will result in:\n a[antiSMASH_ID] = ['1.2.3.4', '5.6.7.8']\"\"\"\n \n return self.EC4Dict\n \n def getEC3Dict(self):\n \"\"\"Return dictionary of 3-digit EC numbers\n \n Example:\n a = EFICAzObject.getEC3Dict\n will result in:\n a[antiSMASH_ID] = ['1.2.3.x', '5.6.7.x']\"\"\"\n \n return self.EC3Dict\n \n \n def getEC4InfoDict(self):\n \"\"\"Return dictionary of description for 4-digit EC assignment\n \n Example: \n a = EFICAzObject.getEC4ToolDict\n will result in:\n a[antiSMASH_ID] = 'EFICAz_components: CHIEFc_SVM; PFAM_SVM, MTTSI_bin: 6, Precision (mean; SD): 0.991; 0.094' \"\"\"\n \n return self.EC4InfoDict\n \n def getEC3InfoDict(self):\n \"\"\"Return dictionary of description for 3-digit EC assignment\n \n Example:\n a = EFICAzObject.getEC3ToolDict\n will result in:\n a[antiSMASH_ID] = 'EFICAz_components: CHIEFc_SVM; PFAM_SVM, MTTSI_bin: 6, Precision (mean; SD): 0.991; 0.094' \"\"\"\n \n return self.EC3InfoDict\n \n \n\n\n\ndef getECs(seq_record, options):\n logging.debug(\"Predicting EC numbers with EFICAz\")\n if not name in options.ecpred:\n logging.debug(\"ECprediction %s not selected, returning...\" % name)\n return\n \n if not 'cpus' in options:\n options.cpus = 1\n \n EFICAzECs = EFICAzECPrediction(seq_record, options)\n EFICAzECs.runECpred()\n logging.debug(\"Found %s predictions for EC4\" % len(EFICAzECs.getEC4Dict().keys()))\n \n for feature in utils.get_cds_features(seq_record):\n featureID = utils.get_gene_id(feature)\n \n notes = []\n \n if feature.qualifiers.has_key(\"note\"):\n notes = feature.qualifiers['note']\n \n if EFICAzECs.getEC4(featureID):\n logging.debug(\"Annotating %s\" % featureID)\n if feature.qualifiers.has_key('EC_number'):\n logging.warn('ECpredictor[eficaz]: Overwriting existing EC annotation: %s with %s' % \\\n (\", \".join(feature.qualifiers['EC_number']), \", \".join(EFICAzECs.getEC4(featureID))))\n feature.qualifiers['EC_number'] = EFICAzECs.getEC4(featureID)\n notes.append(\"EFICAz EC number prediction: EC4: {0}; {1}\".format(\", \".join(EFICAzECs.getEC4(featureID)), \\\n \"; \".join(EFICAzECs.getEC4Info(featureID))) )\n # Only annotate 3 digit EC if no 4 digit EC is available\n if (EFICAzECs.getEC3(featureID) and not EFICAzECs.getEC4(featureID)):\n if feature.qualifiers.has_key('EC_number'):\n if not re.search(\"\\d+\\.\\d+\\.\\d+\\.\\d+\", \" \".join(feature.qualifiers['EC_number'])):\n logging.warn('ECpredictor[eficaz]: Overwriting existing EC annotation: %s with %s' % \\\n (\", \".join(feature.qualifiers['EC_number']), \", \".join(EFICAzECs.getEC3(featureID))))\n feature.qualifiers['EC_number'] = EFICAzECs.getEC3(featureID)\n \n if EFICAzECs.getEC3Info(featureID):\n notes.append(\"EFICAz EC number prediction: EC3: {0}; {1}\".format(\", \".join(EFICAzECs.getEC3(featureID)), \\\n \"; \".join(EFICAzECs.getEC3Info(featureID))))\n if not feature.qualifiers.has_key('EC_number'):\n feature.qualifiers['EC_number'] = EFICAzECs.getEC3(featureID)\n \n feature.qualifiers['note'] = notes\n logging.debug(\"Finished EC number prediction with EFICAz\")","repo_name":"plantismash/plantismash","sub_path":"antismash/generic_modules/ecpredictor/eficaz/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17564,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"22"} +{"seq_id":"70804591097","text":"from flask import Flask, Response\nimport logging\n\napp = Flask(__name__)\n\n\n# Configure logging\nlog_format = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n# Create a logger instance\nlogger = logging.getLogger(__name__)\n\n\n@app.route(\"/\")\ndef index():\n message = \"Plaform app with Python Flask running on Kubernetes =)!\"\n return Response(message, status=200, mimetype=\"application/json\")\n\n\nif __name__ == \"__main__\":\n logger.info(\"Flask application is running successfully\")\n app.run(host=\"0.0.0.0\", port=5000)\n","repo_name":"jdial1996/platform-app","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21993851974","text":"from selenium import webdriver \nfrom time import sleep\nfrom selenium.webdriver.chrome.options import Options #added for a quicker configuration \n\nclass covidBot:\n\n def __init__(self):\n \"\"\" initiatlizing function\"\"\"\n self.zip = \"89052\"\n self.dob = \"03/24/2000\"\n self.dos = \"12/21/2020\"\n options = Options()\n options.add_argument(\"no-sandbox\") #disable sandboxing, really only needed in docker\n #options.add_argument(\"headless\") #for when you have it all working\n self.driver = webdriver.Chrome() # calls webdriver\n self.goZip()\n \n def goZip(self):\n \"\"\" opens the website and fills in the information\"\"\"\n self.driver.get(\"https://www.cvs.com/minuteclinic/covid-19-testing\")\n sleep(2)\n self.driver.find_element_by_name('zip-control').send_keys(self.zip)\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/div/form/div[2]/button').click()\n sleep(4)\n print(\"in\")\n\n\n\ncovidBot()\n\n","repo_name":"watson-clara/Covid-Test-Bot","sub_path":"covidBot.py","file_name":"covidBot.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6884022444","text":"import os, sys, json, codecs, re\n\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.models import *\n\n#前往上層目錄\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) \n#導入env, model\nfrom env import *\nfrom model import *\n\n#################### 取得資料庫中的經緯資訊 ####################\n#經緯資訊\ndef get_all_location():\n query = \"\"\"SELECT address, lat, lng FROM line_location\"\"\"\n dataRow = selectDB(query, None)\n result_dict = {}\n if len(dataRow):\n for row in dataRow:\n result_dict[row['address']] = {'lat': row['lat'], 'lng': row['lng']}\n return result_dict\n#經緯資訊\ndef get_location(location):\n query = \"\"\"SELECT lat, lng FROM line_location WHERE address = %s\"\"\"\n dataRow = selectDB(query, (location,))\n return dataRow[0] if len(dataRow) else None\n#建立經緯資訊\ndef create_location(address, lat, lng):\n if not get_location(address):\n query = \"\"\"INSERT INTO line_location (address, lat, lng) VALUES (%s, %s, %s)\"\"\"\n values = (address, lat, lng,)\n operateDB(query, values) ","repo_name":"linziyou0601/coolpanda","sub_path":"Managers/geocodingManager.py","file_name":"geocodingManager.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4424238029","text":"# Importar la biblioteca de flask y librerias necesarias\n# Repositorio Git https://github.com/leocastan/NRC_6275_LeopoldoCastroExamenbaParcial1.git\nfrom tkinter import messagebox\nfrom flask import Flask, redirect, render_template, request, url_for, flash\nimport pickle\nimport datetime\nimport requests\nimport os\nimport argparse\nimport re\nimport json\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, FR\nfrom holidays.constants import JAN, MAY, AUG, OCT, NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\n\n\n# Instanciar la aplicación\n# Nombre por defecto y ruta donde están los modelos\napp = Flask(__name__)\n\n# Arreglo para almacenar las tareas\nlistaLlamadas = []\n\n# 1. Funcion controlador que muestra lista actual de tareas pendientes y un formulario para ingresar un nuevo elemento\n# Definicion de la ruta por defecto,\n@app.route('/')\n# Lamar a principal\ndef home():\n return render_template('index.html', listaLlamadas=listaLlamadas)\n\n# 2. Funcion controlador para agregar llamada a lista de llamadas\n# Definicion de la ruta\n@app.route('/enviar', methods=['POST'])\n# Llamar a enviar\ndef enviar():\n # Funcion condicional para enviar los datos del formulario\n if request.method == 'POST':\n\n nroLlamada = request.form['nroLlamada']\n placaVehiculo = request.form['placaVehiculo']\n fecha = request.form['fecha']\n hora = request.form['hora']\n prediccion = request.form['prediccion']\n\n\n # Funcion condicional para no registrar en caso de datos vacios\n if nroLlamada == '' or placaVehiculo == '' or fecha == '' or hora == '' or prediccion == '':\n #Mensaje de alerta de campos faltantes\n messagebox.showwarning(\"¡Alerta!\",\"Ingrese todos los campos\")\n return redirect(url_for('home'))\n else:\n #Mensaje de autorizacion de registro\n resultado = messagebox.askquestion(\"Registrar\", \"¿Está seguro que desea registrar los datos?\")\n #Funcion condicional de confirmacion de registro\n if resultado == \"yes\":\n listaLlamadas.append({'nroLlamada': nroLlamada, 'placaVehiculo': placaVehiculo, 'fecha': fecha, 'hora': hora, 'prediccion': prediccion })\n return redirect(url_for('home'))\n else:\n return redirect(url_for('home'))\n\n# 3. Funcion controlador para borrar la lista de tareas\n@app.route('/borrar', methods=['POST'])\ndef borrar():\n if request.method == 'POST':\n # Funcion condicional para mostrar alerta en caso de no existir\n if listaLlamadas == []:\n messagebox.showwarning(\"¡Alerta!\", \"No existen llamadas pendientes\")\n return redirect(url_for('home'))\n else:\n # Mensaje de autorizacion de borrado\n resultado = messagebox.askquestion(\n \"Borrar datos\", \"¿Está seguro de que desea borrar los datos?\")\n # Funcion condicional de confirmacion de borrado\n if resultado == \"yes\":\n messagebox.showinfo(\"Info\", \"Los datos han sido borrados\")\n listaLlamadas.clear()\n return redirect(url_for('home'))\n else:\n return redirect(url_for('home'))\n\n# 4. Funcion controlador para guardar registros en archivo *.pickle\n@app.route('/guardar', methods=['POST'])\ndef guardar():\n if request.method == 'POST':\n # Funcion condicional para mostrar alerta en caso de no existir\n if listaLlamadas == []:\n messagebox.showwarning(\n \"¡Alerta!\", \"No existen llamdas para almacenar\")\n return redirect(url_for('home'))\n else:\n # Mensaje de autorizacion de guardado\n resultado = messagebox.askquestion(\n \"Guardar registros\", \"¿Está seguro de que desea guardar los datos?\")\n # Funcion condicional de confirmacion de guardado\n if resultado == \"yes\":\n # Funcion de creacion y sobreescritura de archivo *.pickle\n with open('llamadas.pickle', 'wb') as f:\n llamadas = {'llamadas': listaLlamadas}\n pickle.dump(llamadas, f)\n messagebox.showinfo(\"Info\", \"Los datos han sido guardados\")\n return redirect(url_for('home'))\n else:\n return redirect(url_for('home'))\n\n#Holidays para ecuador\nclass HolidayEcuador(HolidayBase):\n \"\"\"\n A class to represent a Holiday in Ecuador by province (HolidayEcuador)\n It aims to make determining whether a \n specific date is a holiday as fast and flexible as possible.\n https://www.turismo.gob.ec/wp-content/uploads/2020/03/CALENDARIO-DE-FERIADOS.pdf\n ...\n Attributes (It inherits the HolidayBase class)\n ----------\n prov: str\n province code according to ISO3166-2\n Methods\n -------\n __init__(self, plate, date, time, online=False):\n Constructs all the necessary attributes for the HolidayEcuador object.\n _populate(self, year):\n Returns if a date is holiday or not\n \"\"\" \n # ISO 3166-2 codes for the principal subdivisions, called provinces\n # https://es.wikipedia.org/wiki/ISO_3166-2:EC\n PROVINCES = [\"EC-P\"] # TODO add more provinces\n\n def __init__(self, **kwargs):\n \"\"\"\n Constructs all the necessary attributes for the HolidayEcuador object.\n \"\"\" \n self.country = \"ECU\"\n self.prov = kwargs.pop(\"prov\", \"ON\")\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n \"\"\"\n Checks if a date is holiday or not\n \n Parameters\n ----------\n year : str\n year of a date\n Returns\n -------\n Returns true if a date is a holiday otherwise flase \n \"\"\" \n # New Year's Day \n self[datetime.date(year, JAN, 1)] = \"Año Nuevo [New Year's Day]\"\n \n # Christmas\n self[datetime.date(year, DEC, 25)] = \"Navidad [Christmas]\"\n \n # Holy Week\n self[easter(year) + rd(weekday=FR(-1))] = \"Semana Santa (Viernes Santo) [Good Friday)]\"\n self[easter(year)] = \"Día de Pascuas [Easter Day]\"\n \n # Carnival\n total_lent_days = 46\n self[easter(year) - datetime.timedelta(days=total_lent_days+2)] = \"Lunes de carnaval [Carnival of Monday)]\"\n self[easter(year) - datetime.timedelta(days=total_lent_days+1)] = \"Martes de carnaval [Tuesday of Carnival)]\"\n \n # Labor day\n name = \"Día Nacional del Trabajo [Labour Day]\"\n # (Law 858/Reform Law to the LOSEP (in force since December 21, 2016 /R.O # 906)) If the holiday falls on Saturday or Tuesday\n # the mandatory rest will go to the immediate previous Friday or Monday\n # respectively\n if year > 2015 and datetime.date(year, MAY, 1).weekday() in (5,1):\n self[datetime.date(year, MAY, 1) - datetime.timedelta(days=1)] = name\n # (Law 858/Reform Law to the LOSEP (in force since December 21, 2016 /R.O # 906)) if the holiday falls on Sunday\n # the mandatory rest will go to the following Monday\n elif year > 2015 and datetime.date(year, MAY, 1).weekday() == 6:\n self[datetime.date(year, MAY, 1) + datetime.timedelta(days=1)] = name\n # (Law 858/Reform Law to the LOSEP (in force since December 21, 2016 /R.O # 906)) Holidays that are on Wednesday or Thursday\n # will be moved to the Friday of that week\n elif year > 2015 and datetime.date(year, MAY, 1).weekday() in (2,3):\n self[datetime.date(year, MAY, 1) + rd(weekday=FR)] = name\n else:\n self[datetime.date(year, MAY, 1)] = name\n \n # Pichincha battle, the rules are the same as the labor day\n name = \"Batalla del Pichincha [Pichincha Battle]\"\n if year > 2015 and datetime.date(year, MAY, 24).weekday() in (5,1):\n self[datetime.date(year, MAY, 24).weekday() - datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, MAY, 24).weekday() == 6:\n self[datetime.date(year, MAY, 24) + datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, MAY, 24).weekday() in (2,3):\n self[datetime.date(year, MAY, 24) + rd(weekday=FR)] = name\n else:\n self[datetime.date(year, MAY, 24)] = name \n \n # First Cry of Independence, the rules are the same as the labor day\n name = \"Primer Grito de la Independencia [First Cry of Independence]\"\n if year > 2015 and datetime.date(year, AUG, 10).weekday() in (5,1):\n self[datetime.date(year, AUG, 10)- datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, AUG, 10).weekday() == 6:\n self[datetime.date(year, AUG, 10) + datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, AUG, 10).weekday() in (2,3):\n self[datetime.date(year, AUG, 10) + rd(weekday=FR)] = name\n else:\n self[datetime.date(year, AUG, 10)] = name \n \n # Guayaquil's independence, the rules are the same as the labor day\n name = \"Independencia de Guayaquil [Guayaquil's Independence]\"\n if year > 2015 and datetime.date(year, OCT, 9).weekday() in (5,1):\n self[datetime.date(year, OCT, 9) - datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, OCT, 9).weekday() == 6:\n self[datetime.date(year, OCT, 9) + datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, MAY, 1).weekday() in (2,3):\n self[datetime.date(year, OCT, 9) + rd(weekday=FR)] = name\n else:\n self[datetime.date(year, OCT, 9)] = name \n \n # Day of the Dead and\n namedd = \"Día de los difuntos [Day of the Dead]\" \n # Independence of Cuenca\n nameic = \"Independencia de Cuenca [Independence of Cuenca]\"\n #(Law 858/Reform Law to the LOSEP (in force since December 21, 2016 /R.O # 906)) \n #For national and/or local holidays that coincide on continuous days, \n #the following rules will apply:\n if (datetime.date(year, NOV, 2).weekday() == 5 and datetime.date(year, NOV, 3).weekday() == 6):\n self[datetime.date(year, NOV, 2) - datetime.timedelta(days=1)] = namedd\n self[datetime.date(year, NOV, 3) + datetime.timedelta(days=1)] = nameic \n elif (datetime.date(year, NOV, 3).weekday() == 2):\n self[datetime.date(year, NOV, 2)] = namedd\n self[datetime.date(year, NOV, 3) - datetime.timedelta(days=2)] = nameic\n elif (datetime.date(year, NOV, 3).weekday() == 3):\n self[datetime.date(year, NOV, 3)] = nameic\n self[datetime.date(year, NOV, 2) + datetime.timedelta(days=2)] = namedd\n elif (datetime.date(year, NOV, 3).weekday() == 5):\n self[datetime.date(year, NOV, 2)] = namedd\n self[datetime.date(year, NOV, 3) - datetime.timedelta(days=2)] = nameic\n elif (datetime.date(year, NOV, 3).weekday() == 0):\n self[datetime.date(year, NOV, 3)] = nameic\n self[datetime.date(year, NOV, 2) + datetime.timedelta(days=2)] = namedd\n else:\n self[datetime.date(year, NOV, 2)] = namedd\n self[datetime.date(year, NOV, 3)] = nameic \n \n # Foundation of Quito, applies only to Pichincha province, \n # the rules are the same as the labor day\n name = \"Fundación de Quito [Foundation of Quito]\" \n if self.prov in (\"EC-P\"):\n if year > 2015 and datetime.date(year, DEC, 6).weekday() in (5,1):\n self[datetime.date(year, DEC, 6) - datetime.timedelta(days=1)] = name\n elif year > 2015 and datetime.date(year, DEC, 6).weekday() == 6:\n self[(datetime.date(year, DEC, 6).weekday()) + datetime.timedelta(days=1)] =name\n elif year > 2015 and datetime.date(year, DEC, 6).weekday() in (2,3):\n self[datetime.date(year, DEC, 6) + rd(weekday=FR)] = name\n else:\n self[datetime.date(year, DEC, 6)] = name\n\n\n\n# Metodo main del programa\nif __name__ == '__main__':\n # debug = True, para reiniciar automatica el servidor\n app.run(debug=True)\n","repo_name":"leocastan/NRC_6275_LeopoldoCastroExamenbaParcial1","sub_path":"CastroL.py","file_name":"CastroL.py","file_ext":"py","file_size_in_byte":12349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23371295980","text":"from turtle import Turtle\n\n\nclass Racer(Turtle):\n def __init__(self):\n super().__init__()\n self.penup()\n self.hideturtle()\n self.goto(0,-280)\n self.color(\"white\")\n self.shape(\"turtle\")\n self.setheading(90)\n self.showturtle()\n\n def move_racer(self):\n self.forward(10)","repo_name":"Satwika7/23turtle_car_race","sub_path":"racer.py","file_name":"racer.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5456038630","text":"\"\"\"Module with class to control the game.\n\nClasses:\n Game: The base class for the game GUI.\n\"\"\"\n\nimport tkinter as tk\n\nimport app\n\n\nclass Game:\n \"\"\"The base class for the translation game gui.\n\n Args:\n no_commit: Boolean denoting whether marks are committed to\n database.\n\n Attributes:\n no_commit: Boolean denoting whether marks are committed to\n database.\n gui: tkinter top level widget.\n window: Instance of the Window class.\n settings: Instance of the Settings class.\n status: Instance of the Status class.\n answers: Instance of the Answers class.\n buttons: Instance of the Buttons class.\n game_words: Instance of the GameWords class.\n menu: Instance of the Menu class.\n questions: Instance of the Questions class.\n summary: Instance of the Summary class.\n \"\"\"\n\n def __init__(self, no_commit: bool) -> None:\n self.no_commit = no_commit\n self.__initiate_game()\n\n def __initiate_game(self) -> None:\n \"\"\"Create top level widget, game window and start game.\"\"\"\n self.gui = tk.Tk()\n self.labels = app.Labels(self.gui)\n self.window = app.Window(self)\n self.window.create()\n self.start()\n\n def start(self) -> None:\n \"\"\"Start a new instance of the game.\n\n Create new instances of the game elements. Display the options\n menu and wait for the user to select the options. Then set the\n first question.\n \"\"\"\n self.settings = app.Settings()\n self.status = app.Status()\n self.answers = app.Answers(self)\n self.buttons = app.Buttons(self)\n self.game_words = app.GameWords(self)\n self.menu = app.Menu(self)\n self.questions = app.Questions(self)\n self.summary = app.Summary(self)\n self.menu.create()\n self.menu.submit_values_button.wait_variable(self.menu.values_set_indicator)\n self.questions.initialise()\n\n def run(self) -> None:\n \"\"\"Call mainloop.\"\"\"\n self.gui.mainloop()\n\n def commit_marks(self) -> None:\n \"\"\"Commit marks to database.\"\"\"\n if not self.no_commit:\n self.status.commit_marks_to_database()\n\n def destroy_widgets(self, names: list[str]) -> None:\n \"\"\"Destroy all widgets in the window which are in names list.\n\n Args:\n names: List of widget names to destroy.\n \"\"\"\n for widget in self.gui.winfo_children():\n if widget.winfo_name() in names:\n widget.destroy()\n\n def destroy_widgets_except(self, names: list[str]) -> None:\n \"\"\"Destroy all widgets in the window except for those in names.\n\n Args:\n names: List of widget names to not destroy.\n \"\"\"\n for widget in self.gui.winfo_children():\n if widget.winfo_name() not in names:\n widget.destroy()\n","repo_name":"mwtb47/swedish-vocabulary-game","sub_path":"game/app/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1092022129","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Samuel A. Maloney\r\n\r\n\"\"\"\r\n\r\nfrom scipy.special import roots_legendre\r\nimport scipy.sparse as sp\r\nimport scipy.sparse.linalg as sp_la\r\nimport numpy as np\r\nimport ssqr\r\nimport integrators\r\nimport mappings\r\nimport boundaries\r\n\r\nfrom timeit import default_timer\r\n\r\n\r\nclass FciFemSim:\r\n \"\"\"Class for flux-coordinate independent FEM (FCIFEM) method.\r\n Implements the convection-diffusion equation on a rectangular domain\r\n [x, y] = [0...2*pi, 0...1].\r\n\r\n Attributes\r\n ----------\r\n NX : int\r\n Number of planes along x-dimension. Must be NX >= 2.\r\n NY : int\r\n Number of nodes on each plane. Must be NY >= 2.\r\n nodeX : numpy.ndarray, shape=(NX+1,)\r\n x-coords of FCI planes (includes right boundary).\r\n dx : numpy.ndarray, shape=(NX,)\r\n Spacing between FCI planes\r\n nodeY : numpy.ndarray, shape=(NX+1, NY+1)\r\n y-coords of nodes on each FCI plane (includes right/top boundaries).\r\n idy : numpy.ndarray, shape=(NX+1, NY)\r\n 1/spacing between nodes on each FCI plane (includes right boundary).\r\n nDoFs : int\r\n Number of unique nodal points in the simulation domain (equals NX*NY).\r\n velocity : np.array([vx,vy], dtype='float64')\r\n Background velocity of the fluid.\r\n diffusivity : {numpy.ndarray, float}\r\n Diffusion coefficient for the quantity of interest.\r\n If an array, it must have shape (ndim,ndim). If a float, it will\r\n be converted to diffusivity*np.eye(ndim, dtype='float64').\r\n f : callable\r\n Forcing function. Must take 2D array of points and return 1D array.\r\n NQX : int\r\n Number of quadrature cell divisions between FCI planes.\r\n NQY : int\r\n Number of quadrature cell divisions in y-direction.\r\n Qord : int\r\n Number of quadrature points in each grid cell along one dimension.\r\n quadType : string, optional\r\n Type of quadrature to be used. Must be either 'gauss' or 'uniform'.\r\n Produces Gauss-Legendre or Newton-Cotes points/weights respectively.\r\n massLumping : bool, optional\r\n Determines whether mass-lumping was used to calculate M matrix.\r\n K : scipy.sparse.csr_matrix\r\n The stiffness matrix from the diffusion term\r\n A : scipy.sparse.csr_matrix\r\n The advection matrix\r\n M : scipy.sparse.csr_matrix\r\n The mass matrix from the time derivative\r\n b : numpy.ndarray, shape=(nDoFs,)\r\n RHS forcing vector generated from source/sink function f.\r\n integrator : Integrator\r\n Object defining time-integration scheme to be used.\r\n \"\"\"\r\n\r\n def __init__(self, NX, NY, mapping, velocity, diffusivity=0.,\r\n px=0., py=0., seed=None, xmax=2*np.pi, **kwargs):\r\n \"\"\"Initialize attributes of FCIFEM simulation class\r\n\r\n Parameters\r\n ----------\r\n NX : int\r\n Number of planes along x-dimension. Must be NX >= 2.\r\n NY : int\r\n Number of nodes on each plane. Must be NY >= 2.\r\n mapping : Mapping\r\n Mapping function for the FCIFEM method.\r\n Must be an object derived from fcifem.Mapping.\r\n velocity : np.array([vx, vy], dtype='float')\r\n Background velocity of the fluid.\r\n diffusivity : {numpy.ndarray, float}, optional\r\n Diffusion coefficient for the quantity of interest.\r\n If an array, it must have shape (ndim,ndim). If a float, it will\r\n be converted to diffusivity*np.eye(ndim, dtype='float').\r\n The default is 0.\r\n px : float, optional\r\n Max amplitude of random perturbations added to FCI plane locations.\r\n Size is relative to grid spacing (px*2*pi/NX). The default is 0.\r\n py : float, optional\r\n Max amplitude of random perturbations added to node y-coords.\r\n Size is relative to grid spacing (py/NY). The default is 0.\r\n seed : {None, int, array_like[ints], numpy.random.SeedSequence}, optional\r\n A seed to initialize the RNG. If None, then fresh, unpredictable\r\n entropy will be pulled from the OS. The default is None.\r\n xmax : float, optional\r\n Maximum x-coordinate of the rectuangular domain.\r\n The default is 2*np.pi.\r\n **kwargs\r\n Keyword arguments\r\n\r\n \"\"\"\r\n NX = int(NX) # 'numpy.int**' classes can cause problems with SuiteSparse\r\n NY = int(NY)\r\n self.ndim = 2\r\n self.NX = NX\r\n self.NY = NY\r\n self.xmax = xmax\r\n self.mapping = mapping\r\n self.velocity = velocity\r\n if isinstance(diffusivity, np.ndarray):\r\n self.diffusivity = diffusivity\r\n else:\r\n self.diffusivity = np.array(diffusivity, dtype='float')\r\n if self.diffusivity.shape != (self.ndim, self.ndim):\r\n self.diffusivity = diffusivity * np.eye(self.ndim, dtype='float')\r\n if self.diffusivity.shape != (self.ndim,self.ndim):\r\n raise SystemExit(f\"diffusivity must be (or be convertible to) a \"\r\n f\"numpy.ndarray with shape ({self.ndim}, {self.ndim}).\")\r\n rng = np.random.Generator(np.random.PCG64(seed))\r\n if \"nodeX\" in kwargs:\r\n self.nodeX = kwargs[\"nodeX\"]\r\n else:\r\n self.nodeX = xmax*np.arange(NX+1)/NX\r\n px *= xmax/NX\r\n self.nodeX[1:-1] += rng.uniform(-px, px, self.nodeX[1:-1].shape)\r\n self.nodeY = np.tile(np.linspace(0, 1, NY+1), NX+1).reshape(NX+1,-1)\r\n py /= NY\r\n self.nodeY[:-1,1:-1] += rng.uniform(-py, py, self.nodeY[:-1,1:-1].shape)\r\n self.nodeY[-1] = self.nodeY[0]\r\n self.dx = self.nodeX[1:] - self.nodeX[0:-1]\r\n self.dy = self.nodeY[:,1:] - self.nodeY[:,:-1]\r\n self.idy = 1. / self.dy\r\n\r\n def setInitialConditions(self, u0, mapped=True, BC='periodic'):\r\n \"\"\"Initialize the nodal coefficients for the given IC.\r\n\r\n Parameters\r\n ----------\r\n u0 : {numpy.ndarray, callable}\r\n Initial conditions for the simulation.\r\n Must be an array of shape (self.nDoFs,) or a callable object\r\n returning such an array and taking as input the array of node\r\n coordinates with shape (self.nDoFs, self.ndim).\r\n mapped : bool, optional\r\n Whether mapping is applied to node positions before applying ICs.\r\n The default is True.\r\n BC : {boundaries.Boundary, string}, optionalquads, iPlane)\r\n Either an object of type boundaries.Boundary, or string 'periodic'.\r\n The default is 'periodic'.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n if isinstance(BC, boundaries.Boundary):\r\n self.BC = BC\r\n elif BC.lower() in ('periodic', 'p'):\r\n self.BC = boundaries.PeriodicBoundary(self)\r\n else:\r\n raise SystemExit(f\"Unkown boundary condition: {BC}\")\r\n self.nDoFs = self.BC.nDoFs\r\n self.nNodes = self.BC.nNodes\r\n self.nodes = self.BC.computeNodes()\r\n self.DoFs = self.nodes[:self.nDoFs]\r\n self.DoFsMapped = self.DoFs.copy()\r\n self.DoFsMapped[:,1] = self.BC.mapping(self.DoFs, 0)\r\n if isinstance(u0, np.ndarray) and u0.shape == (self.nDoFs,):\r\n self.u0 = u0\r\n self.u = u0.copy()\r\n self.u0func = None\r\n elif callable(u0):\r\n self.u0func = u0\r\n if mapped:\r\n self.u = u0(self.DoFsMapped)\r\n else:\r\n self.u = u0(self.DoFs)\r\n self.u0 = self.u.copy()\r\n else:\r\n raise SystemExit(f\"u0 must be an array of shape ({self.nDoFs},) \"\r\n f\"or a callable object returning such an array and taking as \"\r\n f\"input the array of node coordinates with shape \"\r\n f\"({self.nDoFs}, {self.ndim}).\")\r\n\r\n def computeSpatialDiscretization(self, f=None, NQX=1, NQY=None, Qord=2,\r\n quadType='gauss', massLumping=False, **kwargs):\r\n \"\"\"Assemble the system discretization matrices K, A, M in CSR format.\r\n\r\n K is the stiffness matrix from the diffusion term\r\n A is the advection matrix\r\n M is the mass matrix from the time derivative\r\n\r\n Parameters\r\n ----------\r\n f : {callable, None}, optional\r\n Forcing function. Must take 2D array of points and return 1D array.\r\n The default is None.\r\n NQX : int, optional\r\n Number of quadrature cell divisions between FCI planes.\r\n The default is 1.\r\n NQY : {int, None}, optional\r\n Number of quadrature cell divisions in y-direction.\r\n The default is None, which sets NQY = NY.\r\n Qord : int, optional\r\n Number of quadrature points in each grid cell along one dimension.\r\n The default is 2.\r\n quadType : string, optional\r\n Type of quadrature to be used. Must be either 'gauss' or 'uniform'.\r\n Produces either Gauss-Legendre or Newton-Cotes type points/weights.\r\n The default is 'gauss'.\r\n massLumping : bool, optional\r\n Determines whether mass-lumping is used to calculate M matrix.\r\n The default is False.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.vci = None\r\n self.vci_solver = None\r\n ndim = self.ndim\r\n nDoFs = self.nDoFs\r\n NX = self.NX\r\n NY = self.NY\r\n if NQY is None:\r\n NQY = NY\r\n self.f = f\r\n self.NQX = NQX\r\n self.NQY = NQY\r\n self.Qord = Qord\r\n self.quadType = quadType\r\n self.massLumping = massLumping\r\n # pre-allocate arrays for stiffness matrix triplets\r\n nEntries = (2*ndim)**2\r\n nQuads = NQX * NQY * Qord**2\r\n nMaxEntries = nEntries * nQuads * NX\r\n Kdata = np.zeros(nMaxEntries)\r\n Adata = np.zeros(nMaxEntries)\r\n if not massLumping:\r\n Mdata = np.zeros(nMaxEntries)\r\n row_ind = np.zeros(nMaxEntries, dtype='int')\r\n col_ind = np.zeros(nMaxEntries, dtype='int')\r\n self.b = np.zeros(nDoFs)\r\n self.u_weights = np.zeros(nDoFs)\r\n\r\n ##### compute spatial discretizaton\r\n index = 0\r\n for iPlane in range(NX):\r\n dx = self.dx[iPlane]\r\n ##### generate quadrature points\r\n if quadType.lower() in ('gauss', 'g', 'gaussian'):\r\n offsets, weights = roots_legendre(Qord)\r\n elif quadType.lower() in ('uniform', 'u'):\r\n offsets = np.linspace(1/Qord - 1, 1 - 1/Qord, Qord)\r\n weights = np.repeat(2/Qord, Qord)\r\n offsets = (offsets * dx * 0.5 / NQX, offsets * 0.5 / NQY)\r\n weights = (weights * dx * 0.5 / NQX, weights * 0.5 / NQY)\r\n quads = ( np.indices([NQX, NQY], dtype='float').T.\r\n reshape(-1, ndim) + 0.5 ) * [dx/NQX, 1/NQY]\r\n quadWeights = np.repeat(1., len(quads))\r\n for i in range(ndim):\r\n quads = np.concatenate(\r\n [quads + offset*np.eye(ndim)[i] for offset in offsets[i]] )\r\n quadWeights = np.concatenate(\r\n [quadWeights * weight for weight in weights[i]] )\r\n\r\n quads += [self.nodeX[iPlane], 0]\r\n\r\n for iQ, quad in enumerate(quads):\r\n if f is not None:\r\n fq = f(quad)\r\n phis, gradphis, inds = self.BC(quad, iPlane)\r\n for alpha, i in enumerate(inds):\r\n if i < 0:\r\n continue # move to next i if boundary node\r\n for beta, j in enumerate(inds):\r\n if j < 0: # j is boundary node\r\n ##### Not sure if this can/should always be uncommented? #####\r\n ##### Needed for projection; but does it affect Poisson/CD #####\r\n # self.b[i] -= quadWeights[iQ] * (\r\n # phis[alpha] * phis[beta] )\r\n self.b[i] -= quadWeights[iQ] * (\r\n (gradphis[alpha] @ self.velocity) * phis[beta] +\r\n (gradphis[alpha] @ (self.diffusivity @ gradphis[beta])) )\r\n else: # i and j are both interior\r\n if not massLumping:\r\n Mdata[index] = quadWeights[iQ] * phis[alpha] * phis[beta]\r\n Adata[index] = quadWeights[iQ] * (gradphis[alpha] @ self.velocity) * phis[beta]\r\n Kdata[index] = quadWeights[iQ] * (gradphis[alpha] @ (self.diffusivity @ gradphis[beta]))\r\n row_ind[index] = i\r\n col_ind[index] = j\r\n index += 1\r\n self.u_weights[i] += quadWeights[iQ] * phis[alpha]\r\n if f is not None:\r\n self.b[i] += quadWeights[iQ] * fq * phis[alpha]\r\n\r\n self.K = sp.csr_matrix( (Kdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n self.A = sp.csr_matrix( (Adata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n if massLumping:\r\n self.M = sp.diags(self.u_weights, format='csr')\r\n else:\r\n self.M = sp.csr_matrix( (Mdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n\r\n def computeSpatialDiscretizationLinearVCI(self, f=None, NQX=1, NQY=None,\r\n Qord=2, quadType='gauss', massLumping=False, **kwargs):\r\n \"\"\"Assemble the system discretization matrices K, A, M in CSR format.\r\n Implements linear variationally consistent integration using assumed\r\n strain method of Chen2013 https://doi.org/10.1002/nme.4512\r\n\r\n K is the stiffness matrix from the diffusion term\r\n A is the advection matrix\r\n M is the mass matrix from the time derivative\r\n\r\n Parameters\r\n ----------\r\n f : {callable, None}, optional\r\n Forcing function. Must take 2D array of points and return 1D array.\r\n The default is None.\r\n NQX : int, optional\r\n Number of quadrature cell divisions between FCI planes.\r\n The default is 1.\r\n NQY : {int, None}, optional\r\n Number of quadrature cell divisions in y-direction.\r\n The default is None, which sets NQY = NY.\r\n Qord : int, optional\r\n Number of quadrature points in each grid cell along one dimension.\r\n The default is 2.\r\n quadType : string, optional\r\n Type of quadrature to be used. Must be either 'gauss' or 'uniform'.\r\n Produces either Gauss-Legendre or Newton-Cotes type points/weights.\r\n The default is 'gauss'.\r\n massLumping : bool, optional\r\n Determines whether mass-lumping is used to calculate M matrix.\r\n The default is False.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.vci = 'VC1 (assumed strain)'\r\n self.vci_solver = None\r\n ndim = self.ndim\r\n nDoFs = self.nDoFs\r\n NX = self.NX\r\n NY = self.NY\r\n if NQY is None:\r\n NQY = NY\r\n self.f = f\r\n self.NQX = NQX\r\n self.NQY = NQY\r\n self.Qord = Qord\r\n self.quadType = quadType\r\n self.massLumping = massLumping\r\n # pre-allocate arrays for stiffness matrix triplets\r\n nEntries = (2*ndim)**2\r\n nQuads = NQX * NQY * Qord**2\r\n nMaxEntries = nEntries * nQuads * NX\r\n Kdata = np.zeros(nMaxEntries)\r\n Adata = np.zeros(nMaxEntries)\r\n if not massLumping:\r\n Mdata = np.zeros(nMaxEntries)\r\n row_ind = np.zeros(nMaxEntries, dtype='int')\r\n col_ind = np.zeros(nMaxEntries, dtype='int')\r\n self.b = np.zeros(nDoFs)\r\n self.u_weights = np.zeros(nDoFs)\r\n\r\n self.store = []\r\n self.areas = np.zeros(nDoFs + 1)\r\n self.xis = np.zeros((self.nDoFs + 1, self.ndim))\r\n\r\n ##### compute spatial discretizaton\r\n index = 0\r\n for iPlane in range(NX):\r\n dx = self.dx[iPlane]\r\n ##### generate quadrature points\r\n if quadType.lower() in ('gauss', 'g', 'gaussian'):\r\n offsets, weights = roots_legendre(Qord)\r\n elif quadType.lower() in ('uniform', 'u'):\r\n offsets = np.linspace(1/Qord - 1, 1 - 1/Qord, Qord)\r\n weights = np.repeat(2/Qord, Qord)\r\n offsets = (offsets * dx * 0.5 / NQX, offsets * 0.5 / NQY)\r\n weights = (weights * dx * 0.5 / NQX, weights * 0.5 / NQY)\r\n quads = ( np.indices([NQX, NQY], dtype='float').T.\r\n reshape(-1, ndim) + 0.5 ) * [dx/NQX, 1/NQY]\r\n quadWeights = np.repeat(1., len(quads))\r\n for i in range(ndim):\r\n quads = np.concatenate(\r\n [quads + offset*np.eye(ndim)[i] for offset in offsets[i]] )\r\n quadWeights = np.concatenate(\r\n [quadWeights * weight for weight in weights[i]] )\r\n\r\n quads += [self.nodeX[iPlane], 0]\r\n\r\n for iQ, quad in enumerate(quads):\r\n phis, gradphis, inds = self.BC(quad, iPlane)\r\n quadWeight = quadWeights[iQ]\r\n self.store.append((inds, phis, gradphis, quadWeight, quad))\r\n inds[inds < 0] = -1\r\n self.areas[inds] += quadWeight\r\n self.xis[inds] -= gradphis * quadWeight\r\n\r\n # self.gradphiSumsOld = -self.xis[0:-1]\r\n # self.gradphiSumsNew = np.zeros((nDoFs, 2))\r\n self.xis /= self.areas.reshape(-1,1)\r\n\r\n for iQ, (inds, phis, gradphis, quadWeight, quad) in enumerate(self.store):\r\n if f is not None:\r\n fq = f(quad)\r\n for alpha, i in enumerate(inds):\r\n if i < 0:\r\n continue # move to next i if boundary node\r\n testgrad = gradphis[alpha] + self.xis[i]\r\n # self.gradphiSumsNew[i] += testgrad * quadWeight\r\n self.u_weights[i] += quadWeight * phis[alpha]\r\n if f is not None:\r\n self.b[i] += quadWeight * fq * phis[alpha]\r\n for beta, j in enumerate(inds):\r\n if j < 0: # j is boundary node\r\n ##### Not sure if this can/should always be uncommmented? #####\r\n ##### Needed for projection; but does it affect Poisson/CD #####\r\n # self.b[i] -= quadWeight * (\r\n # phis[alpha] * phis[beta] )\r\n self.b[i] -= quadWeight * (\r\n (testgrad @ self.velocity) * phis[beta] +\r\n (testgrad @ (self.diffusivity @ gradphis[beta])) )\r\n else: # i and j are both interior\r\n if not massLumping:\r\n Mdata[index] = quadWeight * phis[alpha] * phis[beta]\r\n Adata[index] = quadWeight * (testgrad @ self.velocity) * phis[beta]\r\n Kdata[index] = quadWeight * (testgrad @ (self.diffusivity @ gradphis[beta]))\r\n row_ind[index] = i\r\n col_ind[index] = j\r\n index += 1\r\n NQX\r\n self.K = sp.csr_matrix( (Kdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n self.A = sp.csr_matrix( (Adata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n if massLumping:\r\n self.M = sp.diags(self.u_weights, format='csr')\r\n else:\r\n self.M = sp.csr_matrix( (Mdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n\r\n def computeSpatialDiscretizationConservativeLinearVCIold(self, f=None, NQX=1,\r\n NQY=None, Qord=2, quadType='gauss', massLumping=False,\r\n includeBoundaries=False, **kwargs):\r\n \"\"\"Assemble the system discretization matrices K, A, M in CSR format.\r\n Implements linear variationally consistent integration by re-weighting\r\n the quadrature points.\r\n\r\n K is the stiffness matrix from the diffusion term\r\n A is the advection matrix\r\n M is the mass matrix from the time derivative\r\n\r\n Parameters\r\n ----------\r\n f : {callable, None}, optional\r\n Forcing function. Must take 2D array of points and return 1D array.\r\n The default is None.\r\n NQX : int, optional\r\n Number of quadrature cell divisions between FCI planes.\r\n The default is 1.\r\n NQY : {int, None}, optional\r\n Number of quadrature cell divisions in y-direction.\r\n The default is None, which sets NQY = NY.\r\n Qord : int, optional\r\n Number of quadrature points in each grid cell along one dimension.\r\n The default is 2.\r\n quadType : string, optional\r\n Type of quadrature to be used. Must be either 'gauss' or 'uniform'.\r\n Produces either Gauss-Legendre or Newton-Cotes type points/weights.\r\n The default is 'gauss'.\r\n massLumping : bool, optional\r\n Determines whether mass-lumping is used to calculate M matrix.\r\n The default is False.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.vci = 'VC1-C (whole domain)'\r\n ndim = self.ndim\r\n nDoFs = self.nDoFs\r\n nNodes = self.nNodes\r\n NX = self.NX\r\n NY = self.NY\r\n if NQY is None:\r\n NQY = NY\r\n self.f = f\r\n self.NQX = NQX\r\n self.NQY = NQY\r\n self.Qord = Qord\r\n self.quadType = quadType\r\n self.massLumping = massLumping\r\n # pre-allocate arrays for stiffness matrix triplets\r\n nEntries = (2*ndim)**2\r\n nQuads = NQX * NQY * Qord**2\r\n nMaxEntries = nEntries * nQuads * NX\r\n Kdata = np.zeros(nMaxEntries)\r\n Adata = np.zeros(nMaxEntries)\r\n if not massLumping:\r\n Mdata = np.zeros(nMaxEntries)\r\n row_ind = np.zeros(nMaxEntries, dtype='int')\r\n col_ind = np.zeros(nMaxEntries, dtype='int')\r\n self.b = np.zeros(nDoFs)\r\n self.u_weights = np.zeros(nNodes)\r\n\r\n self.store = []\r\n quadWeightsList = []\r\n\r\n gd = np.empty(9 * nQuads * NX)\r\n ri = np.empty(9 * nQuads * NX, dtype='int')\r\n ci = np.empty(9 * nQuads * NX, dtype='int')\r\n bounds = np.empty(nQuads * NX)\r\n\r\n self.rOld = np.zeros((nNodes, self.ndim, 3))\r\n\r\n if includeBoundaries:\r\n indexOffset = nNodes\r\n else:\r\n indexOffset = nDoFs\r\n\r\n ##### compute spatial discretizaton\r\n index = 0\r\n for iPlane in range(NX):\r\n dx = self.dx[iPlane]\r\n ##### generate quadrature points\r\n if quadType.lower() in ('gauss', 'g', 'gaussian'):\r\n offsets, weights = roots_legendre(Qord)\r\n elif quadType.lower() in ('uniform', 'u'):\r\n offsets = np.linspace(1/Qord - 1, 1 - 1/Qord, Qord)\r\n weights = np.repeat(2/Qord, Qord)\r\n offsets = (offsets * dx * 0.5 / NQX, offsets * 0.5 / NQY)\r\n weights = (weights * dx * 0.5 / NQX, weights * 0.5 / NQY)\r\n quads = ( np.indices([NQX, NQY], dtype='float').T.\r\n reshape(-1, ndim) + 0.5 ) * [dx/NQX, 1/NQY]\r\n quadWeights = np.repeat(1., len(quads))\r\n for i in range(ndim):\r\n quads = np.concatenate(\r\n [quads + offset*np.eye(ndim)[i] for offset in offsets[i]] )\r\n quadWeights = np.concatenate(\r\n [quadWeights * weight for weight in weights[i]] )\r\n\r\n quads += [self.nodeX[iPlane], 0]\r\n bounds[iPlane*nQuads:(iPlane+1)*nQuads] = -quadWeights\r\n\r\n for iQ, quad in enumerate(quads):\r\n phis, gradphis, inds = self.BC(quad, iPlane)\r\n quadWeight = quadWeights[iQ]\r\n self.store.append((inds, phis, gradphis, quad))\r\n\r\n for alpha, i in enumerate(inds):\r\n disp = quad - self.nodes[i]\r\n self.rOld[i,:,0] -= gradphis[alpha] * quadWeight\r\n self.rOld[i,0,1] -= phis[alpha] * quadWeight\r\n self.rOld[i,1,2] -= phis[alpha] * quadWeight\r\n self.rOld[i,:,1:3] -= np.outer(gradphis[alpha], disp) * quadWeight\r\n if i < 0:\r\n if includeBoundaries:\r\n i += nNodes\r\n else:\r\n continue # move to next i if boundary node\r\n gd[index:index+2] = gradphis[alpha]\r\n ri[index:index+2] = (i, i + indexOffset)\r\n ci[index:index+2] = iQ + iPlane*nQuads\r\n index += 2\r\n \r\n quadWeightsList.append(quadWeights)\r\n\r\n gd[index:index + nQuads*NX] = 1.0\r\n if includeBoundaries:\r\n ri[index:index + nQuads*NX] = 2*nNodes\r\n else:\r\n ri[index:index + nQuads*NX] = 2*nDoFs\r\n ci[index:index + nQuads*NX] = np.arange(nQuads * NX)\r\n index += nQuads * NX\r\n\r\n if (self.BC.name == 'Dirichlet') and includeBoundaries:\r\n self.boundaryIntegrals = np.zeros((self.BC.nDirichletNodes, 2))\r\n nYnodes = self.BC.nYnodes\r\n DirichletNodeX = self.BC.DirichletNodeX\r\n nBottomNodes = DirichletNodeX[0].size\r\n nTopNodes = DirichletNodeX[1].size\r\n g = self.BC.g\r\n # left boundary\r\n self.boundaryIntegrals[-nYnodes:,0] = -g(self.nodes[-nYnodes:]) \\\r\n * 0.5 * np.flip(self.nodeY[0,2:] - self.nodeY[0,:-2])\r\n # right boundary\r\n self.boundaryIntegrals[-2*nYnodes:-nYnodes,0] = \\\r\n g(self.nodes[-2*nYnodes:-nYnodes]) \\\r\n * 0.5 * np.flip(self.nodeY[-1,2:] - self.nodeY[-1,:-2])\r\n # bottom boundary\r\n self.boundaryIntegrals[nTopNodes + 1:-2*nYnodes - 1,1] \\\r\n = -g(self.nodes[nDoFs + nTopNodes + 1:-2*nYnodes - 1]) \\\r\n * 0.5 * ( DirichletNodeX[0][-1:1:-1]\r\n - DirichletNodeX[0][-3::-1] )\r\n # top boundary\r\n self.boundaryIntegrals[1:-2*nYnodes - nBottomNodes - 1,1] = \\\r\n g(self.nodes[nDoFs + 1:-2*nYnodes - nBottomNodes - 1]) \\\r\n * 0.5 * ( DirichletNodeX[1][-1:1:-1]\r\n - DirichletNodeX[1][-3::-1] )\r\n # [0., 0.]\r\n self.boundaryIntegrals[-2*nYnodes - 1] = \\\r\n -g(self.nodes[-2*nYnodes - 1]) * 0.5 * \\\r\n (1/self.idy[0][0], DirichletNodeX[0][1])\r\n # [xmax, 0. ]\r\n self.boundaryIntegrals[nTopNodes] = \\\r\n g(self.nodes[nDoFs + nTopNodes]) * 0.5 * \\\r\n (1/self.idy[-1][0], DirichletNodeX[0][-2] - self.xmax)\r\n # [0., ymax]\r\n self.boundaryIntegrals[nTopNodes - 1] = \\\r\n g(self.nodes[nDoFs + nTopNodes - 1]) * 0.5 * \\\r\n (-1/self.idy[0][-1], DirichletNodeX[1][1])\r\n # [xmax, ymax]\r\n self.boundaryIntegrals[0] = g(self.nodes[nDoFs]) * 0.5 * \\\r\n (1/self.idy[-1][-1], self.xmax - DirichletNodeX[1][-2])\r\n self.rOld[nDoFs:,:,0] += self.boundaryIntegrals\r\n self.gradphiSums = self.rOld[:,:,0]\r\n nConstraints = 2*nNodes + 1\r\n elif (self.BC.name == 'DirichletXPeriodicY') and includeBoundaries:\r\n self.boundaryIntegrals = np.zeros((self.BC.nDirichletNodes, 2))\r\n nYnodes = self.BC.nYnodes\r\n g = self.BC.g\r\n # left boundary\r\n self.boundaryIntegrals[-nYnodes:-1,0] = -g(self.nodes[-nYnodes:-1]) \\\r\n * 0.5 * np.flip(self.nodeY[0,2:] - self.nodeY[0,:-2])\r\n # [0., 0.]\r\n self.boundaryIntegrals[-1,0] = -g(self.nodes[-1]) \\\r\n * 0.5 * (self.nodeY[0,1] + 1-self.nodeY[0,-2])\r\n # right boundary\r\n self.boundaryIntegrals[-2*nYnodes:-nYnodes-1,0] = \\\r\n g(self.nodes[-2*nYnodes:-nYnodes-1]) \\\r\n * 0.5 * np.flip(self.nodeY[-1,2:] - self.nodeY[-1,:-2])\r\n # [xmax, 0.]\r\n self.boundaryIntegrals[-nYnodes-1,0] = g(self.nodes[-nYnodes-1]) \\\r\n * 0.5 * (self.nodeY[-1,1] + 1-self.nodeY[-1,-2])\r\n self.rOld[nDoFs:,:,0] += self.boundaryIntegrals\r\n self.gradphiSums = self.rOld[:,:,0]\r\n nConstraints = 2*nNodes + 1\r\n else:\r\n self.gradphiSums = self.rOld[:nDoFs,:,0]\r\n nConstraints = 2*nDoFs + 1\r\n\r\n # ##### Using SuiteSparse QR decomposition #####\r\n # # Form the transpose of G (i.e. ri and ci intentionally swapped)\r\n # # n.b. using np.iinfo('int32').max + 1 forces indices to be int64\r\n # self.G = sp.csc_matrix((gd[:index], (ci[:index], ri[:index])),\r\n # shape=(np.iinfo('int32').max + 1, nConstraints))\r\n # self.G._shape = (nQuads * NX, nConstraints)\r\n # del gd, ci, ri, offsets, weights, quads, quadWeights\r\n # start_time = default_timer()\r\n # QR = ssqr.QR_C(self.G, tol=ssqr.SPQR_DEFAULT_TOL)\r\n # r = QR.r\r\n # if r == -1:\r\n # raise SystemExit(\"Error in QR decomposition\")\r\n # try:\r\n # QR.E[0][0]\r\n # E = np.frombuffer(QR.E[0], dtype=np.int64, count=r)\r\n # rhs = np.append(self.gradphiSums.T.ravel(), 0.)[E]\r\n # except:\r\n # rhs = np.append(self.gradphiSums.T.ravel(), 0.)[:r]\r\n # R = ssqr.cholmodSparseToScipyCsc(QR.R)\r\n # x = np.empty(nQuads * NX)\r\n # x[:r] = sp_la.spsolve_triangular(R.T[:r,:r], rhs, lower=True,\r\n # overwrite_A=True, overwrite_b=True)\r\n # x[r:] = 0.\r\n # self.xi = (ssqr.qmult(QR, x), r)\r\n # print(f'xi solve time = {default_timer()-start_time} s')\r\n # self.vci_solver = 'ssqr.QR_C'\r\n\r\n ##### Using SuiteSparse min2norm (QR based solver) #####\r\n # n.b. using np.iinfo('int32').max + 1 forces indices to be int64\r\n G = sp.csc_matrix((gd[:index], (ri[:index], ci[:index])),\r\n shape=(np.iinfo('int32').max + 1, nQuads * NX))\r\n G._shape = (nConstraints, nQuads * NX)\r\n del gd, ci, ri, offsets, weights, quads, quadWeights\r\n start_time = default_timer()\r\n rhs = np.append(self.gradphiSums.T.ravel(), 0.)\r\n self.xi = (ssqr.min2norm(G, rhs).ravel(), 0)\r\n print(f'xi solve time = {default_timer()-start_time} s')\r\n self.vci_solver = 'ssqr.min2norm'\r\n\r\n # ##### Using scipy.sparse.linalg, much slower, but uses less memory #####\r\n # self.G = sp.csr_matrix((gd[:index], (ri[:index], ci[:index])),\r\n # shape=(nConstraints, nQuads * NX))\r\n # rhs = np.append(self.gradphiSums.T.ravel(), 0.)\r\n # v0 = np.zeros(nQuads * NX)\r\n # maxit = nQuads * NX\r\n # # tol = np.finfo(float).eps\r\n # tol = 1e-10\r\n # start_time = default_timer()\r\n # # self.xi = sp_la.lsmr(self.G, rhs, x0=v0, atol=tol, btol=tol, maxiter=maxit)\r\n # self.xi = sp_la.lsqr(self.G, rhs, x0=v0, atol=tol, btol=tol, iter_lim=maxit)\r\n # print(f'xi solve time = {default_timer()-start_time} s')\r\n # self.vci_solver = 'scipy.sparse.linalg.lsqr'\r\n\r\n # ##### Using scipy.optimize.lsq_linear #####\r\n # ##### VERY SLOW, but guarantees non-negative quadWeights #####\r\n # from scipy.optimize import lsq_linear\r\n # self.G = sp.csr_matrix((gd[:index], (ri[:index], ci[:index])),\r\n # shape=(nConstraints, nQuads * NX))\r\n # rhs = np.append(self.gradphiSums.T.ravel(), 0.)\r\n # maxit = nQuads * NX\r\n # tol = 1e-10\r\n # self.xi = lsq_linear(self.G, rhs, (bounds,np.inf), max_iter=100,\r\n # tol=tol)\r\n # self.vci_solver = 'scipy.optimize.lsq_linear'\r\n\r\n self.rNew = np.zeros((nNodes, self.ndim, 3))\r\n \r\n quadWeights = np.concatenate(quadWeightsList) + self.xi[0]\r\n # quadWeights = np.concatenate(quadWeightsList) + self.xi.x\r\n\r\n index = 0\r\n for iQ, (inds, phis, gradphis, quad) in enumerate(self.store):\r\n quadWeight = quadWeights[iQ]\r\n for alpha, i in enumerate(inds):\r\n disp = quad - self.nodes[i]\r\n self.rNew[i,:,0] -= gradphis[alpha] * quadWeight\r\n self.rNew[i,0,1] -= phis[alpha] * quadWeight\r\n self.rNew[i,1,2] -= phis[alpha] * quadWeight\r\n self.rNew[i,:,1:3] -= np.outer(gradphis[alpha], disp) * quadWeight\r\n self.u_weights[i] += quadWeight * np.abs(phis[alpha])\r\n if i < 0:\r\n continue # move to next i if boundary node\r\n\r\n if f is not None:\r\n self.b[i] += quadWeight * f(quad) * phis[alpha]\r\n for beta, j in enumerate(inds):\r\n if j < 0: # j is boundary node\r\n ##### Not sure if this can/should always be uncommmented? #####\r\n ##### Needed for projection; but does it affect Poisson/CD #####\r\n # self.b[i] -= quadWeight * (\r\n # phis[alpha] * phis[beta] )\r\n self.b[i] -= quadWeight * (\r\n (gradphis[alpha] @ self.velocity) * phis[beta] +\r\n (gradphis[alpha] @ (self.diffusivity @ gradphis[beta])) )\r\n else: # i and j are both interior\r\n if not massLumping:\r\n Mdata[index] = quadWeight * phis[alpha] * phis[beta]\r\n Adata[index] = quadWeight * (gradphis[alpha] @ self.velocity) * phis[beta]\r\n Kdata[index] = quadWeight * (gradphis[alpha] @ (self.diffusivity @ gradphis[beta]))\r\n row_ind[index] = i\r\n col_ind[index] = j\r\n index += 1\r\n\r\n if includeBoundaries:\r\n self.rNew[nDoFs:,:,0] += self.boundaryIntegrals\r\n self.gradphiSumsNew = self.rNew[:,:,0]\r\n else:\r\n self.gradphiSumsNew = self.rNew[:nDoFs,:,0]\r\n\r\n self.K = sp.csr_matrix( (Kdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n self.A = sp.csr_matrix( (Adata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n if massLumping:\r\n self.M = sp.diags(self.u_weights, format='csr')\r\n else:\r\n self.M = sp.csr_matrix( (Mdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n\r\n def computeSpatialDiscretizationConservativeLinearVCI(self, f=None, NQX=1,\r\n NQY=None, Qord=2, quadType='gauss', massLumping=False, **kwargs):\r\n \"\"\"Assemble the system discretization matrices K, A, M in CSR format.\r\n Implements linear variationally consistent integration by re-weighting\r\n the quadrature points.\r\n\r\n K is the stiffness matrix from the diffusion term\r\n A is the advection matrix\r\n M is the mass matrix from the time derivative\r\n\r\n Parameters\r\n ----------\r\n f : {callable, None}, optional\r\n Forcing function. Must take 2D array of points and return 1D array.\r\n The default is None.\r\n NQX : int, optional\r\n Number of quadrature cell divisions between FCI planes.\r\n The default is 1.\r\n NQY : {int, None}, optional\r\n Number of quadrature cell divisions in y-direction.\r\n The default is None, which sets NQY = NY.\r\n Qord : int, optional\r\n Number of quadrature points in each grid cell along one dimension.\r\n The default is 2.\r\n quadType : string, optional\r\n Type of quadrature to be used. Must be either 'gauss' or 'uniform'.\r\n Produces either Gauss-Legendre or Newton-Cotes type points/weights.\r\n The default is 'gauss'.\r\n massLumping : bool, optional\r\n Determines whether mass-lumping is used to calculate M matrix.\r\n The default is False.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.vci = 'VC1-C (slice-by-slice)'\r\n ndim = self.ndim\r\n nDoFs = self.nDoFs\r\n nNodes = self.nNodes\r\n NX = self.NX\r\n NY = self.NY\r\n if NQY is None:\r\n NQY = NY\r\n self.f = f\r\n self.NQX = NQX\r\n self.NQY = NQY\r\n self.Qord = Qord\r\n self.quadType = quadType\r\n self.massLumping = massLumping\r\n # pre-allocate arrays for stiffness matrix triplets\r\n nEntries = (2*ndim)**2\r\n nQuads = NQX * NQY * Qord**2\r\n nMaxEntries = nEntries * nQuads * NX\r\n Kdata = np.zeros(nMaxEntries)\r\n Adata = np.zeros(nMaxEntries)\r\n if not massLumping:\r\n Mdata = np.zeros(nMaxEntries)\r\n row_ind = np.zeros(nMaxEntries, dtype='int')\r\n col_ind = np.zeros(nMaxEntries, dtype='int')\r\n self.b = np.zeros(nDoFs)\r\n self.u_weights = np.zeros(nNodes)\r\n\r\n self.integrals = []\r\n\r\n gd = np.empty(9 * nQuads)\r\n ri = np.empty(9 * nQuads, dtype='int')\r\n ci = np.empty(9 * nQuads, dtype='int')\r\n gradphiSums = np.empty((nNodes, self.ndim))\r\n\r\n self.rOld = np.zeros((nNodes, self.ndim, 3))\r\n self.rNew = np.zeros((nNodes, self.ndim, 3))\r\n\r\n index = 0\r\n\r\n ##### compute spatial discretizaton\r\n for iPlane in range(NX):\r\n Gindex = 0\r\n gradphiSums[:] = 0.\r\n store = []\r\n dx = self.dx[iPlane]\r\n ##### generate quadrature points\r\n if quadType.lower() in ('gauss', 'g', 'gaussian'):\r\n offsets, weights = roots_legendre(Qord)\r\n elif quadType.lower() in ('uniform', 'u'):\r\n offsets = np.linspace(1/Qord - 1, 1 - 1/Qord, Qord)\r\n weights = np.repeat(2/Qord, Qord)\r\n offsets = (offsets * dx * 0.5 / NQX, offsets * 0.5 / NQY)\r\n weights = (weights * dx * 0.5 / NQX, weights * 0.5 / NQY)\r\n quads = ( np.indices([NQX, NQY], dtype='float').T.\r\n reshape(-1, ndim) + 0.5 ) * [dx/NQX, 1/NQY]\r\n quadWeights = np.repeat(1., len(quads))\r\n for i in range(ndim):\r\n quads = np.concatenate(\r\n [quads + offset*np.eye(ndim)[i] for offset in offsets[i]] )\r\n quadWeights = np.concatenate(\r\n [quadWeights * weight for weight in weights[i]] )\r\n\r\n quads += [self.nodeX[iPlane], 0]\r\n\r\n for iQ, quad in enumerate(quads):\r\n phis, gradphis, inds = self.BC(quad, iPlane)\r\n quadWeight = quadWeights[iQ]\r\n store.append((inds, phis, gradphis))\r\n\r\n for alpha, i in enumerate(inds):\r\n disp = quad - self.nodes[i]\r\n gradphiSums[i] -= gradphis[alpha] * quadWeight\r\n self.rOld[i,:,0] -= gradphis[alpha] * quadWeight\r\n self.rOld[i,0,1] -= phis[alpha] * quadWeight\r\n self.rOld[i,1,2] -= phis[alpha] * quadWeight\r\n self.rOld[i,:,1:3] -= np.outer(gradphis[alpha], disp) * quadWeight\r\n if i < 0:\r\n i += nNodes\r\n gd[Gindex:Gindex+2] = gradphis[alpha]\r\n ri[Gindex:Gindex+2] = (i + 1, i + nNodes + 1)\r\n ci[Gindex:Gindex+2] = iQ\r\n Gindex += 2\r\n\r\n sliceBoundaryIntegrals = self.BC.computeSliceBoundaryIntegrals(iPlane)\r\n gradphiSums += sliceBoundaryIntegrals\r\n self.integrals.append(sliceBoundaryIntegrals)\r\n\r\n self.rOld[:,:,0] += sliceBoundaryIntegrals\r\n self.rNew[:,:,0] += sliceBoundaryIntegrals\r\n\r\n gd[Gindex:Gindex + nQuads] = 1.0\r\n ri[Gindex:Gindex + nQuads] = 0 # previously 2*nNodes\r\n ci[Gindex:Gindex + nQuads] = np.arange(nQuads)\r\n Gindex += nQuads\r\n\r\n # start_time = default_timer()\r\n\r\n # ##### Using SuiteSparseQR_min2norm #####\r\n # # n.b. using np.iinfo('int32').max + 1 forces indices to be int64\r\n # G = sp.csc_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n # shape=(np.iinfo('int32').max + 1, nQuads))\r\n # G._shape = (2*nNodes + 1, nQuads)\r\n # # G = sp.csc_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n # # shape=(2*nNodes + 1, nQuads))\r\n # # rhs = np.append(dx, sliceBoundaryIntegrals.T.ravel())\r\n # rhs = np.append(0., gradphiSums.T.ravel())\r\n # xi = ssqr.min2norm(G, rhs).ravel()\r\n # self.vci_solver = 'ssqr.min2norm'\r\n\r\n ##### Using scipy.sparse.linalg #####\r\n ##### slower, but uses less RAM and (slightly) more stable #####\r\n G = sp.csr_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n shape=(2*nNodes + 1, nQuads))\r\n # rhs = np.append(dx, sliceBoundaryIntegrals.T.ravel())\r\n rhs = np.append(0., gradphiSums.T.ravel())\r\n maxit = nQuads\r\n tol = 1e-10\r\n # D = sp.diags(1/np.sqrt(G.power(2).sum(axis=0)).A1, format='csc')\r\n # xi = D @ sp_la.lsmr(G @ D, rhs, atol=tol, btol=tol, maxiter=maxit)[0]\r\n # # xi = D @ sp_la.lsqr(G @ D, rhs, atol=tol, btol=tol, iter_lim=maxit)[0]\r\n # xi = sp_la.lsmr(G, rhs, atol=tol, btol=tol, maxiter=maxit)[0]\r\n xi = sp_la.lsqr(G, rhs, atol=tol, btol=tol, iter_lim=maxit)[0]\r\n self.vci_solver = 'scipy.sparse.linalg.lsqr'\r\n\r\n # # attempting precondtioning with R factor; was not helpful\r\n # G = sp.csc_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n # shape=(np.iinfo('int32').max + 1, nQuads))\r\n # G._shape = (2*nNodes + 1, nQuads)\r\n # QR, r = ssqr.QR_C(G, tol=ssqr.SPQR_DEFAULT_TOL)\r\n # if r == -1:\r\n # raise SystemExit(\"Error in QR decomposition\")\r\n # try:\r\n # QR.E[0][0]\r\n # E = np.frombuffer(QR.E[0], dtype=np.int64, count=r)\r\n # # rhs = np.append(0., gradphiSums.T.ravel())[E]\r\n # except:\r\n # E = np.arange(r)\r\n # # rhs = np.append(gradphiSums.T.ravel(), 0.)[:r]\r\n # R = ssqr.cholmodSparseToScipyCsc(QR.R)[:r,:r].tocsr()\r\n # RT = R.T.tocsr()\r\n # GT = G.T\r\n # def matvec(b):\r\n # # nonlocal R, E, G\r\n # x = b.copy()\r\n # x[E] = sp_la.spsolve_triangular(R, b[E], lower=False)\r\n # return G @ x\r\n # def rmatvec(b):\r\n # # nonlocal R, E, G\r\n # x = GT @ b\r\n # x[E] = sp_la.spsolve_triangular(RT, x[E], lower=True)\r\n # return x\r\n # P = sp_la.LinearOperator(G.shape, matvec=matvec, rmatvec=rmatvec)\r\n # x = sp_la.lsmr(P, rhs, atol=tol, btol=tol, maxiter=maxit)\r\n # xi = x[0].copy()\r\n # xi[E] = sp_la.spsolve_triangular(R, xi[E], lower=False)\r\n\r\n # ##### Using cupyx.scipy.sparse.linalg #####\r\n # ##### slower again unfortunately, too much copying #####\r\n # import cupy\r\n # from cupyx.scipy.sparse import csr_matrix as cu_csr_matrix\r\n # from cupyx.scipy.sparse.linalg import lsqr as cu_lsqr\r\n # from cupyx.scipy.sparse.linalg import lsmr as cu_lsmr\r\n # G = sp.csr_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n # shape=(2*nNodes + 1, nQuads))\r\n # rhs = np.append(dx, sliceBoundaryIntegrals.T.ravel())\r\n # # rhs = np.append(0., gradphiSums.T.ravel())\r\n # maxit = nQuads\r\n # # tol = np.finfo(float).eps\r\n # tol = 1e-10\r\n # cu_G = cu_csr_matrix((cupy.asarray(G.data),\r\n # cupy.asarray(G.indices),\r\n # cupy.asarray(G.indptr)))\r\n # cu_rhs = cupy.asarray(rhs)\r\n # # cu_xi = cu_lsqr(cu_G, cu_rhs)[0] # raises LinAlgError: Last 2 dimensions of the array must be square\r\n # cu_xi = cu_lsmr(cu_G, cu_rhs, atol=tol, btol=tol, maxiter=maxit)[0]\r\n # xi = cupy.asnumpy(cu_xi)\r\n # self.vci_solver = 'cupyx.scipy.sparse.linalg.lsmr'\r\n\r\n # ##### Using scipy.optimize.lsq_linear #####\r\n # ##### VERY SLOW, but guarantees non-negative quadWeights #####\r\n # from scipy.optimize import lsq_linear\r\n # G = sp.csr_matrix((gd[:Gindex], (ri[:Gindex], ci[:Gindex])),\r\n # shape=(2*nNodes + 1, nQuads))\r\n # rhs = np.append(dx, sliceBoundaryIntegrals.T.ravel())\r\n # # rhs = np.append(0., gradphiSums.T.ravel())\r\n # # maxit = nQuads\r\n # tol = 1e-10\r\n # xi = lsq_linear(G, rhs, (0, np.inf), max_iter=100, tol=tol).x\r\n # self.vci_solver = 'scipy.optimize.lsq_linear'\r\n\r\n # print(f'xi solve time = {default_timer()-start_time} s')\r\n\r\n quadWeights += xi\r\n\r\n for iQ, quad in enumerate(quads):\r\n quadWeight = quadWeights[iQ]\r\n (inds, phis, gradphis) = store[iQ]\r\n for alpha, i in enumerate(inds):\r\n disp = quad - self.nodes[i]\r\n self.rNew[i,:,0] -= gradphis[alpha] * quadWeight\r\n self.rNew[i,0,1] -= phis[alpha] * quadWeight\r\n self.rNew[i,1,2] -= phis[alpha] * quadWeight\r\n self.rNew[i,:,1:3] -= np.outer(gradphis[alpha], disp) * quadWeight\r\n self.u_weights[i] += quadWeight * np.abs(phis[alpha])\r\n if i < 0:\r\n continue # move to next i if boundary node\r\n\r\n if f is not None:\r\n self.b[i] += quadWeight * f(quad) * phis[alpha]\r\n for beta, j in enumerate(inds):\r\n if j < 0: # j is boundary node\r\n ##### Not sure if this can/should always be uncommmented? #####\r\n ##### Needed for projection; but does it affect Poisson/CD #####\r\n # self.b[i] -= quadWeight * (\r\n # phis[alpha] * phis[beta] )\r\n self.b[i] -= quadWeight * (\r\n (gradphis[alpha] @ self.velocity) * phis[beta] +\r\n (gradphis[alpha] @ (self.diffusivity @ gradphis[beta])) )\r\n else: # i and j are both interior\r\n if not massLumping:\r\n Mdata[index] = quadWeight * phis[alpha] * phis[beta]\r\n Adata[index] = quadWeight * (gradphis[alpha] @ self.velocity) * phis[beta]\r\n Kdata[index] = quadWeight * (gradphis[alpha] @ (self.diffusivity @ gradphis[beta]))\r\n row_ind[index] = i\r\n col_ind[index] = j\r\n index += 1\r\n\r\n self.gradphiSumsOld = self.rOld[:,:,0]\r\n self.gradphiSumsNew = self.rNew[:,:,0]\r\n\r\n self.K = sp.csr_matrix( (Kdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n self.A = sp.csr_matrix( (Adata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n if massLumping:\r\n self.M = sp.diags(self.u_weights, format='csr')\r\n else:\r\n self.M = sp.csr_matrix( (Mdata, (row_ind, col_ind)),\r\n shape=(nDoFs, nDoFs) )\r\n\r\n def initializeTimeIntegrator(self, integrator, dt, P='ilu', **kwargs):\r\n \"\"\"Initialize and register the time integration scheme to be used.\r\n\r\n Parameters\r\n ----------\r\n integrator : {Integrator (object or subclass type), string}\r\n Integrator object or string specifiying which scheme is to be used.\r\n If a string, must be one of 'LowStorageRK' ('RK' or 'LSRK'),\r\n 'BackwardEuler' ('BE'), or 'CrankNicolson' ('CN').\r\n dt : float\r\n Time interval between each successive timestep.\r\n P : {string, scipy.sparse.linalg.LinearOperator, None}, optional\r\n Which preconditioning method to use. P can be a LinearOperator to\r\n directly specifiy the preconditioner to be used. Otherwise it must\r\n be one of 'jacobi', 'ilu', or None. The default is 'ilu'.\r\n **kwargs\r\n Used to specify optional arguments for the time integrator.\r\n Will be passed to scipy.sparse.linalg.spilu if 'ilu' is used, or\r\n can be used to specify betas for LowStorageRK schemes.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n if isinstance(integrator, integrators.Integrator):\r\n self.integrator = integrator\r\n return\r\n if isinstance(integrator, str):\r\n if integrator.lower() in ('backwardeuler', 'be'):\r\n Type = integrators.BackwardEuler\r\n elif integrator.lower() in ('cranknicolson', 'cn'):\r\n Type = integrators.CrankNicolson\r\n elif integrator.lower() in ('lowstoragerk', 'rk', 'lsrk'):\r\n Type = integrators.LowStorageRK\r\n else: # if integrator not an Integrator object or string, assume it's a type\r\n Type = integrator\r\n # Instantiate and store the integrator object\r\n try:\r\n self.integrator = Type(self, self.A - self.K, self.M, dt, P, **kwargs)\r\n except:\r\n raise SystemExit(\"Unable to instantiate integrator of type \"\r\n f\"{repr(Type)}. Should be a string containing one of \"\r\n \"'LowStorageRK' ('RK' or 'LSRK'), CrankNicolson ('CN'), or \"\r\n \"'BackwardEuler' ('BE'), a type derived from \"\r\n \"integrators.Integrator, or an object of such a type.\")\r\n\r\n def step(self, nSteps = 1, **kwargs):\r\n \"\"\"Advance the simulation a given number of timesteps.\r\n\r\n Parameters\r\n ----------\r\n nSteps : int, optional\r\n Number of timesteps to compute. The default is 1.\r\n **kwargs\r\n Used to specify optional arguments passed to the linear solver.\r\n Note that kwargs[\"M\"] will be overwritten, instead use\r\n self.precondition(...) to generate or specify a preconditioner.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.integrator.step(nSteps, **kwargs)\r\n\r\n def generatePlottingPoints(self, nx=1, ny=1):\r\n \"\"\"Generate set of interpolation points to use for plotting.\r\n\r\n Parameters\r\n ----------\r\n nx : int, optional\r\n Number of points per grid division in the x-direction.\r\n The default is 1.\r\n ny : int, optional\r\n Number of points per grid division in the y-direction.\r\n The default is 1.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n NX = self.NX\r\n NY = self.NY\r\n nPointsPerPlane = nx*(NY*ny + 1)\r\n nPointsTotal = nPointsPerPlane*NX + NY*ny + 1\r\n self.phiPlot = np.empty((nPointsTotal, 4))\r\n self.indPlot = np.empty((nPointsTotal, 4), dtype='int')\r\n self.X = np.empty(0)\r\n\r\n if self.BC.name == 'periodic':\r\n self.uPlot = self.u\r\n else:\r\n self.uPlot = np.concatenate((self.u, [1.]))\r\n\r\n for iPlane in range(NX):\r\n # dx = self.dx[iPlane]\r\n points = np.indices((nx, NY*ny + 1), dtype='float') \\\r\n .reshape(self.ndim, -1).T * [self.dx[iPlane]/nx, 1/(NY*ny)]\r\n points[:,0] += self.nodeX[iPlane]\r\n self.X = np.append(self.X, points[:,0])\r\n for iP, point in enumerate(points):\r\n phis, _, inds = self.BC(point, iPlane)\r\n self.phiPlot[iPlane*nPointsPerPlane + iP] = phis\r\n inds[inds < 0] = -1\r\n self.indPlot[iPlane*nPointsPerPlane + iP] = inds\r\n # Deal with right boundary\r\n points = np.hstack((np.full((NY*ny + 1, 1), self.xmax), points[0:NY*ny + 1,1:2]))\r\n for iP, point in enumerate(points):\r\n phis, _, inds = self.BC(point, iPlane)\r\n self.phiPlot[NX*nPointsPerPlane + iP] = phis\r\n inds[inds < 0] = -1\r\n self.indPlot[NX*nPointsPerPlane + iP] = inds\r\n\r\n self.X = np.append(self.X, np.full(NY*ny+1, self.xmax))\r\n self.Y = np.tile(points[0:NY*ny + 1,1], NX*nx + 1)\r\n self.U = np.sum(self.phiPlot * self.uPlot[self.indPlot], axis=1)\r\n\r\n def computePlottingSolution(self):\r\n \"\"\"Compute interpolated solution at the plotting points.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n self.uPlot[0:self.nDoFs] = self.u\r\n self.U = np.sum(self.phiPlot * self.uPlot[self.indPlot], axis=1)\r\n","repo_name":"sam-maloney/FCIFEM","sub_path":"fcifem.py","file_name":"fcifem.py","file_ext":"py","file_size_in_byte":53749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4023366245","text":"#Faça um programa que peça dois números, base e expoente, calcule e mostre o primeiro número elevado ao segundo número. Não utilize a função de potência da linguagem.\n\n#Resultado com função pow.\n'''from math import pow\nbase = int(input('Enter base to potentiation: '))\nexp = int(input('Enter exponentiation to potentiation: '))\nr = pow(base,exp)\nprint(f'Result: {r}')'''\n\n#Resultado com While\nwhile True:\n\tbase = int(input('Enter base to potentiation: '))\n\texp = int(input('Enter exponentiation to potentiation: '))\n\tr = base**exp\n\tif exp < 0:\n\t\tr = (1/base)**exp\n\tprint(f'{r}')\n\toption = ' '\n\twhile option not in 'YN':\n\t\toption=str(input('CONTINUE? [Y/N]')).strip().upper()\n\tif option == 'N':\n\t\tbreak\nprint('thanks!')\n\n\n\n\n\n\n\n\n","repo_name":"lilnext01/python-algorithms","sub_path":"python_progressive/loop for_while/exe13.py","file_name":"exe13.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10053428778","text":"import json\nimport random\nimport re\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Tag, NavigableString\nfrom django.core.management.base import BaseCommand\nfrom django.template.defaultfilters import slugify\nfrom apps.cms.models import PublicationTerm, Publication, Post, Term\nfrom apps.media.models import Media\n\n\ndef get_field(title, genera, data, f):\n if data.get(f) is not None and len(data.get(f)) > 1:\n return random.choice(data.get(f)).replace(\"{{title}}\", title).replace(\"{{genera}}\", genera)\n return None\n\n\ndef clean_origin(ori):\n return ori.replace(\" – \", \"\").strip()\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n family = \"Araceae\"\n url = \"https://en.wikipedia.org/wiki/List_of_Araceae_genera\"\n selector = \"#mw-content-text > div.mw-parser-output > table > tbody > tr\"\n default_meta = {\n \"soil\": \"mix\",\n \"light\": \"2\",\n \"humidity\": \"2\",\n \"toxicity\": True,\n \"watering\": \"2\",\n \"fertilizing\": \"2\",\n \"propagation\": [\"cuttings\", \"division\", \"offsets\"],\n \"temperature\": \"2\"\n }\n\n family_instance = PublicationTerm.objects.filter(term__slug=slugify(family), taxonomy=\"family\").first()\n pub = Publication.objects.get(pk=7)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, features=\"html.parser\")\n elms = soup.select(selector)\n for elm in elms:\n # title = elm.find(text=True).title()\n title = None\n url = None\n\n if len(elm.select(\"td\")) > 0:\n title = elm.select(\"td\")[2].select(\"a\")[0].find(text=True).title()\n if \"/wiki/\" in elm.select(\"td\")[2].select(\"a\")[0].get(\"href\"):\n url = \"https://en.wikipedia.org/api/rest_v1/page/summary/\" + elm.select(\"td\")[2].select(\"a\")[0].get(\"href\").replace(\"/wiki/\", \"\")\n print(title)\n print(url)\n if title and url:\n media_url = None\n if elm.find(\"a\") and \"/wiki/\" in elm.find(\"a\").get(\"href\"):\n r2 = requests.get(url)\n r2_data = r2.json()\n description = r2_data.get(\"extract\")[:400]\n if r2_data.get(\"originalimage\"):\n media_url = r2_data.get(\"originalimage\").get(\"source\")\n term, is_created = Term.objects.get_or_create(slug=slugify(title), defaults={\n \"title\": title\n })\n term_pub, is_created = PublicationTerm.objects.get_or_create(\n publication=pub,\n term=term,\n taxonomy=\"family\",\n defaults={\n \"show_cms\": media_url is not None\n }\n )\n if media_url and term_pub.media is None:\n try:\n term_pub.media = Media.objects.save_url(media_url)\n except Exception as e:\n print(e)\n term_pub.description = description\n term_pub.meta = default_meta\n\n family_related = list(family_instance.related.all()) + [family_instance]\n old_related = term_pub.related.all()\n for related in family_related:\n if related not in old_related:\n term_pub.related.add(related)\n term_pub.save()\n","repo_name":"hoanganhlam/django_cms","sub_path":"apps/cms/management/commands/wiki_sub_family.py","file_name":"wiki_sub_family.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16326046274","text":"import os\nimport xbmc\nimport urllib\nimport sys\n\n# Use json instead of simplejson when python v2.7 or greater\nif sys.version_info < (2, 7):\n import simplejson\nelse:\n import json as simplejson\n\n### import libraries\nfrom lib.utils import log\n\n### get datalist from the unique media item\n# Retrieve JSON list\ndef _media_unique(media_type, dbid):\n log('Using JSON for retrieving %s info' %media_type)\n Medialist = []\n \n if media_type == 'movie':\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMovieDetails\", \"params\": {\"properties\": [\"file\", \"imdbnumber\", \"year\", \"trailer\", \"streamdetails\", \"art\"], \"movieid\":%s }, \"id\": 1}' %dbid)\n json_query = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_query)\n if jsonobject.has_key('result') and jsonobject['result'].has_key('moviedetails'):\n item = jsonobject['result']['moviedetails']\n # disctype = media_disctype(item.get('file','').encode('utf-8').lower(),\n # item['streamdetails']['video'])\n streamdetails = item['streamdetails']['video']\n Medialist.append({'dbid': item.get('movieid',''),\n 'id': item.get('imdbnumber',''),\n 'name': item.get('label',''),\n 'year': item.get('year',''),\n 'file': item.get('file',''),\n 'path': media_path(item.get('file','')),\n 'trailer': item.get('trailer',''),\n # 'disctype': disctype,\n 'art' : item.get('art',''),\n 'mediatype': media_type})\n\n else:\n log('No JSON results found')\n return Medialist\n\ndef _media_listing(media_type):\n log('Using JSON for retrieving %s info' %media_type)\n Medialist = []\n \n if media_type == 'movie':\n json_query = xbmc.executeJSONRPC('{\"jsonrpc\": \"2.0\", \"method\": \"VideoLibrary.GetMovies\", \"params\": {\"properties\": [\"file\", \"imdbnumber\", \"year\", \"trailer\", \"streamdetails\", \"art\"], \"sort\": { \"method\": \"label\" } }, \"id\": 1}')\n json_query = unicode(json_query, 'utf-8', errors='ignore')\n jsonobject = simplejson.loads(json_query)\n if jsonobject.has_key('result') and jsonobject['result'].has_key('movies'):\n for item in jsonobject['result']['movies']:\n # disctype = media_disctype(item.get('file','').encode('utf-8').lower(),\n # item['streamdetails']['video'])\n Medialist.append({'dbid': item.get('movieid',''),\n 'id': item.get('imdbnumber',''),\n 'name': item.get('label',''),\n 'year': item.get('year',''),\n 'file': item.get('file',''),\n 'path': media_path(item.get('file','')),\n 'trailer': item.get('trailer',''),\n # 'disctype': disctype,\n 'art' : item.get('art',''),\n 'mediatype': media_type})\n\n else:\n log('No JSON results found')\n return Medialist\n\ndef media_disctype(filename, streamdetails):\n if (('dvd') in filename and not any(x in filename for x in ['hddvd', 'hd-dvd']) or (filename.endswith('.vob') or filename.endswith('.ifo'))):\n disctype = 'dvd'\n elif ('3d' in filename and not 'ac3d' in filename):\n disctype = '3d'\n elif any(x in filename for x in ['bluray', 'blu-ray', 'brrip', 'bdrip']):\n disctype = 'bluray'\n elif streamdetails:\n videowidth = streamdetails[0]['width']\n videoheight = streamdetails[0]['height']\n if videowidth <= 720 and videoheight <= 480:\n disctype = 'dvd'\n else:\n disctype = 'bluray'\n else:\n disctype = 'n/a'\n return disctype\n\ndef media_path(path):\n # Check for stacked movies\n try:\n path = os.path.split(path)[0].rsplit(' , ', 1)[1].replace(\",,\",\",\")\n except:\n path = os.path.split(path)[0]\n # Fixes problems with rared movies and multipath\n if path.startswith(\"rar://\"):\n path = [os.path.split(urllib.url2pathname(path.replace(\"rar://\",\"\")))[0]]\n elif path.startswith(\"multipath://\"):\n temp_path = path.replace(\"multipath://\",\"\").split('%2f/')\n path = []\n for item in temp_path:\n path.append(urllib.url2pathname(item))\n else:\n path = [path]\n return path\n","repo_name":"hablaras/script.artwork.loader","sub_path":"lib/media_setup.py","file_name":"media_setup.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"70668259576","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtility functions to help with range handling.\n\n.. versionadded:: 0.1.0\n\"\"\"\n\nimport re\n\nMAX_NUMBER_ITEMS = 5000\n\n\ndef parse_range_header(range):\n \"\"\"\n Parse a range header as used by the dojo Json Rest store.\n\n :param str range: The content of the range header to be parsed. \n eg. `items=0-9`\n :returns: A dict with keys start, finish and number or `False` if the\n range is invalid.\n \"\"\"\n match = re.match('^items=([0-9]+)-([0-9]+)$', range)\n\n if match:\n start = int(match.group(1))\n finish = int(match.group(2))\n\n if finish < start:\n finish = start\n return {\n 'start': start,\n 'finish': finish,\n 'count': finish - start + 1\n }\n else:\n return False\n\n\ndef range_return(request, items):\n \"\"\"\n Determine what range of objects to return.\n\n Will check fot both `Range` and `X-Range` headers in the request and\n set both `Content-Range` and 'X-Content-Range' headers.\n\n :rtype: list\n \"\"\"\n if ('Range' in request.headers):\n range = parse_range_header(request.headers['Range'])\n elif 'X-Range' in request.headers:\n range = parse_range_header(request.headers['X-Range'])\n else:\n range = {\n 'start': 0,\n 'finish': MAX_NUMBER_ITEMS - 1,\n 'count': MAX_NUMBER_ITEMS\n }\n filtered = items[range['start']:range['finish'] + 1]\n if len(filtered) < range['count']:\n # Something was stripped, deal with it\n range['count'] = len(filtered)\n range['finish'] = range['start'] + range['count'] - 1\n if range['finish'] - range['start'] + 1 >= MAX_NUMBER_ITEMS:\n range['finish'] = range['start'] + MAX_NUMBER_ITEMS - 1\n filtered = items[range['start']:range['finish'] + 1]\n\n request.response.headers['Content-Range'] = 'items %d-%d/%d' % (range['start'], range['finish'], len(items))\n request.response.headers['X-Content-Range'] = request.response.headers['Content-Range']\n return filtered\n\n\ndef set_http_caching(request, gateway='crab', region='permanent'):\n \"\"\"\n Set an HTTP Cache Control header on a request.\n\n :param pyramid.request.Request request: Request to set headers on.\n :param str gateway: What gateway are we caching for? Defaults to `crab`.\n :param str region: What caching region to use? Defaults to `permanent`.\n :rtype: pyramid.request.Request\n \"\"\"\n crabpy_exp = request.registry.settings.get('crabpy.%s.cache_config.%s.expiration_time' % (gateway, region), None)\n if crabpy_exp is None:\n return request\n ctime = int(int(crabpy_exp) * 1.05)\n request.response.cache_expires(ctime, public=True)\n return request\n","repo_name":"OnroerendErfgoed/crabpy_pyramid","sub_path":"crabpy_pyramid/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"15086375827","text":"# -*- coding: utf-8 -*-\n__author__ = \"Rubén Mulero\"\n\n# Created by: PyQt5 UI code generator 5.5\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\nfrom src.packControladoras import CMisTags\nfrom src.packVistas import IU_GESTIONAR_TAG\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(496, 531)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.listTagsUsuario = QtWidgets.QListWidget(self.centralwidget)\n self.listTagsUsuario.setObjectName(\"listTagsUsuario\")\n self.verticalLayout.addWidget(self.listTagsUsuario)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n spacerItem = QtWidgets.QSpacerItem(90, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem)\n self.bModificar_tag = QtWidgets.QPushButton(self.centralwidget)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"plasma-next-icons/Breeze/actions/toolbar/configure-shortcuts.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.bModificar_tag.setIcon(icon)\n self.bModificar_tag.setObjectName(\"bModificar_tag\")\n self.horizontalLayout_2.addWidget(self.bModificar_tag)\n spacerItem1 = QtWidgets.QSpacerItem(50, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem1)\n self.bEliminar_tag = QtWidgets.QPushButton(self.centralwidget)\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\"plasma-next-icons/Breeze/actions/toolbar/list-remove.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.bEliminar_tag.setIcon(icon1)\n self.bEliminar_tag.setObjectName(\"bEliminar_tag\")\n self.horizontalLayout_2.addWidget(self.bEliminar_tag)\n spacerItem2 = QtWidgets.QSpacerItem(90, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem2)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.verticalLayout.addItem(spacerItem3)\n self.groupBox = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setBold(False)\n font.setWeight(50)\n self.groupBox.setFont(font)\n self.groupBox.setObjectName(\"groupBox\")\n self.formLayout = QtWidgets.QFormLayout(self.groupBox)\n self.formLayout.setObjectName(\"formLayout\")\n self.label = QtWidgets.QLabel(self.groupBox)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)\n self.lNombreTag = QtWidgets.QLabel(self.groupBox)\n self.lNombreTag.setText(\"\")\n self.lNombreTag.setObjectName(\"lNombreTag\")\n self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lNombreTag)\n self.label_3 = QtWidgets.QLabel(self.groupBox)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.label_3.setFont(font)\n self.label_3.setObjectName(\"label_3\")\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)\n self.lScripts = QtWidgets.QLabel(self.groupBox)\n self.lScripts.setText(\"\")\n self.lScripts.setObjectName(\"lScripts\")\n self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lScripts)\n self.label_5 = QtWidgets.QLabel(self.groupBox)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.label_5.setFont(font)\n self.label_5.setObjectName(\"label_5\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_5)\n self.lFecha = QtWidgets.QLabel(self.groupBox)\n self.lFecha.setText(\"\")\n self.lFecha.setObjectName(\"lFecha\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lFecha)\n self.label_2 = QtWidgets.QLabel(self.groupBox)\n font = QtGui.QFont()\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setObjectName(\"label_2\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)\n self.lDescripcion = QtWidgets.QLabel(self.groupBox)\n self.lDescripcion.setText(\"\")\n self.lDescripcion.setObjectName(\"lDescripcion\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lDescripcion)\n self.verticalLayout.addWidget(self.groupBox)\n spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.verticalLayout.addItem(spacerItem4)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n spacerItem5 = QtWidgets.QSpacerItem(170, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem5)\n self.bCerrar = QtWidgets.QPushButton(self.centralwidget)\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\"plasma-next-icons/Breeze/actions/toolbar/dialog-close.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.bCerrar.setIcon(icon2)\n self.bCerrar.setObjectName(\"bCerrar\")\n self.horizontalLayout.addWidget(self.bCerrar)\n spacerItem6 = QtWidgets.QSpacerItem(170, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem6)\n self.verticalLayout.addLayout(self.horizontalLayout)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 496, 29))\n self.menubar.setObjectName(\"menubar\")\n self.menuNuevo = QtWidgets.QMenu(self.menubar)\n self.menuNuevo.setObjectName(\"menuNuevo\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.actionCrear_tag = QtWidgets.QAction(MainWindow)\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\"plasma-next-icons/Breeze/actions/toolbar/list-add.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionCrear_tag.setIcon(icon3)\n self.actionCrear_tag.setObjectName(\"actionCrear_tag\")\n self.actionEditar_Tag = QtWidgets.QAction(MainWindow)\n self.actionEditar_Tag.setObjectName(\"actionEditar_Tag\")\n self.menuNuevo.addAction(self.actionCrear_tag)\n self.menubar.addAction(self.menuNuevo.menuAction())\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Mis Tags\"))\n MainWindow.setWhatsThis(_translate(\"MainWindow\", \"Ésta interfaz permite el manejo de los Tags personalizados del usuario. Desde aquí se pondrán crear, modificar y eliminar los Tags.\\n\"\n\"\\n\"\n\"Los Tags son agrupaciones de Scripts que permiten introducir de una vez una serie de Scripts determinados. Es una buena forma de ahorrar tiempo, cuando se tiene en mente qué Scripts se quieren añadir a un grupo determinado.\"))\n self.listTagsUsuario.setToolTip(_translate(\"MainWindow\", \"Listado de Tags del usuario\"))\n self.listTagsUsuario.setWhatsThis(_translate(\"MainWindow\", \"En ésta lista se visualizan los Tags que tiene el usuario creado actualmente. La estrella indica que es un tag para ayudar a la futura identificación en otras áreas de la aplicación.\\n\"\n\"\\n\"\n\"Al pulsar un elemento, se habilitarán los botones de Modificar y Eliminar Tag para poder hacer operaciones sobre éstos.\\n\"\n\"\\n\"\n\"Si se desea crear un nuevo Tag, se debe haccer uso del menu \\\"Nuevo\\\"\"))\n self.bModificar_tag.setToolTip(_translate(\"MainWindow\", \"Modificar el Tag seleccionado\"))\n self.bModificar_tag.setText(_translate(\"MainWindow\", \"&Modificar Tag\"))\n self.bEliminar_tag.setToolTip(_translate(\"MainWindow\", \"Eliminar el Tag seleccionado\"))\n self.bEliminar_tag.setText(_translate(\"MainWindow\", \"&Eliminar Tag\"))\n self.groupBox.setToolTip(_translate(\"MainWindow\", \"Información avanzada del Tag actualmente seleccionado.\"))\n self.groupBox.setWhatsThis(_translate(\"MainWindow\", \"Contiene la información relacionada sobre el Tag que está seleccionado actualmente. Dicha información consiste en:\\n\"\n\"\\n\"\n\"--> Nombre Tag: El nombre del Tag\\n\"\n\"--> Scripts: Qué Scripts conforman el tag actualmente seleccionado. (Vista rápida)\\n\"\n\"--> Fecha Creación: La fecha de creación del Tag. (NO DE SUS FUTURAS MODIFICACIONES)\"))\n self.groupBox.setTitle(_translate(\"MainWindow\", \"Información Avanzada\"))\n self.label.setText(_translate(\"MainWindow\", \"- Nombre Tag:\"))\n self.label_3.setText(_translate(\"MainWindow\", \"- Scripts:\"))\n self.label_5.setText(_translate(\"MainWindow\", \"- Fecha Creación:\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Descripción\"))\n self.bCerrar.setText(_translate(\"MainWindow\", \"Cerrar\"))\n self.menuNuevo.setTitle(_translate(\"MainWindow\", \"&Nuevo\"))\n self.actionCrear_tag.setText(_translate(\"MainWindow\", \"&Crear Tag\"))\n self.actionEditar_Tag.setText(_translate(\"MainWindow\", \"&Editar Tag\"))\n\nclass MisTags(QtWidgets.QMainWindow):\n # Definimos el constructor de la clase principal\n def __init__(self, p_id_usuario, parent=None):\n # Llamamos al constructor de la clase padre\n super(MisTags, self).__init__(parent)\n\n # Instancio la Interfaz\n self.ventana = Ui_MainWindow()\n self.ventana.setupUi(self)\n self.move(QtWidgets.QDesktopWidget().availableGeometry().center() - self.frameGeometry().center())\n self.setWindowIcon(QtGui.QIcon('logo/Akeko_logo.png'))\n\n self.id_usuario = p_id_usuario\n self.controlador_mis_tags = CMisTags.CMisTags()\n\n # Cargamos la lista de datos\n self.cargar_datos()\n\n # Deshabilitar algunos botones inicialmente\n self.ventana.bModificar_tag.setDisabled(True)\n self.ventana.bEliminar_tag.setDisabled(True)\n\n # Creamos las ventanas\n self.window_crear_tag = None\n self.window_modificar_tag = None\n\n # Programamos las conexiones\n self.ventana.bCerrar.clicked.connect(self.close)\n self.ventana.actionCrear_tag.triggered.connect(self.crear_tag)\n self.ventana.listTagsUsuario.clicked.connect(self.imprimir_informacion)\n self.ventana.listTagsUsuario.currentRowChanged.connect(self.habilitar_botones)\n self.ventana.bModificar_tag.clicked.connect(self.modificar_tag)\n self.ventana.bEliminar_tag.clicked.connect(self.borrar_tag)\n\n def cargar_datos(self):\n # Bloqueamos señales y limpiamos\n self.ventana.listTagsUsuario.blockSignals(True)\n self.ventana.listTagsUsuario.clear()\n\n # Obtenemos la lista de los scripts y cargamos la interfaz\n lista_tags = self.controlador_mis_tags.obtener_tags_usuario(self.id_usuario)\n lista_tags.cargar_lista_tag(self.ventana.listTagsUsuario)\n\n # Libreamos las señales\n self.ventana.listTagsUsuario.blockSignals(False)\n\n # Limpiamos la ventana de información\n self.ventana.lNombreTag.setText(\"\")\n self.ventana.lFecha.setText(\"\")\n self.ventana.lDescripcion.setText(\"\")\n self.ventana.lScripts.setText(\"\")\n\n def imprimir_informacion(self):\n \"\"\"\n Imprime la informaicón del Tag seleccionado\n\n :return:\n \"\"\"\n # Obtener los datos del item\n item_seleccionado = self.ventana.listTagsUsuario.currentItem()\n item_seleccionado_datos = item_seleccionado.data(QtCore.Qt.UserRole)\n tag = item_seleccionado_datos[1]\n self.ventana.lNombreTag.setText(tag.nombre_tag)\n self.ventana.lFecha.setText(tag.f_creacion)\n self.ventana.lDescripcion.setText(tag.descripcion)\n # Obtenemos los scripts que pertenecen al TAG\n lista_scripts_tag_seleccionado = self.controlador_mis_tags.obtener_scripts_tag(tag.id_tag)\n # Emviamos el elemento y seteamos los valores\n lista_scripts_tag_seleccionado.imprimir_elementos_iu(self.ventana.lScripts)\n\n def habilitar_botones(self):\n # Habilitamos los botones para poder manipular el TAG\n self.ventana.bModificar_tag.setDisabled(False)\n self.ventana.bEliminar_tag.setDisabled(False)\n\n def crear_tag(self):\n \"\"\"\n Abre la ventana para poder realizar la creación de un TAG\n\n :return:\n \"\"\"\n self.window_crear_tag = IU_GESTIONAR_TAG.GestionarTag(self.id_usuario, None, self)\n self.window_crear_tag.show()\n\n def modificar_tag(self):\n \"\"\"\n ABre la ventana para poder realizar la modificación de un TAG\n\n :return:\n \"\"\"\n # Obtener los datos del item\n item_seleccionado = self.ventana.listTagsUsuario.currentItem()\n if item_seleccionado is not None:\n item_seleccionado_datos = item_seleccionado.data(QtCore.Qt.UserRole)\n tag = item_seleccionado_datos[1]\n self.window_modificar_tag = IU_GESTIONAR_TAG.GestionarTag(self.id_usuario, tag, self)\n self.window_modificar_tag.show()\n else:\n warm_box_modif = QMessageBox()\n warm_box_modif.setIcon(2)\n warm_box_modif.setWindowTitle(\"Mis Tags\")\n warm_box_modif.setText(\"ADVERTENCIA\")\n warm_box_modif.setInformativeText(\"Selecciona al menos un Tag antes de intentar modificar algo.\")\n warm_box_modif.exec_()\n\n def borrar_tag(self):\n \"\"\"\n Dado un tag seleccionado. Borra el Tag del sistema y elimina el tag de los grupos donde haya sido aplicado\n\n :return:\n \"\"\"\n # Mostramos advertencia al usuario\n # Generamos un QmeesageBox para gestionar la pregunta al usuario\n warm_box = QMessageBox()\n warm_box.setIcon(2)\n warm_box.setWindowTitle(\"Borrado de un tag\")\n warm_box.setText(\"¡¡Atención!!\")\n warm_box.setInformativeText(\"¿Estás seguro que deseas eliminar el tag?\")\n warm_box.setDetailedText(\"Al eliminar un Tag no sólo éste será borrado del sistema. Si no que además \"\n \"todos los grupos en le que se haya aplicado el Tag estarán afectados por dicho \"\n \"borrado. El resultado será que los scripts aplicados por los Tags implicados serán \"\n \"removados automáticamente.\")\n # Creamos los botones de aceptar y cancelar.\n warm_box.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n warm_box.setDefaultButton(QMessageBox.Cancel)\n # Ejectuamos la interfaz y recogemos el resultado de la decisión\n seleccion = warm_box.exec_()\n if seleccion == QMessageBox.Ok:\n # Obtener los datos del item\n # todo solvertar pequeño error al eliminar un tag\n item_seleccionado = self.ventana.listTagsUsuario.currentItem()\n if item_seleccionado is not None:\n item_seleccionado_datos = item_seleccionado.data(QtCore.Qt.UserRole)\n tag = item_seleccionado_datos[1]\n resultado = self.controlador_mis_tags.eliminar_tag_usuario(tag.id_tag, self.id_usuario)\n if resultado:\n # El Tag se ha eliminado de forma exitosa\n info_box = QMessageBox()\n info_box.setIcon(1)\n info_box.setWindowTitle(\"Borrado de un tag\")\n info_box.setText(\"CORRECTO\")\n info_box.setInformativeText(\"El Tag se ha borrado satisfactoriamente\")\n info_box.exec_()\n self.cargar_datos()\n else:\n # Ha ocurrido algún error\n error_box = QMessageBox()\n error_box.setIcon(3)\n error_box.setWindowTitle(\"Borrado de un tag\")\n error_box.setText(\"Error\")\n error_box.setInformativeText(\"Algo ha pasado al eliminar el Tag\")\n error_box.exec_()\n else:\n warm_box_borrar = QMessageBox()\n warm_box_borrar.setIcon(2)\n warm_box_borrar.setWindowTitle(\"Mis Tags\")\n warm_box_borrar.setText(\"ADVERTENCIA\")\n warm_box_borrar.setInformativeText(\"Selecciona al menos un Tag antes de intentar modificar algo.\")\n warm_box_borrar.exec_()","repo_name":"rubenmulero/Akeko","sub_path":"Cliente/src/packVistas/IU_MIS_TAGS.py","file_name":"IU_MIS_TAGS.py","file_ext":"py","file_size_in_byte":17306,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35420293045","text":"import datetime\nimport json\nimport math\nimport sqlite3\n\nimport mf2py\nimport mf2util\nimport requests\nfrom flask import (Blueprint, abort, current_app, flash, jsonify, redirect,\n render_template, request, send_from_directory, session)\n\nfrom config import (ROOT_DIRECTORY, RSS_DIRECTORY, SHOW_SETUP, WEBHOOK_API_KEY,\n WEBHOOK_SERVER, WEBHOOK_URL)\n\nmain = Blueprint(\"main\", __name__, static_folder=\"static\", static_url_path=\"\")\n\n\ndef change_to_json(database_result):\n columns = [column[0] for column in database_result.description]\n\n result = [dict(zip(columns, row)) for row in database_result]\n\n return result\n\n\n@main.route(\"/\")\ndef index():\n if session.get(\"me\"):\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n page = request.args.get(\"page\")\n\n if page and int(page) > 1:\n offset = (int(page) - 1) * 10\n page = int(page)\n else:\n offset = 0\n page = 1\n\n sort_param = request.args.get(\"sort\")\n\n if sort_param == \"oldest\":\n sort_order = \"ASC\"\n else:\n sort_order = \"DESC\"\n\n cursor = connection.cursor()\n\n with connection:\n count = cursor.execute(\"SELECT COUNT(*) FROM webmentions\").fetchone()[0]\n webmentions = cursor.execute(\n \"\"\"\n SELECT source,\n target,\n received_date,\n contents,\n property,\n author_name\n FROM webmentions\n WHERE status = 'valid'\n ORDER BY received_date {}\n LIMIT 10 OFFSET ?;\"\"\".format(\n sort_order\n ),\n (offset,),\n ).fetchall()\n\n return render_template(\n \"dashboard/feed.html\",\n webmentions=webmentions,\n sent=False,\n received_count=count,\n page=int(page),\n page_count=math.ceil(int(count) / 10),\n base_results_query=\"/home\",\n title=\"Received Webmentions\",\n sort=sort_param,\n )\n\n return render_template(\n \"index.html\",\n title=f\"{current_app.config['ME'].strip().replace('https://', '').replace('http://', '')} Webmention Receiver Home\",\n )\n\n\n@main.route(\"/endpoint\", methods=[\"POST\"])\ndef receiver():\n # Process as www-form-encoded as per spec\n if request.content_type != \"application/x-www-form-urlencoded\":\n return jsonify({\"message\": \"Content type must be x-www-url-formencoded.\"}), 400\n\n # Use force to get data (result of experimentation)\n\n source = request.form.get(\"source\")\n target = request.form.get(\"target\")\n vouch = request.form.get(\"vouch\")\n code = request.form.get(\"code\", None)\n realm = request.form.get(\"realm\", None)\n\n token = \"\"\n\n if not (source.startswith(\"http://\") or source.startswith(\"https://\")) and (\n target.startswith(\"http://\") or target.startswith(\"https://\")\n ):\n return (\n jsonify(\n {\"message\": \"Source and target must use http:// or https:// protocols.\"}\n ),\n 400,\n )\n\n if source == target:\n return jsonify({\"message\": \"Source cannot be equal to target.\"}), 400\n\n # Make sure source and target are not identical when a trailing slash is removed from both\n if source.strip(\"/\") == target.strip(\"/\"):\n return jsonify({\"message\": \"Source cannot be equal to target.\"}), 400\n\n # a target must end with jamesg.blog to be considered valid for my endpoint\n # where jamesg.blog is the ME config variable in my case\n raw_domain = current_app.config[\"ME\"].split(\"/\")[2]\n target_domain = target.split(\"/\")[2]\n\n if not target_domain.endswith(raw_domain):\n return jsonify({\"message\": f\"Target must be a {raw_domain} resource.\"}), 400\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n # Process new webmentions from the same source\n # Ensures all webmentions from source X are updated when a new webmention is sent from that source\n\n already_sent_from_source = cursor.execute(\n \"SELECT source, target FROM webmentions WHERE source = ?\", (target,)\n ).fetchall()\n\n for a in already_sent_from_source:\n cursor.execute(\n \"\"\"INSERT INTO webmentions (\n source,\n target,\n received_date,\n status,\n contents,\n property,\n author_name,\n author_photo,\n author_url,\n content_html,\n vouch,\n approved_to_show,\n token\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\",\n (\n a[0],\n a[1],\n str(datetime.datetime.now()),\n \"validating\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n vouch,\n 0,\n token,\n ),\n )\n\n cursor.execute(\n \"DELETE FROM webmentions WHERE source = ? and target = ?\",\n (\n source,\n target,\n ),\n )\n\n # check if webmention has already been sent to see if a salmention needs to be processed\n\n already_sent = cursor.execute(\n \"SELECT source, target, content_html FROM webmentions WHERE source = ? and target = ?\",\n (\n source,\n target,\n ),\n ).fetchall()\n\n if already_sent:\n current_content_html = already_sent[0][2]\n\n parse = mf2py.Parser(url=source)\n\n parsed_h_entry = mf2util.interpret_comment(parse.to_dict(), source, target)\n\n if parsed_h_entry.get(\"content\"):\n content_html = parsed_h_entry[\"content\"]\n\n parsed_h_entry[\"content\"] = parsed_h_entry[\"content\"].replace(\n \"\"\n )\n else:\n content_html = None\n\n if content_html != current_content_html:\n cursor.execute(\n \"\"\"UPDATE webmentions SET\n content_html = ?\n WHERE source = ? and target = ?\n \"\"\",\n (\n content_html,\n source,\n target,\n ),\n )\n\n cursor.execute(\n \"\"\"INSERT INTO webmentions (\n source,\n target,\n received_date,\n status,\n contents,\n property\n ) VALUES (?, ?, ?, ?, ?, ?)\"\"\",\n (\n source,\n target,\n str(datetime.datetime.now()),\n \"validating\",\n \"\",\n \"\",\n ),\n )\n\n if WEBHOOK_SERVER:\n data = {\n \"message\": f\"You have received a webmention from {source} to {target}\"\n }\n\n headers = {\"Authorization\": f\"Bearer {WEBHOOK_API_KEY}\"}\n\n requests.post(WEBHOOK_URL, data=data, headers=headers)\n\n return jsonify({\"message\": \"Accepted.\"}), 202\n\n\n@main.route(\"/delete\", methods=[\"POST\"])\ndef delete_webmention():\n if request.method == \"POST\":\n target = request.form.get(\"target\")\n source = request.form.get(\"source\")\n\n if not target and not source:\n flash(\"Please provide a target and a source.\")\n return redirect(\"/\")\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n cursor.execute(\n \"DELETE FROM webmentions WHERE target = ? AND source = ?\",\n (target, source),\n )\n\n flash(f\"Webmention from {target} has been deleted.\")\n return redirect(\"/\")\n\n return abort(405)\n\n\n@main.route(\"/approve\", methods=[\"POST\"])\ndef approve_webmention():\n if request.method == \"POST\":\n target = request.form.get(\"target\")\n source = request.form.get(\"source\")\n status = request.form.get(\"status\")\n\n if not target and not source or status:\n flash(\"Please provide a target,a source, and a status.\")\n return redirect(\"/\")\n\n if status != \"hide\" and status != \"show\":\n flash(\"Status must be either hide or show.\")\n return redirect(\"/\")\n\n if status == \"hide\":\n show_value = 0\n else:\n show_value = 1\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n cursor.execute(\n \"\"\"UPDATE webmentions\n SET approved_to_show = ?\n WHERE target = ? AND source = ?\"\"\",\n (show_value, target, source),\n )\n\n flash(f\"Webmention from {target} has been approved.\")\n return redirect(\"/\")\n\n return abort(405)\n\n\n@main.route(\"/sent\")\ndef view_sent_webmentions_page():\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n page = request.args.get(\"page\")\n\n if page and page.isnumeric() and int(page) > 1:\n offset = int(page) * 10\n page = int(page)\n else:\n offset = 0\n page = 1\n\n sort_param = request.args.get(\"sort\")\n\n if sort_param == \"oldest\":\n sort_order = \"ASC\"\n else:\n sort_order = \"DESC\"\n\n with connection:\n cursor = connection.cursor()\n\n count = cursor.execute(\"SELECT COUNT(id) FROM sent_webmentions\").fetchone()[0]\n to_process = cursor.execute(\n f\"\"\"SELECT id,\n source,\n target,\n sent_date,\n status_code,\n response,\n webmention_endpoint,\n location_header\n FROM sent_webmentions\n ORDER BY sent_date {sort_order}\n LIMIT 10 OFFSET ?;\"\"\",\n (offset,),\n ).fetchall()\n\n for c in to_process:\n if c[7] and c[7] != \"\" and c[7] != None:\n r = requests.get(c[7])\n if r.status_code == 200:\n text = r.text\n else:\n text = f\"Error: {r.status_code}, {r.text}\"\n\n cursor.execute(\n \"\"\"\n UPDATE sent_webmentions\n SET response = ?\n AND location_header = ?\n WHERE source = ?\n AND target = ?\"\"\",\n (\n text,\n \"\",\n c[1],\n c[2],\n ),\n )\n\n with connection:\n cursor = connection.cursor()\n\n webmentions = cursor.execute(\n f\"\"\"SELECT id,\n source,\n target,\n sent_date,\n status_code,\n response,\n webmention_endpoint,\n location_header\n FROM sent_webmentions\n ORDER BY sent_date {sort_order}\n LIMIT 10 OFFSET ?;\"\"\",\n (offset,),\n ).fetchall()\n\n return render_template(\n \"dashboard/sent.html\",\n webmentions=webmentions,\n sent=True,\n page=int(page),\n page_count=int(int(count) / 10),\n base_results_query=\"/sent\",\n title=\"Your Sent Webmentions\",\n sort=sort_param,\n count=count,\n )\n\n\n@main.route(\"/sent/\")\ndef view_sent_webmention(webmention_id):\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n webmention = cursor.execute(\n \"SELECT * FROM sent_webmentions WHERE id = ?\", (webmention_id,)\n ).fetchone()\n\n if not webmention:\n abort(404)\n\n if webmention[7] and webmention[7] != \"\":\n r = requests.get(webmention[7])\n\n if r.status_code == 200:\n text = r.text\n else:\n text = f\"Error: {r.status_code}, {r.text}\"\n\n cursor.execute(\n \"\"\"\n UPDATE sent_webmentions\n SET response = ?\n AND location_header = ?\n WHERE source = ?\n AND target = ?\"\"\",\n (\n text,\n \"\",\n webmention[1],\n webmention[2],\n ),\n )\n\n webmention = cursor.execute(\n \"SELECT * FROM sent_webmentions WHERE id = ?\", (webmention_id,)\n ).fetchone()\n\n parsed_response = str(webmention[5]).replace(\"'\", '\"')\n\n try:\n final_parsed_response = json.loads(parsed_response)\n except:\n final_parsed_response = parsed_response\n\n if webmention:\n return render_template(\n \"dashboard/webmention.html\",\n webmention=webmention,\n title=f\"Webmention to {webmention[1]} Details\",\n response=final_parsed_response,\n )\n\n return abort(404)\n\n\n@main.route(\"/sent/json\")\ndef retrieve_sent_webmentions_json():\n target = request.args.get(\"target\")\n status = request.args.get(\"status\")\n key = request.args.get(\"key\")\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n get_key = cursor.execute(\n \"SELECT api_key FROM user WHERE api_key = ?\", (key,)\n ).fetchone()\n\n if ((get_key and len(get_key) == 0) and not session.get(\"me\")) or not get_key:\n return (\n jsonify(\n {\n \"message\": \"You must be authenticated to retrieve all sent webmentions.\"\n }\n ),\n 403,\n )\n\n if status == \"valid\":\n status = \"AND status = 'valid'\"\n elif status == \"invalid\":\n status = \"AND status = 'invalid'\"\n else:\n status = \"\"\n\n if not target:\n get_webmentions = cursor.execute(\"SELECT * FROM sent_webmentions;\")\n else:\n get_webmentions = cursor.execute(\n \"\"\"SELECT\n source,\n target,\n sent_date,\n status_code,\n response,\n webmention_endpoint\n FROM sent_webmentions\n WHERE target = ? {}\n ORDER BY sent_date ASC;\"\"\".format(\n status\n ),\n (target,),\n ).fetchall()\n\n result = change_to_json(get_webmentions)\n\n return jsonify(result), 200\n\n\n@main.route(\"/received\")\ndef retrieve_webmentions():\n target = request.args.get(\"target\").strip(\"/\") + \"/\"\n wm_property = request.args.get(\"property\")\n since = request.args.get(\"since\")\n key = request.args.get(\"key\")\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n cursor = connection.cursor()\n\n where_clause = \"\"\n\n if target:\n where_clause = \"WHERE target = ? AND status = 'valid'\"\n attributes = (target,)\n\n if wm_property:\n where_clause = \"WHERE property = ? AND status = 'valid'\"\n attributes = (wm_property,)\n\n if target and wm_property:\n where_clause = \"WHERE target = ? and property = ? AND status = 'valid'\"\n attributes = (\n target,\n wm_property,\n )\n\n if since:\n where_clause = where_clause + \" AND sent_date > ?\"\n attributes = attributes + (since,)\n\n get_key = cursor.execute(\n \"SELECT api_key FROM user WHERE api_key = ?\", (key,)\n ).fetchone()\n\n if not get_key and session.get(\"me\") and where_clause == \"\":\n return (\n jsonify(\n {\"message\": \"You must be authenticated to retrieve all webmentions.\"}\n ),\n 403,\n )\n\n if not target:\n get_webmentions = cursor.execute(\"SELECT * FROM webmentions;\")\n result = change_to_json(get_webmentions)\n\n count = cursor.execute(\n \"SELECT COUNT(source), property FROM webmentions GROUP BY property;\"\n ).fetchall()\n else:\n get_webmentions = cursor.execute(\n f\"SELECT * FROM webmentions {where_clause} ORDER BY received_date DESC;\",\n attributes,\n )\n result = change_to_json(get_webmentions)\n\n count = cursor.execute(\n f\"SELECT COUNT(source), property FROM webmentions {where_clause} GROUP BY property;\",\n attributes,\n ).fetchall()\n\n aggregate_count = 0\n\n parsed_counts = {}\n\n for item in count:\n aggregate_count += item[0]\n parsed_counts[item[1]] = item[0]\n\n response = jsonify(\n {\n \"count\": aggregate_count,\n \"count_by_property\": parsed_counts,\n \"webmentions\": result,\n }\n )\n\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n\n return response, 200\n\n\n@main.route(\"/webhook\", methods=[\"GET\", \"POST\"])\ndef webhook_check():\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n key = request.args.get(\"key\")\n\n with connection:\n cursor = connection.cursor()\n\n get_key = cursor.execute(\n \"SELECT api_key FROM user WHERE api_key = ?\", (key,)\n ).fetchone()\n\n if (get_key and len(get_key) == 0) or not get_key:\n return (\n jsonify(\n {\"message\": \"You must be authenticated to access this resource.\"}\n ),\n 403,\n )\n\n feed_url = request.args.get(\"url\")\n\n if not feed_url:\n return jsonify({\"message\": \"You must provide a url to check.\"}), 400\n\n check_if_queued = cursor.execute(\n \"SELECT * FROM pending_webmentions WHERE to_check = ?\", (feed_url,)\n ).fetchone()\n\n if check_if_queued:\n return (\n jsonify({\"message\": \"This url is already queued to be checked.\"}),\n 400,\n )\n\n cursor.execute(\n \"INSERT INTO pending_webmentions (to_check) VALUES (?);\", (feed_url,)\n )\n\n return jsonify({\"message\": \"URLs queued for processing.\"}), 202\n\n\n@main.route(\"/stats\")\ndef stats_page():\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n data_format = request.args.get(\"format\")\n\n with connection:\n cursor = connection.cursor()\n\n get_webmentions = cursor.execute(\n \"SELECT count(*) FROM webmentions;\"\n ).fetchone()[0]\n\n get_sent_webmentions = cursor.execute(\n \"SELECT count(*) FROM sent_webmentions;\"\n ).fetchone()[0]\n\n received_types = cursor.execute(\n \"SELECT property, count(*) FROM webmentions WHERE status = 'valid' GROUP BY property;\"\n ).fetchall()\n\n pending_webmention_count = cursor.execute(\n \"SELECT count(*) FROM webmentions WHERE status = 'validating';\"\n ).fetchone()[0]\n\n moderation_webmention_count = cursor.execute(\n \"SELECT count(*) FROM webmentions WHERE approved_to_show = 0;\"\n ).fetchone()[0]\n\n received_months = cursor.execute(\n \"SELECT strftime('%Y-%m', received_date) AS month, count(*) FROM webmentions WHERE status = 'valid' GROUP BY month;\"\n ).fetchall()\n\n received_years = cursor.execute(\n \"SELECT strftime('%Y', received_date) AS year, count(*) FROM webmentions WHERE status = 'valid' GROUP BY year;\"\n ).fetchall()\n\n if data_format == \"json\":\n return (\n jsonify(\n {\n \"webmentions\": get_webmentions,\n \"sent_webmentions\": get_sent_webmentions,\n \"received_types\": received_types,\n \"pending_webmention_count\": pending_webmention_count,\n \"moderation_webmention_count\": moderation_webmention_count,\n \"received_months\": received_months,\n \"received_years\": received_years,\n }\n ),\n 200,\n )\n\n return render_template(\n \"user/stats.html\",\n title=\"Webmention Statistics\",\n received_count=get_webmentions,\n sent_count=get_sent_webmentions,\n received_types=received_types,\n pending_webmention_count=pending_webmention_count,\n moderation_webmention_count=moderation_webmention_count,\n received_months=received_months,\n received_years=received_years,\n )\n\n\n@main.route(\"/rss\")\ndef rss():\n key = request.args.get(\"key\")\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n cursor = connection.cursor()\n\n get_key = cursor.execute(\n \"SELECT api_key FROM user WHERE api_key = ?\", (key,)\n ).fetchone()\n\n if ((get_key and len(get_key) == 0) and not session.get(\"me\")) or not get_key:\n return (\n jsonify(\n {\"message\": \"You must be authenticated to retrieve all webmentions.\"}\n ),\n 403,\n )\n\n return send_from_directory(RSS_DIRECTORY + \"/static/\", \"webmentions.xml\")\n\n\n@main.route(\"/vouch\", methods=[\"GET\", \"POST\"])\ndef see_vouch_list():\n if not session.get(\"me\"):\n return (\n jsonify({\"message\": \"You must be authenticated to use this resource.\"}),\n 403,\n )\n\n if request.method == \"POST\":\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n cursor = connection.cursor()\n\n with connection:\n domain = request.form.get(\"domain\")\n\n date_now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if not domain:\n flash(\"You must provide a domain to vouch for.\")\n return redirect(\"/vouch\")\n\n domain = domain.split(\"/\")[2]\n\n check_if_vouched = cursor.execute(\n \"SELECT * FROM vouch WHERE domain = ?\", (domain,)\n ).fetchone()\n\n if check_if_vouched:\n flash(\"The domain you specified is already in your vouch list.\")\n return redirect(\"/vouch\")\n\n cursor.execute(\n \"INSERT INTO vouch VALUES (?, ?);\",\n (\n domain,\n date_now,\n ),\n )\n\n flash(\"Vouch added to list.\")\n\n return redirect(\"/vouch\")\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n get_vouch_list = cursor.execute(\"SELECT * FROM vouch;\").fetchall()\n\n return render_template(\n \"dashboard/vouch.html\", vouches=get_vouch_list, title=\"Vouch List \"\n )\n\n\n@main.route(\"/vouch/delete\", methods=[\"POST\"])\ndef delete_vouch():\n if not session.get(\"me\"):\n return (\n jsonify({\"message\": \"You must be authenticated to use this resource.\"}),\n 403,\n )\n\n connection = sqlite3.connect(ROOT_DIRECTORY + \"/webmentions.db\")\n\n with connection:\n cursor = connection.cursor()\n\n domain = request.form.get(\"domain\")\n\n if not domain:\n flash(\"You must provide a vouch domain to delete.\")\n return redirect(\"/vouch\")\n\n cursor.execute(\"DELETE FROM vouch WHERE domain = ?\", (domain,))\n\n flash(\"Vouch deleted.\")\n return redirect(\"/vouch\")\n\n\n@main.route(\"/static/images/\")\ndef send_images(filename):\n return send_from_directory(RSS_DIRECTORY + \"/static/images/\", filename)\n\n\n@main.route(\"/static/icons/\")\ndef send_icons(filename):\n return send_from_directory(RSS_DIRECTORY + \"/static/icons/\", filename)\n\n\n@main.route(\"/robots.txt\")\ndef robots():\n return send_from_directory(main.static_folder, \"robots.txt\")\n\n\n@main.route(\"/manifest.json\")\ndef manifest_json_file():\n return send_from_directory(main.static_folder, \"manifest.json\")\n\n\n@main.route(\"/favicon.ico\")\ndef favicon():\n return send_from_directory(main.static_folder, \"favicon.ico\")\n\n\n@main.route(\"/setup\")\ndef setup_page():\n if SHOW_SETUP:\n return render_template(\"setup.html\", title=\"Setup Your Webmention Endpoint\")\n\n abort(404)\n","repo_name":"capjamesg/webmention-receiver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24805,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"36573294208","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport sys\n\nurl = str(sys.argv[1])\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.content, \"html.parser\")\n\npriser = []\nadresser = []\n\nfor pris in soup.find_all(class_=\"mt-4 sm:mt-8 text-12 text-gray-500 flex flex-col sm:block\"):\n tmp = pris.text.replace(\" \", \"\")[10:20]\n tmp = tmp.replace(u'\\xa0', u'')\n priser.append(tmp)\n\nfor adresse in soup.find_all(\"span\", {\"class\" : \"text-14 text-gray-500\"}):\n tmp = adresse.text.split(\",\")\n adresser.append(tmp[0])\n\ndel adresser[0]\n\nprint(\"Har funnet: \" + str(len(priser)) + \" boligannonser.\")\nprint(\"Ser etter boligannonser vi allerede har lagt til...\")\n\nwith open('boligpriser.csv', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n lestAdresse = row[1:]\n \n if lestAdresse[0] in adresser:\n index = adresser.index(lestAdresse[0])\n adresser.remove(lestAdresse[0])\n del priser[index]\n\nif len(priser) == 0:\n print(\"Ingen priser igjen å legge til.\")\nelse:\n print(\"legger til \" + str(len(priser)) + \" boligannonser.\")\n i = 0\n with open(\"boligpriser.csv\", \"a\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow([\"Pris\", \"Adresse\"])\n for pris in priser:\n writer.writerow([pris, adresser[i]])\n i = i + 1\n\n","repo_name":"SteinTokvam/finn-prisjekk","sub_path":"finn.py","file_name":"finn.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29858728709","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom schematics.common import *\nfrom schematics.models import Model\nfrom schematics.types import StringType, LongType, IntType, MD5Type\nfrom schematics.types.compound import ModelType, DictType, ListType\nfrom schematics.types.serializable import serializable\nfrom schematics.transforms import blacklist, whitelist, wholelist, export_loop\n\n\ndef test_serializable():\n class Location(Model):\n country_code = StringType()\n\n @serializable\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n location_US = Location({\"country_code\": \"US\"})\n\n assert location_US.country_name == \"United States\"\n\n d = location_US.serialize()\n assert d == {\"country_code\": \"US\", \"country_name\": \"United States\"}\n\n d = location_US.to_native()\n assert d == {\"country_code\": u\"US\", \"country_name\": \"United States\"}\n\n location_IS = Location({\"country_code\": \"IS\"})\n\n assert location_IS.country_name == \"Unknown\"\n\n d = location_IS.serialize()\n assert d == {\"country_code\": \"IS\", \"country_name\": \"Unknown\"}\n\n d = location_IS.to_native()\n assert d == {\"country_code\": \"IS\", \"country_name\": \"Unknown\"}\n\n\ndef test_serializable_to_native():\n class Location(Model):\n country_code = StringType()\n\n @serializable\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n loc = Location({'country_code': 'US'})\n\n d = loc.to_native()\n assert d == {'country_code': 'US', 'country_name': 'United States'}\n\n\ndef test_serializable_with_serializable_name():\n class Location(Model):\n country_code = StringType(serialized_name=\"cc\")\n\n @serializable(serialized_name=\"cn\")\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n location_US = Location({\"cc\": \"US\"})\n\n assert location_US.country_name == \"United States\"\n\n d = location_US.serialize()\n assert d == {\"cc\": \"US\", \"cn\": \"United States\"}\n\n\nclass PlayerIdType(LongType):\n def to_primitive(self, value, context=None):\n return str(value)\n\n\ndef test_serializable_with_custom_serializable_class():\n\n class Player(Model):\n id = LongType()\n\n @serializable(type=PlayerIdType())\n def player_id(self):\n return self.id\n\n player = Player({\"id\": 1})\n\n assert player.id == 1\n assert player.player_id == 1\n\n d = player.serialize()\n assert d == {\"id\": 1, \"player_id\": \"1\"}\n\n\ndef test_serializable_with_type_as_positional_argument():\n\n class Player(Model):\n id = LongType()\n\n @serializable(PlayerIdType)\n def player_id(self):\n return self.id\n\n player = Player({\"id\": 1})\n\n assert player.id == 1\n assert player.player_id == 1\n\n d = player.serialize()\n assert d == {\"id\": 1, \"player_id\": \"1\"}\n\n\ndef test_serializable_with_type_and_options():\n\n class Player(Model):\n id = LongType()\n\n @serializable(PlayerIdType(), serialized_name='playerId')\n def player_id(self):\n return self.id\n\n player = Player({\"id\": 1})\n\n assert player.id == 1\n assert player.player_id == 1\n\n d = player.serialize()\n assert d == {\"id\": 1, \"playerId\": \"1\"}\n\n\ndef test_serializable_with_model():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Player(Model):\n total_points = IntType()\n\n @serializable(type=ModelType(ExperienceLevel))\n def xp_level(self):\n return ExperienceLevel(dict(level=self.total_points * 2, title=\"Best\"))\n\n player = Player({\"total_points\": 2})\n\n assert player.xp_level.level == 4\n\n d = player.serialize()\n assert d == {\"total_points\": 2, \"xp_level\": {\"level\": 4, \"title\": \"Best\"}}\n\n\ndef test_serializable_with_model_to_native():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Player(Model):\n total_points = IntType()\n\n @serializable(type=ModelType(ExperienceLevel))\n def xp_level(self):\n return ExperienceLevel(dict(level=self.total_points * 2, title=\"Best\"))\n\n player = Player({\"total_points\": 2})\n\n assert player.xp_level.level == 4\n\n d = player.to_native()\n assert d == {\"total_points\": 2, \"xp_level\": {\"level\": 4, \"title\": \"Best\"}}\n\n\ndef test_serializable_with_model_when_None():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Player(Model):\n total_points = IntType()\n\n @serializable(type=ModelType(ExperienceLevel))\n def xp_level(self):\n return None if not self.total_points else ExperienceLevel()\n\n player = Player({\"total_points\": 0})\n\n assert player.xp_level is None\n\n d = player.serialize()\n assert d == {\"total_points\": 0, \"xp_level\": None}\n\n\ndef test_serializable_with_model_hide_None():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Player(Model):\n total_points = IntType()\n\n @serializable(type=ModelType(ExperienceLevel), serialize_when_none=False)\n def xp_level(self):\n return None if not self.total_points else ExperienceLevel()\n\n player = Player({\"total_points\": 0})\n\n assert player.xp_level is None\n\n d = player.serialize()\n assert d == {\"total_points\": 0}\n\n\ndef test_serializable_with_embedded_models_and_list():\n class Question(Model):\n id = LongType()\n\n class QuestionPack(Model):\n id = LongType()\n questions = ListType(ModelType(Question))\n\n class Game(Model):\n id = StringType()\n question_pack = ModelType(QuestionPack)\n\n q1 = Question({\"id\": 1})\n q2 = Question({\"id\": 2})\n\n game = Game({\n \"id\": \"1\",\n \"question_pack\": {\n \"id\": 2,\n \"questions\": [q1, q2]\n }\n })\n\n assert game.question_pack.questions[0] == q1\n assert game.question_pack.questions[1] == q2\n\n d = game.serialize()\n\n assert d == {\n \"id\": \"1\",\n \"question_pack\": {\n \"id\": 2,\n \"questions\": [\n {\n \"id\": 1,\n },\n {\n \"id\": 2,\n },\n ]\n }\n }\n\n\ndef test_serializable_with_embedded_models():\n class ExperienceLevel(Model):\n level = IntType()\n stars = IntType()\n\n @classmethod\n def from_total_points(cls, total_points, category_slug):\n return cls(dict(level=total_points * 2, stars=total_points))\n\n class CategoryStatsInfo(Model):\n category_slug = StringType()\n total_points = IntType(default=0)\n\n @serializable(type=ModelType(ExperienceLevel))\n def xp_level(self):\n return ExperienceLevel.from_total_points(self.total_points, self.category_slug)\n\n class PlayerInfo(Model):\n id = LongType()\n display_name = StringType()\n\n class PlayerCategoryInfo(PlayerInfo):\n categories = DictType(ModelType(CategoryStatsInfo))\n\n info = PlayerCategoryInfo(dict(\n id=\"1\",\n display_name=\"John Doe\",\n categories={\n \"math\": {\n \"category_slug\": \"math\",\n \"total_points\": 1\n }\n }\n ))\n\n assert info.categories[\"math\"].xp_level.level == 2\n assert info.categories[\"math\"].xp_level.stars == 1\n\n d = info.serialize()\n assert d == {\n \"id\": 1,\n \"display_name\": \"John Doe\",\n \"categories\": {\n \"math\": {\n \"category_slug\": \"math\",\n \"total_points\": 1,\n \"xp_level\": {\n \"level\": 2,\n \"stars\": 1,\n }\n }\n }\n }\n\n\ndef test_serializable_works_with_inheritance():\n class Location(Model):\n country_code = StringType()\n\n @serializable\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n class LocationWithCity(Location):\n city_code = StringType()\n\n @serializable\n def city_name(self):\n return \"Oklahoma\" if self.city_code == \"OK\" else \"Unknown\"\n\n location = LocationWithCity(dict(country_code=\"US\", city_code=\"OK\"))\n\n assert location.country_code == \"US\"\n assert location.country_name == \"United States\"\n assert location.city_code == \"OK\"\n assert location.city_name == \"Oklahoma\"\n\n d = location.serialize()\n assert d == {\n \"country_code\": \"US\",\n \"country_name\": \"United States\",\n \"city_code\": \"OK\",\n \"city_name\": \"Oklahoma\",\n }\n\n\ndef test_serialize_with_complex_types():\n class QuestionResource(Model):\n url = StringType()\n\n class Question(Model):\n question_id = StringType(required=True)\n resources = DictType(ListType(ModelType(QuestionResource)))\n\n q = Question(dict(\n question_id=\"1\",\n resources={\n \"pictures\": [{\n \"url\": \"http://www.mbl.is\",\n }]\n }\n ))\n\n d = q.serialize()\n assert d == dict(\n question_id=\"1\",\n resources={\n \"pictures\": [{\n \"url\": \"http://www.mbl.is\",\n }]\n }\n )\n\n q_with_no_resources = Question(dict(\n question_id=\"1\"\n ))\n\n d = q_with_no_resources.serialize()\n assert d == dict(\n question_id=\"1\",\n resources=None,\n )\n\n\ndef test_field_with_serialize_when_none():\n class Question(Model):\n id = StringType()\n question = StringType()\n resources = DictType(StringType, serialize_when_none=False)\n\n q = Question(dict(id=1, question=\"Who's the man?\"))\n\n d = q.serialize()\n assert d == {\n \"id\": \"1\",\n \"question\": \"Who's the man?\",\n }\n\n q = Question(dict(id=1, question=\"Who's the man?\", resources={\"A\": \"B\"}))\n\n d = q.serialize()\n assert d == {\n \"id\": \"1\",\n \"question\": \"Who's the man?\",\n \"resources\": {\"A\": \"B\"}\n }\n\n\ndef test_field_with_serialize_when_none_on_outer_only():\n class M(Model):\n listfield = ListType(StringType(serialize_when_none=True), serialize_when_none=False)\n dictfield = DictType(StringType(serialize_when_none=True), serialize_when_none=False)\n obj = M()\n obj.listfield = [None]\n obj.dictfield = {'foo': None}\n assert obj.serialize() == {'listfield': [None], 'dictfield': {'foo': None}}\n\n\ndef test_field_with_serialize_when_none_on_inner_only():\n class M(Model):\n listfield = ListType(StringType(serialize_when_none=False), serialize_when_none=True)\n dictfield = DictType(StringType(serialize_when_none=False), serialize_when_none=True)\n obj = M()\n obj.listfield = [None]\n obj.dictfield = {'foo': None}\n assert obj.serialize() == {'listfield': [], 'dictfield': {}}\n\n\ndef test_set_serialize_when_none_on_whole_model():\n class Question(Model):\n id = StringType(required=True)\n question = StringType()\n resources = DictType(StringType)\n\n class Options:\n serialize_when_none = False\n\n q = Question(dict(id=1))\n\n d = q.serialize()\n assert d == {\"id\": \"1\"}\n\n\ndef test_possible_to_override_model_wide_serialize_when_none():\n class Question(Model):\n id = StringType(required=True)\n question = StringType()\n resources = DictType(StringType)\n\n class Options:\n serialize_when_none = False\n\n class StrictQuestion(Question):\n strictness = IntType()\n\n class Options:\n serialize_when_none = True\n\n q = StrictQuestion(dict(id=1))\n\n d = q.serialize()\n assert d == {\"id\": \"1\", \"question\": None, \"resources\": None, \"strictness\": None}\n\n\ndef test_possible_to_override_model_wide_settings_per_field():\n class Question(Model):\n id = StringType(required=True)\n question = StringType()\n resources = DictType(StringType, serialize_when_none=True)\n\n class Options:\n serialize_when_none = False\n\n q = Question(dict(id=1))\n\n d = q.serialize()\n assert d == {\"id\": \"1\", \"resources\": None}\n\n\ndef test_complex_types_hiding_after_apply_role_leaves_it_empty():\n class QuestionResource(Model):\n name = StringType()\n url = StringType()\n\n class Options:\n serialize_when_none = False\n roles = {'public': whitelist('name')}\n\n class Question(Model):\n question_id = StringType(required=True)\n resources = DictType(ListType(ModelType(QuestionResource)))\n\n class Options:\n serialize_when_none = False\n roles = {'public': whitelist('question_id', 'resources')}\n\n q = Question(dict(\n question_id=\"1\",\n resources={\n \"pictures\": [{\n \"url\": \"http://www.mbl.is\",\n }]\n }\n ))\n\n d = q.serialize('public')\n assert d == {'question_id': '1'}\n\n\ndef test_serialize_none_fields_if_field_says_so():\n class TestModel(Model):\n inst_id = StringType(required=True, serialize_when_none=True)\n\n q = TestModel({'inst_id': 1})\n\n d = export_loop(TestModel, q, lambda field, value, context: None)\n assert d == {'inst_id': None}\n\n\ndef test_serialize_none_fields_if_export_loop_says_so():\n class TestModel(Model):\n inst_id = StringType(required=True, serialize_when_none=False)\n\n q = TestModel({'inst_id': 1})\n\n d = export_loop(TestModel, q, lambda field, value, context: None, export_level=DEFAULT)\n assert d == {'inst_id': None}\n\n\ndef test_serialize_print_none_always_gets_you_something():\n class TestModel(Model):\n pass\n\n q = TestModel()\n\n d = export_loop(TestModel, q, lambda field, value, context: None, export_level=DEFAULT)\n assert d == {}\n\n\ndef test_serializable_setter():\n class Location(Model):\n country_code = StringType()\n\n @serializable\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n @country_name.setter\n def country_name(self, value):\n self.country_code = {\"United States\": \"US\"}.get(value)\n\n location = Location()\n location.country_name = \"United States\"\n assert location.country_code == \"US\"\n\n d = location.serialize()\n assert d == {\"country_code\": \"US\", \"country_name\": \"United States\"}\n\n\ndef test_serializable_setter_override():\n class Player(Model):\n _id = IntType()\n\n @serializable(IntType())\n def id(self):\n return self._id\n\n @id.setter\n def id(self, value):\n self._id = value\n\n p = Player()\n p.id = \"1\"\n p.validate()\n\n assert type(1) == type(p.id)\n assert 1 == p.id\n\n\ndef test_serializable_setter_init():\n class Location(Model):\n country_code = StringType()\n\n @serializable\n def country_name(self):\n return \"United States\" if self.country_code == \"US\" else \"Unknown\"\n\n @country_name.setter\n def country_name(self, value):\n self.country_code = {\"United States\": \"US\"}.get(value)\n\n location = Location({\"country_name\": \"United States\"}, validate=True)\n assert location.country_code == \"US\"\n\n d = location.serialize()\n assert d == {\"country_code\": \"US\", \"country_name\": \"United States\"}\n\n\ndef test_roles_work_with_subclassing():\n class Address(Model):\n private_key = StringType()\n city = StringType()\n\n class Options:\n roles = {'public': blacklist('private_key')}\n\n class AddressWithPostalCode(Address):\n postal_code = IntType()\n\n a = AddressWithPostalCode(dict(\n postal_code=101,\n city=u\"Reykjavík\",\n private_key=\"secret\"\n ))\n\n d = a.serialize(role=\"public\")\n assert d == {\n \"city\": u\"Reykjavík\",\n \"postal_code\": 101,\n }\n\n\ndef test_role_propagate():\n class Address(Model):\n city = StringType()\n\n class Options:\n roles = {'public': whitelist('city')}\n\n class User(Model):\n name = StringType(required=True)\n password = StringType()\n addresses = ListType(ModelType(Address))\n\n class Options:\n roles = {'public': whitelist('name')}\n\n model = User({'name': 'a', 'addresses': [{'city': 'gotham'}]})\n assert model.addresses[0].city == 'gotham'\n\n d = model.serialize(role=\"public\")\n assert d == {\n \"name\": \"a\",\n }\n\n\ndef test_fails_if_role_is_not_found():\n class Player(Model):\n id = StringType()\n\n p = Player(dict(id=\"1\"))\n\n with pytest.raises(ValueError):\n p.serialize(role=\"public\")\n\n\ndef test_doesnt_fail_if_role_isnt_found_on_embedded_models():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Options:\n roles = {\n \"public\": wholelist(),\n }\n\n class Player(Model):\n id = StringType()\n secret = StringType()\n\n xp_level = ModelType(ExperienceLevel)\n\n class Options:\n roles = {\n \"public\": blacklist(\"secret\")\n }\n\n p = Player(dict(\n id=\"1\",\n secret=\"super_secret\",\n xp_level={\n \"level\": 1,\n \"title\": \"Starter\"\n }\n ))\n\n d = p.serialize(role=\"public\")\n assert d == {\n \"id\": \"1\",\n \"xp_level\": {\n \"level\": 1,\n \"title\": \"Starter\",\n }\n }\n\n\ndef test_doesnt_fail_serialize_when_none_on_whole_model_with_roles():\n class Question(Model):\n id = StringType(required=True)\n question = StringType()\n resources = DictType(StringType)\n\n class Options:\n serialize_when_none = False\n roles = {\n \"public\": whitelist(\"id\"),\n }\n\n q = Question({\"id\": \"1\"})\n\n d = q.serialize(role=\"public\")\n assert d == {\"id\": \"1\"}\n\n\ndef test_uses_roles_on_embedded_models_if_found():\n class ExperienceLevel(Model):\n level = IntType()\n title = StringType()\n\n class Options:\n roles = {\n \"public\": blacklist(\"title\")\n }\n\n class Player(Model):\n id = StringType()\n secret = StringType()\n\n xp_level = ModelType(ExperienceLevel)\n\n class Options:\n roles = {\n \"public\": blacklist(\"secret\")\n }\n\n p = Player(dict(\n id=\"1\",\n secret=\"super_secret\",\n xp_level={\n \"level\": 1,\n \"title\": \"Starter\"\n }\n ))\n\n d = p.serialize(role=\"public\")\n assert d == {\n \"id\": \"1\",\n \"xp_level\": {\n \"level\": 1,\n }\n }\n\n\ndef test_serializable_with_dict_and_roles():\n class Player(Model):\n id = LongType()\n display_name = StringType()\n\n class Options:\n roles = {\n \"public\": blacklist(\"id\")\n }\n\n class Game(Model):\n id = StringType()\n result = IntType()\n players = DictType(ModelType(Player), coerce_key=lambda k: int(k))\n\n class Options:\n roles = {\n \"public\": blacklist(\"result\")\n }\n\n p1 = Player({\"id\": 1, \"display_name\": \"A\"})\n p2 = Player({\"id\": 2, \"display_name\": \"B\"})\n\n game = Game({\n \"id\": \"1\",\n \"players\": {\n 1: p1,\n 2: p2\n }\n })\n\n assert game.players[1] == p1\n assert game.players[2] == p2\n\n d = game.serialize(role=\"public\")\n\n assert d == {\n \"id\": \"1\",\n \"players\": {\n 1: {\n \"display_name\": \"A\"\n },\n 2: {\n \"display_name\": \"B\"\n },\n }\n }\n\n\ndef test_serializable_with_list_and_roles():\n class Player(Model):\n id = LongType()\n display_name = StringType()\n\n class Options:\n roles = {\n \"public\": blacklist(\"id\")\n }\n\n class Game(Model):\n id = StringType()\n result = IntType()\n players = ListType(ModelType(Player))\n\n class Options:\n roles = {\n \"public\": blacklist(\"result\")\n }\n\n p1 = Player({\"id\": 1, \"display_name\": \"A\"})\n p2 = Player({\"id\": 2, \"display_name\": \"B\"})\n\n game = Game({\n \"id\": \"1\",\n \"players\": [p1, p2]\n })\n\n assert game.players[0] == p1\n assert game.players[1] == p2\n\n d = game.serialize(role=\"public\")\n\n assert d == {\n \"id\": \"1\",\n \"players\": [\n {\n \"display_name\": \"A\",\n },\n {\n \"display_name\": \"B\",\n },\n ]\n }\n\n\ndef test_role_set_operations():\n\n protected = whitelist('email', 'password')\n all_fields = whitelist('id', 'name') + protected\n\n def count(n):\n while True:\n yield n\n n += 1\n\n class User(Model):\n id = IntType(default=next(count(42)))\n name = StringType()\n email = StringType()\n password = StringType()\n\n class Options:\n roles = {\n 'create': all_fields - ['id'],\n 'public': all_fields - ['password'],\n 'nospam': blacklist('password') + blacklist('email'),\n 'empty': whitelist(),\n 'everything': blacklist(),\n }\n\n roles = User.Options.roles\n assert len(roles['create']) == 3\n assert len(roles['public']) == 3\n assert len(roles['nospam']) == 2\n assert len(roles['empty']) == 0\n assert len(roles['everything']) == 0\n\n # Sets sort different with different Python versions. We should be getting something back\n # like: \"whitelist('password', 'email', 'name')\"\n s = str(roles['create'])\n assert s.startswith('whitelist(') and s.endswith(')')\n assert sorted(s[10:-1].split(', ')) == [\"'email'\", \"'name'\", \"'password'\"]\n\n # Similar, but now looking for: \n r = repr(roles['create'])\n assert r.startswith('')\n assert sorted(r[16:-2].split(', ')) == [\"'email'\", \"'name'\", \"'password'\"]\n\n data = {\n 'id': 'NaN',\n 'name': 'Arthur',\n 'email': 'adent@hitchhiker.gal',\n 'password': 'dolphins',\n }\n\n user = User(\n dict(\n (k, v) for k, v in data.items()\n if k in User._options.roles['create'] # filter by 'create' role\n )\n )\n\n d = user.serialize(role='public')\n\n assert d == {\n 'id': 42,\n 'name': 'Arthur',\n 'email': 'adent@hitchhiker.gal',\n }\n\n d = user.serialize(role='nospam')\n\n assert d == {\n 'id': 42,\n 'name': 'Arthur',\n }\n\n d = user.serialize(role='empty')\n\n assert d == {}\n\n d = user.serialize(role='everything')\n\n assert d == {\n 'email': 'adent@hitchhiker.gal',\n 'id': 42,\n 'name': 'Arthur',\n 'password': 'dolphins'\n }\n\n def test_md5_type(self):\n class M(Model):\n md5 = MD5Type()\n\n import hashlib\n myhash = hashlib.md5(\"hashthis\").hexdigest()\n m = M()\n m.md5 = myhash\n\n self.assertEqual(m.md5, myhash)\n d = m.serialize()\n self.assertEqual(d, {\n 'md5': myhash\n })\n\n m2 = M(d)\n self.assertEqual(m2.md5, myhash)\n\n\ndef test_serializable_with_list_and_default_role():\n class Player(Model):\n id = LongType()\n display_name = StringType()\n\n class Options:\n roles = {\n \"default\": blacklist(\"id\")\n }\n\n class Game(Model):\n id = StringType()\n result = IntType()\n players = ListType(ModelType(Player))\n\n class Options:\n roles = {\n \"default\": blacklist(\"result\")\n }\n\n p1 = Player({\"id\": 1, \"display_name\": \"A\"})\n p2 = Player({\"id\": 2, \"display_name\": \"B\"})\n\n game = Game({\n \"id\": \"1\",\n \"players\": [p1, p2]\n })\n\n assert game.players[0] == p1\n assert game.players[1] == p2\n\n d = game.serialize(role=\"default\")\n\n assert d == {\n \"id\": \"1\",\n \"players\": [\n {\n \"display_name\": \"A\",\n },\n {\n \"display_name\": \"B\",\n },\n ]\n }\n\n d = game.serialize()\n\n assert d == {\n \"id\": \"1\",\n \"players\": [\n {\n \"display_name\": \"A\",\n },\n {\n \"display_name\": \"B\",\n },\n ]\n }\n\n\ndef test_callable_role():\n acl_fields = {\n 'user_id_1': ['name'],\n 'user_id_2': ['name', 'password'],\n }\n\n class User(Model):\n name = StringType()\n password = StringType()\n\n u = User({'name': 'A', 'password': 'B'})\n\n user_1_acl = whitelist(*acl_fields['user_id_1'])\n d = u.serialize(role=user_1_acl)\n assert d == {'name': 'A'}\n\n user_2_acl = whitelist(*acl_fields['user_id_2'])\n d = u.serialize(role=user_2_acl)\n assert d == {'name': 'A', 'password': 'B'}\n","repo_name":"schematics/schematics","sub_path":"tests/test_serialize.py","file_name":"test_serialize.py","file_ext":"py","file_size_in_byte":25031,"program_lang":"python","lang":"en","doc_type":"code","stars":2572,"dataset":"github-code","pt":"22"} +{"seq_id":"26009567280","text":"\"\"\"Script used to calculate the error between the actual and the predicted annotations produced by Annotator\"\"\"\n\nimport json\nimport os.path\nimport sys\nfrom itertools import product\n\n\ndef calc_annotation_error(annotation1, annotation2, annoyances):\n markers = []\n for event in annotation1:\n markers.append({'soundscape': 1, 'isStart': True, 'time': event['start'], 'class': event['class']})\n markers.append({'soundscape': 1, 'isStart': False, 'time': event['end'], 'class': event['class']})\n for event in annotation2:\n markers.append({'soundscape': 2, 'isStart': True, 'time': event['start'], 'class': event['class']})\n markers.append({'soundscape': 2, 'isStart': False, 'time': event['end'], 'class': event['class']})\n markers.sort(key=lambda k: k['time'])\n\n error = 0\n classes1 = []\n classes2 = []\n a = 0\n for marker in markers:\n b = marker['time']\n if classes1:\n if classes2:\n error += (b - a) * min(abs(annoyances[d] - annoyances[c]) for c, d in product(classes1, classes2))\n else:\n error += (b - a) * min([annoyances[c] for c in classes1])\n else:\n if classes2:\n error += (b - a) * min([annoyances[c] for c in classes2])\n else:\n error += (b - a) * 0\n a = b\n\n if marker['soundscape'] == 1:\n if marker['isStart']:\n classes1.append(marker['class'])\n else:\n classes1.remove(marker['class'])\n elif marker['soundscape'] == 2:\n if marker['isStart']:\n classes2.append(marker['class'])\n else:\n classes2.remove(marker['class'])\n\n return error\n\n\nif __name__ == '__main__':\n true_annotation_filepath = sys.argv[1]\n pred_annotation_filepath = sys.argv[2]\n\n\n def read_annotation(filepath):\n events = []\n with open(filepath) as file:\n for line in file:\n try:\n split = line.split('\\t')\n event = {'start': float(split[0]), 'end': float(split[1]), 'class': split[2].rstrip()}\n events.append(event)\n except:\n pass # skip line\n return events\n\n\n true_annotation = read_annotation(true_annotation_filepath)\n pred_annotation = read_annotation(pred_annotation_filepath)\n\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../annoyances.json\") as file:\n annoyances = json.load(file)\n\n match = calc_annotation_error(true_annotation, pred_annotation, annoyances)\n\n print(match)\n","repo_name":"mthaak/hownoisy","sub_path":"util/calc_annotation_error.py","file_name":"calc_annotation_error.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"43519587058","text":"from django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\n\nfrom datetime import datetime, timedelta, timezone\n\nfrom .models import Order, OrderItem, Address\n\nfrom products.models import Product\nfrom bundles.models import Bundle, BundleItem\nfrom printing.models import CustomPrintOrder\nfrom profiles.models import UserProfile\n\nimport json\nimport time\n\n\nclass StripeWH_Handler:\n\n def __init__(self, request):\n self.request = request\n\n def handle_event(self, event):\n return HttpResponse(\n content=f'Unhandled Webhook Received: {event.type}',\n status=200\n )\n\n def _send_confirmation_order(self, order):\n cust_email = order.cust_email\n subject = render_to_string(\n './confirmation_emails/confirmation_email_subject.txt',\n {\n 'order': order\n })\n body = render_to_string(\n './confirmation_emails/confirmation_email_body.txt',\n {\n 'order': order,\n 'contact_email': settings.DEFAULT_FROM_EMAIL\n })\n\n send_mail(\n subject,\n body,\n settings.DEFAULT_FROM_EMAIL,\n [cust_email]\n )\n\n def handle_payment_intent_succeeded(self, event):\n\n intent = event.data.object\n pid = intent.id\n cart = intent.metadata.cart\n save_info = intent.metadata.save_info\n\n billing_details = intent.charges.data[0].billing_details\n shipping_details = intent.shipping\n grand_total = round(intent.charges.data[0].amount / 100, 2)\n \n profile = None\n username = intent.metadata.username\n if username != 'AnonymousUser':\n profile = get_object_or_404(UserProfile, user__username=username)\n if save_info:\n profile.phone_number = shipping_details.phone\n profile.street_address_1 = shipping_details.address.line1\n profile.street_address_2 = shipping_details.address.line2\n profile.city_town = shipping_details.address.city\n profile.county_area = shipping_details.address.state\n profile.country = shipping_details.address.country\n profile.postal_code = shipping_details.address.postal_code\n profile.save()\n\n order_exists = False\n attempt = 1\n \"\"\"\n for customers with multiple orders in the system we only want\n to retrieve an order that was submitted within the past 10s\n \"\"\"\n utc_dt = datetime.now(timezone.utc) \n dt_threshold = utc_dt.astimezone() - timedelta(seconds=10)\n time.sleep(1)\n while attempt <= 5:\n orders = list(Order.objects.filter(\n cust_name__iexact=shipping_details.name,\n cust_email__iexact=billing_details.email,\n cust_phone__iexact=shipping_details.phone,\n grand_total=grand_total,\n original_cart=cart,\n stripe_pid=pid,\n ).order_by('-date'))\n orders = [order for order in orders if order.date > dt_threshold]\n if orders:\n order = orders[0]\n order_exists = True\n address = Address.objects.create(\n street_address_1=shipping_details.address.line1,\n street_address_2=shipping_details.address.line2,\n city_town=shipping_details.address.city,\n county_area=shipping_details.address.state,\n country=shipping_details.address.country,\n postal_code=shipping_details.address.postal_code)\n order.address = address\n break\n else:\n attempt += 1\n time.sleep(1)\n if order_exists:\n self._send_confirmation_order(order)\n return HttpResponse(\n content=(f'Webhook received: {event[\"type\"]} | SUCCESS: '\n 'Verified order already in database'),\n status=200)\n else:\n order = None\n try:\n address = Address.objects.create(\n street_address_1=shipping_details.address.line1,\n street_address_2=shipping_details.address.line2,\n city_town=shipping_details.address.city,\n county_area=shipping_details.address.state,\n country=shipping_details.address.country,\n postal_code=shipping_details.address.postal_code\n )\n order = Order.objects.create(\n cust_name=billing_details.name,\n cust_email=billing_details.email,\n cust_phone=billing_details.phone,\n address=address,\n original_cart=cart,\n stripe_pid=pid\n )\n\n # handle products in cart\n for item_id, item_data in json.loads(cart)['products'].items():\n product = get_object_or_404(Product, pk=item_id)\n order_item = OrderItem(\n order=order,\n product=product,\n quantity=item_data\n )\n product.qty_held -= item_data\n product.save()\n order_item.save()\n \n # handle bundles in cart\n for item_id, item_data in json.loads(cart)['bundles'].items():\n bundle = get_object_or_404(Bundle,bundle_id=item_id)\n bundle_items = list(BundleItem.objects.filter(\n bundle__bundle_id=item_id))\n for item in bundle_items:\n item.product.qty_held -= item.item_qty\n\n order_item = OrderItem(\n order=order,\n quantity=item_data,\n bundle=bundle)\n order_item.save()\n\n # handle custom prints in cart\n for item_id, item_data in json.loads(cart)['custom_prints'].items():\n custom_print_order = get_object_or_404(\n CustomPrintOrder, pk=item_id)\n order_item = OrderItem(\n order=order,\n quantity=item_data,\n custom_print_order=custom_print_order)\n order_item.save()\n \n except Exception as e:\n if order:\n order.delete()\n return HttpResponse(\n content=f'Webhook received: {event[\"type\"]} | ERROR: {e}',\n status=500)\n\n self._send_confirmation_order(order)\n return HttpResponse(\n content=(f'Webhook received: {event[\"type\"]} | SUCCESS: '\n 'Created order in webhook'),\n status=200)\n\n def handle_payment_intent_failed(self, event):\n return HttpResponse(\n content=f'Received: {event[\"type\"]}',\n status=200\n )\n","repo_name":"Cldwlkn13/balloonatics","sub_path":"checkout/webhook_handler.py","file_name":"webhook_handler.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38066529429","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 3 11:18:50 2022\r\n\r\n@author: ljoly\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random as rd\r\n\r\n\r\n\r\nN = 100 # nombre d'agents économiques\r\nx = 0.1 # pourcentage de la fortune de l'agent le plus pauvre transféré dans chaque transaction\r\nW = 100000 # richesse totale de l'économie = 100000€\r\n\r\n# Répartition aléatoire de la richesse dans l'économie au début de la simulation\r\nv = np.random.rand(N)\r\nnormalized_v = v / sum(v)\r\nw = W*normalized_v\r\nw2 = W*normalized_v\r\nw3 = W*normalized_v\r\nw4 = W*normalized_v # on prend la même distribution de départ pour tous les modèles\r\n\r\nX = [m for m in range(1,N+1)]\r\nplt.plot(X,w,'.')\r\nplt.ylabel('richesse')\r\nplt.xlabel('agent n°')\r\nplt.show() # on affiche la répartition initiale de la richesse\r\n\r\n### Premier modèle naïf sans biais ni redistribution\r\n\r\ndef echange():\r\n indices = rd.sample(range(N),2) # on choisit aléatoirement 2 agents pour un échange\r\n i,j = indices[0],indices[1]\r\n m = x*min(w[i],w[j]) # richesse mise en jeu pour cet échange\r\n if np.random.randint(2): # tirage au sort pour savoir qui gagne la transaction\r\n w[i] += m # l'agent i remporte l'échange\r\n w[j] -= m\r\n else:\r\n w[j] += m # l'agent j remporte l'échange\r\n w[i] -= m\r\n return w\r\n\r\n\r\ndef model1():\r\n for l in range(10):\r\n for k in range(100000):\r\n echange()\r\n plt.plot(X,w,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n \r\n### Second modèle avec biais\r\n\r\ndef bernoulli(p):\r\n if rd.random()<=p :\r\n return 1\r\n else : \r\n return 0\r\n \r\ndef echange2():\r\n indices = rd.sample(range(N),2) # on choisit aléatoirement 2 agents différents pour un échange\r\n i,j = indices[0],indices[1]\r\n m = x*min(w2[i],w2[j]) # richesse mise en jeu pour cet échange\r\n d = max(w2[i],w2[j]) - min(w2[i],w2[j]) # on prend la différence de richesse entre les ndvidus pour construire le biais\r\n p = 0.5 + d/(2*W) # d/W < 0.5 on a un paramètre biaisé proportionnellement au différentiel de richesse entre les 2 agents\r\n if max(w2[i],w2[j])==w2[i]: # l'agent i est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w2[i] += m # l'agent i remporte l'échange\r\n w2[j] -= m\r\n else:\r\n w2[j] += m # l'agent j remporte l'échange\r\n w2[i] -= m\r\n else: # l'agent j est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w2[j] += m # l'agent j remporte l'échange\r\n w2[i] -= m\r\n else:\r\n w2[i] += m # l'agent i remporte l'échange\r\n w2[j] -= m\r\n\r\n\r\ndef model2():\r\n plt.plot(X,w2,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n for l in range(5):\r\n for k in range(100000):\r\n echange2()\r\n plt.plot(X,w2,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n \r\n### Troisième modèle avec un impôt non progressif\r\n\r\nt = 0.1 # part prélevée en pourcentage de la richesse de chaque agent pour l'impôt\r\n\r\n\r\ndef echange3():\r\n indices = rd.sample(range(N),2) # on choisit aléatoirement 2 agents différents pour un échange\r\n i,j = indices[0],indices[1]\r\n m = x*min(w3[i],w3[j]) # richesse mise en jeu pour cet échange\r\n d = max(w3[i],w3[j]) - min(w3[i],w3[j]) # on prend la différence de richesse entre les indvidus pour construire le biais\r\n p = 0.5 + d/(2*W) # d/W < 0.5 on a un paramètre biaisé proportionnellement au différentiel de richesse entre les 2 agents\r\n S = t*W # somme totale rapportée par l'impôt\r\n if max(w3[i],w3[j]) == w3[i]: # l'agent i est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w3[i] += m # l'agent i remporte l'échange\r\n w3[j] -= m\r\n else:\r\n w3[j] += m # l'agent j remporte l'échange\r\n w3[i] -= m\r\n else: # l'agent j est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w3[j] += m # l'agent j remporte l'échange\r\n w3[i] -= m\r\n else:\r\n w3[i] += m # l'agent i remporte l'échange\r\n w3[j] -= m\r\n for l in range(len(w3)):\r\n w3[l] = (1-t)*w3[l] + S/N # l'impôt n'est pas progressif et est redistribué de manière égalitaire à tous les agents\r\n\r\n\r\n\r\ndef model3():\r\n plt.plot(X,w3,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n for l in range(10):\r\n for k in range(10):\r\n echange3()\r\n plt.plot(X,w3,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n \r\n### Quatrième modèle avec impôt progessif\r\n\r\ndef impot(x): # prend en argument une liste conteanant la richesse de chaque agent\r\n S = 0 # montant total d'impôt prélevé\r\n for k in range(len(x)):\r\n if (10000 <= x[k] < 20000) :\r\n S += 0.1*x[k]\r\n x[k] *= 0.9 # impôt sur la fortune de 10 %\r\n elif 20000 <= x[k] < 50000 :\r\n S += 0.2*x[k]\r\n x[k] *= 0.8 # impôt sur la fortune de 20 %\r\n elif x[k] >= 50000 :\r\n S += 0.3*x[k]\r\n x[k] *= 0.7 # impôt sur la fortune de 30 %\r\n for l in range(len(x)):\r\n x[l] += S/N # redistribution égalitaire\r\n\r\ndef echange4():\r\n indices = rd.sample(range(N),2) # on choisit aléatoirement 2 agents différents pour un échange\r\n i,j = indices[0],indices[1]\r\n m = x*min(w4[i],w4[j]) # richesse mise en jeu pour cet échange\r\n d = max(w4[i],w4[j]) - min(w4[i],w4[j]) # on prend la différence de richesse entre les indvidus pour construire le biais\r\n p = 0.5 + d/(2*W) # d/W < 0.5 on a un paramètre biaisé proportionnellement au différentiel de richesse entre les 2 agents\r\n if max(w4[i],w4[j]) == w4[i]: # l'agent i est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w4[i] += m # l'agent i remporte l'échange\r\n w4[j] -= m\r\n else:\r\n w4[j] += m # l'agent j remporte l'échange\r\n w4[i] -= m\r\n else: # l'agent j est le plus riche \r\n if bernoulli(p): # tirage au sort biaisé pour savoir qui gagne la transaction\r\n w4[j] += m # l'agent j remporte l'échange\r\n w4[i] -= m\r\n else:\r\n w4[i] += m # l'agent i remporte l'échange\r\n w4[j] -= m\r\n impot(w4)\r\n\r\n\r\n\r\ndef model4():\r\n plt.plot(X,w4,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()\r\n for l in range(10):\r\n for k in range(5000):\r\n echange4()\r\n plt.plot(X,w4,'.')\r\n plt.ylabel('richesse')\r\n plt.xlabel('agent n°')\r\n plt.show()","repo_name":"LeaJoly/projet_physique_sciences_sociales","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22675775129","text":"from os import getenv\nfrom typing import Iterable\n\nfrom aws_lambda_powertools import Logger\nfrom mypy_boto3_iot.type_defs import PolicyTypeDef as IotPolicyTypeDef, GetPolicyResponseTypeDef as \\\n IoTGetPolicyResponseTypeDef\n\nimport resource_based_policy.resource_based_policy_model as model\nfrom aws.services.iot import IoT\nfrom resource_based_policy.step_functions_lambda.check_policy_for_organizations_dependency import \\\n CheckForOrganizationsDependency\nfrom resource_based_policy.step_functions_lambda.utils import DenormalizeResourceBasedPolicyResponse, \\\n DenormalizePolicyAnalyzerRequest, scan_regions\n\n\nclass IoTPolicy:\n def __init__(self, event: model.ScanServiceRequestModel):\n self.logger = Logger(service=self.__class__.__name__, level=getenv('LOG_LEVEL'))\n self.event = event\n self.account_id = event['AccountId']\n\n def scan(self) -> Iterable[model.ResourceBasedPolicyResponseModel]:\n return scan_regions(self.event, self.scan_single_region)\n\n def scan_single_region(self, region: str) -> Iterable[model.ResourceBasedPolicyResponseModel]:\n self.logger.info(f\"Scanning IoT Policies in {region}\")\n iot_client = IoT(self.account_id, region)\n iot_data: list[model.IoTData] = self._get_iot_data(iot_client)\n iot_names_policies = self._get_iot_policy(iot_data, iot_client)\n resources_dependent_on_organizations: list[\n model.PolicyAnalyzerResponse] = CheckForOrganizationsDependency().scan(iot_names_policies)\n iot_resources_for_region = list(DenormalizeResourceBasedPolicyResponse(self.event).model(\n resource, region) for resource in resources_dependent_on_organizations)\n return iot_resources_for_region\n\n def _get_iot_data(self, iot_client) -> list[model.IoTData]:\n iot_objects: list[IotPolicyTypeDef] = iot_client.list_policies()\n return list(self.denormalize_to_iot_data(iot_data) for iot_data in iot_objects)\n\n @staticmethod\n def denormalize_to_iot_data(iot_data: IotPolicyTypeDef) -> model.IoTData:\n data: model.IoTData = {\n \"policyName\": iot_data['policyName']\n }\n return data\n\n @staticmethod\n def _get_iot_policy(iot_data: list[model.IoTData], iot_client) -> list[model.PolicyAnalyzerRequest]:\n iot_policies = []\n for iot in iot_data:\n policy: IoTGetPolicyResponseTypeDef = iot_client.get_policy(\n iot.get('policyName')\n )\n if policy.get('policyDocument'):\n policy_object: model.PolicyAnalyzerRequest = DenormalizePolicyAnalyzerRequest().model(\n iot.get('policyName'),\n policy['policyDocument']\n )\n iot_policies.append(policy_object)\n return iot_policies\n","repo_name":"aws-solutions/account-assessment-for-aws-organizations","sub_path":"source/lambda/resource_based_policy/step_functions_lambda/scan_iot_policy.py","file_name":"scan_iot_policy.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"22"} +{"seq_id":"20611947949","text":"\"\"\"\nGiven a string containing only digits, restore it by returning all possible valid IP address combinations.\n\nExample:\n\nInput: \"25525511135\"\nOutput: [\"255.255.11.135\", \"255.255.111.35\"\n\n\n\n\n\"\"\"\n\n\nclass Solution:\n\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n #ref https://segmentfault.com/a/1190000008931557\n #tricky的方法\n # rs = []\n # for a in range(1,4):\n # for b in range(1,4):\n # for c in range(1,4):\n # for d in range(1,4):\n # if sum([a,b,c,d]) == len(s):\n # n1 = s[:a]\n # n2 = s[a:a+b]\n # n3 = s[a+b:a+b+c]\n # n4 = s[a+b+c:]\n\n # if self.isValid(n1) and self.isValid(n2) and self.isValid(n3) and self.isValid(n4):\n # ip = n1 + \".\" + n2 + \".\" + n3 + \".\" + n4\n # if len(ip) == len(s) + 3:\n # rs.append(ip)\n # return rs\n\n \"\"\"\n #234.245.256.123\n #2. 342 452 561 23 还剩2个点,最多可以有(2+1)位,而目前还有11位,不行, len(s)-index > (dot+1)*3\n\n dfs方法,事实上是上面的tricky的方法的抽象版本\n\n \"\"\"\n def dfs(nows, index, path, rs,dotnum):\n \n\n # if dotnum == 0 and len(nows) > 12:return\n # if dotnum == 1 and len(nows) > 9:return\n # if dotnum == 2 and len(nows) > 6:return\n if dotnum <= 2 and len(nows) > (4-dotnum)*3:return\n\n\n if dotnum == 3 and self.isValid(nows):\n rs.append(path+nows) \n return\n else:\n for i in range(1,4):\n tmp = nows[:i]\n if self.isValid(tmp):\n dfs(nows[i:],i,path+tmp+\".\",rs,dotnum+1)\n \n rs = []\n if len(s) < 4 or len(s) > 12: return rs\n dfs(s,0, \"\",rs,0)\n return rs\n\n def isValid(self,s):\n if len(s) <=0: return False\n if int(s) < 0 or int(s) > 255:\n return False\n if len(s) > 1 and s[0] == '0':\n return False\n return True \n\n\n\n\n\n def restoreIpAddresses_backtracing(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n \"\"\"\n ref:https://blog.csdn.net/u012501459/article/details/46804405\n\n \"\"\"\n \n \n rs = []\n \n self.helper(s, 0, 0, [], rs)\n # return rs\n for item in rs:\n print(item)\n\n def helper(self, s, num, pos, path, rs):\n if pos == len(s) or num == 4:\n rs.append(path[:len(path)-1])\n return\n # if(s.size()-pos>3*(4-num))\n if len(s) - pos > 3*(4-num):\n return\n\n if pos < len(s):\n path += s[pos:pos+1] + \".\"\n self.helper(s, num+1, pos+1, path, rs)\n path = path[:len(path)-1]\n\n if pos < len(s) - 1 and s[pos] != '0':\n path += s[pos:pos+2]+\".\"\n self.helper(s, num+1, pos+2, path, rs)\n path = path[:len(path)-2]\n if pos < len(s) - 2 and s[pos] != '0' and s[pos:pos+3] <= '255':\n path += s[pos:pos+3]+\".\"\n self.helper(s, num+1, pos+3, path, rs)\n path = path[:len(path)-3]\n\nso = Solution()\n\ns = \"25525511135\"\ns = \"010010\"\n# s=\"19216821\"\nprint(so.restoreIpAddresses(s))","repo_name":"Lobo2008/LeetCode","sub_path":"93_Restore_IP_Addresses.py","file_name":"93_Restore_IP_Addresses.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38518166810","text":"from django.test import TestCase\nfrom .helper_functions import *\nfrom datetime import date\n\n\"\"\"\nWhat to expect:\n - there will be 2 users: user2, user3\n - there will be 2 groups: group2, group3\n - there will be 2 grants: grant2, grant3\n - there will be 2 allocation usages: allocation_usage2, allocation_usage3\n\"\"\"\n\n\nclass TestMongoStorageManyStores(TestCase):\n def get_db(self):\n mc = MongoClient('mongodb://localhost/hpcbursar')\n return mc['hpcbursar']\n\n def test_store_many_users(self):\n db = self.get_db()\n logins = [\"user2\", \"user3\"]\n emails = [\"user2@cyfronet.pl\", \"user3@cyfronet.pl\"]\n statuses = [\"ACTIVE\", \"INACTIVE\"]\n first_names = [\"User\", \"User\"]\n last_names = [\"2\", \"3\"]\n opis = [\"\", \"\"]\n affiliations = [\n [Affiliation(\"ACADEMIC_UNIT_EMPLOYEE\", [\"Uniwersytet Warszawski\", \"Wydział Matematyki\"],\n \"ACTIVE\", date(2023, 6, 30))],\n [Affiliation(\"ACADEMIC_EMPLOYEE\", [\"Akademia Górniczo-Hutnicza im. Stanisława Staszica w Krakowie\",\n \"Akademickie Centrum Komputerowe Cyfronet AGH\"],\n \"INACTIVE\", date(2025, 12, 31))]]\n users, _ = create_and_store_many_users(logins, emails, statuses, first_names, last_names, opis, affiliations)\n for i in range(len(users)):\n result = db[\"user\"].find_one({\"login\": users[i].login})\n self.assertEqual(result[\"login\"], users[i].login)\n self.assertEqual(result[\"email\"], users[i].email)\n self.assertEqual(result[\"status\"], users[i].status)\n self.assertEqual(result[\"first_name\"], users[i].first_name)\n self.assertEqual(result[\"last_name\"], users[i].last_name)\n self.assertEqual(result[\"opi\"], users[i].opi)\n for j in range(len(users[i].affiliations)):\n self.assertEqual(result[\"affiliations\"][j][\"type\"], users[i].affiliations[j].type)\n self.assertEqual(result[\"affiliations\"][j][\"units\"], users[i].affiliations[j].units)\n self.assertEqual(result[\"affiliations\"][j][\"status\"], users[i].affiliations[j].status)\n self.assertEqual(result[\"affiliations\"][j][\"end\"], users[i].affiliations[j].end.isoformat())\n\n def test_store_many_groups(self):\n db = self.get_db()\n names = [\"group2\", \"group3\"]\n statuses = [\"ACCEPTED\", \"PENDING\"]\n members = [[\"user1\", \"user2\"], [\"user2\", \"user3\"]]\n leaders = [[\"user2\"], [\"user3\"]]\n groups, _ = create_and_store_many_groups(names, statuses, members, leaders)\n for i in range(len(groups)):\n result = db[\"group\"].find_one({\"name\": groups[i].name})\n self.assertEqual(result[\"name\"], groups[i].name)\n self.assertEqual(result[\"status\"], groups[i].status)\n self.assertEqual(result[\"members\"], groups[i].members)\n self.assertEqual(result[\"leaders\"], groups[i].leaders)\n\n def test_store_many_grants(self):\n db = self.get_db()\n names = [\"grant2\", \"grant3\"]\n groups = [\"group2\", \"group3\"]\n statuses = [\"ACTIVE\", \"INACTIVE\"]\n starts = [date(2022, 7, 19), date(2019, 1, 7)]\n ends = [date(2022, 9, 19), date(2020, 1, 7)]\n allocations = [[Allocation(\"allocation2\", \"CPU\", {\"timelimit\": 12, \"hours\": 24})],\n [Allocation(\"allocation3\", \"GPU\", {\"timelimit\": 100, \"hours\": 500})]]\n grants, _ = create_and_store_many_grants(names, groups, statuses, starts, ends, allocations)\n for i in range(len(grants)):\n result = db[\"grant\"].find_one({\"name\": grants[i].name})\n self.assertEqual(result[\"name\"], grants[i].name)\n self.assertEqual(result[\"group\"], grants[i].group)\n self.assertEqual(result[\"status\"], grants[i].status)\n self.assertEqual(result[\"start\"], grants[i].start.isoformat())\n self.assertEqual(result[\"end\"], grants[i].end.isoformat())\n for j in range(len(result[\"allocations\"])):\n self.assertEqual(result[\"allocations\"][j][\"name\"], grants[i].allocations[j].name)\n self.assertEqual(result[\"allocations\"][j][\"resource\"], grants[i].allocations[j].resource)\n self.assertEqual(result[\"allocations\"][j][\"parameters\"], grants[i].allocations[j].parameters)\n\n def test_store_many_allocation_usages(self):\n db = self.get_db()\n names = [\"allocation_usage2\", \"allocation_usage3\"]\n summaries = [Summary(datetime(2020, 4, 29), {\"hours\": 1, \"minutes\": 50}),\n Summary(datetime(2022, 6, 20), {\"minutes\": 50})]\n usages = [\n [Usage(datetime(2020, 4, 15), datetime(2020, 4, 10), datetime(2020, 4, 14), {\"hours\": 1, \"minutes\": 45}),\n Usage(datetime(2020, 4, 29), datetime(2020, 4, 25), datetime(2020, 4, 25), {\"minutes\": 5})],\n [Usage(datetime(2022, 6, 20), datetime(2022, 6, 18), datetime(2022, 6, 19), {\"minutes\": 50})]]\n allocation_usages, _ = create_and_store_many_allocation_usages(names, summaries, usages)\n for i in range(len(allocation_usages)):\n result = db[\"allocation_usage\"].find_one({\"name\": allocation_usages[i].name})\n self.assertEqual(result[\"name\"], allocation_usages[i].name)\n self.assertEqual(result[\"summary\"][\"timestamp\"],\n allocation_usages[i].summary.timestamp.isoformat())\n self.assertEqual(result[\"summary\"][\"resources\"], allocation_usages[i].summary.resources)\n for j in range(len(allocation_usages[i].usages)):\n self.assertEqual(result[\"usages\"][j][\"timestamp\"], allocation_usages[i].usages[j].timestamp.isoformat())\n self.assertEqual(result[\"usages\"][j][\"start\"], allocation_usages[i].usages[j].start.isoformat())\n self.assertEqual(result[\"usages\"][j][\"end\"], allocation_usages[i].usages[j].end.isoformat())\n self.assertEqual(result[\"usages\"][j][\"resources\"], allocation_usages[i].usages[j].resources)\n","repo_name":"cyfronet-hpc/hpcbursar","sub_path":"grantstorage/tests/test_storage/test_mongo/test_multi_store.py","file_name":"test_multi_store.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"39385710722","text":"import logging\nimport json\nimport importlib\nimport asyncio\nimport base64\nfrom datetime import datetime\nfrom aiogram import Bot, Dispatcher, types\nfrom aiohttp import ClientSession, BasicAuth\nfrom database import User, Message\nfrom database import Bot as BotModel\n\nLOG_FORMAT = '%(asctime)s,%(msecs)d %(levelname)s: %(message)s'\nLOGGER = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)\n\n__platform__ = 'telegram'\n\nhttp_session = ClientSession()\n\n\nclass TelegramBot(object):\n def __init__(self, bot_model: BotModel):\n self.bot = bot_model\n LOGGER.log(logging.INFO, msg=f'Starting Telegram bot %s' % self.bot.name)\n self.Bot = Bot(token=self.bot.token)\n self.Dispatcher = Dispatcher(bot=self.Bot)\n if self.bot.custom_handler:\n self.handler = getattr(importlib.import_module('handlers.' + self.bot.custom_handler),\n 'custom_handler')\n self.Dispatcher.register_message_handler(self.handler.start_handler,\n commands={\"start\", \"restart\", \"stop\"})\n self.Dispatcher.register_message_handler(self.handler.message_handler)\n LOGGER.log(logging.INFO, msg='Registered bot with custom handler %s' % self.bot.custom_handler)\n else:\n self.Dispatcher.register_message_handler(self.start_handler, commands={\"start\", \"restart\", \"stop\"})\n self.Dispatcher.register_message_handler(self.message_handler)\n LOGGER.log(logging.INFO, msg='Registered bot without custom handlers.')\n\n async def send_to(self, message, as_html=False, as_markdown_v2=False):\n user = User.get(message['chat_id'])\n if int(message['chat_id']) > 0 and user or int(message['chat_id']) < 0:\n if hasattr(self, 'handler'):\n if self.handler.can_send:\n try:\n result: types.Message = await self.handler.send_to(self.Bot, message)\n return dict(success=True, message_id=result.message_id)\n except Exception as e:\n return dict(success=False, detail=str(e))\n else:\n try:\n if as_html:\n result: types.Message = await self.Bot.send_message(chat_id=message['chat_id'],\n text=message['body'],\n parse_mode=types.ParseMode.HTML)\n elif as_markdown_v2:\n result: types.Message = await self.Bot.send_message(chat_id=message['chat_id'],\n text=message['body'],\n parse_mode=types.ParseMode.MARKDOWN_V2)\n elif 'photo_link' in message:\n link = message['photo_link']\n if as_html:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=types.InputFile.from_url(link),\n caption=message['body'],\n parse_mode=types.ParseMode.HTML)\n elif as_markdown_v2:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=types.InputFile.from_url(link),\n caption=message['body'],\n parse_mode=types.ParseMode.MARKDOWN_V2)\n else:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=types.InputFile.from_url(link),\n caption=message['body'])\n elif 'photo' in message:\n file = types.InputFile(base64.b64decode(message['photo']), filename='photo.jpg')\n if as_html:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=file,\n caption=message['body'],\n parse_mode=types.ParseMode.HTML)\n elif as_markdown_v2:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=file,\n caption=message['body'],\n parse_mode=types.ParseMode.MARKDOWN_V2)\n else:\n result: types.Message = await self.Bot.send_photo(chat_id=message['chat_id'],\n photo=file,\n caption=message['body'])\n else:\n result: types.Message = await self.Bot.send_message(chat_id=message['chat_id'],\n text=message['body'])\n return dict(success=True, message_id=result.message_id)\n except Exception as e:\n LOGGER.log(logging.INFO, msg=\"Failed to send message, because %s\" % e)\n return dict(success=False, detail=e, teapot=True)\n else: # Обход проверки пользователя и его регистрации для стравления в обработчик бота (если таковой имеется)\n if hasattr(self, 'handler'):\n if self.handler.can_send:\n try:\n result: types.Message = await self.handler.send_to(self.Bot, message)\n return dict(success=True, message_id=result.message_id)\n except Exception as e:\n return dict(success=False, detail=str(e))\n else:\n LOGGER.log(logging.INFO, msg=\"Failed to send message, because broken bot (check handler)\")\n return dict(success=False, detail=\"broken\")\n\n async def edit(self, message):\n LOGGER.log(logging.INFO, msg=\"Request to edit message %s in chat %s\" % (message['message_id'],\n message['chat_id']))\n if self.bot.custom_handler:\n try:\n result = await self.handler.edit(self.Bot, message)\n return dict(success=True, message_id=result.message_id)\n except Exception as e:\n return dict(success=False, detail=e.args)\n else:\n try:\n result: types.Message = await self.Bot.edit_message_text(chat_id=message['chat_id'],\n message_id=message['message_id'],\n text=message['body'])\n return dict(success=True, message_id=result.message_id)\n except Exception as e:\n return dict(success=False, detail=e.args)\n\n async def start_handler(self, event: types.Message):\n LOGGER.log(logging.INFO, msg='Got start command from %s:%s' % (self.bot.platform, event.from_user.username))\n if not User.get(event.from_user.id):\n new_user = User(user_id=event.from_user.id, first_name=event.from_user.first_name,\n last_name=event.from_user.last_name, username=event.from_user.username,\n platform=self.bot.platform)\n new_user.save()\n else:\n if self.bot.start_callback:\n message = json.dumps(dict(bot=self.bot.name, chat_id=event.from_user.id, command=event.get_command()))\n if self.bot.callback_auth:\n await http_session.post(self.bot.start_callback,\n auth=BasicAuth(self.bot.callback_login, self.bot.callback_password),\n data=message)\n else:\n await http_session.post(self.bot.start_callback, data=message)\n else:\n if self.bot.start_message:\n await event.answer(self.bot.start_message)\n\n async def message_handler(self, event: types.Message):\n user = User.get(event.from_user.id)\n LOGGER.log(logging.INFO, msg='Fetched message from %s' % user.username)\n new_message = Message(date=datetime.now(), sender=user.username, body=event.text, recipient=self.bot.name,\n message_id=event.message_id, processed=True)\n new_message.save()\n if self.bot.message_callback:\n message = json.dumps(dict(bot=self.bot.name, chat_id=event.from_user.id, body=event.text))\n if self.bot.callback_auth:\n await http_session.post(self.bot.start_callback,\n auth=BasicAuth(self.bot.callback_login, self.bot.callback_password),\n data=message)\n else:\n await http_session.post(self.bot.start_callback, data=message)\n else:\n if self.bot.default_response:\n await event.answer(self.bot.default_response)\n\n async def start_polling(self):\n await self.Dispatcher.start_polling()\n\n async def close(self):\n await self.Bot.close_bot()\n self.bot.delete()\n\n\n__default_class__ = TelegramBot\n","repo_name":"no1tx/botfarm","sub_path":"platforms/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":10462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14773326988","text":"from pathlib import Path\nfrom platform import system\nfrom subprocess import Popen, TimeoutExpired, run, PIPE\n\n\ndef run_win_proc(cmd,\n timeout=10,\n out_path=''):\n \"\"\"Run a subprocess on Windows.\n\n Args:\n cmd (list): the command to be executed.\n timeout (float): Minute timeout for the command. Only runs the process if the value is positive.\n out_path (str): Path to where to write stdout.\n \"\"\"\n if timeout > 0 and system() == 'Windows':\n timeout_expired = False\n timeout *= 60 # minutes to seconds\n try:\n if out_path:\n pr = Popen(cmd, stdout=PIPE)\n else:\n pr = Popen(cmd)\n out, err = pr.communicate(timeout=timeout)\n except TimeoutExpired:\n kill = Path(cmd[1]).name\n kill = \"Taskkill /IM {} /F\".format(kill)\n _ = run(kill, capture_output=True)\n timeout_expired = True\n out = str.encode('Timeout Achieved.')\n if out_path:\n with open(out_path, 'wb') as f:\n _ = f.write(out)\n\n if timeout_expired:\n raise TimeoutExpired(' '.join(cmd), timeout)\n\n","repo_name":"MatteoLacki/vodkas","sub_path":"vodkas/subproc.py","file_name":"subproc.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42154656844","text":"import json\nimport sys\n\ndef get_comments(inpath):\n with open(inpath) as data_file:\n comments = json.load(data_file)\n comments_formatted = []\n for c in comments:\n comments_formatted.append(c[\"commentText\"]) # push each comment into list\n return comments_formatted\n\ndef get_vocab(type):\n with open(\"vocabulary/\"+type+\"_formatted.txt\") as vocab:\n vocab_formatted = []\n for v in vocab:\n vocab_formatted.append(v[:-1]) # push each vocabulary word into list\n return vocab_formatted\n\n\nif __name__ == \"__main__\":\n out_name = 'adverbs'\n if len(sys.argv) > 1:\n out_name = sys.argv[1]\n output = open('vocabulary/' + out_name + '_formatted.txt','w')\n with open(\"vocabulary/\" + out_name + \".txt\") as vocab:\n for v in vocab:\n output.write(v.lower()) # push each vocabulary word into list\n\n","repo_name":"catatonicTrepidation/DSoL","sub_path":"GetFromFile.py","file_name":"GetFromFile.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"45687019499","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\n\nn, e = map(int, input().split())\n\ngraph = [[] for _ in range(n+1)]\nfor _ in range(e):\n a, b ,c = map(int, input().split())\n graph[a].append([b, c])\n graph[b].append([a, c])\n\nv1, v2 = map(int, input().split())\n\n\ndef dijkstra(v):\n visited = [1e9] * (n + 1)\n heap = []\n heap.append((0, v))\n visited[v] = 0\n\n while heap:\n w, v = heapq.heappop(heap)\n\n if w > visited[v]:\n continue\n\n for nv, nw in graph[v]:\n if w + nw < visited[nv]:\n heapq.heappush(heap, (w+nw, nv))\n visited[nv] = visited[v] + nw\n\n return visited\n\n\nv_dist = dijkstra(1)\nv1_dist = dijkstra(v1)\nv2_dist = dijkstra(v2)\npath1 = v_dist[v1] + v1_dist[v2] + v2_dist[n]\npath2 = v_dist[v2] + v2_dist[v1] + v1_dist[n]\npath = min(path1, path2)\nprint(path if path < 1e9 else -1)\n\n","repo_name":"ehdbs0903/algorithm-python","sub_path":"Dijkstra/boj_1504.py","file_name":"boj_1504.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9204438050","text":"import cv2\nimport numpy as np\n'''\nThis method is useful to split or slice an image into different pieces\n\nsliceWidth, sliceHeight:\n These are the params that set the height and width of the slices in which the\n original image will be divided.\n\nimagePath:\n This is the location path of the original image that will be split\n\nimageName:\n This param is the seed name that the pieces will have, \n for example: '00000000'\n\nformat:\n This will establish the file extension of the image for example .png or .jpg\n\ndestDir: \n This is the path of the directory where the image slices will be saved\n'''\ndef myImageSlicer(sliceWidth,sliceHeight,imagePath):\n y = 0\n x = 0\n width = sliceWidth\n height = sliceHeight\n ai1 = 0\n ai2 = 0\n img = cv2.imread(imagePath)\n originalHeight = img.shape[0]\n originalWidth = img.shape[1]\n images_matrix = np.empty(shape=(int(originalHeight/sliceHeight),int(originalWidth/sliceWidth))+(0,)).tolist()\n while y < originalWidth:\n while x < originalHeight:\n crop_img = img[x:x+height, y:y+width]\n images_matrix[ai2][ai1] = crop_img\n #cv2.imwrite('slice_0'+str(ai2)+'_0'+str(ai1)+'.jpg', crop_img)\n x = x + height\n ai2 = ai2 + 1\n \n x = 0\n y = y + width\n ai2 = 0\n ai1 = ai1 + 1\n\n return images_matrix","repo_name":"ManfredGonzalez/scaling-augmentation","sub_path":"utils/image_splitter.py","file_name":"image_splitter.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"657296521","text":"\"\"\"Types and helpers for manipulating coordinates.\"\"\"\n\nfrom typing import NamedTuple, Union\n\nimport numpy as np\nfrom numpy import arctan2, float64, linalg\n\nAnyFloat = Union[float, float64]\n\n_Cartesian = NamedTuple('Cartesian', (\n ('x', AnyFloat),\n ('y', AnyFloat),\n ('z', AnyFloat),\n))\n\n\nclass Cartesian(_Cartesian):\n \"\"\"Cartesian coordinates.\"\"\"\n\n __slots__ = ()\n\n def tolist(self):\n \"\"\"Placeholder helper to ease migration within robotd.\"\"\"\n return list(self)\n\n\nSpherical = NamedTuple('Spherical', (\n ('rot_x', AnyFloat),\n ('rot_y', AnyFloat),\n ('dist', AnyFloat),\n))\n\nLegacyPolar = NamedTuple('LegacyPolar', (\n ('polar_x', AnyFloat),\n ('polar_y', AnyFloat),\n ('dist', AnyFloat),\n))\n\n\nPixelCoordinate = NamedTuple('PixelCoordinate', [('x', AnyFloat), ('y', AnyFloat)])\n\n\ndef cartesian_to_spherical(cartesian: Cartesian) -> Spherical:\n \"\"\"Convert a Cartesian coordinate into a spherical one.\"\"\"\n x, y, z = cartesian\n return Spherical(\n rot_x=arctan2(y, z),\n rot_y=arctan2(x, z),\n dist=linalg.norm(cartesian),\n )\n\n\ndef cartesian_to_legacy_polar(cartesian: Cartesian) -> LegacyPolar:\n \"\"\"\n Convert cartesian co-ordinate space to the legacy \"polar\" space.\n\n This is kept for compatibility only.\n \"\"\"\n cart_x, cart_y, cart_z = tuple(cartesian)\n polar_dist = np.linalg.norm(cartesian)\n polar_x = np.arctan2(cart_z, cart_x)\n polar_y = np.arctan2(cart_z, cart_y)\n return LegacyPolar(polar_x, polar_y, polar_dist)\n","repo_name":"sourcebots/sb-vision","sub_path":"sb_vision/coordinates.py","file_name":"coordinates.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"74518188215","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('exhibit/create/', views.create_exhibit, name='create_exhibit'),\n path('event/create/', views.create_event, name='create_event'),\n path('talent/create/', views.create_talent, name='create_talent'),\n path('review/create/', views.create_review, name='create_review'),\n path('favorite///',\n views.create_favorite, name='create_favorite'),\n path('review/delete//', views.delete_review, name='delete_review'),\n]","repo_name":"ekanshthakur15/Yogya-CS301-assignment","sub_path":"yogya/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71609970936","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n# import os\n\n\n# data_files = [(d, [os.path.join(d, f) for f in files])\n# for d, folders, files in os.walk(os.path.join('src', 'config'))]\n\nDESC ='syslog etl that converts syslog messages using grok and saves them to mongo'\nsetup(name='syslog_svc_etl',\n version='1.0',\n description=DESC,\n author='adam pridgen',\n author_email='dso@thecoverofnight.com',\n install_requires=['toml', 'rule_chains', 'pygrok', 'pymongo', 'pytz'],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n package_data={\n 'syslog_svc_etl': [],\n },\n # data_files=data_files,\n)\n","repo_name":"deeso/syslog-etl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20881437361","text":"import streamlit as st\nimport os\nfrom dotenv import load_dotenv\n\n\n\n\n# Create a function for each page\ndef home():\n \n #import os\n from langchain.text_splitter import CharacterTextSplitter\n\n # from langchain.text_splitter import RecursiveCharacterTextSplitter\n from langchain.prompts import PromptTemplate\n from langchain.chains import LLMChain\n from langchain.document_loaders import PyPDFLoader\n from langchain.document_loaders import TextLoader\n from langchain.vectorstores import Pinecone\n from langchain.chat_models import ChatOpenAI\n from langchain.embeddings.openai import OpenAIEmbeddings\n from langchain.chains.question_answering import load_qa_chain\n from langchain.chains import ConversationChain\n from langchain.chains.conversation.memory import ConversationBufferMemory\n from langchain.document_loaders import YoutubeLoader\n\n from langchain.chains.summarize import load_summarize_chain\n from langchain.document_loaders import UnstructuredURLLoader, SeleniumURLLoader\n import pinecone\n #import nltk\n\n #nltk.download('punkt')\n # openai_api_key = \"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\"\n\n os.environ[\"PINECONE_API_KEY\"] = pineconekey\n os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n\n st.title(\"🦜\")\n # Add content specific to the home page\n # import streamlit as st\n import requests\n\n API_URLS = [\n \"https://flowise-production-a606.up.railway.app/api/v1/prediction/8f17b231-6b0c-4ab6-929d-214d368e111e\",\n # DOCgpt\n \"https://flowise-production-a606.up.railway.app/api/v1/prediction/6a421494-72c9-42f1-9520-a84591bbdc54\",\n # GoogleGPT\n \"https://flowise-production-a606.up.railway.app/api/v1/prediction/8f17b231-6b0c-4ab6-929d-214d368e111e\"\n # finetune answer from doc - web - openai\n\n ]\n\n # Initialize the selected API index\n selected_api_index = 2\n\n def query(payload):\n\n try:\n response = requests.post(API_URLS[0], json=payload)\n response.raise_for_status()\n return response.json()\n except requests.exceptions.RequestException as err:\n st.error(\"Error occurred:\", err)\n return None\n\n def query1(payload):\n try:\n response = requests.post(API_URLS[1], json=payload)\n response.raise_for_status()\n return response.json()\n except requests.exceptions.RequestException as err:\n st.error(\"Error occurred:\", err)\n return None\n\n # st.markdown(\"
    \", unsafe_allow_html=True)\n\n # List to hold the conversation history\n conversation = []\n chatlist = []\n\n if selected_api_index == 2:\n # Chat container\n\n # PDF UPLOADER\n\n docs_chunks = []\n\n # openai_api_key = os.environ.get('API_KEY')\n # openai_api_key = \"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\"\n # pineconekey = \"f4e3f5b8-fc9a-4d6d-be18-ba5f200e0e52\"\n # pineconeEnv = \"us-west1-gcp-free\"\n\n # Initialize Pinecone\n pinecone.init(api_key=pineconekey, environment=pineconeEnv)\n # index_name2 = \"babyagi\"\n\n # embeddings\n\n embeddings = OpenAIEmbeddings()\n\n # image = Image.open('ai.png')\n # st.image(image, caption='AI', width=200)\n\n with st.sidebar:\n docs_chunks1 = []\n loader_choice = st.radio(\"Select type of link🔗\", [\"🛑-Youtube URL\", \"🌐-Blog/website URL\"])\n\n \n\n url= st.text_input(\"🔗 Paste URL here:\")\n\n if loader_choice == \"🛑-Youtube URL\":\n if st.button(\"(RUN)✅ Press Here to Run\"):\n loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)\n data = loader.load()\n print(\"Data loaded successfully with youtubeloader.\")\n\n text_splitter = CharacterTextSplitter(separator='\\n',\n chunk_size=1000,\n chunk_overlap=100)\n\n docs = text_splitter.split_documents(data)\n docs_chunks1.extend(docs)\n #st.write(docs)\n #pinecone.init(api_key=pineconekey, environment=pineconeEnv)\n\n index3 = Pinecone.from_documents(docs_chunks1, embeddings, index_name=index_name2)\n\n docs_chunks1.clear()\n\n\n if loader_choice==\"🌐-Blog/website URL\":\n \n\n\n urls_list = st.session_state.get('urls_list', [])\n\n if st.button(\"(ADD)🔼 Press Here to add url to the list \"):\n urls_list.append(url)\n st.session_state['urls_list'] = urls_list\n st.write(urls_list)\n\n if st.button(\"(CLEAR)🧹 Clear the list\"):\n st.session_state['urls_list'] = []\n st.write(\"List is empty now. Please paste your URLs one by one.\")\n\n if st.button(\"(RUN)✅ Press Here to Run\"):\n urls = [\n\n 'https://cobusgreyling.medium.com/openai-function-calling-98fbf9539d2a'\n ]\n\n try:\n loaders = UnstructuredURLLoader(urls=urls)\n data = loaders.load()\n print(\"Data loaded successfully with UnstructuredURLLoader.\")\n st.write(data)\n\n except Exception as e:\n st.write(\"URL not supported\")\n print(\"Error loading data with UnstructuredURLLoader:\", e)\n print(\"Trying with SeleniumURLLoader...\")\n\n try:\n loader = SeleniumURLLoader(urls=urls)\n data = loader.load()\n print(\"Data loaded successfully with SeleniumURLLoader.\")\n except Exception as e:\n st.write(\"URL not supported\")\n print(\"Error loading data with SeleniumURLLoader:\", e)\n print(\"Both loaders failed to load data.\")\n\n text_splitter = CharacterTextSplitter(separator='\\n',\n chunk_size=1000,\n chunk_overlap=100)\n docs = text_splitter.split_documents(data)\n\n docs_chunks1.extend(docs)\n \n\n index4 = Pinecone.from_documents(docs_chunks1, embeddings, index_name=index_name2)\n\n\n docs_chunks1.clear()\n\n\n\n\n\n def process_file(uploaded_file):\n\n bytes_data = uploaded_file.read()\n st.write(\"filename:\", uploaded_file.name)\n _, file_extension = os.path.splitext(uploaded_file.name)\n\n # write the uploaded file to disk\n with open(uploaded_file.name, 'wb') as f:\n f.write(bytes_data)\n\n documents = None\n if file_extension.lower() == '.pdf':\n # Load the PDF file with PyPDF Loader\n loader = PyPDFLoader(uploaded_file.name)\n documents = loader.load()\n\n elif file_extension.lower() == '.txt':\n # Load the text file with TextLoader\n loader = TextLoader(uploaded_file.name, encoding='utf8')\n documents = loader.load()\n\n else:\n raise ValueError(f\"Unsupported file type: {file_extension}\")\n\n return documents\n\n def split_docs(documents, chunk_size=1000, chunk_overlap=100):\n text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n\n docs = text_splitter.split_documents(documents)\n return docs\n\n uploaded_files = st.file_uploader(\"Choose a PDF file\", accept_multiple_files=True)\n if st.button('upload'):\n\n if len(uploaded_files) > 0:\n for uploaded_file in uploaded_files:\n documents = process_file(uploaded_file)\n docs_chunks.extend(split_docs(documents))\n index1 = Pinecone.from_documents(docs_chunks, embeddings, index_name=index_name2, overwrite=True)\n\n\n\n\n # selected_api_index == 4\n\n # User input\n user_input = st.text_input(\"User:\", key=\"user_input\")\n submit_button = st.button(\"ASK\", key=\"submit_button\")\n\n # if selected_api_index==4:\n\n if submit_button and user_input:\n response = query({\"history\": conversation, \"question\": user_input})\n response2 = \"Hello! How can I assist you today?\"\n response1 = query1({\"history\": conversation, \"question\": user_input})\n # response1 = query1({\"history\": conversation, \"question\": user_input})\n # Add user input to conversation history\n conversation.append({\"role\": \"user\", \"question\": user_input})\n # Query the selected API\n\n if selected_api_index == 2:\n\n if response or response1 is not None:\n # Add bot response to conversation history\n chatlist.append({\"Dtllm_result\": response2, \"document result\": response})\n else:\n chatlist.append(\n {\n \"result\": \"bot\",\n \"content\": \"Sorry, I am unable to process your request at the moment.\",\n }\n )\n\n model_name = \"gpt-3.5-turbo\"\n llm = ChatOpenAI(temperature=0.2, model_name=model_name)\n conversation1 = ConversationChain(\n llm=llm,\n verbose=True,\n #memory=ConversationBufferMemory()\n )\n\n \n\n answer1 = conversation1.predict(\n input=f\"For the question{user_input} just answer it from this content given only give the content answer {chatlist} if u dont feel answer is correct or do not found the answer in the context provided or if the quetion is realted to realtime information strictly give strictly output with word No \")\n #input=f\"your goal is to provide accurate responses to user queries. you will utilize the provided content, specifically the {chatlist}, to generate an answer based on the user's {user_input}. If you can find a suitable answer in the content, you will provide it as the output. However, if you cannot find a relevant answer or struggle to provide an appropriate response, you will strictly give output with word {No} Please note that your responses will be limited to either an accurate answer from the content or a straightforward word {No} if the answer cannot be determined.\")\n #input=f\" you are an chat bot your work is to give correct response to the user from this user input :- {user_input} just answer it from this content given output answer should be either answer from content or give No as output if you donot know the answer this the content:-{chatlist} if u dont feel answer/response is correct for the user input or do not find the answer in the content strictly give strictly output with word No donot make any sentences\")\n st.write(\"answer 1\", answer1)\n chatlist.clear()\n\n \n\n\n\n if answer1==\"No\" :\n if response1 or response is not None:\n # Add bot response to conversation history\n chatlist.append({\"internet_result\": response1, \"document result\": response})\n else:\n chatlist.append(\n {\n \"result\": \"bot\",\n \"content\": \"Sorry, I am unable to process your request at the moment.\",\n }\n )\n\n model_name = \"gpt-3.5-turbo\"\n llm = ChatOpenAI(temperature=0.2, model_name=model_name)\n conversation1 = ConversationChain(\n llm=llm,\n verbose=True,\n #memory=ConversationBufferMemory()\n )\n\n answer2 = conversation1.predict(\n input=f\"For the question{user_input} just answer it from this content given {chatlist} strictly give output between internet_result and document result just give content answer\")\n st.write(\"answer 2 \", answer2)\n\n\n chatlist.clear()\n\n \n\n\n\n\n\n\n# Create a dictionary mapping page names to the corresponding functions\npages = {\n \"🦜\": home\n}\nst.sidebar.title(\"Paste your URL 🤖 BELOW\")\n# Add a sidebar to navigate between pages\npage = st.sidebar.radio(\".\", options=list(pages.keys()))\n\nwith st.sidebar:\n #APPLYING SECRETS\n load_dotenv()\n openai_api_key = os.getenv(\"openai_api_key\")\n pineconekey = os.getenv(\"pineconekey\")\n pineconeEnv = \"us-west1-gcp-free\"\n index_name2 = \"axstream\"\n serp_api = os.getenv(\"serp_api\")\n\nif openai_api_key and pineconekey and pineconeEnv and index_name2 and serp_api:\n\n st.success(\"!\")\n pages[page]()\n\n # You can use the API keys in your code here\n # For example, make API requests using the keys\n\n # ...\nelse:\n st.warning(\"Please fill correct API keys .\")\n\n# Execute the function corresponding to the selected page\n# pages[page]()\n","repo_name":"imadarsh9686/url","sub_path":"multiagent.py","file_name":"multiagent.py","file_ext":"py","file_size_in_byte":13416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13697948382","text":"from degas.products import makeMap\nfrom degas.analysis_setup import regridData, smoothCube\nfrom degas.masking import cubemask\nfrom spectral_cube import SpectralCube\n\nimport glob\nimport os\nimport shutil\nfrom astropy.table import Table\n\nrelease = 'IR6p1'\n\nreleaseDir = os.path.join(os.environ['ANALYSISDIR'],release)\ncoDir = os.path.join(os.environ['ANALYSISDIR'],'CO')\nhiDir = os.path.join(os.environ['ANALYSISDIR'],'ancillary_data','hi_from_jiayi','33arcsec')\n\nmultiDir = os.path.join(os.environ['ANALYSISDIR'],'ancillary_data','multiwavelength')\n\nregridDir = os.path.join(os.environ['ANALYSISDIR'],release+'_regrid_33as')\n\nif not os.path.exists(regridDir):\n os.mkdir(regridDir)\n\nhcnlist = glob.glob(os.path.join(releaseDir,'*HCN*hanning1.fits'))\n\nbeam = 33.357764283516 # arcsec; EMPIRE beam\nmaxnchan = 131\n\nfor hcn in hcnlist:\n\n name = os.path.basename(hcn).split('_')[0].upper()\n\n print(\"** processing \" + name + \" HCN **\")\n \n # process HCN -- just need to smooth here because taking as base.\n hcnCube = SpectralCube.read(hcn)\n hcnOutCube = hcnCube[0:maxnchan] \n hcnOut = hcn.replace('.fits','_maxnchan.fits')\n hcnOutCube.write(os.path.join(releaseDir,hcnOut),overwrite=True)\n\n hcn_smooth = smoothCube(hcnOut, releaseDir, beam=beam)\n shutil.copy(hcn_smooth,os.path.join(regridDir,os.path.basename(hcn_smooth)))\n \n print(\"** processing \" + name + \" HCO+ **\")\n\n # process HCO+ -- smooth and regrid\n hcop = hcn.replace('HCN','HCOp')\n hcop_smooth = smoothCube(hcop,releaseDir,beam=beam)\n regridData(hcn_smooth,hcop_smooth,regridDir) # HCN and HCO+ taken at same time, so not need to check for existance first.\n\n print(\"processing \" + name + \" 13CO\")\n\n # process 13CO -- smooth and regrid\n thirteenCO = hcn.replace('HCN','13CO')\n if os.path.exists(thirteenCO):\n thirteenCO_smooth = smoothCube(thirteenCO,releaseDir,beam=beam)\n regridData(hcn_smooth, thirteenCO_smooth,regridDir)\n\n print(\"processing \" + name + \" C18O\")\n\n # process C18O -- smooth and regrid\n c18o = hcn.replace('HCN','C18O')\n if os.path.exists(c18o):\n c18o_smooth = smoothCube(c18o, releaseDir, beam=beam)\n regridData(hcn_smooth,c18o_smooth,regridDir)\n\n print(\"** processing \" + name + \" 12CO **\")\n\n # process 12CO \n co = glob.glob(os.path.join(coDir,name+'_12CO??.fits'))[0]\n if os.path.exists(co):\n co_smooth = smoothCube(co, releaseDir, beam=beam)\n co_regrid = regridData(hcn_smooth,co_smooth,regridDir)\n \n \n #calculate 12CO masks (used in stacking initial noise estimate)\n peakCut = 5.0\n lowCut = 3.0\n if name in ['IC0342']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,lowCut=lowCut,\n threeD=True,\n minBeamFrac=1.5,\n minNchan=5.0)\n\n elif name in ['NGC2903','NGC3521','NGC4321','NGC4535','NGC4569']:\n # use phangs parameters to create mask\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut, lowCut=lowCut,\n minBeamFrac=2.0,\n minNchan=3.0)\n\n elif name in ['NGC2146','NGC5055','NGC6946']:\n # use heracles parameters to create mask\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,lowCut=lowCut,\n minBeamFrac=1.5)\n\n elif name in ['NGC0337']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=3.5,lowCut=2.0,\n minBeamFrac=1.5,\n minNchan=3.0,\n threeD=True)\n\n elif name in ['NGC3147']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=3.5,lowCut=2.0,\n minBeamFrac=1.5,threeD=True,minNchan=5.0) \n\n elif name in ['NGC3631']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,lowCut=lowCut,\n skipChan=[5]) \n\n elif name in ['NGC4030']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=3.5,lowCut=2.0,\n minBeamFrac=1.0,threeD=True,minNchan=3.0)\n\n elif name in ['NGC4501']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,lowCut=lowCut,\n skipChan=[15]) \n\n elif name in ['NGC4258']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,lowCut=lowCut) \n\n elif name in ['NGC4414']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut,\n lowCut=lowCut)\n\n elif name in ['NGC4038']:\n cubemask(co_regrid,\n name+'_12CO_mask.fits',\n outDir=regridDir,\n peakCut=peakCut, lowCut=lowCut)\n else:\n print(\"no 12CO mask created\")\n \n \n # process CO products \n maskFile = os.path.join(regridDir,name+'_12CO_mask.fits')\n makeMap(co_regrid,regridDir,maptype='peakIntensity')\n makeMap(co_regrid,regridDir,maptype='peakVelocity',maskFile=maskFile)\n makeMap(co_regrid,regridDir,maptype='moment',order=0)\n makeMap(co_regrid,regridDir,maptype='moment',order=1,maskFile=maskFile)\n\n\n # process HI products -- regrid only -- already smoothed\n print(\"** processing \" + name + \" HI **\")\n\n hi_mom0 = glob.glob(os.path.join(hiDir,name+'*_21cm_*_mom0_smooth.fits'))\n if len(hi_mom0) > 0:\n hi_mom0 = hi_mom0[0]\n if os.path.exists(hi_mom0):\n regridData(hcn_smooth,hi_mom0,regridDir)\n\n print(\"** processing \" + name + \" SFR **\")\n\n # process SFR \n sfr = name +'_sfr_fuvw4_gauss15.fits' \n inFile = os.path.join(multiDir,'data','sfr',sfr)\n if os.path.exists(inFile): \n sfr_smooth = smoothCube(inFile, releaseDir,beam=beam)\n # WARNING: Could not parse unit MSUN/YR/KPC2 [spectral_cube.cube_utils]\n ## TODO -- CHECK TO MAKE SURE THIS IS OKAY.\n regridData(hcn_smooth,sfr_smooth,regridDir)\n\n sfr = name+'_sfr_nuvw4_gauss15.fits' \n inFile = os.path.join(multiDir,'data','sfr',sfr)\n if os.path.exists(inFile):\n sfr_smooth = smoothCube(inFile, releaseDir,beam=beam)\n regridData(hcn_smooth,sfr_smooth,regridDir)\n\n sfr = name+'_sfr_fuvw3_gauss15.fits' \n inFile = os.path.join(multiDir,'data','sfr',sfr)\n if os.path.exists(inFile):\n sfr_smooth = smoothCube(inFile, releaseDir, beam=beam)\n regridData(hcn_smooth,sfr_smooth,regridDir)\n\n sfr = name+'_sfr_nuvw3_gauss15.fits' \n inFile = os.path.join(multiDir,'data','sfr',sfr)\n if os.path.exists(inFile):\n sfr_smooth = smoothCube(inFile, releaseDir, beam=beam)\n regridData(hcn_smooth,sfr_smooth,regridDir)\n\n # process the stellar mass data\n print(\"** processing \" + name + \" Mstar **\")\n\n mstar = name + \"_mstar_gauss15.fits\"\n \n if os.path.exists(os.path.join(multiDir,'data','mstarirac1',mstar)):\n inFile = os.path.join(multiDir,'data','mstarirac1',mstar)\n mstar_smooth = smoothCube(inFile, releaseDir, beam=beam)\n regridData(hcn_smooth, mstar_smooth, regridDir)\n elif os.path.exists(os.path.join(multiDir,'data','mstarw1',mstar)):\n inFile = os.path.join(multiDir,'data','mstarw1',mstar)\n mstar_smooth = smoothCube(inFile, releaseDir, beam=beam)\n regridData(hcn_smooth, mstar_smooth, regridDir)\n else:\n print(\"No stellar mass map found!\")\n\n # process the stellar mass data\n print(\"** processing \" + name + \" LTIR **\")\n \n ltir = name + \"_LTIR_gauss33.fits\"\n inFile = os.path.join(multiDir,'data','LTIR_calc_33as',ltir)\n if os.path.exists(inFile):\n regridData(hcn_smooth, inFile,regridDir)\n else:\n print(\"No LTIR map found!\")\n \n\n \n","repo_name":"GBTSpectroscopy/degas","sub_path":"scripts/align_to_degas_33as.py","file_name":"align_to_degas_33as.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"15794370371","text":"import os\nfrom . import handlers\nimport logging\nfrom telegram.ext import Updater, Filters, CallbackContext\nfrom telegram.ext import CommandHandler, ConversationHandler, MessageHandler\nfrom tg.telegram_consts import WELCOME_MESSAGE\nfrom telegram import ReplyKeyboardMarkup, Update\n\n\ndef start(update: Update, context: CallbackContext):\n reply_keyboard = [['Get contact', 'Add contact'], ['Delete contact', 'Get all contacts']]\n update.message.reply_text(WELCOME_MESSAGE,\n reply_markup=ReplyKeyboardMarkup(\n reply_keyboard, one_time_keyboard=False, input_field_placeholder='Select action'\n ),\n )\n\n\ndef start_bot() -> None:\n updater = Updater(token=os.environ['TOKEN'], use_context=True)\n dispatcher = updater.dispatcher\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\n get_contact_handler = ConversationHandler(\n entry_points=[MessageHandler(Filters.regex(\"Get contact\"), handlers.start_get_contact)],\n states={\n handlers.NAME: [MessageHandler(Filters.text & ~Filters.command & ~Filters.regex(\n '^(Get contact|Add contact|Get all contacts|Delete contact|)$'), handlers.find_contact)],\n },\n allow_reentry=True,\n fallbacks=[CommandHandler('cancel', handlers.cancel)],\n )\n\n add_contact_handler = ConversationHandler(\n entry_points=[MessageHandler(Filters.regex(\"Add contact\"), handlers.start_add_contact)],\n states={\n handlers.NAME: [MessageHandler(Filters.text & ~Filters.command & ~Filters.regex(\n '^(Get contact|Add contact|Get all contacts|Delete contact|)$'), handlers.add_name)],\n handlers.PHONE: [MessageHandler(Filters.text & ~Filters.command & ~Filters.regex(\n '^(Get contact|Add contact|Get all contacts|Delete contact|)$'), handlers.add_phone)]\n },\n allow_reentry=True,\n fallbacks=[CommandHandler('cancel', handlers.cancel)],\n )\n\n delete_contact_handler = ConversationHandler(\n entry_points=[MessageHandler(Filters.regex('Delete contact'), handlers.start_delete_contact)],\n states={\n handlers.NAME: [MessageHandler(Filters.text & ~Filters.command & ~Filters.regex(\n '^(Get contact|Add contact|Get all contacts|Delete contact|)$'), handlers.delete_one)],\n },\n\n allow_reentry=True,\n fallbacks=[CommandHandler('cancel', handlers.cancel)],\n )\n get_all_contacts = MessageHandler(Filters.regex('Get all contacts'), handlers.get_all)\n\n json_handler = CommandHandler('json', handlers.json_handler)\n\n dispatcher.add_handler(get_contact_handler)\n dispatcher.add_handler(add_contact_handler)\n dispatcher.add_handler(get_all_contacts)\n dispatcher.add_handler(delete_contact_handler)\n dispatcher.add_handler(json_handler)\n\n start_handler = CommandHandler('start', start)\n dispatcher.add_handler(start_handler)\n updater.start_polling()\n updater.idle()\n","repo_name":"alladinattar/contacts-book","sub_path":"tg/tg.py","file_name":"tg.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"27152419244","text":"# -*- coding:utf-8 -*-\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef find_mid(head):\n \"\"\"\n 找到该list中间的元素\n :param head: the head of list\n :return:\n \"\"\"\n slow = head\n fast = head\n\n while fast.next is not None and fast.next.next is not None:\n slow = slow.next\n fast = fast.next.next\n\n return slow\n\n\ndef find_mid_recursion(node, index):\n \"\"\"\n find the mid by recursion\n :param node:\n :param index:\n :return:\n \"\"\"\n global mid\n if node.next is not None:\n find_mid_recursion(node.next, index+1)\n else:\n mid = int(index / 2)\n\n if mid == index:\n return node\n\n return None\n\n\ndef check_cycle(node):\n \"\"\"\n check whether exist cycle in the linklist\n :param node:\n :return:\n \"\"\"\n slow = node\n fast = node\n\n while fast.next is not None and fast.next.next is not None:\n slow = slow.next\n fast = fast.next.next\n\n if slow == fast:\n return True\n\n return False\n\n\ndef reverse_recursion(curr, next):\n \"\"\"\n reverse the link list by recursion\n :param curr:\n :param next:\n :return:\n \"\"\"\n if next is None:\n return\n reverse_recursion(next, next.next)\n next.next = curr\n curr.next = None\n\n\ndef reverse(node):\n \"\"\"\n reverse the link list without recursion\n :param node:\n :return:\n \"\"\"\n curr = node\n next = node.next\n curr.next = None\n\n while next is not None:\n temp = next.next\n next.next = curr\n\n curr = next\n next = temp\n\n\ndef find_diff(node):\n \"\"\"\n in a sort link list, remove the duplicate value\n :param node:\n :return:\n \"\"\"\n curr = node\n record = None\n\n while curr.next is not None:\n next_node = curr.next\n if curr.val == next_node.val and record is None:\n record = curr\n elif curr.val != next_node.val and record is not None:\n record.next = next_node\n record = None\n\n curr = curr.next\n\n\ndef sum_list(l1, l2):\n \"\"\"\n add two link list together\n 将链表代表的数字进行相加即可,注意首位代表的是个位。3->1->5 代表513,5->9->2 代表295,最终计算结果为 8->0->8, 808。\n :param l1:\n :param l2:\n :return:\n \"\"\"\n over_ten = False\n s = ListNode(0)\n head = s\n\n while l1 is not None or l1 is not None:\n v1 = l1.val if l1 is not None else 0\n v2 = l2.val if l2 is not None else 0\n\n sum_value = v1 + v2\n sum_value = sum_value + 1 if over_ten else sum_value\n\n if sum_value >= 10:\n over_ten = True\n sum_value = sum_value % 10\n\n s.val = sum_value\n\n l1 = l1.next if l1 is not None else None\n l2 = l2.next if l2 is not None else None\n\n if l1 is None and l2 is None:\n break\n else:\n s.next = ListNode(0)\n s = s.next\n\n return head\n\n\ndef merge_two_list(l):\n \"\"\"\n split to two list\n :param l:\n :return:\n \"\"\"\n odd = ListNode(0)\n even = ListNode(0)\n\n odd_head = odd\n even_head = even\n\n index = 1\n while l is not None:\n if index % 2 != 0:\n odd.val = l.val\n odd.next = ListNode(None)\n odd = odd.next\n else:\n even.val = l.val\n even.next = ListNode(None)\n even = even.next\n\n l = l.next\n\n odd = None\n even = None\n\n reverse_recursion(odd_head, odd_head.next)\n reverse_recursion(even_head, even_head.next)\n\n # merge them together\n\n\ndef delete_n_element(l, n):\n \"\"\"\n 删除倒数n的element\n :param l:\n :param n:\n :return:\n \"\"\"\n index = 0\n curr = l\n corr = l\n\n while curr is not None:\n if index >= n + 1:\n corr = corr.next\n curr = curr.next\n index += 1\n\n print(corr.val)\n\n\n\n\n\nif __name__ == '__main__':\n n0 = ListNode(3)\n n1 = ListNode(1)\n n2 = ListNode(5)\n # n3 = ListNode(3)\n\n n0.next = n1\n n1.next = n2\n n2.next = None\n\n n3 = ListNode(5)\n n4 = ListNode(9)\n n5 = ListNode(2)\n\n n3.next = n4\n n4.next = n5\n n5.next = None\n\n delete_n_element(n0, 1)\n\n # print(head.val, head.next.val, head.next.next.val)\n","repo_name":"ShunyangLi/algorithm","sub_path":"List/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73233474937","text":"from os import path, system\nimport pathlib\nimport torch\nimport hydra\nimport logging\n\nfrom . import eval\n\nfrom .sac import SAC\nfrom .envs import make_env\nfrom .logger import Logger\nfrom .replay_buffer import ReplayBuffer\nfrom .utils import fix_seed, get_space_info\nfrom .exploration_buffer import ExplorationBuffer\nfrom .replay_buffer_filler import ReplayBufferFiller\nfrom .rnet import rnet_utils, RNetMemory, RNetModel, RNetPairsSplitDataset\n\n\nlog = logging.getLogger(__name__)\n\n\ndef train_rnet(cfg, model, expl_buffer, vis_log, device):\n dataset = RNetPairsSplitDataset(cfg.rnet.dataset, expl_buffer)\n rnet_utils.train(cfg.rnet.train, model, dataset, device, vis_log)\n\n\ndef train_policy(\n cfg,\n expl_buffer,\n rnet_model,\n memory,\n space_info,\n device,\n vis_log,\n):\n env = make_env(cfg.env, space_info)\n uniform_action_fn = env.action_space.sample\n env.close()\n\n replay_buffer = ReplayBuffer(cfg.replay_buffer, space_info)\n agent = SAC(cfg.sac, space_info, device)\n replay_buffer_filler = ReplayBufferFiller(\n replay_buffer,\n expl_buffer,\n cfg,\n space_info,\n device,\n memory,\n rnet_model,\n agent=agent,\n uniform_action_fn=uniform_action_fn\n )\n\n procs, buffers, barriers, n_eval_done, info_keys = eval.start_procs(cfg, space_info)\n\n num_updates = 0\n for epoch in range(cfg.optim.num_epochs):\n log.info(f\"epoch: {epoch}\")\n\n # TRAIN\n replay_buffer.to(\"cpu\")\n log.info(\"filling replay buffer\")\n replay_buffer_filler.run()\n\n log.info(\"train one epoch\")\n replay_buffer.to(device)\n train_stats = agent.train_one_epoch(replay_buffer)\n log.info(\n \"train \" + \" - \".join([f\"{k}: {v:.2f}\" for k, v in train_stats.items()])\n )\n num_updates += train_stats[\"updates\"]\n train_stats[\"updates\"] = num_updates\n vis_log.add_stats(train_stats, epoch, \"train\")\n\n # EVAL\n if epoch % cfg.eval.interval_epochs == 0:\n eval_stats = eval.run(\n agent,\n cfg.eval.num_episodes,\n buffers,\n barriers,\n n_eval_done,\n info_keys,\n rnet_model,\n device\n )\n log.info(\n \"eval \" + \" - \".join([f\"{k}: {v:.2f}\" for k, v in eval_stats.items()])\n )\n vis_log.add_stats(eval_stats, epoch, \"eval\")\n\n if epoch % cfg.main.save_interval == 0:\n agent.save_checkpoint(cfg.main.logs_dir, epoch)\n\n for p in procs:\n p.join()\n\n\n@hydra.main(config_path=\"../conf\", config_name=\"config.yaml\")\ndef main(cfg):\n cfg.main.cwd = hydra.utils.get_original_cwd()\n logs_dir = pathlib.Path(cfg.main.cwd) / \"logs\" / cfg.main.name\n cfg.main.logs_dir = logs_dir.as_posix()\n logs_dir.mkdir(exist_ok=True)\n vis_log = Logger(cfg)\n log.info(f\"exp name: {cfg.main.name}\")\n\n fix_seed(cfg.main.seed)\n\n # setup paths and load\n rnet_path = logs_dir / \"model.pth\"\n embs_path = logs_dir / \"embs.pth\"\n memory_path = logs_dir / \"memory.npy\"\n if cfg.main.load_from_dir is not None:\n for file in [\"model.pth\", \"memory.npy\", \"embs.pth\"]:\n load_path = path.join(cfg.main.load_from_dir, file)\n if path.exists(load_path):\n log.info(f\"copying from {load_path}\")\n system(f\"cp {load_path} {logs_dir}/\")\n\n device = torch.device(\"cuda\")\n space_info = get_space_info(cfg.env.obs, cfg.env.action_dim)\n expl_buffer = ExplorationBuffer(cfg)\n\n if cfg.main.reward in [\"rnet\", \"graph\", \"graph_sig\"]:\n # RNet\n rnet_model = RNetModel(cfg.rnet.model, space_info).to(device)\n log.info(rnet_model)\n if path.exists(rnet_path):\n log.info(f\"Loading RNet from {rnet_path}\")\n rnet_model.load(rnet_path, device=device)\n else:\n log.info(\"Training RNet\")\n train_rnet(cfg, rnet_model, expl_buffer, vis_log, device)\n log.info(f\"Saving RNet to {rnet_path}\")\n rnet_model.save(rnet_path)\n else:\n rnet_model = None\n\n if cfg.main.train_until == \"rnet\":\n vis_log.close()\n return\n\n if cfg.main.reward in [\"rnet\", \"graph\", \"graph_sig\"]:\n # Exploration buffer embeddings\n if path.exists(embs_path):\n log.info(f\"Loading embeddings from {embs_path}\")\n embs = torch.load(embs_path)\n else:\n log.info(\"Embedding exploration_buffer\")\n embs = rnet_utils.embed_expl_buffer(expl_buffer, rnet_model, device)\n torch.save(embs, embs_path)\n expl_buffer.set_embs(embs)\n\n # Memory and graph\n memory = RNetMemory(cfg.rnet.memory, space_info, rnet_model.feat_size, device)\n if path.exists(memory_path):\n log.info(f\"Loading memory from {memory_path}\")\n memory.load(memory_path)\n else:\n log.info(\"Training memory\")\n memory.build(rnet_model, expl_buffer)\n if cfg.main.reward in [\"graph\", \"graph_sig\"]:\n # Nearest neigbhor\n if memory.nn_out is None:\n log.info(\"Computing NN\")\n nn = memory.compute_NN(expl_buffer.embs, rnet_model)\n expl_buffer.embs = expl_buffer.embs.to(\"cpu\")\n memory.set_nn(nn)\n if memory.edge2rb is None:\n log.info(\"Computing graph\")\n memory.compute_edges(rnet_model)\n memory.embs = memory.embs.to(\"cpu\")\n memory.save(memory_path)\n log.info(f\"Memory size: {len(memory)}\")\n log.info(\n f\"Number of connected components: {memory.get_nb_connected_components()}\"\n )\n else:\n memory = None\n\n if cfg.main.train_until == \"memory\":\n vis_log.close()\n return\n\n # Policy\n log.info(\"Training policy\")\n train_policy(\n cfg=cfg,\n expl_buffer=expl_buffer,\n rnet_model=rnet_model,\n memory=memory,\n space_info=space_info,\n device=device,\n vis_log=vis_log,\n )\n\n vis_log.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"facebookresearch/go-fresh","sub_path":"go_fresh/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"22"} +{"seq_id":"30941571984","text":"import os\nimport shutil\nimport time\nimport cv2\nimport numpy as np\nimport shutil\nfrom tqdm import tqdm\nimport collections\nfrom resources.create_img_list import create_img_list\ndef create_imgs_from_video(path_movie=None,fps='1/2',delete_previous_ACTUAL=False,delete_previous_DESIRED=True):\n '''path_change should be the MOV or mp4 path '''\n print('\\npath_movie = {} \\n'.format(path_movie))\n print('\\nfps={}\\n'.format(fps))\n time_i=str(time.time())\n time_i=time_i.split('.')[0]\n if str(type(fps)).find('str')==-1:\n print('This is not a string for the fps! {}'.format(str(type(fps))))\n elif path_movie.lower().find('.mp4')!=-1 or path_movie.lower().find('.mov')!=-1:\n return_dir=os.getcwd()\n\n basepath=os.path.dirname(path_movie)\n os.chdir(basepath)\n print(basepath)\n video_files=os.listdir(basepath)\n video_files=[w for w in video_files if w.lower().find('.mp4')!=-1 or w.lower().find('.mov')!=-1]\n if len(video_files)>0 and os.path.basename(basepath)!=os.path.basename(path_movie.split('.')[0]):\n print('MULTIPLE VIDEO FILES, moving to separate directories')\n for video_file_i in tqdm(video_files):\n video_file_i_path=os.path.join(basepath,video_file_i)\n video_file_i_dir_path_new=video_file_i_path.split('.')[0]\n try:\n os.makedirs(video_file_i_dir_path_new)\n except:\n pass\n shutil.move(video_file_i_path,video_file_i_dir_path_new)\n path_movie=os.path.join(os.path.join(basepath,os.path.basename(path_movie).split('.')[0]),os.path.basename(path_movie))\n basepath=os.path.dirname(path_movie)\n os.chdir(basepath)\n print(basepath)\n video=cv2.VideoCapture(path_movie)\n actual_video_fps=str(int(np.ceil(video.get(cv2.CAP_PROP_FPS))))\n actual_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)\n print(\"Actual video's FPS is = {}\".format(actual_video_fps))\n print('Actual frame count = {}'.format(actual_frames))\n movie_i_name=os.path.basename(path_movie).split('.')[0]\n basepath_desired=os.path.join(basepath,'FPS_DESIRED_{}'.format(fps.replace(\"/\",\"d\")))\n basepath=os.path.join(basepath,'FPS_ACTUAL_{}'.format(actual_video_fps,fps.replace(\"/\",\"d\")))\n if os.path.exists(basepath)==False:\n os.makedirs(basepath)\n if os.path.exists(basepath_desired)==False:\n os.makedirs(basepath_desired)\n folders_in_basepath=os.listdir(basepath)\n folders_in_basepath=[w for w in folders_in_basepath if os.path.isdir(os.path.join(basepath,w))]\n folders_in_basepath_desired=os.listdir(basepath_desired)\n folders_in_basepath_desired=[w for w in folders_in_basepath_desired if os.path.isdir(os.path.join(basepath_desired,w))]\n JPEGImages_path=os.path.join(basepath,'JPEGImages')\n Annotations_path=os.path.join(basepath,'Annotations')\n if 'Annotations' not in folders_in_basepath:\n os.makedirs(Annotations_path)\n else:\n #os.system('mv {} {}'.format(Annotations_path,Annotations_path+'_backup_{}'.format(time_i)))\n len_annos=len(os.listdir(Annotations_path))\n if len_annos>0:\n delete_previous_ACTUAL=False\n Annotations_path=Annotations_path+'_'+time_i\n else:\n os.system(f'rm -rf {Annotations_path}')\n os.makedirs(Annotations_path)\n \n if 'JPEGImages' not in folders_in_basepath:\n os.makedirs(JPEGImages_path) \n delete_previous_ACTUAL=True\n print(f'delete_previous_ACTUAL=={delete_previous_ACTUAL}')\n else:\n previous_JPEGImages_actual=os.listdir(JPEGImages_path)\n previous_JPEGImages_actual=[w for w in previous_JPEGImages_actual if w.find('.jpg')!=-1]\n if len(previous_JPEGImages_actual)>=actual_frames:\n print(f'No need to delete previous. ACTUAL frames found = {len(previous_JPEGImages_actual)}, while actual_frame count was {actual_frames}')\n delete_previous_ACTUAL=False\n else:\n print(f'Insufficient previous ACTUAL frames found = {len(previous_JPEGImages_actual)}, while actual_frame count was {actual_frames}')\n #os.system('mv {} {}'.format(JPEGImages_path,JPEGImages_path+'_backup_{}'.format(time_i)))\n if delete_previous_ACTUAL:\n os.system(f'rm -rf {JPEGImages_path}')\n os.makedirs(JPEGImages_path)\n print(f'delete_previous_ACTUAL=={delete_previous_ACTUAL}')\n else:\n print(f'delete_previous_ACTUAL=={delete_previous_ACTUAL}')\n pass #JPEGImages_path=JPEGImages_path#+'_'+time_i\n \n JPEGImages_path_desired=os.path.join(basepath_desired,'JPEGImages')\n Annotations_path_desired=os.path.join(basepath_desired,'Annotations')\n if 'Annotations' not in folders_in_basepath_desired:\n os.makedirs(Annotations_path_desired)\n else:\n #os.system('mv {} {}'.format(Annotations_path,Annotations_path+'_backup_{}'.format(time_i)))\n len_annos_desired=len(os.listdir(Annotations_path_desired))\n if len_annos_desired>0:\n delete_previous_DESIRED=False\n Annotations_path_desired=Annotations_path_desired+'_'+time_i\n else:\n os.system(f'rm -rf {Annotations_path_desired}')\n os.makedirs(Annotations_path_desired)\n if 'JPEGImages' not in folders_in_basepath_desired:\n os.makedirs(JPEGImages_path_desired) \n else:\n #os.system('mv {} {}'.format(JPEGImages_path,JPEGImages_path+'_backup_{}'.format(time_i)))\n if delete_previous_DESIRED:\n os.system(f'rm -rf {JPEGImages_path_desired}')\n else:\n JPEGImages_path_desired=JPEGImages_path_desired+'_'+time_i\n os.makedirs(JPEGImages_path_desired)\n\n\n if delete_previous_ACTUAL:\n #os.system('ffmpeg -i {} -qscale:v 2 -vf fps={} {}/{}_fps{}_%08d.jpg'.format(path_movie,fps,JPEGImages_path,movie_i_name,fps.replace('/','d').replace('.','p')))\n os.system('ffmpeg -i {} -qscale:v 2 -vf fps={} {}/{}_fps{}_frame%08d.jpg'.format(path_movie,actual_video_fps,JPEGImages_path,movie_i_name,actual_video_fps.replace('.','p')))\n else:\n print('USING previous full-frame rate to copy from.')\n actual_frames_found=os.listdir(JPEGImages_path)\n actual_frames_found=[os.path.join(JPEGImages_path,w) for w in actual_frames_found if w.find('.jpg')!=-1]\n actual_frames_dic={}\n for frame in tqdm(actual_frames_found):\n frame_count_i=int(frame.split('frame')[1].split('.jpg')[0])\n actual_frames_dic[frame_count_i]=frame\n od = collections.OrderedDict(sorted(actual_frames_dic.items()))\n counter=0.0\n desired_frames=[]\n last_frame=max(od.keys())\n frames_every=float(actual_video_fps)/eval(fps)\n for frame, frame_path in tqdm(od.items()):\n counter+=1\n if counter>frames_every:\n counter=0\n desired_frames.append(frame_path)\n shutil.copy(frame_path,JPEGImages_path_desired) #copy instead of move\n elif frame==1 or frame==last_frame:\n desired_frames.append(frame_path)\n shutil.copy(frame_path,JPEGImages_path_desired) #copy instead of move \n \n os.chdir(return_dir)\n print('creating img list')\n create_img_list(JPEGImages_path)\n print('finished creating img list')\n print('creating img list for desired')\n create_img_list(JPEGImages_path_desired)\n print('finished creating img list for desired')\n\n\n else:\n print('This is not a valid movie file. Needs to be .mp4 or .MOV. \\n Provided: {}'.format(path_movie))\n\nif __name__=='__main__':\n create_imgs_from_video('/media/steven/Elements/Drone_Videos/20220526_ADKDJI_P4V2_0027/DJI_0027.MOV')\n\n","repo_name":"stevensmiley1989/Full_Loop_YOLO","sub_path":"resources/create_imgs_from_video.py","file_name":"create_imgs_from_video.py","file_ext":"py","file_size_in_byte":8140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"19398556033","text":"def search_contact(data, contact):\n data = data.split('\\n')\n list1 = []\n contact_found = True\n for i in data:\n if contact in i:\n contact_found = False\n list1.append(i)\n if contact_found is True:\n list1.append(f'Данные по запросу ({contact}) не найдены')\n return list1\n","repo_name":"EkaterinaKis/Python7","sub_path":"Phone directory/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73542275257","text":"from django.urls import path\nfrom . import views\nfrom .views import (\n RecipeListView,\n RecipeDetailView,\n RecipeCreateView,\n RecipeUpdateView,\n RecipeDeleteView,\n AboutView,\n index\n)\n\nurlpatterns = [\n path('', index, name='index'),\n path('home/', RecipeListView.as_view(), name='cookbook-home'),\n path('about/', AboutView.as_view(), name='cookbook-about'),\n path('recipe//', RecipeDetailView.as_view(), name='recipe-detail'),\n path('recipe//update/', RecipeUpdateView.as_view(), name='recipe-update'),\n path('recipe//delete/', RecipeDeleteView.as_view(), name='recipe-delete'),\n path('recipe/new/', RecipeCreateView.as_view(), name='recipe-create'),\n]","repo_name":"SandraBergstrom/theRecipeCollective","sub_path":"cookbook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12368986241","text":"from rdflib import URIRef, BNode, Literal\nfrom rdflib.namespace import RDF, RDFS\n\n\ndef Sublocation(g, item, uri, BF):\n sublocation = BNode()\n g.add((uri, BF.sublocation, sublocation))\n g.add((sublocation, RDF.type, BF.Sublocation))\n label = Literal(item.shelf)\n g.add((sublocation, RDFS.label, label))\n\n return g","repo_name":"inacioigne/BiblioKeia","sub_path":"api/src/function/bibframe/Item/sublocation.py","file_name":"sublocation.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"31963406324","text":"from turtle import Turtle\nimport random\n\n\nMOVE_DISTANCE = 20\n\n\nclass Ball(Turtle):\n\n def __init__(self) -> None:\n super().__init__()\n\n self.x_movement = 10 * random.choice([-1, 1])\n self.y_movement = 10 * random.choice([-1, 1])\n\n self.shape(\"circle\")\n self.color(\"white\")\n self.penup()\n\n new_x = self.xcor() + random.choice([-1, 1])\n new_y = self.ycor() + random.choice([-1, 1])\n self.goto(new_x, new_y)\n\n def move(self):\n new_x = self.xcor() + self.x_movement\n new_y = self.ycor() + self.y_movement\n self.goto(new_x, new_y)\n\n def bounce_y(self):\n self.y_movement *= -1\n\n def bounce_x(self):\n self.x_movement *= -1\n\n def reset_position(self):\n self.goto(0, 0)\n self.bounce_x()\n self.y_movement *= random.choice([-1, 1])\n\n def collision(self, l_paddle, r_paddle, scoreboard):\n if self.ycor() > 280 or self.ycor() < -280:\n self.bounce_y()\n\n if self.distance(l_paddle) < 50 and self.xcor() < -320 or self.distance(r_paddle) < 50 and self.xcor() > 320:\n self.bounce_x()\n\n if self.xcor() < -380: \n scoreboard.r_score += 1\n self.reset_position()\n\n if self.xcor() > 380:\n scoreboard.l_score += 1\n self.reset_position()\n\n","repo_name":"nurmatthias/100DaysOfCode","sub_path":"day22/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42297915394","text":"from utils.fillers import fillTestNColumn, fillEnableColumn, fillStepIDCounter\nfrom utils.importDictionary import importDictionary\nfrom utils.expressionSubtitution import substituteFunctions, removeTestTypeColumn, findExpressions\nfrom utils.fileImporter import importFunctionFiles, importBuildFile, generateRunFileFromBuildFile, TC_Build_File, \\\n Functions_File, TC_Run_File\n\n\nclass HIL_Functions_Handler:\n def __init__(self, function_verify_filename=\"\", function_verify_sheetName=\"\",\n build_filename=\"\", source_sheet=\"\", run_filename=\"\"):\n self.function_verify_filename = function_verify_filename\n self.function_verify_sheetName = function_verify_sheetName\n self.build_filename = build_filename\n self.source_sheet = source_sheet\n self.run_filename = run_filename\n self.build_file = None\n self.run_file = None\n\n def import_build_file(self):\n self.build_file = TC_Build_File(self.build_filename, self.source_sheet)\n\n @staticmethod\n def parse_json_path_file(json_data):\n hil_substitution_json = json_data['root']['HIL_substitution']\n\n filesSubstitution = hil_substitution_json['filesSubstitution']\n function_verify_filename = filesSubstitution['file_path']\n print('filePath for substitution file :', function_verify_filename)\n function_verify_sheetName = filesSubstitution['sheets_name']\n for sheet_name in function_verify_sheetName:\n print('sheets in substitution file :', sheet_name)\n\n file_TC_Build = hil_substitution_json['file_TC_Build']\n build_filename = file_TC_Build['file_path']\n print('filePath for build file :', build_filename)\n source_sheet = file_TC_Build['sheet_name']\n print('sheets in build file :', source_sheet)\n\n file_TC_Run = hil_substitution_json['file_TC_Run']\n run_file_path = file_TC_Run['file_path']\n return function_verify_filename, function_verify_sheetName, build_filename, source_sheet, run_file_path\n\n def run(self):\n\n self.import_build_file()\n\n print(\"import build file\")\n\n functionDictionary = {}\n\n for verifySheet in self.function_verify_sheetName:\n wsVerify = Functions_File(self.function_verify_filename, tc_sheet_name=verifySheet)\n\n res = importDictionary(function_file=wsVerify, functionDictionary=functionDictionary,\n dictionaryType=\"verify\")\n if res == 1:\n print(\n \"MINOR: Error of importDictionary found in \" + self.function_verify_filename + \",sheet : \" + verifySheet)\n exit()\n\n for k in functionDictionary:\n print(k)\n\n print(\"dizionario acquisito\")\n\n self.build_file.save_copy(self.run_filename)\n\n self.run_file = TC_Run_File(self.run_filename, self.source_sheet)\n\n # exit()\n # self.build_file.generateRunFileFromBuildFile(run_filename=self.run_filename)\n\n # exit()\n\n # wbRun, wsRun = generateRunFileFromBuildFile(workbookBuild=wbBuild,\n # sheetNameBuild=self.source_sheet,\n # run_filename=self.run_filename)\n print(\"saved RUN file\")\n\n findExpressions(wsStart=self.build_file.worksheet, substitutionDictionary=functionDictionary)\n print(\"find expressions done\")\n\n substituteFunctions(wsStart=self.build_file.worksheet, wsEnd=self.run_file.worksheet,\n substitutionDictionary=functionDictionary, copyStyle=True)\n print(\"substitution done\")\n\n\n # disableSequences(worksheet=wsRun)\n # removeTestTypeColumn(worksheet=self.run_file.worksheet)\n fillTestNColumn(worksheet=self.run_file.worksheet)\n fillEnableColumn(worksheet=self.run_file.worksheet)\n fillStepIDCounter(worksheet=self.run_file.worksheet)\n\n print(\"other operations\")\n\n # \" Salva\"\n self.run_file.save()\n #wbRun.save(filename=self.run_filename)\n\n print(\"file saved\")\n","repo_name":"stefanofortu/TC_tools","sub_path":"Classes/HIL_Function_Handler.py","file_name":"HIL_Function_Handler.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36827743909","text":"#!/usr/bin/env python3\n\nfarms = [{\"name\": \"NE Farm\", \"agriculture\": [\"sheep\", \"cows\", \"pigs\", \"chickens\", \"llamas\" , \"cats\"]},\n {\"name\": \"W Farm\", \"agriculture\": [\"pigs\", \"chickens\", \"llamas\"]},\n {\"name\": \"SE Farm\", \"agriculture\": [\"chickens\", \"carrots\", \"celery\"]}]\n\nNE_planimals= farms[0][\"agriculture\"]\nW_planimals= farms[1][\"agriculture\"]\nSE_planimals= farms[2][\"agriculture\"]\n\nprint(\"choose between the following farms: \")\nfor farm in farms:\n print(\"-\",farm[\"name\"])\n\nchoice= input(\">\").lower()\n\nif choice == \"ne farm\":\n for keys in NE_planimals:\n print(keys)\n \n\nelif choice == \"w farm\":\n for keys in W_planimals:\n print(keys)\n \n\nelif choice == \"se farm\": \n for keys in SE_planimals:\n print(keys)\n \n\n\n","repo_name":"cecheve3/mycode","sub_path":"farmcode/farm_loop2.py","file_name":"farm_loop2.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16617838644","text":"def test_cloudpickle(selenium):\n selenium.load_package(\"cloudpickle\")\n selenium.run(\n r\"\"\"\nimport cloudpickle\nsquared = lambda x: x ** 2\npickled_lambda = cloudpickle.dumps(squared)\n\nimport pickle\nnew_squared = pickle.loads(pickled_lambda)\nassert new_squared(2) == 4\n\nCONSTANT = 42\ndef my_function(data: int) -> int:\n return data + CONSTANT\n\npickled_function = cloudpickle.dumps(my_function)\ndepickled_function = pickle.loads(pickled_function)\nassert depickled_function(43) == 85\n\"\"\"\n )\n","repo_name":"marthendalnunes/pyodide","sub_path":"packages/cloudpickle/test_cloudpickle.py","file_name":"test_cloudpickle.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"73273232697","text":"import json\nimport tempfile\nimport unittest\n\nfrom energyplus_regressions.structures import (\n TextDifferences,\n MathDifferences,\n TableDifferences,\n EndErrSummary,\n TestEntry,\n CompletedStructure,\n ForceRunType,\n ReportingFreq\n)\n\n\nclass TestForceRunType(unittest.TestCase):\n\n def test_get_all(self):\n self.assertIsInstance(ForceRunType.get_all(), list)\n\n\nclass TestReportingFrequency(unittest.TestCase):\n\n def test_get_all(self):\n self.assertIsInstance(ReportingFreq.get_all(), list)\n\n\nclass TestTextDifferences(unittest.TestCase):\n\n def test_type_to_string(self):\n result = TextDifferences.diff_type_to_string(TextDifferences.EQUAL)\n self.assertIsInstance(result, str)\n result = TextDifferences.diff_type_to_string(TextDifferences.DIFFS)\n self.assertIsInstance(result, str)\n with self.assertRaises(Exception):\n TextDifferences.diff_type_to_string(-1000)\n\n def test_instance_to_dict(self):\n t = TextDifferences(diff_type=TextDifferences.DIFFS)\n obj = t.to_dict()\n self.assertIn('diff_type', obj)\n\n\nclass TestMathDifferences(unittest.TestCase):\n\n def test_construct_from_list(self):\n from_math_diff = ['some_diffs', '8 records', '2 big diffs', '3 small diffs']\n MathDifferences(from_math_diff) # should just pass, nothing to check really\n\n def test_instance_to_dict(self):\n from_math_diff = ['some_diffs', '8 records', '2 big diffs', '3 small diffs']\n m = MathDifferences(from_math_diff)\n obj = m.to_dict()\n self.assertIn('diff_type', obj)\n self.assertIn('num_records', obj)\n self.assertIn('count_of_big_diff', obj)\n self.assertIn('count_of_small_diff', obj)\n\n\nclass TestTableDifferences(unittest.TestCase):\n\n def test_construct_from_list(self):\n from_table_diff = ['msg', 'tbl_count', '2 big diffs', '3 small diffs', '2 equal', '2 strings', 'size_err', 1, 1]\n TableDifferences(from_table_diff) # should just pass, nothing to check really\n\n def test_instance_to_dict(self):\n from_table_diff = ['msg', 'tbl_count', '2 big diffs', '3 small diffs', '2 equal', '2 strings', 'size_err', 1, 1]\n t = TableDifferences(from_table_diff)\n obj = t.to_dict()\n self.assertIn('msg', obj)\n self.assertIn('table_count', obj)\n self.assertIn('big_diff_count', obj)\n self.assertIn('small_diff_count', obj)\n\n\nclass TestEndErrSummary(unittest.TestCase):\n\n def test_status_to_string(self):\n result = EndErrSummary.status_to_string(EndErrSummary.STATUS_UNKNOWN)\n self.assertIsInstance(result, str)\n result = EndErrSummary.status_to_string(EndErrSummary.STATUS_SUCCESS)\n self.assertIsInstance(result, str)\n result = EndErrSummary.status_to_string(EndErrSummary.STATUS_MISSING)\n self.assertIsInstance(result, str)\n result = EndErrSummary.status_to_string(EndErrSummary.STATUS_FATAL)\n self.assertIsInstance(result, str)\n with self.assertRaises(Exception):\n EndErrSummary.status_to_string(-1000)\n\n def test_instance_to_dict_successful(self):\n e = EndErrSummary(EndErrSummary.STATUS_SUCCESS, 2, EndErrSummary.STATUS_SUCCESS, -1)\n obj = e.to_dict()\n self.assertIn('simulation_status_case1', obj)\n self.assertIn('run_time_seconds_case1', obj)\n self.assertIn('simulation_status_case2', obj)\n self.assertIn('run_time_seconds_case2', obj)\n\n def test_instance_to_dict_failure(self):\n e = EndErrSummary(EndErrSummary.STATUS_FATAL, -1, EndErrSummary.STATUS_MISSING, -1)\n obj = e.to_dict()\n self.assertIn('simulation_status_case1', obj)\n self.assertNotIn('run_time_seconds_case1', obj) # won't be for unsuccessful runs\n self.assertIn('simulation_status_case2', obj)\n self.assertNotIn('run_time_seconds_case2', obj) # won't be for unsuccessful runs\n\n\nclass TestTestEntry(unittest.TestCase):\n\n @staticmethod\n def fully_populated_entry_successful(t):\n t.add_summary_result(EndErrSummary(EndErrSummary.STATUS_SUCCESS, 1, EndErrSummary.STATUS_SUCCESS, 1))\n t.add_math_differences(MathDifferences([1, 2, 3, 4]), MathDifferences.ESO)\n t.add_math_differences(MathDifferences([1, 2, 3, 4]), MathDifferences.MTR)\n t.add_math_differences(MathDifferences([1, 2, 0, 4]), MathDifferences.ZSZ)\n t.add_math_differences(MathDifferences([1, 2, 3, 4]), MathDifferences.SSZ)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.AUD)\n t.add_text_differences(TextDifferences(TextDifferences.DIFFS), TextDifferences.BND)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.DXF)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.EIO)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.ERR)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.MDD)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.MTD)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.RDD)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.SHD)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.DL_IN)\n t.add_text_differences(TextDifferences(TextDifferences.EQUAL), TextDifferences.DL_OUT)\n t.add_table_differences(TableDifferences([1, 1, 1, 1, 1, 1, 1, 1, 1]))\n return t\n\n @staticmethod\n def fully_populated_entry_failure(t):\n t.add_summary_result(EndErrSummary(EndErrSummary.STATUS_MISSING, 1, EndErrSummary.STATUS_FATAL, 1))\n return t\n\n def test_workflow(self):\n t = TestEntry('filename', 'weather')\n self.assertIsNone(t.summary_result)\n self.assertIsNone(t.eso_diffs)\n self.assertIsNone(t.mtr_diffs)\n self.assertIsNone(t.zsz_diffs)\n self.assertIsNone(t.ssz_diffs)\n self.assertIsNone(t.aud_diffs)\n self.assertIsNone(t.bnd_diffs)\n self.assertIsNone(t.dxf_diffs)\n self.assertIsNone(t.eio_diffs)\n self.assertIsNone(t.err_diffs)\n self.assertIsNone(t.mdd_diffs)\n self.assertIsNone(t.mtd_diffs)\n self.assertIsNone(t.rdd_diffs)\n self.assertIsNone(t.shd_diffs)\n self.assertIsNone(t.dl_in_diffs)\n self.assertIsNone(t.dl_out_diffs)\n self.assertIsNone(t.table_diffs)\n t = TestTestEntry.fully_populated_entry_successful(t)\n self.assertIsNotNone(t.summary_result)\n self.assertIsNotNone(t.eso_diffs)\n self.assertIsNotNone(t.mtr_diffs)\n self.assertIsNotNone(t.zsz_diffs)\n self.assertIsNotNone(t.ssz_diffs)\n self.assertIsNotNone(t.aud_diffs)\n self.assertIsNotNone(t.bnd_diffs)\n self.assertIsNotNone(t.dxf_diffs)\n self.assertIsNotNone(t.eio_diffs)\n self.assertIsNotNone(t.err_diffs)\n self.assertIsNotNone(t.mdd_diffs)\n self.assertIsNotNone(t.mtd_diffs)\n self.assertIsNotNone(t.rdd_diffs)\n self.assertIsNotNone(t.shd_diffs)\n self.assertIsNotNone(t.dl_in_diffs)\n self.assertIsNotNone(t.dl_out_diffs)\n self.assertIsNotNone(t.dl_out_diffs)\n obj = t.to_dict()\n self.assertIsInstance(obj, dict)\n\n\nclass TestCompletedStructure(unittest.TestCase):\n\n def test_workflow(self):\n c = CompletedStructure(\n '/a/source/dir', '/a/build/dir', '/b/source/dir', '/b/build/dir', '/r/dir1', '/r/dir2', 'dummy_start_time'\n )\n t = TestEntry('filename', 'weather')\n t = TestTestEntry.fully_populated_entry_successful(t)\n c.add_test_entry(t)\n t = TestEntry('file_that_failed', 'weather')\n t = TestTestEntry.fully_populated_entry_failure(t)\n c.add_test_entry(t)\n t = TestEntry('filename', 'weather')\n t = TestTestEntry.fully_populated_entry_successful(t)\n t.add_table_differences(TableDifferences([1, 1, 0, 1, 1, 1, 1, 1, 1])) # override the table data\n c.add_test_entry(t)\n\n def test_to_csv(self):\n c = CompletedStructure(\n '/a/source/dir', '/a/build/dir', '/b/source/dir', '/b/build/dir', '/r/dir1', '/r/dir2', 'dummy_start_time'\n )\n t = TestEntry('filename', 'weather')\n t = TestTestEntry.fully_populated_entry_successful(t)\n c.add_test_entry(t)\n valid_temp_csv_file = tempfile.mkstemp(suffix='.csv')[1]\n c.to_runtime_summary(valid_temp_csv_file) # not asserting anything, it should just pass\n with self.assertRaises(Exception):\n c.to_runtime_summary('/invalid/path')\n\n def test_to_json(self):\n c = CompletedStructure(\n '/a/source/dir', '/a/build/dir', '/b/source/dir', '/b/build/dir', '/r/dir1', '/r/dir2', 'dummy_start_time'\n )\n t = TestEntry('filename', 'weather')\n t = TestTestEntry.fully_populated_entry_successful(t)\n c.add_test_entry(t)\n valid_temp_json_file = tempfile.mkstemp(suffix='.json')[1]\n c.to_json_summary(valid_temp_json_file)\n with open(valid_temp_json_file) as f:\n json_body = f.read()\n obj = json.loads(json_body)\n self.assertIn('directories', obj)\n self.assertIn('runs', obj)\n self.assertIn('diffs', obj)\n self.assertIn('results_by_file', obj)\n\n def test_to_json_object_response(self):\n c = CompletedStructure(\n '/a/source/dir', '/a/build/dir', '/b/source/dir', '/b/build/dir', '/r/dir1', '/r/dir2', 'dummy_start_time'\n )\n t = TestEntry('filename', 'weather')\n t = TestTestEntry.fully_populated_entry_successful(t)\n c.add_test_entry(t)\n obj = c.to_json_summary()\n self.assertIn('directories', obj)\n self.assertIn('runs', obj)\n self.assertIn('diffs', obj)\n self.assertIn('results_by_file', obj)\n","repo_name":"NREL/EnergyPlusRegressionTool","sub_path":"energyplus_regressions/tests/test_structures.py","file_name":"test_structures.py","file_ext":"py","file_size_in_byte":9993,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"19779207250","text":"#https://leetcode.com/problems/set-mismatch/\n\nclass Solution:\n def findErrorNums(self, nums: List[int]) -> List[int]:\n nums_dict = collections.defaultdict(int)\n for num in nums:\n nums_dict[num] += 1\n if nums_dict[num] == 2:\n dup = num\n for i in range(1, len(nums) + 1):\n if nums_dict[i] == 0:\n return [dup, i]\n \n","repo_name":"eekstunt/leetcode","sub_path":"645. Set Mismatch.py","file_name":"645. Set Mismatch.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12638115360","text":"# -*- coding: utf-8 -*-\n# Time : 2022/1/20 16:16\n# Author : QIN2DIM\n# Github : https://github.com/QIN2DIM\n# Description:\nimport sys\nimport webbrowser\nfrom typing import Optional\n\nimport hcaptcha_challenger as solver\nfrom loguru import logger\nfrom webdriver_manager.chrome import ChromeType\nfrom webdriver_manager.core.utils import get_browser_version_from_os\n\n\ndef download_driver():\n # Detect environment variable `google-chrome`.\n browser_version = get_browser_version_from_os(ChromeType.GOOGLE)\n if browser_version != \"UNKNOWN\":\n return\n\n # `google-chrome` is missing from environment variables, prompting players to install manually.\n logger.critical(\n \"The current environment variable is missing `google-chrome`, \"\n \"please install Chrome for your system\"\n )\n logger.info(\n \"Ubuntu: https://linuxize.com/post/how-to-install-google-chrome-web-browser-on-ubuntu-20-04/\"\n )\n logger.info(\n \"CentOS 7/8: https://linuxize.com/post/how-to-install-google-chrome-web-browser-on-centos-7/\"\n )\n if \"linux\" not in sys.platform:\n webbrowser.open(\"https://www.google.com/chrome/\")\n\n logger.info(\"Re-execute the `install` scaffolding command after the installation is complete.\")\n\n\ndef do(yolo_onnx_prefix: Optional[str] = None, upgrade: Optional[bool] = False):\n \"\"\"下载项目运行所需的各项依赖\"\"\"\n onnx_prefix = yolo_onnx_prefix or solver.Prefix.YOLOv6n\n solver.install(onnx_prefix=onnx_prefix, upgrade=upgrade)\n\n\n@logger.catch()\ndef test():\n \"\"\"Check if the Challenger driver version is compatible\"\"\"\n","repo_name":"karpug/epic-awesome-gamer","sub_path":"src/apis/scaffold/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"23615687859","text":"import numpy as np\nimport torch\nfrom torch.nn import functional\n\ndef trace(A=None, B=None):\n if A is None:\n print('please input pytorch tensor')\n val = None\n elif B is None:\n val = torch.sum(A * A)\n else:\n val = torch.sum(A * B)\n return val\n\n\ndef mmread(R, type='float32'):\n row = R.row.astype(int)\n col = R.col.astype(int)\n val = torch.from_numpy(R.data.astype(type))\n index = torch.from_numpy(np.row_stack((row, col)))\n m, n = R.shape\n return torch.sparse.FloatTensor(index, val, torch.Size([m, n]))\n\ndef csr2test(test):\n return {str(r): {str(test.indices[ind]): int(1)\n for ind in range(test.indptr[r], test.indptr[r + 1])}\n for r in range(test.shape[0]) if test.indptr[r] != test.indptr[r + 1]}\n\ndef sort2query(run):\n m, n = run.shape\n return {str(i): {str(int(run[i, j])): float(1.0 / (j + 1)) for j in range(n)} for i in range(m)}\n\ndef sort2query_vector(run):\n return {str(int(run[j])): float(1.0 / (j + 1)) for j in range(len(run))}\n\n","repo_name":"JieZouIR/Qrec","sub_path":"lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"22"} +{"seq_id":"3213652030","text":"def test_error_during_setup(pytester):\n \"\"\"\n Setup exception should cause\n - all tests to error\n - teardown to not run\n \"\"\"\n pytester.copy_example(\"examples/test_error.py\")\n result = pytester.runpytest('test_error.py::test_error_during_setup', '-v', '-s')\n result.assert_outcomes(errors=3)\n result.stdout.no_fnmatch_line(\"param teardown\")\n result.stdout.re_match_lines(\n [\n \"ERROR test_error.py::test_error_during_setup[a] - assert 1 == 2\",\n \"ERROR test_error.py::test_error_during_setup[b] - assert 1 == 2\",\n \"ERROR test_error.py::test_error_during_setup[c] - assert 1 == 2\",\n ]\n )\n\ndef test_error_during_teardown(pytester):\n \"\"\"\n Teardown exception should cause\n - all tests to pass\n - last test to error\n - yes, this is normal-ish for pytest with parametrized errors.\n \"\"\"\n pytester.copy_example(\"examples/test_error.py\")\n result = pytester.runpytest('test_error.py::test_error_during_teardown', '-v', '-s')\n result.assert_outcomes(passed=3, errors=1)\n\ndef test_error_marker_bad_params(pytester):\n \"\"\"\n Markers that accept functions have to accept 2 or more.\n\n - all tests to pass\n - last test to error\n - yes, this is normal-ish for pytest with parametrized errors.\n \"\"\"\n pytester.copy_example(\"examples/test_marker_bad_params.py\")\n result = pytester.runpytest('-v', '-s')\n result.assert_outcomes(errors=1)\n result.stdout.re_match_lines(\n [\n \".*Interrupted: 1 error during collection.*\"\n ]\n )\n\n","repo_name":"okken/pytest-param-scope","sub_path":"tests/test_error.py","file_name":"test_error.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"25387478054","text":"\r\n\r\n\r\n\r\n\r\n#Black-Scholes Model ----------------\r\n\r\n\r\nfrom scipy.stats import norm\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\nclass Option:\r\n\r\n def __init__(self,intrest_rate, vol,maturity,S0,K):\r\n\r\n self.intrest_rate=intrest_rate\r\n self.vol=vol\r\n self.maturity=maturity\r\n self.S=S0\r\n self.K=K\r\n\r\n\r\n def d_d(self):\r\n\r\n d_plus=(np.log(self.S/self.K)+(self.intrest_rate+0.5* self.vol**2)*self.maturity)/(self.vol*np.sqrt(self.maturity))\r\n d_minus=d_plus-self.vol*np.sqrt(self.maturity)\r\n return [d_plus,d_minus]\r\n\r\n\r\n def call_price(self):\r\n\r\n d=self.d_d()\r\n\r\n c_price=self.S*norm.pdf(d[0])-self.K*np.exp(-self.intrest_rate*self.maturity)*norm.pdf(d[1])\r\n\r\n return c_price\r\n\r\n def put_price_using_parity(self):\r\n\r\n c=self.call_price()\r\n\r\n p_price=c-self.S+self.K*np.exp(-self.intrest_rate*self.maturity)\r\n\r\n return p_price\r\n\r\n def put_price(self):\r\n d=self.d_d()\r\n p_price=K*np.exp(-self.intrest_rate*self.maturity)*(1-norm.pdf(d[1]))-self.S*(1-norm.pdf(d[0]))\r\n return p_price\r\n\r\n #butterfly spread---- is a combinaison of two short call with slightly diffferent srike prices and two long call with the same strike price\r\n\r\n\r\n def butterfly_price(self,epsilon):\r\n self.K-=epsilon\r\n c_k_epsilon_=self.call_price()\r\n self.K+=2*epsilon\r\n c_k_epsilon=self.call_price()\r\n self.K-=epsilon\r\n c_k=self.call_price()\r\n return c_k_epsilon_+c_k_epsilon-2*c_k\r\n \r\n \r\n def call_greeks(self):\r\n d=self.d_d()\r\n \r\n call=self.call_price()\r\n \r\n delta=norm.pdf(d[0])\r\n \r\n gamma=np.exp(-d[0]**2/2)/self.vol*self.S*np.sqrt(2*np.pi*self.maturity)\r\n \r\n rho=self.K*self.maturity*np.exp(-self.intrest_rate*self.maturity)*norm.pdf(d[1])\r\n vegga=self.S*np.sqrt(self.maturity/2*np.pi)*np.exp(-d[0]**2/2)\r\n theta=(self.vol**2*self.S**2/2)*gamma+self.intrest_rate*(self.S*delta-call)\r\n return [delta,gamma,rho,vegga,theta] \r\n \r\n \r\n def put_greeks(self):\r\n d=self.d_d()\r\n put=self.put_price()\r\n delta=norm.pdf(d[0])-1\r\n gamma=np.exp(-d[0]**2/2)/self.vol*self.S*np.sqrt(2*np.pi*self.maturity)\r\n rho=self.K*self.maturity*np.exp(-self.intrest_rate*self.maturity)*norm.pdf(d[1])-self.K*self.maturity*np.exp(-self.intrest_rate*self.maturity)\r\n vegga=self.S*np.sqrt(self.maturity/2*np.pi)*np.exp(-d[0]**2/2)\r\n theta=(self.vol**2*self.S**2/2)*gamma+self.intrest_rate*(self.S*delta-put)\r\n return [delta,gamma,rho,vegga,theta]\r\n \r\n \r\n def butterfly_greeks(self,epsilon):\r\n self.K+=epsilon\r\n greeks_k_epsilon=np.array(self.call_greeks())\r\n self.K-=2*epsilon\r\n greeks_k_epsilon_=np.array(self.call_greeks())\r\n self.K+=epsilon\r\n greeks_k=np.array(self.call_greeks())\r\n greeks=greeks_k_epsilon+greeks_k_epsilon_-2*greeks_k\r\n return greeks\r\n \r\n \r\n def call_greeks_r(self,N_iterations,rmax):\r\n col=[\"r\",\"Delta\",\"Gamma\",\"Vegga\",\"Rho\",\"Theta\"]\r\n r=range(0,int(N_iterations*rmax),1)\r\n k=0\r\n A=np.zeros([int(N_iterations*rmax),6]) \r\n for i in r:\r\n self.intrest_rate=i/N_iterations \r\n g=[self.intrest_rate]+self.call_greeks()\r\n \r\n A[k,:]= np.array(g)\r\n k=k+1\r\n W=pd.DataFrame(A,columns=col)\r\n\r\n return W \r\n \r\n \r\n def call_greeks_vol(self,N_iterations,volmax): \r\n col=[\"vol\",\"Delta\",\"Gamma\",\"Vegga\",\"Rho\",\"Theta\"]\r\n r=range(0,int(N_iterations*volmax),1)\r\n k=0\r\n A=np.zeros([int(N_iterations*volmax),6]) \r\n for i in r:\r\n self.vol=i/N_iterations \r\n g=[self.vol]+self.call_greeks()\r\n A[k,:]= np.array(g)\r\n k=k+1\r\n W=pd.DataFrame(A,columns=col)\r\n\r\n return W \r\n \r\n \r\n def call_greeks_T(self,N_iterations, Tmax):\r\n \r\n col=[\"T\",\"Delta\",\"Gamma\",\"Vegga\",\"Rho\",\"Theta\"]\r\n r=range(0,int(N_iterations*Tmax),1)\r\n k=0\r\n A=np.zeros([int(N_iterations*Tmax),6]) \r\n for i in r:\r\n self.maturity=i/N_iterations\r\n g=[self.maturity]+self.call_greeks() \r\n A[k,:]= np.array(g)\r\n k=k+1\r\n W=pd.DataFrame(A,columns=col)\r\n\r\n return W \r\n \r\n def call_greeks_K(self,N_iterations, Kmax):\r\n col=[\"K\",\"Delta\",\"Gamma\",\"Vegga\",\"Rho\",\"Theta\"]\r\n r=range(0,int(N_iterations*Kmax),1)\r\n k=0\r\n A=np.zeros([int(N_iterations*Kmax),6]) \r\n for i in r:\r\n self.K=i/N_iterations \r\n g=[self.K]+self.call_greeks()\r\n \r\n A[k,:]= np.array(g)\r\n k=k+1\r\n W=pd.DataFrame(A,columns=col)\r\n\r\n return W \r\n\r\n\r\n\r\n# ---------------------Test------------------------\r\n\r\n\r\nr=0.05\r\nS=100\r\nK=100\r\nT=1/12\r\nsigma=0.25\r\n\r\n\r\noption=Option(r,sigma,T,S,K)\r\n\r\nc_price=option.call_price()\r\np_price=option.put_price()\r\n\r\nput_g=option.put_greeks()\r\n\r\ncall_g=option.call_greeks()\r\n\r\n\r\nprint(\"The call price is %f :\\n\",c_price)\r\n\r\n\r\nprint(\"The put price is %f: \\n\",p_price)\r\n\r\nprint(\"We present the greeks in the format: [delta,gamma,rho,vegga,theta]\")\r\n\r\nprint(\"The greeks for the underlying call \\n\")\r\nprint(call_g)\r\n\r\nprint(\"The greeks for the underlying put\\n\")\r\n\r\nprint(put_g)\r\n\r\ndelta_k=0.02\r\n\r\nprint(\"Butterfly spread with a spread deltaK=%f \\n\", delta_k)\r\n\r\n\r\nB_price=option.butterfly_price(delta_k)\r\n\r\n\r\nprint(\"The price of a butterfly spread is %f\\n\", B_price)\r\n\r\n\r\nB_greeks=option.butterfly_greeks(delta_k)\r\n\r\nprint(\"The greeks for butterfly spread \\n\")\r\n\r\nprint(B_greeks)\r\n\r\n\r\n\r\n\r\n\r\ndef greeks_r_plot(N_iterations,rmax):\r\n\r\n W=vanille.call_greeks_r(N_iterations,rmax)\r\n \r\n \r\n \r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"r\"]), np.array(W['Delta']), label='Delta')\r\n plt.title(\"Delta VS r\") \r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"r\"]), np.array(W['Gamma']), label='Gamma')\r\n plt.title(\"Gamma VS r\") \r\n \r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"r\"]), np.array(W['Rho']), label='Rho')\r\n plt.title(\"Rho Vs r\") \r\n \r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"r\"]), np.array(W[\"Vegga\"]), label='Vegga')\r\n plt.title(\"Vegga Vs r\") \r\n \r\n \r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"r\"]),np.array(W[\"Theta\"]), label='Theta')\r\n plt.title(\"Theta Vs r\") \r\n \r\n plt.show()\r\n\r\n\r\n\r\n\r\ndef greeks_vol_plot(N_iterations,volmax):\r\n\r\n W=vanille.call_greeks_vol(N_iterations,volmax)\r\n \r\n \r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"vol\"]), np.array(W['Delta']), label='Delta')\r\n plt.title(\"Delta VS vol\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"vol\"]), np.array(W['Gamma']), label='Gamma')\r\n plt.title(\"Gamma VS vol\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"vol\"]), np.array(W['Rho']), label='Rho')\r\n plt.title(\"Rho Vs vol\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"vol\"]), np.array(W[\"Vegga\"]), label='Vegga')\r\n plt.title(\"Vegga Vs r\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"vol\"]),np.array(W[\"Theta\"]), label='Theta')\r\n plt.title(\"Theta Vs r\") \r\n\r\n plt.show()\r\n\r\n\r\ndef greeks_T_plot(N_iterations,Tmax):\r\n\r\n W=vanille.call_greeks_T(N_iterations,Tmax)\r\n \r\n \r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"T\"]), np.array(W['Delta']), label='Delta')\r\n plt.title(\"Delta VS maturity\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"T\"]), np.array(W['Gamma']), label='Gamma')\r\n plt.title(\"Gamma VS maturity\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"T\"]), np.array(W['Rho']), label='Rho')\r\n plt.title(\"Rho Vs maturity\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"T\"]), np.array(W[\"Vegga\"]), label='Vegga')\r\n plt.title(\"Vegga Vs maturity\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"T\"]),np.array(W[\"Theta\"]), label='Theta')\r\n plt.title(\"Theta Vs maturity\") \r\n\r\n plt.show()\r\n\r\ndef greeks_K_plot(N_iterations,Kmax):\r\n\r\n W=vanille.call_greeks_K(N_iterations,Kmax)\r\n \r\n \r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"K\"]), np.array(W['Delta']), label='Delta')\r\n plt.title(\"Delta VS strike price\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"K\"]), np.array(W['Gamma']), label='Gamma')\r\n plt.title(\"Gamma VS strike price\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"K\"]), np.array(W['Rho']), label='Rho')\r\n plt.title(\"Rho Vs strike price\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"K\"]), np.array(W[\"Vegga\"]), label='Vegga')\r\n plt.title(\"Vegga Vs strike price\") \r\n\r\n plt.show()\r\n \r\n plt.figure()\r\n plt.plot(np.array(W[\"K\"]),np.array(W[\"Theta\"]), label='Theta')\r\n plt.title(\"Theta Vs strike price\") \r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n#--------------------------plotting------------------------------------\r\nr=0.10\r\nS=95\r\nK=100\r\nT=1\r\nsigma=0.13\r\nvanille=Option(r,sigma,T,S,K)\r\nN_iterations=200\r\n\r\n\r\n\r\n\r\nprint(\"----------------call greeks plotting for different interest -----------\")\r\n\r\nrmax=0.2\r\n\r\n\r\n\r\ngreeks_r_plot(N_iterations,rmax)\r\n\r\n\r\nprint(\"----------------call greeks plotting for different volatility -----------\")\r\n\r\n\r\n\r\n\r\n\r\n\r\nvolmax=0.40\r\n\r\ngreeks_vol_plot(N_iterations,volmax)\r\n\r\n\r\nprint(\"----------------call greeks plotting for different maturity-----------\")\r\n\r\n\r\nTmax=10\r\n\r\ngreeks_T_plot(N_iterations,Tmax)\r\n\r\n\r\nprint(\"----------------call greeks plotting for different strike prices-----------\")\r\n\r\n\r\nKmax=3*K\r\n\r\ngreeks_T_plot(N_iterations,Kmax)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HChahbi/Master-thesis","sub_path":"Black_Scholes.py","file_name":"Black_Scholes.py","file_ext":"py","file_size_in_byte":10847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35890421483","text":"#[Geosystem Research : Department of Coastal Management]\r\n#[Created by C.K. Park on 2019.04.11] edit 19.08.22\r\nimport sys\r\nimport os\r\n#os.path.join(os.getcwd(),\"Function\")\r\nimport numpy as np\r\nfrom datetime import date, timedelta\r\nfrom datetime import datetime\r\nfrom math import *\r\n\r\nprint(\"#KHOA Monthly OBS data collection Start==========\")\r\n\r\n#[Date Set]===============================================================================================================\r\ntoday = str(datetime.today().strftime('%Y%m%d'))\r\nMM = int(datetime.today().strftime('%m'))\r\nif MM == 1 :\r\n\tMM = 12\r\n\tYR = int(datetime.today().strftime('%Y'))-1\r\nelse : \r\n\tMM = MM - 1\r\n\tYR = int(datetime.today().strftime('%Y'))\r\n\r\nYM = str(date(YR,MM,1).strftime('%Y%m'))\r\nprint(YM,YR,MM)\r\n\r\n#[Monthly QC_DATA DOWNLOAD]================================================================================================\r\nos.system(r'\"C:\\Program Files (x86)\\GnuWin32\\bin\\wget.exe\" -P ./Result/OBS_Monthly/ http://10.27.90.53:8080/opendap/external/FORECAST_QUOTIENT/OBS/OBS_KHOA_'+YM+'.txt')\r\n\r\n#[Info Data Read]=========================================================================================\r\nOBS_INF = open('./Info/KHOA_OBS_INFO.csv','r',encoding='utf8')\r\nOBS_STN = [str(INFO) for INFO in OBS_INF.read().split()]\r\n#print(len(OBS_STN), len(OBS_STN)-1)\r\n\r\n#[OBS Data Read]==========================================================================================\r\nOBS_DATA = open('./Result/OBS_Monthly/OBS_KHOA_'+YM+'.txt','r',encoding='utf8')\r\nOBS = [str(INFO) for INFO in OBS_DATA.read().split()]\r\n\r\nMiss = -999\r\nii = 0\r\nqc_data = []\r\nwhile ii <= len(OBS)-1 : \r\n\tDEV = OBS[ii].split(',')\r\n\tPOINT = DEV[0] ; YMDHMS = DEV[1] ; SST = DEV[6] ; WSPD = DEV[16] ; WDIR = DEV[17] ; WHT = DEV[32]\r\n\t#print(YMDHMS, YMDHMS[10:14])\r\n\tif YMDHMS[10:14] == '0000' :\r\n\t\t#print(POINT, YMDHMS[0:10], SST, WSPD, WDIR)\r\n\t\tif WHT == 'NAN': WHT = Miss\r\n\t\tif SST == 'NAN' : SST = Miss\r\n\t\tif WDIR == 'NAN' : WDIR = Miss\r\n\t\tif WSPD == 'NAN' :\r\n\t\t\tWSPD = Miss ; USPD = Miss ; VSPD = Miss\r\n\t\telse :\r\n\t\t\tUSPD = round(-abs(float(WSPD))*sin(np.deg2rad(float(WDIR))),1)\r\n\t\t\tVSPD = round(-abs(float(WSPD))*cos(np.deg2rad(float(WDIR))),1)\r\n\t\t\t\r\n\t\tjj = 0\r\n\t\twhile jj < len(OBS_STN)-1:\r\n\t\t\tDEV2 = OBS_STN[jj].split(',')\r\n\t\t\tPOINT2 = DEV2[0] ; NAME = DEV2[1] ; TYPE = DEV2[2]\r\n\t\t\t#print(POINT, POINT2, NAME, TYPE)\r\n\t\t\t\r\n\t\t\tif POINT == POINT2 :\r\n\t\t\t\tOBS_POINT = POINT ; OBS_NAME = NAME ; OBS_TYPE = TYPE\r\n\t\t\t\tqc_data.append([OBS_POINT, OBS_NAME, YMDHMS[0:10], SST, float(WHT), WSPD, WDIR, USPD, VSPD, OBS_TYPE])\r\n\t\t\tjj = jj + 1\r\n\tii = ii + 1\r\n\t\r\n#ii = 0\r\n#while ii <= len(qc_data)-1 :\r\n#\tprint(qc_data[ii])\r\n#\tii = ii + 1\r\n\t\r\nqc_data_len = len(qc_data)\r\nOUT_FNAME='./Result/OBS_Monthly/OBS_KHOA_'+YM+'.csv'\r\nwith open(OUT_FNAME,'w',encoding='utf8') as file:\r\n\tfile.write('지점번호,지점명,년월일시,수온,파고,풍속,풍향,U풍속,V풍속,관측소\\n')\r\n\tii = 0\r\n\twhile ii <= qc_data_len-1:\r\n\t\tfile.write(f'{qc_data[ii][0]},{qc_data[ii][1]},{qc_data[ii][2]},{qc_data[ii][3]},{qc_data[ii][4]},{qc_data[ii][5]},{qc_data[ii][6]},{qc_data[ii][7]},{qc_data[ii][8]},{qc_data[ii][9]}\\n')\r\n\t\tii = ii + 1\r\n\r\nprint(\"#KHOA Monthly OBS data collection Complete==========\")","repo_name":"yujoooh/OCEAN_INDEX","sub_path":"0_8.KHOA_OBS_QC_MONTHLY.py","file_name":"0_8.KHOA_OBS_QC_MONTHLY.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"7437113429","text":"import pandas as pd\nimport plotly.graph_objs as go\nimport plotly.express as px\n\nclass TempVar:\n def __init__(self, data):\n self.data = data\n self.variation_df = None\n \n\n # this method creates a dataset containing for each city, for each year, the maximum \n # temperature, the minimum temperature, the average temperature and the temperature \n # variation (max - min)\n def TemperatureVariationCalculator(self):\n # empty list to store results\n results = []\n\n for city in self.data['City'].unique():\n city_data = self.data[self.data['City'] == city]\n\n for year in city_data['dt'].dt.year.unique():\n year_data = city_data[city_data['dt'].dt.year == year]\n\n \n max_temp = year_data['AverageTemperature'].max()\n min_temp = year_data['AverageTemperature'].min()\n variation = max_temp - min_temp\n avg_temp = year_data['AverageTemperature'].mean()\n\n # filling the list\n results.append({\n 'City': city,\n 'Year': year,\n 'Temp Max': max_temp,\n 'Temp Min': min_temp,\n 'Avg Temp': avg_temp,\n 'TemperatureVariation': variation\n })\n\n # convert the list to dataframe\n variation_df = pd.DataFrame(results)\n\n # saves the dataframe as an attribute of the class\n self.variation_df = variation_df\n\n return variation_df\n \n\n # this method does the same thing me for countries \n # (another method was created specifically to specify the name of variables)\n def CountryTemperatureVariationCalculator(self):\n results = []\n\n for country in self.data['Country'].unique():\n country_data = self.data[self.data['Country'] == country]\n\n for year in country_data['dt'].dt.year.unique():\n year_data = country_data[country_data['dt'].dt.year == year]\n\n max_temp = year_data['AverageTemperature'].max()\n min_temp = year_data['AverageTemperature'].min()\n variation = max_temp - min_temp\n avg_temp = year_data['AverageTemperature'].mean()\n\n results.append({\n 'Country': country,\n 'Year': year,\n 'Temp Max': max_temp,\n 'Temp Min': min_temp,\n 'Avg Temp': avg_temp,\n 'TemperatureVariation': variation\n })\n\n variation_df = pd.DataFrame(results)\n\n self.variation_df = variation_df\n\n return variation_df\n \n # this method creates a graph per variation_df (after being filtered by country/city).\n # A time series of data is created for the specific chosen city/country showing \n # the maximum, minimum and average temperature\n def TemperatureSeriesPlotter(self):\n self.data.sort_values(by='Year', inplace=True)\n\n trace_max = go.Scatter(x=self.data['Year'], y=self.data['Temp Max'], mode='lines', name='Max Temperature', line=dict(color='red'))\n trace_min = go.Scatter(x=self.data['Year'], y=self.data['Temp Min'], mode='lines', name='Min Temperature', line=dict(color='blue'))\n trace_avg = go.Scatter(x=self.data['Year'], y=self.data['Avg Temp'], mode='lines', name='Avg Temperature', line=dict(color='green'))\n\n fill_x = self.data['Year'].tolist() + self.data['Year'].tolist()[::-1]\n fill_y = self.data['Temp Max'].tolist() + self.data['Temp Min'].tolist()[::-1]\n\n\n trace_fill = go.Scatter(\n x=fill_x,\n y=fill_y,\n mode = 'lines',\n name='Temperature Range',\n fill='toself',\n fillcolor='rgba(0,100,80,0.2)',\n line=dict(color='rgba(255,255,255,0)')\n )\n\n fig = go.Figure()\n fig.add_trace(trace_fill)\n fig.add_trace(trace_max)\n fig.add_trace(trace_min)\n fig.add_trace(trace_avg)\n\n fig.update_layout(\n title='Temperature Time Series',\n xaxis_title='Year',\n yaxis_title='Temperature',\n showlegend=True\n )\n fig.update_layout(width=1300, height=600)\n return fig\n \n # this method allows you to view the monthly time series of several countries/cities \n # of your choice\n def GlobalMonthlyTemperaturePlotter(self, selection, is_country=True):\n if is_country:\n selected_data = self.data[self.data['Country'].isin(selection)]\n else:\n selected_data = self.data[self.data['City'].isin(selection)]\n\n selected_data['Month'] = selected_data['dt'].dt.month\n selected_data['Year'] = selected_data['dt'].dt.year\n\n fig = go.Figure()\n\n color_palette = px.colors.qualitative.Set1\n\n for i, item in enumerate(selection):\n monthly_avg_temp = selected_data[selected_data['Country' if is_country else 'City'] == item].groupby(['Year', 'Month'])['AverageTemperature'].mean().reset_index()\n monthly_avg_temp['Date'] = pd.to_datetime(monthly_avg_temp[['Year', 'Month']].assign(DAY=1))\n fig.add_trace(go.Scatter(x=monthly_avg_temp['Date'], y=monthly_avg_temp['AverageTemperature'], mode='lines', name=item, line=dict(color=color_palette[i])))\n\n fig.update_layout(\n title_text=\"Global Monthly Average Temperature\",\n paper_bgcolor='rgba(0,0,0,0)'\n )\n\n # range slider\n fig.update_layout(\n xaxis=dict(\n rangeselector=dict(\n buttons=list([\n dict(count=3, label=\"3m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(count=2, label=\"2y\", step=\"year\", stepmode=\"backward\"),\n dict(count=3, label=\"3y\", step=\"year\", stepmode=\"backward\"),\n dict(count=10, label=\"10y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\")\n ])\n ),\n rangeslider=dict(visible=True),\n type=\"date\"\n )\n \n )\n fig.update_layout(width=1300, height=600)\n\n return fig","repo_name":"sergiopicascia/dscoding-projects","sub_path":"sara.peri/progetto_dse/progetto_dse/dati.py","file_name":"dati.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"18958135783","text":"# %% 初始化\nimport requests\nimport re\nfrom datetime import datetime, timedelta\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36\"}\n\nquery = {\n 'key': 'f45b896485c79fe922e7f022a8bc6f71',\n 'token': 'ATTAae59e22e7d144839c54a444aa4f24d4f3ede09405b11ace472e773a78a23b0e8F2D629A2',\n}\n\nheaders = {\n \"Accept\": \"application/json\"\n}\n\n# %% \n# trello上获取患者住院号列表和id列表,并排除其他无关列表\ntpListRaw = requests.request(\n \"GET\",\n \"https://api.trello.com/1/boards/65296c002df7c2c909517c4e/lists\",\n headers=headers,\n params=query\n).json()\n\npattern = r'^[A-Za-z0-9]+-[\\u4e00-\\u9fa5]+-\\d+-[\\u4e00-\\u9fa5]*$'\n\ntpList = [{key: d[key] for key in ['id', 'name']}\n for d in tpListRaw if re.match(pattern, d['name'])]\nfor item in tpList:\n item['mrn'] = int(item['name'].split('-')[2])\n\nfor tpItem in tpList:\n\n # 获取相应患者的card列表\n tPatientCardList = requests.request(\n \"GET\",\n f\"https://api.trello.com/1/lists/{tpItem['id']}/cards\",\n headers=headers,\n params=query\n ).json()\n\n # 筛选出 tPatientCardLIst 中 name 为 “化验结果”的字典\n tLabCardID = None\n tLabRepoList = []\n for i in tPatientCardList:\n if i['name'] == \"化验结果\":\n tLabCardID = i['id']\n # 从tLabCardDesc字符串中提取出以$$开头和结尾的内容,并根据逗号分隔成一个列表\n tLabRepoList = re.findall(r'\\$\\$(.*?)\\$\\$', i['desc'])\n tLabRepoList = [item.strip() for item in tLabRepoList[0].split(\n ',')] if tLabRepoList else []\n break\n\n # 根据住院号获得检查列表\n hLabList = requests.get(\n f\"http://20.21.1.224:5537/api/api//LisReport/GetLisReportIndexHalf/{tpItem['mrn']}/1\").json()\n\n # 将hLabList 的repo项保存到 hLabRepoList里\n hLabRepoList = []\n for i in hLabList:\n hLabRepoList.append(i['repo'])\n\n if set(hLabRepoList).difference(set(tLabRepoList)):\n # 如果hLabRepoList 和 tLabRepoLis 相同则无需同步,如果不同就重新获取所有化验结果\n\n # 获取具体化验结果\n # 对hLabList里的每一个字典,构建url,格式为 \"http://20.21.1.224:5537/api/api/LisReport/GetLisReportDetail/9532290/{dodate}/{specimenid}/{domany}\"\n # 通过request获取具体化验结果,并转为list\n # 每个list 找到每个zdbz非空的字典,追加输出 xmmc、jg、zdbz、ckqj列到变量 hLabRes 里\n # 如果找不到非空的字典,则输出 “阴性”\n # 每次追加用hLabList里的 checkitem + dodate 分隔\n\n hLabRes = \"\"\n for lab in hLabList:\n url = f\"http://20.21.1.224:5537/api/api/LisReport/GetLisReportDetail/{tpItem['mrn']}/{lab['dodate']}/{lab['specimenid']}/{lab['domany']}\"\n lab_detail = requests.get(url).json()\n zdbz_dict_list = [d for d in lab_detail if d['zdbz']]\n if zdbz_dict_list:\n hLabRes += f\"{lab['checkitem']} {lab['dodate']}\\n```\\n\"\n for zdbz_dict in zdbz_dict_list:\n hLabRes += '\\t'.join([zdbz_dict['xmmc'], zdbz_dict['jg'],\n zdbz_dict['zdbz'], zdbz_dict['ckqj']])\n hLabRes += '\\n'\n hLabRes += '```\\n'\n else:\n hLabRes += f\"{lab['checkitem']} {lab['dodate']}: 阴性\\n\\n\"\n\n # 根据tLabCardID是否存在,判断是新建还是更新\n if tLabCardID:\n response = requests.request(\n \"PUT\",\n f\"https://api.trello.com/1/cards/{tLabCardID}\",\n headers=headers,\n params=dict({\"desc\": \"$$\" + ', '.join(hLabRepoList)+\"$$\\n\\n\"+hLabRes,\n \"due\": (datetime.now() + timedelta(hours=8)).isoformat()+\"Z\",\n \"dueComplete\": \"false\"},\n **query)\n )\n print(response.text)\n print(\"updated\")\n else:\n requests.request(\n \"POST\",\n \"https://api.trello.com/1/cards\",\n headers=headers,\n params=dict({\"idList\": tpItem['id'],\n \"name\": \"化验结果\",\n \"desc\": \"$$\" + ', '.join(hLabRepoList)+\"$$\\n\\n\"+hLabRes},\n **query)\n )\n\n\n# %%\n","repo_name":"zhihuai1982/MedTracker","sub_path":"labCard.py","file_name":"labCard.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35348967692","text":"\"\"\"Leer dos números enteros de dos dígitos y determinar si la suma de los dos números\norigina un número par\"\"\"\n\ntry: #capturamos los numeros\n\tnum1 = int(input(\"Ingrese el primer numero entero\"))\n\tnum2 = int(input(\"Ingrese el segundo numero entero\")) \n\n\tif (num1 and num2) >= 10 and (num2 and num1) <= 99: #verificamos si los numeros tienen dos digitos\n\t\n\t\tif ((num1 + num2)% 2 == 0): #verficamos si la suma de los numeros es par\n\n\t\t\tprint(f\"la suma de los dos numeros origina un numero par que es:{num1 + num2} \")\n\n\t\telse:\n\t\t\t\t#de lo contrario\n\t\t\tprint(f\"La suma de los dos numeros origina un numero impar que es: {num1 + num2}\" )\n\telse:\n\t\t\n\t\tprint(\"Al menos uno de los dos numeros esta fuera del rango\")\n\nexcept ValueError:\n\tprint(\"El dato ingresado debe ser numerico\")","repo_name":"yesidmoli/ejercicios_de_programacion_logica","sub_path":"python/ejercicios_condicionales py/en_español/ej_condicional13.py","file_name":"ej_condicional13.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20946516617","text":"# a função \"sqrt\" fornece a raiz perfeita de um número, ou seja, retorna o valor com ponto flutuante caso seja necessario\nfrom math import sqrt\nn = int(input(\"Insira um número: \"))\ndobro = n * 2\ntriplo = n * 3\nraiz = float(sqrt(n))\nprint(f\"O dobro de {n} vale {dobro}, o triplo vale {triplo} e sua raiz quadrada é {raiz:,.2f}\")\n\n# Já a função \"isqrt\" retorna a raiz de forma arredondada, ou seja, o número mais proximo da raiz, por exemplo, a raiz de 5 seria 2.24, essa função retornaria 2.00\n\nfrom math import isqrt\nn = int(input(\"Insira um número: \"))\ndobro = n * 2\ntriplo = n * 3\nraiz = float(isqrt(n))\nprint(f\"O dobro de {n} vale {dobro}, o triplo vale {triplo} e sua raiz quadrada é {raiz}\")\n","repo_name":"DouglasLiebl/Python","sub_path":"Another things/raiz inteira e raiz perfeita .py","file_name":"raiz inteira e raiz perfeita .py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10694359109","text":"from .model import TangledModel\n\ndef model_to_tflite(input_model_path:str, output_model_path:str) -> None:\n print(f\"Converting {input_model_path} to {output_model_path}\")\n import tensorflow as tf\n\n model = TangledModel()\n model.load_weights(input_model_path)\n\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n tflite_quant_model = converter.convert()\n\n with open(output_model_path, 'wb') as tflite_model_output:\n tflite_model_output.write(tflite_quant_model)\n\ndef model_to_tfjs(input_model_path:str, output_model_path:str) -> None:\n print(f\"Converting {input_model_path} to {output_model_path}\")\n import tensorflowjs as tfjs\n\n model = TangledModel()\n model.load_weights(input_model_path)\n\n tfjs.converters.save_keras_model(model, output_model_path)\n","repo_name":"jperryhouts/Tangler","sub_path":"tangler/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34440468437","text":"with open('17_2000.txt') as f:\n seq = list(map(int, f.readlines()))\n counter = 0\n min_diff = 9999999\n for i in range(len(seq) - 2):\n if seq[i] < seq[i + 1] < seq[i + 2]:\n counter += 1\n if seq[i + 2] - seq[i] < min_diff:\n min_diff = seq[i + 2] - seq[i]\nprint(counter, min_diff)\n","repo_name":"Stol777/FedorStol77","sub_path":"kompege 17/2000.py","file_name":"2000.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30176267075","text":"from flask import Flask\r\nfrom data import db_session\r\nfrom data.users import User\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\r\n\r\n\r\ndef add_user(surname, name, age, position, speciality, address, email):\r\n user = User()\r\n user.surname = surname\r\n user.name = name\r\n user.age = age\r\n user.position = position\r\n user.speciality = speciality\r\n user.address = address\r\n user.email = email\r\n return user\r\n\r\n\r\ndef main():\r\n db_session.global_init(\"db/blogs.db\")\r\n session = db_session.create_session()\r\n session.add(add_user('Scott', 'Ridley', 21, 'captain', 'research engineer', 'module_1', 'scott_chief@mars.org'))\r\n session.add(add_user('Smith', 'Tom', 32, 'Senior', 'researching engineer', 'module_1', 'smith324397@mars.org'))\r\n session.add(add_user('Petrov', 'Vasya', 24, 'Middle', 'researching engineer', 'module_0', 'Vasya_top@mars.org'))\r\n session.add(add_user('Ivanov', 'Petya', 23, 'Junior', 'researching engineer', 'module_0', 'Vasya_loh@mars.org'))\r\n session.commit()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"kotosusl/sqlalchemy","sub_path":"sqlalchemy_tasks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12757693848","text":"\"\"\"\nVoronoi Diagram (Divide and Conquer) \nCourse: Design and Analysis of Algorithm, 2022 Fall, NSYSU\nAuthor: Bo Han, Chen\nStudent ID: B083040012\n\"\"\"\n\nfrom PyQt5.QtWidgets import *\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib.figure import Figure\n\nclass diagramWidget(QWidget):\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n self.canvas = FigureCanvas(Figure())\n\n vertical_layout = QVBoxLayout()\n vertical_layout.addWidget(self.canvas)\n self.canvas.axes = self.canvas.figure.add_subplot(111)\n self.setLayout(vertical_layout)\n\n def reset(self):\n self.canvas.axes.clear()\n self.canvas.axes.set_xlim([0, 600])\n self.canvas.axes.set_ylim([0, 600])\n self.canvas.axes.invert_yaxis()\n self.canvas.draw()","repo_name":"bhchen2001/NSYSU_Design_and_Analysis_of_Algorithm","sub_path":"code/obj/diagramwidget.py","file_name":"diagramwidget.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42941126852","text":"#coding:utf-8\n\n'''\n给定两个二叉树,想象当你将它们中的一个覆盖到另一个上时,两个二叉树的一些节点便会重叠。你需要将他们合并为一个新的二叉树。\n合并的规则是如果两个节点重叠,那么将他们的值相加作为节点合并后的新值,否则不为 NULL 的节点将直接作为新二叉树的节点。\n\n'''\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def mergeTrees(self, t1, t2):\n if not t1:\n return t2\n if not t2:\n return t1\n node = TreeNode(t1.val + t2.val)\n node.left = self.mergeTrees(t1.left, t2.left)\n node.right = self.mergeTrees(t1.right, t2.right)\n return node","repo_name":"BoatInTheRiver/codes_algorithm","sub_path":"leetcode/tree/617. 合并二叉树.py","file_name":"617. 合并二叉树.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"8083210909","text":"from django.shortcuts import get_object_or_404\nfrom django.views.generic import list_detail\nfrom models import Project, Type, Role, Skill\n\ndef projects_by_type(request, slug):\n\n # Look up type (and raise a 404 if it can't be found).\n type = get_object_or_404(Type, slug=slug)\n\n # Use the generic object_list view to return the list of projects\n return list_detail.object_list(\n request,\n queryset = Project.objects.filter(public=True, type=type),\n template_name = 'portfolio/projects_by_type.html',\n template_object_name = 'project',\n extra_context = {'type': type}\n )\n\ndef projects_by_role(request, type_slug, role_slug):\n\n # Look up role (and raise a 404 if it can't be found).\n role = get_object_or_404(Role, slug=role_slug)\n\n context = {'role': role}\n\n # If a specific type is requested, look up type (and raise a 404 if it can't be found)\n if type_slug != 'all':\n type = get_object_or_404(Type, slug=type_slug)\n context['type'] = type\n else:\n type = type_slug\n\n # Filter the queryset for the (type and) role requested\n if type != type_slug:\n queryset = Project.objects.filter(public=True, type=type, role=role)\n else:\n queryset = Project.objects.filter(public=True, role=role)\n\n # Use the generic object_list view to return the list of projects\n return list_detail.object_list(\n request,\n queryset,\n template_name = 'portfolio/projects_by_role.html',\n template_object_name = 'project',\n extra_context = context\n )\n\ndef projects_by_skill(request, slug):\n\n # Look up skill (and raise a 404 if it can't be found).\n skill = get_object_or_404(Skill, slug=slug)\n\n # Use the generic object_list view to return the list of projects\n return list_detail.object_list(\n request,\n queryset = Project.objects.filter(public=True, skills=skill),\n template_name = 'portfolio/projects_by_skill.html',\n template_object_name = 'project',\n extra_context = {'skill': skill}\n )\n\ndef project_detail(request, slug):\n\n # Look up project (and raise a 404 if it can't be found).\n project = get_object_or_404(Project, slug=slug)\n\n return list_detail.object_detail(\n request,\n queryset = Project.objects.filter(public=True),\n object_id = project.id,\n template_name = 'portfolio/project_detail.html',\n template_object_name = 'project'\n )\n","repo_name":"pigmonkey/django-portfolio","sub_path":"portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"9312307053","text":"# python 3.7\n\"\"\"Contains the implementation of generator described in PGGAN.\n\nDifferent from the official tensorflow version in folder `pggan_tf_official`,\nthis is a simple pytorch version which only contains the generator part. This\nclass is specially used for inference.\n\nFor more details, please check the original paper:\nhttps://arxiv.org/pdf/1710.10196.pdf\n\"\"\"\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['PGGANGeneratorNet']\n\n# Resolutions allowed.\n_RESOLUTIONS_ALLOWED = [8, 16, 32, 64, 128, 256, 512, 1024]\n\n# Initial resolution.\n_INIT_RES = 4\n\n\nclass PGGANGeneratorNet(nn.Module):\n \"\"\"Defines the generator network in PGGAN.\n\n NOTE: The generated images are with `RGB` color channels and range [-1, 1].\n \"\"\"\n\n def __init__(self,\n resolution=1024,\n z_space_dim=512,\n image_channels=3,\n fused_scale=False,\n fmaps_base=16 << 10,\n fmaps_max=512):\n \"\"\"Initializes the generator with basic settings.\n\n Args:\n resolution: The resolution of the output image. (default: 1024)\n z_space_dim: The dimension of the initial latent space. (default: 512)\n image_channels: Number of channels of the output image. (default: 3)\n fused_scale: Whether to fused `upsample` and `conv2d` together, resulting\n in `conv2d_transpose`. (default: False)\n fmaps_base: Base factor to compute number of feature maps for each layer.\n (default: 16 << 10)\n fmaps_max: Maximum number of feature maps in each layer. (default: 512)\n\n Raises:\n ValueError: If the input `resolution` is not supported.\n \"\"\"\n super().__init__()\n\n if resolution not in _RESOLUTIONS_ALLOWED:\n raise ValueError(f'Invalid resolution: {resolution}!\\n'\n f'Resolutions allowed: {_RESOLUTIONS_ALLOWED}.')\n\n self.init_res = _INIT_RES\n self.init_res_log2 = int(np.log2(self.init_res))\n self.resolution = resolution\n self.final_res_log2 = int(np.log2(self.resolution))\n self.z_space_dim = z_space_dim\n self.image_channels = image_channels\n self.fused_scale = fused_scale\n self.fmaps_base = fmaps_base\n self.fmaps_max = fmaps_max\n\n self.num_layers = (self.final_res_log2 - self.init_res_log2 + 1) * 2\n\n self.lod = nn.Parameter(torch.zeros(()))\n self.pth_to_tf_var_mapping = {'lod': 'lod'}\n for res_log2 in range(self.init_res_log2, self.final_res_log2 + 1):\n res = 2 ** res_log2\n block_idx = res_log2 - self.init_res_log2\n\n # First convolution layer for each resolution.\n if res == self.init_res:\n self.add_module(\n f'layer{2 * block_idx}',\n ConvBlock(in_channels=self.z_space_dim,\n out_channels=self.get_nf(res),\n kernel_size=self.init_res,\n padding=3))\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.conv.weight'] = (\n f'{res}x{res}/Dense/weight')\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.wscale.bias'] = (\n f'{res}x{res}/Dense/bias')\n else:\n self.add_module(\n f'layer{2 * block_idx}',\n ConvBlock(in_channels=self.get_nf(res // 2),\n out_channels=self.get_nf(res),\n upsample=True,\n fused_scale=self.fused_scale))\n if self.fused_scale:\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.weight'] = (\n f'{res}x{res}/Conv0_up/weight')\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.wscale.bias'] = (\n f'{res}x{res}/Conv0_up/bias')\n else:\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.conv.weight'] = (\n f'{res}x{res}/Conv0/weight')\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.wscale.bias'] = (\n f'{res}x{res}/Conv0/bias')\n\n # Second convolution layer for each resolution.\n self.add_module(\n f'layer{2 * block_idx + 1}',\n ConvBlock(in_channels=self.get_nf(res),\n out_channels=self.get_nf(res)))\n if res == self.init_res:\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.conv.weight'] = (\n f'{res}x{res}/Conv/weight')\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.wscale.bias'] = (\n f'{res}x{res}/Conv/bias')\n else:\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.conv.weight'] = (\n f'{res}x{res}/Conv1/weight')\n self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.wscale.bias'] = (\n f'{res}x{res}/Conv1/bias')\n\n # Output convolution layer for each resolution.\n self.add_module(\n f'output{block_idx}',\n ConvBlock(in_channels=self.get_nf(res),\n out_channels=self.image_channels,\n kernel_size=1,\n padding=0,\n wscale_gain=1.0,\n activation_type='linear'))\n self.pth_to_tf_var_mapping[f'output{block_idx}.conv.weight'] = (\n f'ToRGB_lod{self.final_res_log2 - res_log2}/weight')\n self.pth_to_tf_var_mapping[f'output{block_idx}.wscale.bias'] = (\n f'ToRGB_lod{self.final_res_log2 - res_log2}/bias')\n self.upsample = ResolutionScalingLayer()\n\n def get_nf(self, res):\n \"\"\"Gets number of feature maps according to current resolution.\"\"\"\n return min(self.fmaps_base // res, self.fmaps_max)\n\n def forward(self, z):\n if not (len(z.shape) == 2 and z.shape[1] == self.z_space_dim):\n raise ValueError(f'The input tensor should be with shape [batch_size, '\n f'latent_space_dim], where `latent_space_dim` equals to '\n f'{self.z_space_dim}!\\n'\n f'But {z.shape} received!')\n x = z.view(z.shape[0], self.z_space_dim, 1, 1)\n\n lod = self.lod.cpu().tolist()\n for res_log2 in range(self.init_res_log2, self.final_res_log2 + 1):\n if res_log2 + lod <= self.final_res_log2:\n block_idx = res_log2 - self.init_res_log2\n x = self.__getattr__(f'layer{2 * block_idx}')(x)\n x = self.__getattr__(f'layer{2 * block_idx + 1}')(x)\n image = self.__getattr__(f'output{block_idx}')(x)\n else:\n image = self.upsample(image)\n return image\n\n\nclass PixelNormLayer(nn.Module):\n \"\"\"Implements pixel-wise feature vector normalization layer.\"\"\"\n\n def __init__(self, epsilon=1e-8):\n super().__init__()\n self.eps = epsilon\n\n def forward(self, x):\n return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.eps)\n\n\nclass ResolutionScalingLayer(nn.Module):\n \"\"\"Implements the resolution scaling layer.\n\n Basically, this layer can be used to upsample feature maps from spatial domain\n with nearest neighbor interpolation.\n \"\"\"\n\n def __init__(self, scale_factor=2):\n super().__init__()\n self.scale_factor = scale_factor\n\n def forward(self, x):\n return F.interpolate(x, scale_factor=self.scale_factor, mode='nearest')\n\n\nclass WScaleLayer(nn.Module):\n \"\"\"Implements the layer to scale weight variable and add bias.\n\n NOTE: The weight variable is trained in `nn.Conv2d` layer, and only scaled\n with a constant number, which is not trainable in this layer. However, the\n bias variable is trainable in this layer.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, gain=np.sqrt(2.0)):\n super().__init__()\n fan_in = in_channels * kernel_size * kernel_size\n self.scale = gain / np.sqrt(fan_in)\n self.bias = nn.Parameter(torch.zeros(out_channels))\n\n def forward(self, x):\n return x * self.scale + self.bias.view(1, -1, 1, 1)\n\n\nclass ConvBlock(nn.Module):\n \"\"\"Implements the convolutional block.\n\n Basically, this block executes pixel-wise normalization layer, upsampling\n layer (if needed), convolutional layer, weight-scale layer, and activation\n layer in sequence.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n dilation=1,\n add_bias=False,\n upsample=False,\n fused_scale=False,\n wscale_gain=np.sqrt(2.0),\n activation_type='lrelu'):\n \"\"\"Initializes the class with block settings.\n\n Args:\n in_channels: Number of channels of the input tensor fed into this block.\n out_channels: Number of channels of the output tensor.\n kernel_size: Size of the convolutional kernels.\n stride: Stride parameter for convolution operation.\n padding: Padding parameter for convolution operation.\n dilation: Dilation rate for convolution operation.\n add_bias: Whether to add bias onto the convolutional result.\n upsample: Whether to upsample the input tensor before convolution.\n fused_scale: Whether to fused `upsample` and `conv2d` together, resulting\n in `conv2d_transpose`.\n wscale_gain: The gain factor for `wscale` layer.\n activation_type: Type of activation function. Support `linear`, `lrelu`\n and `tanh`.\n\n Raises:\n NotImplementedError: If the input `activation_type` is not supported.\n \"\"\"\n super().__init__()\n\n self.pixel_norm = PixelNormLayer()\n\n if upsample and not fused_scale:\n self.upsample = ResolutionScalingLayer()\n else:\n self.upsample = nn.Identity()\n\n if upsample and fused_scale:\n self.use_conv2d_transpose = True\n self.weight = nn.Parameter(\n torch.randn(kernel_size, kernel_size, in_channels, out_channels))\n fan_in = in_channels * kernel_size * kernel_size\n self.scale = wscale_gain / np.sqrt(fan_in)\n else:\n self.use_conv2d_transpose = False\n self.conv = nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=1,\n bias=add_bias)\n\n self.wscale = WScaleLayer(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n gain=wscale_gain)\n\n if activation_type == 'linear':\n self.activate = nn.Identity()\n elif activation_type == 'lrelu':\n self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n elif activation_type == 'tanh':\n self.activate = nn.Hardtanh()\n else:\n raise NotImplementedError(f'Not implemented activation function: '\n f'{activation_type}!')\n\n def forward(self, x):\n x = self.pixel_norm(x)\n x = self.upsample(x)\n if self.use_conv2d_transpose:\n kernel = self.weight * self.scale\n kernel = F.pad(kernel, (0, 0, 0, 0, 1, 1, 1, 1), 'constant', 0.0)\n kernel = (kernel[1:, 1:] + kernel[:-1, 1:] +\n kernel[1:, :-1] + kernel[:-1, :-1])\n kernel = kernel.permute(2, 3, 0, 1)\n x = F.conv_transpose2d(x, kernel, stride=2, padding=1)\n x = x / self.scale\n else:\n x = self.conv(x)\n x = self.wscale(x)\n x = self.activate(x)\n return x\n","repo_name":"genforce/mganprior","sub_path":"models/pggan_generator_network.py","file_name":"pggan_generator_network.py","file_ext":"py","file_size_in_byte":11276,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"22"} +{"seq_id":"74686424374","text":"dataarray = []\nwith open(\"03/input\") as f:\n for line in f:\n dataarray.append(line.rstrip())\n\nwidth = len(dataarray[0])\n\n# Part One\n\ntreecount = 0\n\n# We start at the top-left\nposleft = 0\nlinenum = 0\n\nfor entry in dataarray:\n linenum += 1 # We're interested in the value on the next line\n posleft += 3 # Move across 3 spaces\n try:\n spot = dataarray[linenum][posleft % width]\n except:\n print(\"end of the road\")\n break\n print(linenum, posleft, spot)\n if spot == \"#\":\n treecount += 1\n\nprint(\"Part One:\", treecount)\n\n# Part Two\n\ntreecount = 0\nslopes = [[1,1],[3,1],[5,1],[7,1],[1,2]]\n\ntrees = []\nfor slope in slopes:\n posleft = 0\n linenum = 0\n slopetrees = 0\n for entry in dataarray:\n linenum += slope[1] # Down second value\n posleft += slope[0] # Right first value\n try:\n spot = dataarray[linenum][posleft % width]\n except:\n break\n if spot == \"#\":\n slopetrees += 1\n\n trees.append(slopetrees)\n\ntreecount = 1 # Multiplication!\nfor tree in trees:\n treecount = treecount * tree\n\nprint(\"Part Two:\", treecount)","repo_name":"Hiramiya/adventofcode","sub_path":"2020/03/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38213583577","text":"'''Program will retrive data on ethereum from Coin Gecko to create a candlestick\r\nchart that tracks the prices of ethereum over a period of 30 days'''\r\n\r\nimport pandas as pd\r\nfrom pycoingecko import CoinGeckoAPI \r\nimport plotly.graph_objects as go\r\n\r\n\r\n#initialize class/object\r\ncg = CoinGeckoAPI()\r\n\r\n\r\n######## Get data from Coin Gecko#######\r\n#retrieves data on ethereum in 30 day time period\r\nethereum_data = cg.get_coin_market_chart_by_id(id='ethereum', vs_currency = 'usd', days = 30)\r\n\r\neth_price_data = ethereum_data['prices']\r\n\r\n#######organize data and convert values to readable data#####\r\n\r\ndata = pd.DataFrame(eth_price_data, columns = ['TimeStamp', 'Price'])\r\n\r\ndata['Date'] = pd.to_datetime(data['TimeStamp'], unit=('ms'))\r\n\r\n\r\n\r\n\r\n\r\n#########Create candlestick chart with data############\r\n\r\n#Groups data by date to get daily min, max, open price and closing price\r\ncandlestick_data = data.groupby(data.Date.dt.date).agg({'Price':['min','max','first','last' ]})\r\n\r\n\r\n\r\n\r\n\r\nfig = go.Figure(data = [go.Candlestick(x = candlestick_data.index,\r\n open = candlestick_data['Price']['first'],\r\n high = candlestick_data['Price'][\"max\"],\r\n low = candlestick_data['Price']['min'],\r\n close = candlestick_data['Price']['last'])\r\n ])\r\n\r\nfig.update_layout(xaxis_rangeslider_visible = False, xaxis_title = 'Date',\r\n yaxis_title = 'Price(USD$)', title = 'Ethereum Prices over 30 days')\r\n \r\n\r\n\r\nfig.write_html('Ethereum price over 30 days.html', auto_open = True)","repo_name":"Andre-Pinto-De-Magalhaes/Ethereum_Candlestick","sub_path":"Ethereum price over 30 days.py","file_name":"Ethereum price over 30 days.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42940790602","text":"#coding:utf-8\n\n'''\n爱丽丝和鲍勃一起玩游戏,他们轮流行动。爱丽丝先手开局。\n\n最初,黑板上有一个数字 N 。在每个玩家的回合,玩家需要执行以下操作:\n\n选出任一 x,满足 0 < x < N 且 N % x == 0 。\n用 N - x 替换黑板上的数字 N 。\n如果玩家无法执行这些操作,就会输掉游戏。\n\n只有在爱丽丝在游戏中取得胜利时才返回 True,否则返回 False。假设两个玩家都以最佳状态参与游戏。\n\n'''\n'''\n多模拟几次会发现N为偶数,爱丽丝必赢,N为奇数,爱丽丝必输。\n所以直接代码为 return N % 2 == 0 一行即可。\n'''\nclass Solution:\n def divisorGame(self, N):\n if N == 1:\n return False\n dp = [False for _ in range(N + 1)]\n dp[2] = True\n for i in range(3, N + 1):\n for j in range(1, i // 2 + 1):\n if i % j == 0 and not dp[i - j]:\n dp[i] = True\n break\n return dp[N]","repo_name":"BoatInTheRiver/codes_algorithm","sub_path":"leetcode/Dynamic_programming/1025. 除数博弈.py","file_name":"1025. 除数博弈.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"14996462485","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom . forms import RegisterForm , UserUpdateForm ,UserUpdateName\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import LoginView\nfrom django.urls import reverse\ndef register(request):\n\tif request.method == 'POST':\n\t\tform = RegisterForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('Profile-page')\n\telse:\n\t\tform = RegisterForm()\n\treturn render(request,'Users/register.html',{'form':form})\n\n\n\n@login_required\ndef Profile(request):\n\tif request.method == 'POST':\n\t\tu_form=UserUpdateForm(request.POST,instance=request.user)\n\t\tn_form=UserUpdateName(request.POST,instance=request.user.profile)\n\t\t#print(u_form)\n\t\tif u_form.is_valid and n_form.is_valid:\n\t\t\tu_form.save()\n\t\t\tn_form.save()\n\t\t\treturn redirect('Profile-page')\n\telse:\n\t\tu_form=UserUpdateForm(instance=request.user)\n\t\tn_form=UserUpdateName(instance=request.user.profile)\n\n\tcontext={'u_form':u_form,'n_form':n_form}\t\n\treturn render(request,'Users/profile.html',context)\n\n\nclass MyLoginView(LoginView):\n def get_success_url(self):\n return reverse('user-tasks', args=[self.request.user.username])","repo_name":"crow-dz/ToDoit","sub_path":"Users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72889035255","text":"from __future__ import annotations\n\nimport typing\nfrom collections import abc\n\nimport gt4py.eve as eve\nfrom gt4py.eve import datamodels\nfrom gt4py.eve.utils import noninstantiable\n\n\n\"\"\"Customizable constraint-based inference.\n\nBased on the classical constraint-based two-pass type consisting of the following passes:\n 1. Constraint collection\n 2. Type unification\n\"\"\"\n\n\nV = typing.TypeVar(\"V\", bound=\"TypeVar\")\nT = typing.TypeVar(\"T\", bound=\"Type\")\n\n\n@noninstantiable\nclass Type(eve.Node, unsafe_hash=True): # type: ignore[call-arg]\n \"\"\"Base class for all types.\n\n The initial type constraint collection pass treats all instances of Type as hashable frozen\n nodes, that is, no in-place modification is used.\n\n In the type unification phase however, in-place modifications are used for efficient\n renaming/node replacements and special care is taken to handle hash values that change due to\n those modifications.\n \"\"\"\n\n def handle_constraint(\n self, other: Type, add_constraint: abc.Callable[[Type, Type], None]\n ) -> bool:\n \"\"\"Implement special type-specific constraint handling for `self` ≡ `other`.\n\n New constraints can be added using the provided callback (`add_constraint`). Should return\n `True` if the provided constraint `self` ≡ `other` was handled, `False` otherwise. If the\n handler detects an unsatisfiable constraint, raise a `TypeError`.\n \"\"\"\n return False\n\n\nclass TypeVar(Type):\n \"\"\"Type variable.\"\"\"\n\n idx: int\n\n _counter: typing.ClassVar[int] = 0\n\n @staticmethod\n def fresh_index() -> int:\n TypeVar._counter += 1\n return TypeVar._counter\n\n @classmethod\n def fresh(cls: type[V], **kwargs: typing.Any) -> V:\n \"\"\"Create a type variable with a previously unused index.\"\"\"\n return cls(idx=cls.fresh_index(), **kwargs)\n\n\nclass _TypeVarReindexer(eve.NodeTranslator):\n \"\"\"Reindex type variables in a type tree.\"\"\"\n\n def __init__(self, indexer: abc.Callable[[dict[int, int]], int]):\n super().__init__()\n self.indexer = indexer\n\n def visit_TypeVar(self, node: V, *, index_map: dict[int, int]) -> V:\n node = self.generic_visit(node, index_map=index_map)\n new_index = index_map.setdefault(node.idx, self.indexer(index_map))\n new_values = {\n typing.cast(str, k): (new_index if k == \"idx\" else v)\n for k, v in node.iter_children_items()\n }\n return node.__class__(**new_values)\n\n\n@typing.overload\ndef freshen(dtypes: list[T]) -> list[T]:\n ...\n\n\n@typing.overload\ndef freshen(dtypes: T) -> T:\n ...\n\n\ndef freshen(dtypes: list[T] | T) -> list[T] | T:\n \"\"\"Re-instantiate `dtype` with fresh type variables.\"\"\"\n if not isinstance(dtypes, list):\n assert isinstance(dtypes, Type)\n return freshen([dtypes])[0]\n\n def indexer(index_map: dict[int, int]) -> int:\n return TypeVar.fresh_index()\n\n index_map = dict[int, int]()\n return [_TypeVarReindexer(indexer).visit(dtype, index_map=index_map) for dtype in dtypes]\n\n\ndef reindex_vars(dtypes: typing.Any) -> typing.Any:\n \"\"\"Reindex all type variables, to have nice indices starting at zero.\"\"\"\n\n def indexer(index_map: dict[int, int]) -> int:\n return len(index_map)\n\n index_map = dict[int, int]()\n return _TypeVarReindexer(indexer).visit(dtypes, index_map=index_map)\n\n\nclass _FreeVariables(eve.NodeVisitor):\n \"\"\"Collect type variables within a type expression.\"\"\"\n\n def visit_TypeVar(self, node: TypeVar, *, free_variables: set[TypeVar]) -> None:\n self.generic_visit(node, free_variables=free_variables)\n free_variables.add(node)\n\n\ndef _free_variables(x: Type) -> set[TypeVar]:\n \"\"\"Collect type variables within a type expression.\"\"\"\n fv = set[TypeVar]()\n _FreeVariables().visit(x, free_variables=fv)\n return fv\n\n\nclass _Dedup(eve.NodeTranslator):\n \"\"\"Deduplicate type nodes that have the same value but a different `id`.\"\"\"\n\n def visit(self, node, *, memo: dict[T, T]) -> typing.Any: # type: ignore[override]\n if isinstance(node, Type):\n node = super().visit(node, memo=memo)\n return memo.setdefault(node, node)\n return node\n\n\ndef _assert_constituent_types(value: typing.Any, allowed_types: tuple[type, ...]) -> None:\n if isinstance(value, tuple):\n for el in value:\n _assert_constituent_types(el, allowed_types)\n else:\n assert isinstance(value, allowed_types)\n\n\nclass _Renamer:\n \"\"\"Efficiently rename (that is, replace) nodes in a type expression.\n\n Works by collecting all parent nodes of all nodes in a tree. If a node should be replaced by\n another, all referencing parent nodes can be found efficiently and modified in place.\n\n Note that all types have to be registered before they can be used in a `rename` call.\n\n Besides basic renaming, this also resolves `ValTuple` to full `Tuple` if possible after\n renaming.\n \"\"\"\n\n def __init__(self) -> None:\n self._parents = dict[Type, list[tuple[Type, str]]]()\n\n def register(self, dtype: Type) -> None:\n \"\"\"Register a type for possible future renaming.\n\n Collects the parent nodes of all nodes in the type tree.\n \"\"\"\n\n def collect_parents(node: Type) -> None:\n for field, child in node.iter_children_items():\n if isinstance(child, Type):\n self._parents.setdefault(child, []).append((node, typing.cast(str, field)))\n collect_parents(child)\n else:\n _assert_constituent_types(child, (int, str))\n\n collect_parents(dtype)\n\n def _update_node(self, node: Type, field: str, replacement: Type) -> None:\n \"\"\"Replace a field of a node by some other value.\n\n Basically performs `setattr(node, field, replacement)`. Further, updates the mapping of node\n parents and handles the possibly changing hash value of the updated node.\n \"\"\"\n # Pop the node out of the parents dict as its hash could change after modification\n popped = self._parents.pop(node, None)\n\n # Update the node’s field\n setattr(node, field, replacement)\n\n # Register `node` to be the new parent of `replacement`\n self._parents.setdefault(replacement, []).append((node, field))\n\n # Put back possible previous entries to the parents dict after possible hash change\n if popped:\n self._parents[node] = popped\n\n def rename(self, node: Type, replacement: Type) -> None:\n \"\"\"Rename/replace all occurrences of `node` to/by `replacement`.\"\"\"\n try:\n # Find parent nodes\n nodes = self._parents.pop(node)\n except KeyError:\n return\n\n for node, field in nodes:\n # Default case: just update a field value of the node\n self._update_node(node, field, replacement)\n\n\nclass _Box(Type):\n \"\"\"Simple value holder, used for wrapping root nodes of a type tree.\n\n This makes sure that all root nodes have a parent node which can be updated by the `_Renamer`.\n \"\"\"\n\n value: Type\n\n\nclass _Unifier:\n \"\"\"A classical type unifier (Robinson, 1971).\n\n Computes the most general type satisfying all given constraints. Uses a `_Renamer` for efficient\n type variable renaming.\n \"\"\"\n\n def __init__(self, dtypes: list[Type], constraints: set[tuple[Type, Type]]) -> None:\n # Wrap the original `dtype` and all `constraints` to make sure they have a parent node and\n # thus the root nodes are correctly handled by the renamer\n self._dtypes = [_Box(value=dtype) for dtype in dtypes]\n self._constraints = [(_Box(value=s), _Box(value=t)) for s, t in constraints]\n\n # Create a renamer and register `dtype` and all `constraints` types\n self._renamer = _Renamer()\n for dtype in self._dtypes:\n self._renamer.register(dtype)\n for s, t in self._constraints:\n self._renamer.register(s)\n self._renamer.register(t)\n\n def unify(self) -> tuple[list[Type] | Type, list[tuple[Type, Type]]]:\n \"\"\"Run the unification.\"\"\"\n unsatisfiable_constraints = []\n while self._constraints:\n constraint = self._constraints.pop()\n try:\n handled = self._handle_constraint(constraint)\n if not handled:\n # Try with swapped LHS and RHS\n handled = self._handle_constraint(constraint[::-1])\n except TypeError:\n # custom constraint handler raised an error as constraint is not satisfiable\n # (contrary to just not handled)\n handled = False\n\n if not handled:\n unsatisfiable_constraints.append((constraint[0].value, constraint[1].value))\n\n unboxed_dtypes = [dtype.value for dtype in self._dtypes]\n\n return unboxed_dtypes, unsatisfiable_constraints\n\n def _rename(self, x: Type, y: Type) -> None:\n \"\"\"Type renaming/replacement.\"\"\"\n self._renamer.register(x)\n self._renamer.register(y)\n self._renamer.rename(x, y)\n\n def _add_constraint(self, x: Type, y: Type) -> None:\n \"\"\"Register a new constraint.\"\"\"\n x = _Box(value=x)\n y = _Box(value=y)\n self._renamer.register(x)\n self._renamer.register(y)\n self._constraints.append((x, y))\n\n def _handle_constraint(self, constraint: tuple[_Box, _Box]) -> bool:\n \"\"\"Handle a single constraint.\"\"\"\n s, t = (c.value for c in constraint)\n if s == t:\n # Constraint is satisfied if LHS equals RHS\n return True\n\n if type(s) is TypeVar:\n assert s not in _free_variables(t)\n # Just replace LHS by RHS if LHS is a type variable\n self._rename(s, t)\n return True\n\n if s.handle_constraint(t, self._add_constraint):\n # Use a custom constraint handler if available\n return True\n\n if type(s) is type(t):\n assert s not in _free_variables(t) and t not in _free_variables(s)\n assert datamodels.fields(s).keys() == datamodels.fields(t).keys()\n for k in datamodels.fields(s).keys():\n sv = getattr(s, k)\n tv = getattr(t, k)\n if isinstance(sv, Type):\n assert isinstance(tv, Type)\n self._add_constraint(sv, tv)\n else:\n assert sv == tv\n return True\n\n # Constraint handling failed\n return False\n\n\n@typing.overload\ndef unify(\n dtypes: list[Type], constraints: set[tuple[Type, Type]]\n) -> tuple[list[Type], list[tuple[Type, Type]]]:\n ...\n\n\n@typing.overload\ndef unify(\n dtypes: Type, constraints: set[tuple[Type, Type]]\n) -> tuple[Type, list[tuple[Type, Type]]]:\n ...\n\n\ndef unify(\n dtypes: list[Type] | Type, constraints: set[tuple[Type, Type]]\n) -> tuple[list[Type] | Type, list[tuple[Type, Type]]]:\n \"\"\"\n Unify all given constraints.\n\n Returns the unified types and a list of unsatisfiable constraints.\n \"\"\"\n if isinstance(dtypes, Type):\n result_types, unsatisfiable_constraints = unify([dtypes], constraints)\n return result_types[0], unsatisfiable_constraints\n\n # Deduplicate type nodes, this can speed up later things a bit\n memo = dict[Type, Type]()\n dtypes = [_Dedup().visit(dtype, memo=memo) for dtype in dtypes]\n constraints = {_Dedup().visit(c, memo=memo) for c in constraints}\n del memo\n\n unifier = _Unifier(dtypes, constraints)\n return unifier.unify()\n","repo_name":"GridTools/gt4py","sub_path":"src/gt4py/next/type_inference.py","file_name":"type_inference.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"22"} +{"seq_id":"26994812057","text":"import datetime\nimport inspect\nimport json\nimport logging\nfrom typing import Optional\n\nimport dateutil.parser\nfrom pydantic import ValidationError\nfrom pydantic.main import BaseModel\n\nfrom ribes.errors import ParseError, BaseJsonRpcError, InvalidParamsError, InternalError\nfrom ribes.models import JsonRpcRequest, JsonRpcResponse, JsonRpcError, ErrorStatus\n\n\nclass Dispatcher:\n logger = logging.getLogger(__name__)\n method_registry = {}\n\n @staticmethod\n def dict_to_parameters(signature: inspect.Signature, *args, **kwargs) -> dict:\n result = {}\n try:\n for index, (param_name, param_info) in enumerate(signature.parameters.items()):\n value = args[index] if index < len(args) else kwargs[param_name]\n if issubclass(param_info.annotation, BaseModel):\n result[param_name] = param_info.annotation(**value)\n elif param_info.annotation is datetime.datetime:\n result[param_name] = dateutil.parser.isoparser().isoparse(value)\n elif param_info.annotation is inspect.Signature.empty:\n result[param_name] = value\n else:\n result[param_name] = param_info.annotation(value)\n return result\n except KeyError:\n raise InvalidParamsError()\n\n def to_jsonrpc_error(self, error) -> str:\n id = getattr(error, 'id', None)\n code = getattr(error, 'code', InternalError.code)\n message = getattr(error, 'message', InternalError.message)\n self.logger.error(f'Generated error {code} : {message}')\n if not isinstance(error, BaseJsonRpcError):\n self.logger.error(f'Exception: {error}')\n return JsonRpcError(error=ErrorStatus(code=code, message=message), id=id).json(exclude_none=True)\n\n def register(self, name: str, func):\n self.method_registry[name] = (func, inspect.signature(func), inspect.iscoroutinefunction(func))\n\n async def dispatch(self, request: str) -> Optional[str]:\n try:\n jsonrpc_request = JsonRpcRequest(**json.loads(request))\n self.logger.info(f'Request to method {jsonrpc_request.method}')\n method, method_signature, method_coro = self.method_registry[jsonrpc_request.method]\n if isinstance(jsonrpc_request.params, list):\n params = Dispatcher.dict_to_parameters(method_signature, *jsonrpc_request.params)\n else:\n params = self.dict_to_parameters(method_signature, **jsonrpc_request.params)\n response = (await method(**params)) if method_coro else method(**params)\n if jsonrpc_request.id:\n return JsonRpcResponse(result=response, id=jsonrpc_request.id).json(exclude_none=True)\n except ValidationError:\n return self.to_jsonrpc_error(ParseError())\n except Exception as error:\n return self.to_jsonrpc_error(error)\n","repo_name":"aiselis/ribes","sub_path":"ribes/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"17546512551","text":"# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport threading\nfrom posixpath import join\n\nfrom medusa import app\nfrom medusa.helpers import (\n create_https_certificates,\n generate_api_key,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.server.api.v1.core import ApiHandler\nfrom medusa.server.api.v2.alias import AliasHandler\nfrom medusa.server.api.v2.alias_source import (\n AliasSourceHandler,\n AliasSourceOperationHandler,\n)\nfrom medusa.server.api.v2.auth import AuthHandler\nfrom medusa.server.api.v2.base import BaseRequestHandler, NotFoundHandler\nfrom medusa.server.api.v2.config import ConfigHandler\nfrom medusa.server.api.v2.episode_history import EpisodeHistoryHandler\nfrom medusa.server.api.v2.episodes import EpisodeHandler\nfrom medusa.server.api.v2.guessit import GuessitHandler\nfrom medusa.server.api.v2.history import HistoryHandler\nfrom medusa.server.api.v2.internal import InternalHandler\nfrom medusa.server.api.v2.log import LogHandler\nfrom medusa.server.api.v2.notifications import NotificationsHandler\nfrom medusa.server.api.v2.postprocess import PostProcessHandler\nfrom medusa.server.api.v2.providers import ProvidersHandler\nfrom medusa.server.api.v2.recommended import RecommendedHandler\nfrom medusa.server.api.v2.schedule import ScheduleHandler\nfrom medusa.server.api.v2.search import SearchHandler\nfrom medusa.server.api.v2.series import SeriesHandler\nfrom medusa.server.api.v2.series_asset import SeriesAssetHandler\nfrom medusa.server.api.v2.series_change_indexer import SeriesChangeIndexer\nfrom medusa.server.api.v2.series_legacy import SeriesLegacyHandler\nfrom medusa.server.api.v2.series_mass_edit import SeriesMassEdit\nfrom medusa.server.api.v2.series_mass_operation import SeriesMassOperation\nfrom medusa.server.api.v2.series_operation import SeriesOperationHandler\nfrom medusa.server.api.v2.stats import StatsHandler\nfrom medusa.server.api.v2.system import SystemHandler\nfrom medusa.server.web import (\n CalendarHandler,\n KeyHandler,\n LoginHandler,\n LogoutHandler,\n TokenHandler,\n)\nfrom medusa.server.web.core.base import AuthenticatedStaticFileHandler\nfrom medusa.ws.handler import WebSocketUIHandler\n\nimport six\n\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.web import (\n Application,\n RedirectHandler,\n StaticFileHandler,\n url,\n)\n\nfrom tornroutes import route\n\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef clean_url_path(*args, **kwargs):\n \"\"\"Make sure we end with a clean route.\"\"\"\n end_with_slash = kwargs.pop('end_with_slash', False)\n build_path = ''\n for arg in args:\n build_path = join(build_path.strip('/'), arg.strip('/'))\n\n build_path = '/' + build_path if build_path else ''\n\n if end_with_slash:\n build_path += '/'\n\n return build_path\n\n\ndef get_apiv2_handlers(base):\n \"\"\"Return api v2 handlers.\"\"\"\n return [\n\n # Order: Most specific to most generic\n # /api/v2/postprocess\n PostProcessHandler.create_app_handler(base),\n\n # /api/v2/providers\n ProvidersHandler.create_app_handler(base),\n\n # /api/v2/history/tvdb1234/episode\n EpisodeHistoryHandler.create_app_handler(base),\n\n # /api/v2/notifications\n NotificationsHandler.create_app_handler(base),\n\n # /api/v2/schedule\n ScheduleHandler.create_app_handler(base),\n\n # /api/v2/history\n HistoryHandler.create_app_handler(base),\n\n # /api/v2/search\n SearchHandler.create_app_handler(base),\n\n # /api/v2/guessit\n GuessitHandler.create_app_handler(base),\n\n # /api/v2/series/tvdb1234/episode\n EpisodeHandler.create_app_handler(base),\n\n # /api/v2/massedit\n SeriesMassEdit.create_app_handler(base),\n # /api/v2/massupdate\n SeriesMassOperation.create_app_handler(base),\n\n # /api/v2/series/changeindexer\n SeriesChangeIndexer.create_app_handler(base),\n # /api/v2/series/tvdb1234/operation\n SeriesOperationHandler.create_app_handler(base),\n # /api/v2/series/tvdb1234/asset\n SeriesAssetHandler.create_app_handler(base),\n # /api/v2/series/tvdb1234/legacy\n SeriesLegacyHandler.create_app_handler(base), # To be removed\n # /api/v2/series/tvdb1234\n SeriesHandler.create_app_handler(base),\n\n # /api/v2/config\n ConfigHandler.create_app_handler(base),\n\n # /api/v2/stats\n StatsHandler.create_app_handler(base),\n\n # /api/v2/internal\n InternalHandler.create_app_handler(base),\n\n # /api/v2/log\n LogHandler.create_app_handler(base),\n\n # /api/v2/alias-source/xem/operation\n AliasSourceOperationHandler.create_app_handler(base),\n # /api/v2/alias-source\n AliasSourceHandler.create_app_handler(base),\n\n # /api/v2/alias\n AliasHandler.create_app_handler(base),\n\n # /api/v2/system\n SystemHandler.create_app_handler(base),\n\n # /api/v2/authenticate\n AuthHandler.create_app_handler(base),\n\n # /api/v2/recommeded\n RecommendedHandler.create_app_handler(base),\n\n # Always keep this last!\n NotFoundHandler.create_app_handler(base)\n ]\n\n\nclass AppWebServer(threading.Thread):\n def __init__(self, options=None):\n threading.Thread.__init__(self)\n self.daemon = True\n self.alive = True\n self.name = 'TORNADO'\n\n self.options = options or {}\n self.options.setdefault('port', 8081)\n self.options.setdefault('host', '0.0.0.0')\n self.options.setdefault('log_dir', None)\n self.options.setdefault('username', '')\n self.options.setdefault('password', '')\n self.options.setdefault('web_root', '/')\n assert isinstance(self.options['port'], int)\n assert 'data_root' in self.options\n\n self.server = None\n self.io_loop = None\n\n # video root\n if app.ROOT_DIRS:\n root_dirs = app.ROOT_DIRS\n self.video_root = root_dirs[int(root_dirs[0]) + 1]\n else:\n self.video_root = None\n\n # web root\n if self.options['web_root']:\n app.WEB_ROOT = self.options['web_root'] = clean_url_path(self.options['web_root'])\n\n # Configure root to selected theme.\n app.WEB_ROOT = self.options['theme_path'] = clean_url_path(app.WEB_ROOT)\n\n # Configure the directory to the theme's data root.\n app.THEME_DATA_ROOT = self.options['theme_data_root'] = os.path.join(self.options['data_root'], app.THEME_NAME)\n\n # api root\n if not app.API_KEY:\n app.API_KEY = generate_api_key()\n self.options['api_root'] = r'{root}/api/(?:v1/)?{key}'.format(root=app.WEB_ROOT, key=app.API_KEY)\n self.options['api_v2_root'] = r'{root}/api/v2'.format(root=app.WEB_ROOT)\n\n # websocket root\n self.options['web_socket'] = r'{root}/ws'.format(root=app.WEB_ROOT)\n\n # tornado setup\n self.enable_https = self.options['enable_https']\n self.https_cert = self.options['https_cert']\n self.https_key = self.options['https_key']\n\n if self.enable_https:\n # If either the HTTPS certificate or key do not exist, make some self-signed ones.\n if not (self.https_cert and os.path.exists(self.https_cert)) or not (\n self.https_key and os.path.exists(self.https_key)):\n if not create_https_certificates(self.https_cert, self.https_key):\n log.info('Unable to create CERT/KEY files, disabling HTTPS')\n app.ENABLE_HTTPS = False\n self.enable_https = False\n\n if not (os.path.exists(self.https_cert) and os.path.exists(self.https_key)):\n log.warning('Disabled HTTPS because of missing CERT and KEY files')\n app.ENABLE_HTTPS = False\n self.enable_https = False\n\n # Load the app\n self.app = Application(\n [],\n debug=True,\n autoreload=False,\n gzip=app.WEB_USE_GZIP,\n xheaders=app.HANDLE_REVERSE_PROXY,\n cookie_secret=app.WEB_COOKIE_SECRET,\n login_url=r'{root}/login/'.format(root=self.options['theme_path']),\n log_function=self.log_request,\n )\n\n self.app.add_handlers('.*$', get_apiv2_handlers(self.options['api_v2_root']))\n\n # Websocket handler\n self.app.add_handlers('.*$', [\n (r'{base}/ui(/?.*)'.format(base=self.options['web_socket']), WebSocketUIHandler)\n ])\n\n # Static File Handlers\n self.app.add_handlers('.*$', [\n # favicon\n (r'{base}/favicon\\.ico()'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'assets', 'img', 'ico', 'favicon.ico')}),\n\n # images\n (r'{base}/images/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'assets', 'img')}),\n\n # cached images\n (r'{base}/cache/images/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(app.CACHE_DIR, 'images')}),\n\n # css\n (r'{base}/css/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'assets', 'css')}),\n\n # javascript\n (r'{base}/js/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'assets', 'js')}),\n\n # fonts\n (r'{base}/fonts/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'assets', 'fonts')}),\n\n # videos\n (r'{base}/videos/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': self.video_root}),\n\n # vue dist\n (r'{base}/vue/dist/(.*)'.format(base=self.options['theme_path']), StaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'vue')}),\n\n # vue index.html\n (r'{base}/vue/?.*()'.format(base=self.options['theme_path']), AuthenticatedStaticFileHandler,\n {'path': os.path.join(self.options['theme_data_root'], 'index.html'), 'default_filename': 'index.html'}),\n ])\n\n # Used for hot-swapping themes\n # This is the 2nd rule from the end, because the last one is always `self.app.wildcard_router`\n self.app.static_file_handlers = self.app.default_router.rules[-2]\n\n # API v1 handlers\n self.app.add_handlers('.*$', [\n # Main handler\n (r'{base}(/?.*)'.format(base=self.options['api_root']), ApiHandler),\n\n # Key retrieval\n (r'{base}/getkey(/?.*)'.format(base=self.options['web_root']), KeyHandler),\n\n # Builder redirect\n (r'{base}/api/builder'.format(base=self.options['web_root']),\n RedirectHandler, {'url': '{base}/apibuilder/'.format(base=self.options['web_root'])}),\n\n # Webui login/logout handlers\n (r'{base}/login(/?)'.format(base=self.options['theme_path']), LoginHandler),\n (r'{base}/logout(/?)'.format(base=self.options['theme_path']), LogoutHandler),\n\n (r'{base}/token(/?)'.format(base=self.options['web_root']), TokenHandler),\n\n # Web calendar handler (Needed because option Unprotected calendar)\n (r'{base}/calendar'.format(base=self.options['web_root']), CalendarHandler),\n\n # webui handlers\n ] + self._get_webui_routes())\n\n def _get_webui_routes(self):\n webroot = self.options['theme_path']\n route._routes = list(reversed([url(webroot + u.regex.pattern, u.handler_class, u.kwargs, u.name) for u in route.get_routes()]))\n return route.get_routes()\n\n def run(self):\n # Start event loop in python3\n if six.PY3:\n import asyncio\n import sys\n\n # We need to set the WindowsSelectorEventLoop event loop on python 3 (3.8 and higher) running on windows\n if sys.platform == 'win32':\n try:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n except AttributeError: # Only available since Python 3.7.0\n pass\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n if self.enable_https:\n protocol = 'https'\n self.server = HTTPServer(self.app, ssl_options={'certfile': self.https_cert, 'keyfile': self.https_key})\n else:\n protocol = 'http'\n self.server = HTTPServer(self.app)\n\n log.info('Starting Medusa on {scheme}://{host}:{port}{web_root}/', {\n 'scheme': protocol, 'host': self.options['host'],\n 'port': self.options['port'], 'web_root': self.options['theme_path']\n })\n\n try:\n self.server.listen(self.options['port'], self.options['host'])\n except Exception as ex:\n if app.LAUNCH_BROWSER and not self.daemon:\n app.instance.launch_browser('https' if app.ENABLE_HTTPS else 'http', self.options['port'], app.WEB_ROOT)\n log.info('Launching browser and exiting')\n log.info('Could not start the web server on port {port}. Exception: {ex}', {\n 'port': self.options['port'],\n 'ex': ex\n })\n os._exit(1) # pylint: disable=protected-access\n\n try:\n self.io_loop = IOLoop.current()\n self.io_loop.start()\n except (IOError, ValueError):\n # Ignore errors like 'ValueError: I/O operation on closed kqueue fd'. These might be thrown during a reload.\n pass\n\n def shutDown(self):\n self.alive = False\n self.io_loop.stop()\n\n def log_request(self, handler):\n \"\"\"\n Write a completed HTTP request to the logs.\n\n This method handles logging Tornado requests.\n \"\"\"\n if not app.WEB_LOG:\n return\n\n level = None\n if handler.get_status() < 400:\n level = logging.INFO\n elif handler.get_status() < 500:\n # Don't log normal APIv2 RESTful responses as warnings\n if isinstance(handler, BaseRequestHandler):\n level = logging.INFO\n else:\n level = logging.WARNING\n else:\n # If a real exception was raised in APIv2,\n # let `BaseRequestHandler.log_exception` handle the logging\n if not isinstance(handler, BaseRequestHandler):\n level = logging.ERROR\n\n if level is None:\n return\n\n log.log(\n level,\n '{status} {summary} {time:.2f}ms',\n {\n 'status': handler.get_status(),\n 'summary': handler._request_summary(),\n 'time': 1000.0 * handler.request.request_time()\n }\n )\n","repo_name":"pymedusa/Medusa","sub_path":"medusa/server/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":15233,"program_lang":"python","lang":"en","doc_type":"code","stars":1665,"dataset":"github-code","pt":"22"} +{"seq_id":"36179994603","text":"funcionarios = list()\nclientes = list()\n\nwhile True:\n\n print(\"\\n\", \"_\"*20, \"TELA DE LOGON\", \"_\"*20)\n\n if len(funcionarios) == 0:\n\n print(\"\\nEsta e a primeira entrada no sistema.\\n\"\n \"\"\n \"cadastre um funcionario administrador\\n\")\n # print(\"\\nnenhum funcionario cadastrado\")\n\n func_login: str = input(\"login: \")\n func_senha: str = input(\"senha: \")\n\n funcionario = [func_login, func_senha]\n funcionarios.append(funcionario)\n\n print(\"\\nfuncionario cadastrado\\n\")\n\n # resposta: str = input(\"Deseja continuar cadastrando funcionarios? (sim ou não) \")\n\n print(\"\\nPara entrar no sistema informe um login.\\n\")\n\n entrada_login_func = input(\"login: \")\n\n for i in funcionarios:\n\n if entrada_login_func == i[0]:\n\n entrada_senha_func = input(\"senha:\")\n\n if entrada_senha_func == i[1]:\n\n print(\"\\nBem Vindo!\")\n\n\n\n while True:\n\n print(\"\"\"\n ----------------MENU-----------------\n \n Para cadastrar um cliente digite [1]\n Para visualizar todos os clientes digite [2]\n Para excluir um cliente digite [3]\n Para pesquisar um cliente digite [4]\n Para fechar o sistema digite [0]\n \n \"\"\")\n entrada: str = input(\"digite a opção desejada: \")\n\n\n\n if entrada == \"1\":\n\n cliente_nome = input(\"\\nQual o nome do cliente: \")\n\n placa = input(\"Informe a placa: \")\n\n cliente = [cliente_nome, placa]\n clientes.append(cliente)\n\n print(\"\\nCliente cadastrado no sitema.\")\n\n elif entrada == \"2\":\n\n for y in clientes:\n print(f\"Nome: {y[0]} Placa: {y[1]}\")\n\n elif entrada == \"3\":\n\n excluir: str = input(\"\\nDigite o nome do cliente que deseja remover: \")\n contador = 0\n\n for x in clientes:\n\n if excluir == x[0]:\n clientes.remove(clientes[contador])\n print(\"\\nCliente removido com sucesso.\")\n break\n\n contador += 1\n\n if contador == len(clientes):\n print(\"Cliente não encontrado.\")\n\n if entrada == \"4\":\n\n pesquisa = input(\"\\nQual cliente deseja pesquisar? \")\n\n for index in clientes:\n\n if index.__contains__(pesquisa):\n print(\"Encontrado\")\n break\n\n else:\n print(\"Cliente não encontrado\")\n\n\n if entrada == \"0\":\n\n print(\"\\nVocê saiu do sistema.\")\n break\n\n# ultimo","repo_name":"clbruna/AulasPython","sub_path":"atividade_estacionamento.py","file_name":"atividade_estacionamento.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30863462543","text":"\"\"\"empty message\n\nRevision ID: 74ba324c02bb\nRevises: 663639be58d3\nCreate Date: 2017-07-22 16:05:28.228173\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '74ba324c02bb'\ndown_revision = '663639be58d3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('plantgenerations',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.Column('modified', sa.DateTime(), nullable=True),\n sa.Column('total_seeds_obtained', sa.Integer(), nullable=True),\n sa.Column('date_seeds_obtained', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('plantlineagegenerations',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created', sa.DateTime(), nullable=True),\n sa.Column('modified', sa.DateTime(), nullable=True),\n sa.Column('plant_lineage_fk', sa.Integer(), nullable=True),\n sa.Column('plant_generation_fk', sa.Integer(), nullable=True),\n sa.Column('plant_previous_generation_fk', sa.Integer(), nullable=True),\n sa.Column('is_origin', sa.Boolean(), nullable=True),\n sa.ForeignKeyConstraint(['plant_generation_fk'], ['plantgenerations.id'], ),\n sa.ForeignKeyConstraint(['plant_lineage_fk'], ['plantlineages.id'], ),\n sa.ForeignKeyConstraint(['plant_previous_generation_fk'], ['plantgenerations.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('plantlineagegenerations')\n op.drop_table('plantgenerations')\n # ### end Alembic commands ###\n","repo_name":"jnitin/delacook-homestead-api","sub_path":"migrations/versions/74ba324c02bb_.py","file_name":"74ba324c02bb_.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27644812987","text":"from django.http import HttpResponse\nfrom django.test import TestCase, RequestFactory\nfrom django.contrib.sessions.backends.db import SessionStore as DatabaseSession\n\nfrom unittest import TestSuite\n\nfrom experiments import conf\nfrom experiments.experiment_counters import ExperimentCounter\nfrom experiments.middleware import ExperimentsRetentionMiddleware\nfrom experiments.signal_handlers import transfer_enrollments_to_user\nfrom experiments.utils import DummyUser, WebUser, participant\nfrom experiments.models import Experiment, ENABLED_STATE, Enrollment\n\nfrom django.contrib.auth import get_user_model\n\nTEST_ALTERNATIVE = 'blue'\nEXPERIMENT_NAME = 'backgroundcolor'\n\n\nclass BaseUserIncorporateTestCase(object):\n def __init__(self, *args, **kwargs):\n super(BaseUserIncorporateTestCase, self).__init__(*args, **kwargs)\n self.experiment_counter = ExperimentCounter()\n\n def test_can_incorporate(self):\n self.incorporating.incorporate(self.incorporated)\n\n def test_incorporates_enrollment_from_other(self):\n if not self._has_data():\n return\n\n try:\n experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.incorporated.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)\n self.incorporating.incorporate(self.incorporated)\n self.assertEqual(self.incorporating.get_alternative(EXPERIMENT_NAME), TEST_ALTERNATIVE)\n finally:\n self.experiment_counter.delete(experiment)\n\n def _has_data(self):\n return not isinstance(self.incorporated, DummyUser) and not isinstance(self.incorporating, DummyUser)\n\n\ndef dummy(incorporating):\n return DummyUser()\n\n\ndef anonymous(incorporating):\n return WebUser(session=DatabaseSession())\n\n\ndef authenticated(incorporating):\n User = get_user_model()\n return WebUser(user=User.objects.create(username=['incorporating_user', 'incorporated_user'][incorporating]))\n\nuser_factories = (dummy, anonymous, authenticated)\n\n\ndef load_tests(loader, standard_tests, _):\n suite = TestSuite()\n suite.addTests(standard_tests)\n\n for incorporating in user_factories:\n for incorporated in user_factories:\n test_case = build_test_case(incorporating, incorporated)\n tests = loader.loadTestsFromTestCase(test_case)\n suite.addTests(tests)\n return suite\n\n\ndef build_test_case(incorporating, incorporated):\n class InstantiatedTestCase(BaseUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(InstantiatedTestCase, self).setUp()\n self.incorporating = incorporating(True)\n self.incorporated = incorporated(False)\n InstantiatedTestCase.__name__ = \"BaseUserIncorporateTestCase_into_%s_from_%s\" % (incorporating.__name__, incorporated.__name__)\n return InstantiatedTestCase\n\n\nclass IncorporateTestCase(TestCase):\n def setUp(self):\n self.experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.experiment_counter = ExperimentCounter()\n\n User = get_user_model()\n self.user = User.objects.create(username='incorporate_user')\n self.user.is_confirmed_human = True\n\n request_factory = RequestFactory()\n self.request = request_factory.get('/')\n self.request.session = DatabaseSession()\n participant(self.request).confirm_human()\n\n def tearDown(self):\n self.experiment_counter.delete(self.experiment)\n\n def _login(self):\n self.request.user = self.user\n transfer_enrollments_to_user(None, self.request, self.user)\n\n def test_visit_incorporate(self):\n alternative = participant(self.request).enroll(self.experiment.name, ['alternative'])\n\n ExperimentsRetentionMiddleware(self.request).process_response(self.request, HttpResponse())\n\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n\n self.assertFalse(Enrollment.objects.filter(user__isnull=False).exists())\n self._login()\n\n self.assertTrue(Enrollment.objects.filter(user__isnull=False).exists())\n self.assertIsNotNone(Enrollment.objects.all()[0].last_seen)\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n self.assertEqual(self.experiment_counter.goal_count(self.experiment, alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL), 1)\n self.assertEqual(self.experiment_counter.participant_count(self.experiment, alternative), 1)\n","repo_name":"mixcloud/django-experiments","sub_path":"experiments/tests/test_webuser_incorporate.py","file_name":"test_webuser_incorporate.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":373,"dataset":"github-code","pt":"22"} +{"seq_id":"4317544557","text":"import socket\nimport sys\n\nclass Server:\n def __init__(self, robot_controller, host, port):\n self.robot_controller = robot_controller\n self.host = host\n self.port = port\n\n def listen(self):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n server_address = (self.host, self.port)\n sock.bind(server_address)\n\n sock.listen(1)\n\n print(\"Listening on port {}\".format(self.port))\n\n try:\n while True:\n connection, client_address = sock.accept()\n try:\n command = connection.recv(4096)\n if command:\n command = self._strip_command(command)\n print(\"Received command: {}\".format(command))\n result = str(self._dispatch(command))\n if result:\n connection.sendall(result.encode(\"utf-8\"))\n finally:\n connection.close()\n except KeyboardInterrupt:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n\n def _strip_command(self, command):\n command = command.decode(\"utf-8\")\n command = command.rstrip()\n return command\n\n def _dispatch(self, command):\n if command == \"start\":\n self.robot_controller.start()\n return \"starting\"\n elif command == \"stop\":\n self.robot_controller.stop()\n return \"stopping\"\n elif command == \"getdist\":\n return self.robot_controller.get_dist()\n elif command == \"getmotors\":\n return self.robot_controller.get_motors()\n else:\n return \"unknown command\"\n","repo_name":"marcndkk/WallFollower","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21281687835","text":"\ndef Lista_Chapeu():\n hatList = [1, 2, 3, 4, 5] # This is an existing list of numbers hidden in the hat.\n\n # Step 1: write a line of code that prompts the user\n # to replace the middle number with an integer number entered by the user.\n\n print ('\\nLista original : ',hatList)\n\n hatList[2] = int(input('Digite o número a se substituido na lista: '))\n\n print('Lista alterada :', hatList)\n print('Tamando original da lista é : ',len(hatList))\n\n # Step 2: write a line of code here that removes the last element from the list.\n\n del hatList[-1]\n hatList.ap\n\n print('Lista sem o último elemento: ',hatList)\n\n # Step 3: write a line of code here that prints the length of the existing list.\n\n print('Tamando final da lista é : ',len(hatList))\n\n\ndef Lista_Append():\n myList = [] # creating an empty list\n for i in range(5):\n myList.append(i + 1)\n\n print(myList)\n print()\n\ndef Lista_Insert():\n myList = [] # creating an empty list\n\n for i in range(5):\n myList.insert(0, i + 1)\n\n print(myList)\n\ndef Lista_Total():\n myList = [10, 1, 8, 3, 5]\n total = 0\n\n for i in range(len(myList)):\n total += myList[i]\n\n print(f'Total da lista {myList} é {total}')\n\ndef Lista_Total_OK():\n myList = [10, 1, 8, 3, 5]\n total = 0\n\n for i in myList:\n print(i,end=\"-\")\n total += i\n\n print(f'Total da lista {myList} é {total}')\n\n# Programa Principal:\n\ndef Lista_Inversao():\n myList = [10, 1, 8, 3, 5]\n length = len(myList)\n print(myList)\n for i in range(length // 2):\n myList[i], myList[length - i - 1] = myList[length - i - 1], myList[i]\n\n print(myList)\n\n\ndef Lista_beatles():\n # step 1\n beatles = []\n # step 2\n for i in range(3):\n beatles.append(input (f'Digite o {i+1}o participante da banda.'))\n\n print(\"Step 2:\", beatles)\n\n # step 3\n Novos_Membros = int(input (\"Quantos novos membros incluirá: \"))\n tam = len(beatles) + 1\n for i in range(Novos_Membros):\n beatles.append(input(f'Digite o {tam +i}o participante da banda.'))\n print(\"Step 3:\", beatles)\n\n # step 4\n Exclui_Membros = int(input(\"Quantos membros deseja excluir?: \"))\n for i in range(Exclui_Membros):\n print(beatles)\n j = int (input ('Qual membro excluirá? '))\n del beatles[j-1]\n print(\"Step 4:\", beatles)\n\n\n # step 5\n Novos_Membros = int(input(\"Quantos novos membros incluirá: \"))\n for i in range(Novos_Membros):\n print(beatles)\n j = int(input(f'Em qual posicao incluirá o {i+1}o participante ? '))\n beatles.insert(j-1, input(f'Digite o {j}o participante da banda.'))\n\n print(\"Step 5:\", beatles)\n\n\n # testing list legth\n print(\"Tamanho final da lista\", len(beatles))\n\ndef Alimenta_Lista():\n teste = []\n teste = \"1\",2,3,\"4\"\n print (teste)\n input (\"aguardo...\")\n\ndef Classifica_bolha():\n myList = []\n swapped = True\n num = int(input(\"Quantos elementos quer classificar?: \"))\n\n for i in range(num):\n val = float(input(f\"Insira o {i+1}o elemento: \"))\n myList.append(val)\n\n while swapped:\n swapped = False\n for i in range(len(myList) - 1):\n print (f\"Analisa se o {i+2}o é maior do que o {i+1}o \", myList, end=' ')\n print(input(\"Enter continua...\"),end='')\n if myList[i] > myList[i + 1]:\n swapped = True\n myList[i], myList[i + 1] = myList[i + 1], myList[i]\n print(f\"Mudei o {i+2}o pelo {i+1}o -->\", myList,end=' ')\n aguardo = input(\"Enter continua...\")\n else: print(\"Sem alteração..\")\n\n print(\"\\nClassificado:\")\n print(myList)\n\ndef Retira_Duplicado(): # 3.1.6.9 LAB: Operando com listas - básico\n myList = [] # [1, 2, 4, 4, 1, 4, 2, 6, 2, 9]\n mylist2 = []\n mylist3 = []\n\n while True:\n elem = int(input ('Insira um número inteiro ou [0] para sair: '))\n if elem == 0: break\n myList.append(elem)\n\n tamanho = len(myList)\n\n for n in range(tamanho):\n procura = myList[n]\n\n if procura not in myList[n+1:]:\n mylist2.append(procura)\n else:\n mylist3.append(procura)\n\n #\"for i in myList[n+1:]:\n # \"\" if i != procura:\n print(f'Lista dos exclusivos {mylist2}')\n print(f'Lista dos excluidos {mylist3}')\n\n\n print(\"\\nLista com os elementos únicos:\")\n print(mylist2)\n\ndef Lista_avancado():\n numeros = [x for x in range (21)]\n pares = [x for x in range (20) if x % 2 == 0]\n impares = []\n print (numeros,input('Numeros. Aguarda...'))\n for x in numeros:\n if x % 2 !=0: impares.append(x)\n print (impares,input(\"Impares. Aguarda...\"))\n print(pares, input(\"Pares. Aguarda...\"))\n\ndef Tabuleiro():\n Peça = '1'\n board = [[Peça for i in range(8)] for j in range(8)]\n print (board)\n\ndef xadres():\n Vazio = \"--\"\n Peão = \"Pe\"\n Torre = \"Tr\"\n Bispo = \"Bi\"\n Cavalo = \"Cv\"\n Rei = \"Re\"\n Rainha = \"Ra\"\n\n tabuleiro = []\n\n for i in range(8):\n if i == 1 or i == 6:\n linha = [Peão for i in range(8)]\n else:\n linha = [Vazio for i in range(8)]\n tabuleiro.append(linha)\n #print(linha)\n\n tabuleiro[0][0] = Torre\n tabuleiro[0][7] = Torre\n tabuleiro[7][0] = Torre\n tabuleiro[7][7] = Torre\n\n tabuleiro[0][1] = Cavalo\n tabuleiro[0][6] = Cavalo\n tabuleiro[7][1] = Cavalo\n tabuleiro[7][6] = Cavalo\n\n tabuleiro[0][2] = Bispo\n tabuleiro[0][5] = Bispo\n tabuleiro[7][2] = Bispo\n tabuleiro[7][5] = Bispo\n\n tabuleiro[0][3] = Rei\n tabuleiro[7][4] = Rei\n\n tabuleiro[0][4] = Rainha\n tabuleiro[7][3] = Rainha\n\n for i in range(8): print(tabuleiro[i])\n\n print(tabuleiro[7][7])\n\nwhile True:\n # Lista_Chapeu()\n # Lista_Append()\n # Lista_Insert()\n # Lista_Total()\n # Lista_Total_OK()\n # Lista_Inversao()\n # Lista_beatles()\n #Alimenta_Lista()\n #Classifica_bolha()\n # Retira_Duplicado()\n # Lista_avancado()\n # Tabuleiro()\n xadres()\n sair = input('Deseja repetir? ')\n if sair[0].upper() == \"S\" : continue\n print ('\\n Fim do programa\\n')\n break\n\n# Atualizar pelo Git Hub\n","repo_name":"MaxwellMGomes/Python_Cisco","sub_path":"3.1.4.6 - Lista Basica.py","file_name":"3.1.4.6 - Lista Basica.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39788000142","text":"import numpy as np\r\nfrom flask import Flask,request,jsonify,render_template,url_for\r\nimport pickle\r\n\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('model_titanic.pkl','rb'))\r\n\r\n@app.route('/')\r\ndef home():\r\n\treturn render_template('index.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n\tfeatures=[str(x) for x in request.form.values()]\r\n\tprint(features)\r\n\tfinal = [np.array(features)]\r\n\tprediction = model.predict(final)\r\n\r\n\tif prediction==0:\r\n\t\ttt = \"Sorry,You Won't be able to Purchase!!\"\r\n\telse:\r\n\t\ttt= \"Yes Let's Have some Purchasing\"\r\n\r\n\treturn render_template('index.html',prediction_text='{}'.format(tt))\r\n\r\n@app.route('/predict_api',methods=['POST'])\r\ndef predict_api():\r\n '''\r\n For direct API calls trought request\r\n '''\r\n data = request.get_json(force=True)\r\n prediction = model.predict([np.array(list(data.values()))])\r\n\r\n output = prediction[0]\r\n return jsonify(output)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"Rituraj9/Predict_Purchase_ML","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"34035774167","text":"from datetime import datetime\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtGui import QFont\nimport time\n\n\nclass TimeMarker:\n lastmarker = 0\n \n def __init__(self):\n self.timeFont = QFont(\"Arial\", 10)\n\n def paint(self, width, height, painter):\n now = time.time()\n if now > self.lastmarker + 1:\n self.lastmarker = now\n painter.setPen(Qt.gray)\n painter.drawLine(width-2, 0, width-2, height-1)\n painter.setFont(self.timeFont)\n time_str = datetime.now().strftime(\"%H:%M:%S\")\n painter.drawText(width-64, 16, time_str)\n","repo_name":"jvestman/pqcd","sub_path":"time_marker.py","file_name":"time_marker.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"3778748403","text":"import re\nfrom math import sqrt\nfrom functools import partial\nfrom tkinter import *\n\nOPERADORES = ['×','^','+','%','÷','-','/','sqrt(']\n\ndef mostrar_calculo(*args):\n if any(TELA_CALCULADORA.get().endswith(operators) for operators in OPERADORES) or not TELA_CALCULADORA.get():\n try:\n int(args[0])\n temp = TELA_CALCULADORA.get()\n temp += f'{args[0]}'\n TELA_CALCULADORA.set(temp)\n return\n except ValueError:\n return\n elif args:\n EQUACAO = TELA_CALCULADORA.get()\n if args[0] == 'sqrt(' and EQUACAO:\n TEMP = re.findall('[+-/×/÷/sqrt(/%/^()]|\\d+', TELA_CALCULADORA.get())\n TEMP2 = TEMP.pop()\n EQUACAO = ''.join(TEMP)\n EQUACAO += f'sqrt({TEMP2})'\n TELA_CALCULADORA.set(EQUACAO)\n return\n EQUACAO += f'{args[0]}'\n TELA_CALCULADORA.set(EQUACAO)\n\n\ndef resultado():\n EQUACAO_TEXTO = TELA_CALCULADORA.get().replace('×', '*').replace('÷', '/').replace('^', '**')\n eval(f'TELA_CALCULADORA.set({EQUACAO_TEXTO})')\n if len(TELA_CALCULADORA.get()) >= 26:\n try:\n TAMANHO_DO_RESULTADO = len(TELA_CALCULADORA.get()) - 4\n RESULTADO_SIMPLIFICADO = TELA_CALCULADORA.get()[0:4] + f'e{TAMANHO_DO_RESULTADO}'\n TELA_CALCULADORA.set(RESULTADO_SIMPLIFICADO)\n except ValueError:\n TELA_CALCULADORA.set(EQUACAO_TEXTO)\n return None\n return EQUACAO_TEXTO\n\n\nJANELA_PRINCIPAL = Tk()\nJANELA_PRINCIPAL.config(width=600, height=400, bg=\"#111111\")\n\n\n\nTELA_CALCULADORA = StringVar()\nLABEL = Label(textvariable=TELA_CALCULADORA, font=(\"Ubuntu Medium\", 24, \"normal\"), width=26, height=2, anchor='e')\nLABEL.config(fg=\"black\",bg=\"DARKGREEN\",highlightthickness=0)\nLABEL.grid(row=0,column=0,columnspan=4)\n\n\n\nNUMERO_1 = Button(text=\"1\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'1'))\nNUMERO_1.grid(row=4,column=0)\n\nNUMERO_2 = Button(text=\"2\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'2'))\nNUMERO_2.grid(row=4,column=1)\n\nNUMERO_3 = Button(text=\"3\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'3'))\nNUMERO_3.grid(row=4,column=2)\n\nNUMERO_4 = Button(text=\"4\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'4'))\nNUMERO_4.grid(row=3,column=0)\n\nNUMERO_5 = Button(text=\"5\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'5'))\nNUMERO_5.grid(row=3,column=1)\n\nNUMERO_6 = Button(text=\"6\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'6'))\nNUMERO_6.grid(row=3,column=2)\n\nNUMERO_7 = Button(text=\"7\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'7'))\nNUMERO_7.grid(row=2,column=0)\n\nNUMERO_8 = Button(text=\"8\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'8'))\nNUMERO_8.grid(row=2,column=1)\n\nNUMERO_9 = Button(text=\"9\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'9'))\nNUMERO_9.grid(row=2,column=2)\n\nNUMERO_0 = Button(text=\"0\",width=6,height=3,bg=\"#434242\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=partial(mostrar_calculo,'0'))\nNUMERO_0.grid(row=5,column=1)\n\nMOSTRAR_RESULTADO = Button(text=\"=\",width=6,height=3,bg=\"#CC0000\",font=(\"Ubuntu Medium\",24,\"normal\"),fg=\"white\",highlightthickness=0,command=resultado)\nMOSTRAR_RESULTADO.grid(row=5,column=3)\n\n\n\n\nSOMA = Button(text=\"+\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'+'))\nSOMA.grid(row=1,column=3)\n\nSUBTRACAO = Button(text=\"-\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'-'))\nSUBTRACAO.grid(row=2,column=3)\n\nMULTIPLICACAO = Button(text=\"×\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'×'))\nMULTIPLICACAO.grid(row=3,column=3)\n\nDIVISAO = Button(text=\"÷\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'÷'))\nDIVISAO.grid(row=4,column=3)\n\nMODULO = Button(text=\"%\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'%'))\nMODULO.grid(row=1,column=0)\n\nRAIZ = Button(text=\"√\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'sqrt('))\nRAIZ.grid(row=1,column=1)\n\nPOTENCIA = Button(text=\"^\",width=6,height=3,bg=\"#22A39F\",font=(\"Ubuntu Medium\",24,\"normal\"),command=partial(mostrar_calculo,'^'))\nPOTENCIA.grid(row=1,column=2)\n\n\n\n\nJANELA_PRINCIPAL.mainloop()","repo_name":"JeanArthurCostaDias/CalculadoraPython","sub_path":"CalculadoraPython/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42110911983","text":"import docx\n\n\nm_doc = docx.Document(\"test_read.docx\")\n\n\n# 提取表格\ndef get_table():\n flag = 0\n for table in m_doc.tables:\n for row_index, row in enumerate(table.rows): # 按行遍历\n for col_index, cell in enumerate(row.cells): # 获取行中单元格\n if \"量化\" in cell.text:\n flag += 1\n\n print(\"量化一词在表中出现: \", flag)\n\n\n# 提取图片\ndef get_pic():\n dict_rel = m_doc.part._rels\n for rel in dict_rel:\n rel = dict_rel[rel]\n print(rel.target_ref)\n if \"image\" in rel.target_ref:\n image_name = rel.target_ref.split(\"/\")[-1]\n word_name = \"test_word\"\n save_path = \"word_pic\"\n with open(\"{}/{}\".format(save_path, word_name+\"_\"+image_name), \"wb\") as f:\n f.write(rel.target_part.blob)\n\n\n# 获取段落\ndef get_para():\n m_para = m_doc.paragraphs\n for para in m_para:\n print(para.text)\n\n\n# 获取块\ndef get_run():\n \"\"\"\n 1. 获取段落\n 2. 获取段落中的块: runs\n \"\"\"\n for para in m_doc.paragraphs[:2]:\n for run in para.runs:\n print(run.text)\n\n\n# 文字计数(不包含表格)\ndef get_count():\n count = 0\n for para in m_doc.paragraphs:\n for run in para.runs:\n if \"量化\" in run.text:\n count += 1\n print(\"量化一词在文档中出现: \", count)\n\n\n# 获取所有标题(重要)\nfrom docx.enum.style import WD_STYLE_TYPE\n\ndef get_title():\n \"\"\"\n \"\"\"\n title = m_doc.styles\n for i in title:\n if i.type == WD_STYLE_TYPE.PARAGRAPH:\n print(i.name)\n\n\n# 获取标题内容\ndef get_para_style():\n for para in m_doc.paragraphs:\n # if para.style.name == \"Heading 1\": # 如果段落等于标题一\n # print(\"标题一: \", para.text)\n # if para.style.name == \"Heading 2\": # 如果段落等于标题二\n # print(\"标题二: \", para.text)\n if para.style.name == \"Title\": # 如果段落等于标题六\n print(\"标题六: \", para.text)\n\n\n# 获取所有标题( 正则表达式 )\nimport re\ndef get_all_title():\n for para in m_doc.paragraphs:\n if re.match(\"^Heading \\d+$\", para.style.name): # 获取所有段落\n print(para.text)\n\n\n# 获取所有正文\ndef get_text_style():\n for para in m_doc.paragraphs[:50]:\n if para.style.name == \"Normal\": # 如果段落等于标题一\n print(\"正文: \", para.text)\n\n\nif __name__ == '__main__':\n get_pic()\n # get_para_style()\n","repo_name":"dumengru/processing_word_documents","sub_path":"word_read.py","file_name":"word_read.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"12314145210","text":"#Importing package and modules \nimport arcpy, os, sys\n\n#Setting envrionment\narcpy.env.overwriteOutput = True\n\n#Path to the MOHigherEdCopy shapefile\ninputFC = r\"c:\\Geo\\geo465\\Aawaj_Joshi\\lab09\\lab09\\data\\MOHigherEd.shp\"\n\n#Path to a new copy \noutputFC = r\"c:\\Geo\\geo465\\Aawaj_Joshi\\lab09\\lab09\\data\\MOHigherEdCopy.shp\"\n\n#Copying from inputFC to outputFC\narcpy.Copy_management(inputFC, outputFC)\n\n#Creating a list of necesary fields\nfields = ['State', 'Type']\n\n#Creating update cursor for feature class\nwith arcpy.da.UpdateCursor(outputFC, fields) as upCursor:\n #Iterating through each row\n for row in upCursor:\n \n #Updating records with no value in State field to have value \"MO\"\n if (row[0] == \" \"):\n row[0] = \"MO\"\n upCursor.updateRow(row)\n\n #Deleting facilities that are of Thelogical Type\n if (row[1] == \"Theological\"):\n upCursor.deleteRow()\n \n #Updating the Type of Technical/Professional\" to just \"Professional\"\n if \"Professional\" in row[1]:\n row[1] = \"Professional\"\n upCursor.updateRow(row)","repo_name":"aawajjoshi/Data-Manipulation-With-Cursors-ArcMap-Script","sub_path":"6. Update Cursor (update row and delete row).py","file_name":"6. Update Cursor (update row and delete row).py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35390277931","text":"import requests\nimport smtplib\n\nmy_email = EMAIL \nmy_password = PASSWORD \nrecipient = EMAIL \n\nSTOCK_NAME = \"TSLA\"\n# STOCK_NAME = \"IDT\"\nCOMPANY_NAME = \"Tesla Inc\"\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nSTOCK_API_KEY = YOUR_API_KEY\n\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\nNEWS_API_KEY = YOUR_API_KEY\n\nup_chart = \"📈\"\ndown_chart = \"📉\"\n\nstock_parameters = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": STOCK_NAME,\n \"apikey\": STOCK_API_KEY,\n }\n\nstock_response = requests.get(STOCK_ENDPOINT, stock_parameters)\nstock_response.raise_for_status()\nstock_data = stock_response.json()\n\ndays = stock_data['Time Series (Daily)']\ndates = list(days)[:2]\n\none_day_ago_low = float(days[dates[0]][\"3. low\"])\nprint(f\"Yesterday: {one_day_ago_low}\")\n\ntwo_days_ago_low = float(days[dates[1]][\"3. low\"])\nprint(f\"Day before Yesterday: {two_days_ago_low}\")\n\ndifference = abs(one_day_ago_low - two_days_ago_low)\ndifference = round(difference, 2)\n\ndiff_percentage = ((one_day_ago_low - two_days_ago_low) / two_days_ago_low) * 100\nprint(f\"% diff: {diff_percentage}\")\nprint(\"-\"*50)\n\n# diff_percentage = 5.68 # for testing purposes\n# diff_percentage = -6.41 # for testing purposes\n\nif abs(diff_percentage) >= 5:\n print(\"Getting news...\")\n news_parameters = {\n \"q\": COMPANY_NAME,\n \"from\": dates[1],\n \"sortBy\": \"publishedAt\",\n \"apiKey\": NEWS_API_KEY,\n }\n\n news_response = requests.get(NEWS_ENDPOINT, news_parameters)\n news_data = news_response.json()\n\n first_three_articles = news_data[\"articles\"][:3]\n\n headlines_and_description = [[article[\"title\"], article[\"description\"], article[\"url\"]] for article in first_three_articles]\n\n with smtplib.SMTP(\"smtp.gmail.com\", 587) as connection:\n connection.starttls()\n print(\"Logging in with email...\")\n connection.login(my_email, my_password)\n if diff_percentage >= 5:\n chart = up_chart\n text = \"up\"\n elif diff_percentage <= -5:\n chart = down_chart\n text = \"down\"\n connection.sendmail(\n from_addr=my_email,\n to_addrs=recipient,\n msg=(\n f\"Subject:{COMPANY_NAME} is {text} {abs(diff_percentage)}% {chart}! Here is the news.\\n\\n\"\n f\"Here are the three articles that could possibly have something to do with this price change.\\n\"\n f\"{'-' * 50}\\n\"\n f\"Headline: {headlines_and_description[0][0]}\\n\"\n f\"Brief: {headlines_and_description[0][1]}\\n\"\n f\"Continue reading here: {headlines_and_description[0][2]}\\n\"\n f\"{'-' * 50}\\n\"\n f\"Headline: {headlines_and_description[1][0]}\\n\"\n f\"Brief: {headlines_and_description[1][1]}\\n\"\n f\"Continue reading here: {headlines_and_description[1][2]}\\n\"\n f\"{'-' * 50}\\n\"\n f\"Headline: {headlines_and_description[2][0]}\\n\"\n f\"Brief: {headlines_and_description[2][1]}\\n\"\n f\"Continue reading here: {headlines_and_description[2][2]}\\n\"\n f\"{'-' * 50}\\n\"\n ).encode('utf-8')\n )\n print(\"Email Sent Successfully!\")\nelse:\n print(\"No big fluctuations in price...\")\n\n# Optional: Format the message like this:\n\"\"\"\nTSLA: 🔺2%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. \nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\nor\n\"TSLA: 🔻5%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. \nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\n\"\"\"\n\n","repo_name":"python3xdev/100DaysOfCodePython","sub_path":"Day 36 - Intermediate +/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"21392359991","text":"'''\n@author: Sid Probstein\n@contact: sid@swirl.today\n'''\n\nfrom datetime import datetime\n\nfrom jsonpath_ng import parse\nfrom jsonpath_ng.exceptions import JsonPathParserError\n\nfrom swirl.processors.processor import *\nfrom swirl.processors.utils import clean_string, create_result_dictionary\nfrom swirl.connectors.utils import get_mappings_dict\n \n############################################# \n############################################# \n\nclass GenericQueryProcessor(QueryProcessor):\n\n type = 'GenericQueryProcessor'\n \n def process(self):\n return clean_string(self.query_string).strip()\n\n############################################# \n\nclass TestQueryProcessor(QueryProcessor):\n\n type = 'TestQueryProcessor'\n \n def process(self):\n return clean_string(self.query_string).strip() + \" test\"\n\n############################################# \n\nclass GenericResultProcessor(ResultProcessor):\n\n type=\"GenericResultProcessor\"\n\n def process(self):\n\n list_results = []\n result_number = 1\n for result in self.results:\n swirl_result = create_result_dictionary()\n # payload = {}\n # report searchprovider rank, not ours\n swirl_result['searchprovider_rank'] = result_number\n swirl_result['date_retrieved'] = str(datetime.now())\n\n ############################################# \n # copy fields, avoiding collisions\n for key in result.keys():\n if key in swirl_result.keys():\n if not swirl_result[key]:\n swirl_result[key] = result[key]\n # end for\n\n if swirl_result['date_published'] == \"\":\n swirl_result['date_published'] = 'unknown'\n\n # final assembly\n\n swirl_result['payload'] = {}\n # try to find a title, if none provided\n if swirl_result['title'] == \"\":\n if swirl_result['url']:\n swirl_result['title'] = swirl_result['url']\n elif swirl_result['author']:\n swirl_result['title'] = swirl_result['author']\n # end if\n # end if\n swirl_result['searchprovider'] = self.provider.name\n list_results.append(swirl_result)\n result_number = result_number + 1\n if result_number > self.provider.results_per_query: \n break\n # end for\n\n self.processed_results = list_results\n return self.processed_results\n\n############################################# \n\nclass TestResultProcessor(ResultProcessor):\n\n type=\"TestResultProcessor\"\n\n def process(self):\n\n # to do: test to ensure operation on a SWIRL result, i.e. after Generic or MappingResultProcessor\n for item in self.results:\n item['test'] = True\n\n self.processed_results = self.results\n return self.processed_results\n\n############################################# \n\nclass DuplicateHalfResultProcessor(ResultProcessor):\n\n type=\"DuplicateHalfResultProcessor\"\n\n def process(self):\n\n # to do: test to ensure operation on a SWIRL result, i.e. after Generic or MappingResultProcessor\n switch = 0\n results_hd = []\n for item in self.results:\n if switch == 0:\n results_hd.append(item)\n switch = 1\n continue\n if switch == 1:\n switch = 0\n continue\n # end for\n\n self.processed_results = self.results + results_hd\n return self.processed_results\n\n","repo_name":"simrit1/swirl-search","sub_path":"swirl/processors/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"17212242477","text":"from selenium import webdriver\nimport time\nimport telegram\nimport configparser\n\n\ncounter = 0\nconfig = configparser.ConfigParser()\nconfig.read('/home/notroot/PycharmProjects/ProjectInfo/config')\nbot = telegram.Bot(token=config['DEFAULT']['telegram_api_key'])\noptions = webdriver.FirefoxOptions()\noptions.add_argument('-headless')\ndifference_limit = float(config['DEFAULT']['difference_limit'])\nsleep_time = int(config['DEFAULT']['sleep_time'])\n\n\ndef close_browser():\n lykke.close()\n paribu.close()\n xe.close()\n\n\nwhile True:\n lykke = webdriver.Firefox(firefox_options=options)\n lykke.get(\"https://www.lykke.com/\")\n paribu = webdriver.Firefox(firefox_options=options)\n paribu.get(\"https://www.paribu.com/\")\n xe = webdriver.Firefox(firefox_options=options)\n xe.get(\"http://www.xe.com/currencyconverter/convert/?From=USD&To=TRY\")\n\n xe_usd = float(xe.find_element_by_class_name(\"uccResultAmount\").text)\n lykke_tl = xe_usd * float(lykke.find_element_by_class_name(\"pair__value\").text)\n paribu_header = paribu.title\n paribu_tl = float(''.join(filter(lambda x: x.isdigit(), paribu_header)))\n\n difference_prb_to_ly = paribu_tl/lykke_tl\n if difference_prb_to_ly > difference_limit:\n difference_limit = difference_prb_to_ly\n bot.send_message(chat_id=config['DEFAULT']['chat_id'],\n text=\"paribu: {0}\\nlykke: {1}\\nfark: {2}\\nyüzde: {3}\"\n .format(str(paribu_tl), str(lykke_tl), str(paribu_tl - lykke_tl), str((difference_limit - 1) * 100)))\n\n else:\n counter += 1\n\n if counter > 10:\n counter = 0\n difference_limit = int(config['DEFAULT']['difference_limit'])\n\n close_browser()\n time.sleep(sleep_time)\n","repo_name":"onurerhan/ProjectInfo","sub_path":"source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72681025337","text":"from pyspark import SparkContext\nimport sys\nimport itertools\nfrom itertools import combinations\nfrom itertools import product\nimport time\nimport math\nimport csv\nimport functools\nfrom functools import reduce\nfrom operator import add\n\nfilter = int(sys.argv[1])\nsupport = int(sys.argv[2])\n\ncandidates = []\nfrequents = []\n\nsc = SparkContext.getOrCreate()\n\n# make new ta feng dataset\nrddFromFile = sc.textFile(str(sys.argv[3])).map(lambda line: line.split(\",\"))\nheader = rddFromFile.first()\nrddFromFile = rddFromFile.filter(lambda row: row != header) # filter out the header\n\nrdd = rddFromFile.map(lambda x : ('-'.join([x[0].replace('\"',''), x[1].replace('\"','')]), int(x[5].replace('\"','')))).collect()\n\nwith open('processed.txt', 'w+') as fout:\n writer = csv.writer(fout)\n writer.writerow(['DATE-CUSTOMER_ID', 'PRODUCT_ID'])\n for line in rdd:\n writer.writerow(line)\n\n# start timer\nstart_time = time.time()\n\n# read in processed.txt\nrddFromFile = sc.textFile('processed.txt')\nheader = rddFromFile.first()\nrddFromFile = rddFromFile.filter(lambda row: row != header) # filter out the header\n\n# filter out customers who bought more than k items\nrddSplit = rddFromFile.map(lambda f: (f.split(',')[0], f.split(',')[1]))\nrdd = rddSplit.groupByKey().mapValues(set).filter(lambda g: len(g[1]) > filter)\n\n# get count of whole dataset\nrdd_size = rdd.count()\n\n# pass one function\ndef pass_one(p, rdd_size):\n frequents = [] # list of all frequent items\n\n chunk = [list(e[1]) for e in p]\n\n ps = math.ceil((len(chunk)/rdd_size)*support) # new threshold\n \n merged = list(itertools.chain.from_iterable(chunk)) # partition merged\n \n # calculate singles\n singles = dict()\n for x in merged:\n if(x not in singles):\n singles[x] = merged.count(x)\n \n frequent_items = []\n \n # get frequent singles\n for key,val in singles.items():\n if(val >= ps):\n frequent_items.append(key)\n \n frequent_items = sorted(frequent_items)\n single_tups = [(e,) for e in frequent_items]\n frequents.append(single_tups)\n \n frequent = True\n comb_val = 2\n fset = set(frequent_items)\n \n # start apriori\n while(frequent):\n # for each basket, find combinations with intersection of frequent items and basket\n data = dict()\n for basket in chunk:\n li = sorted(set(basket) & set(fset))\n basket_combs = list(combinations(li, comb_val))\n for bc in basket_combs:\n bc = tuple(bc)\n if(bc not in data):\n data[bc] = 1\n else:\n data[bc] = data[bc] + 1\n \n frequent_items = []\n\n # check support threshold\n for key,val in data.items():\n if(val >= ps):\n frequent_items.append(key)\n \n frequent_items = sorted(frequent_items)\n frequents.append(frequent_items)\n \n comb_val = comb_val + 1 # increase combination value\n \n if(len(frequent_items) == 0):\n frequent = False\n \n # create new combination set\n fset = set()\n for f in frequent_items:\n f = set(f)\n fset = fset | f\n \n print(frequents)\n return frequents\n \n# pass 1\ncandidates = list(set(itertools.chain.from_iterable(rdd.mapPartitions(lambda p: pass_one(p, rdd_size)).collect())))\ncandidates.sort(key = len)\n\n# pass 2\nbaskets = rdd.map(lambda v: v[1]).collect()\nitemsets = dict()\nfor basket in baskets:\n for candidate in candidates:\n if(set(candidate).issubset(basket)):\n if(candidate in itemsets):\n itemsets[candidate] = itemsets[candidate] + 1\n else:\n itemsets[candidate] = 1\n\nfrequent_items = sc.parallelize(list(itemsets.items())).filter(lambda t: t[1] >= support).map(lambda f: f[0]).collect()\nfrequent_items.sort(key = len)\n\n# write to file\nwith open(str(sys.argv[4]), 'w') as f:\n f.write(\"Candidates:\\n\")\n \n max_value = len(candidates[-1])\n candidates_list = []\n for x in range(1, max_value+1):\n temp_list = [e for e in candidates if len(e) == x]\n temp_list = sorted(temp_list)\n candidates_list.append(temp_list)\n \n for candidate in candidates_list:\n f.write(str(candidate).replace(\"[\", \"\").replace(\"]\",\"\").replace(\",)\",\")\").replace(\"), (\", \"),(\"))\n f.write(\"\\n\\n\")\n \n max_value = len(frequent_items[-1])\n f_list = []\n for x in range(1,max_value+1):\n temp_list = [e for e in frequent_items if len(e) == x]\n temp_list = sorted(temp_list)\n f_list.append(temp_list)\n \n f.write(\"Frequent Itemsets:\\n\")\n for frequent in f_list:\n f.write(str(frequent).replace(\"[\", \"\").replace(\"]\",\"\").replace(\",)\",\")\").replace(\"), (\", \"),(\"))\n f.write(\"\\n\\n\")\n \nduration = time.time() - start_time\n\nprint(\"Duration: \", duration)\n\n","repo_name":"raajraj/SON_Algorithm","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11090114510","text":"#!/opt/sct/python/envs/venv_sct/bin/python\n#\n# Functional connectivity maps\n\nimport numpy\nimport nibabel\nimport nilearn\nimport pandas\nimport matplotlib.pyplot as pyplot\nfrom nilearn.input_data import NiftiMasker,NiftiLabelsMasker\nfrom nilearn.masking import intersect_masks,unmask\nfrom nilearn.regions import img_to_signals_labels\n\nmoco_file = 'fmri_moco.nii.gz'\nregbp_file = 'fmri_regbp.nii.gz'\nlevel_file = 'fmri_cord_labeled.nii.gz'\ngm_file = 'fmri_gmcut.nii.gz'\ngm_csv = 'fmri_gmcut.csv'\n\n# Make slice-label ROIs from the GM file so we can easily do slice-wise masking\nimg = nibabel.load(gm_file)\nnslices = img.shape[2]\nslice_img = list()\nfor s in range(nslices):\n slice_regbp = numpy.zeros(img.get_data().shape)\n slice_regbp[:,:,s] = 1\n slice_img.append( nibabel.Nifti1Image(slice_regbp,img.affine,img.header) )\n\n# Get the ROI label info\nroi_info = pandas.read_csv(gm_csv)\n\n# Compute connectivity within each slice, applying bandpass filter\nprint(\"Connectivity computation\")\nfor s in range(nslices):\n \n # ROI signals. Labels are the same every time because they come from\n # the full gm_file. The slice_img varies but is only an extra mask.\n # Signals must be standardized for the connectivity calc below\n roi_regbp,roi_labels = img_to_signals_labels(regbp_file,gm_file,slice_img[s])\n roi_regbp = nilearn.signal.clean(roi_regbp,detrend=True, standardize=True)\n roi_horns = roi_info[\"horn\"][roi_info[\"label\"]==roi_labels]\n\n roi_moco,roi_moco_labels = img_to_signals_labels(moco_file,gm_file,slice_img[s])\n if not roi_moco_labels==roi_labels:\n raise Exception('Label mismatch')\n \n # Plot before and after filtering for 1 ROI\n fig,axs = pyplot.subplots(2,1)\n axs[0].plot(range(roi_moco.shape[0]),roi_moco[:,0])\n axs[0].set_yticklabels([])\n axs[0].set_title('%s signal, slice %d' % (roi_horns[0],s))\n axs[1].plot(range(roi_regbp.shape[0]),roi_regbp[:,0])\n axs[1].set_yticklabels([])\n axs[1].set_title('After regression+bandpass')\n axs[1].set_xlabel('Volume')\n fig.tight_layout()\n fig.savefig('roisignal_%s_slice%d.png' % (roi_horns[0],s))\n\n # Get filtered fmri data for this slice\n slice_masker = NiftiMasker(slice_img[s])\n slice_regbp = slice_masker.fit_transform(regbp_file)\n slice_regbp = nilearn.signal.clean(slice_regbp,detrend=True, standardize=True)\n #print('Slice data size %d,%d' % slice_regbp.shape)\n\n # Connectivity matrix computation. Relies on the detrend and standardize \n # steps so we are working with mean 0, SD 1 data.\n # Otherwise we will get some nonsense instead of an actual correlation coef.\n # Normalizing factor is N, not N-1, because nilearn.signal._standardize scales\n # using numpy.std with default dof 0.\n r_roi_mat = numpy.dot(roi_regbp.T, roi_regbp) / (roi_regbp.shape[0])\n\n # Flatten the conn matrices to the unique values\n k1,k2 = numpy.triu_indices(roi_regbp.shape[1],k=1)\n r_roi_vec = r_roi_mat[k1,k2]\n z_roi_vec = numpy.arctanh(r_roi_vec) * numpy.sqrt(roi_regbp.shape[0]-3)\n roi_labelvec = [\"{}_{}\".format(a,b) for a,b in zip(roi_horns[k1],roi_horns[k2])]\n \n # Get level labels. Hack - list the same image twice because img_to_signals_labels\n # requires 4D input for some reason. Trim the duplicate off afterwards\n level_data,level_labels = img_to_signals_labels([level_file,level_file],gm_file,\n slice_img[s],strategy=\"median\")\n level_data = level_data[0,:]\n if not level_labels==roi_labels:\n raise Exception(\"Label mismatch\")\n level = numpy.round(numpy.median(level_data))\n\n # Build data frame of slicewise results\n # DataFrame.append handles varying/mismatched colnames correctly\n colnames = [\"metric\",\"slice\",\"level\"] + roi_labelvec\n rowdataR = [\"R\",\"%d\" % s,\"%d\" % level] + [\"%0.3f\" % x for x in r_roi_vec]\n rowdataZ = [\"Z\",\"%d\" % s,\"%d\" % level] + [\"%0.3f\" % x for x in z_roi_vec]\n thisR = pandas.DataFrame([rowdataR],columns=colnames)\n thisZ = pandas.DataFrame([rowdataZ],columns=colnames)\n print(thisR)\n if s==0:\n roiR = thisR\n roiZ = thisZ\n else:\n roiR = roiR.append(thisR)\n roiZ = roiZ.append(thisZ)\n \n # Connectivity map computation\n # Relies on standardization to mean 0, sd 1 above\n r_slice_regbp = numpy.dot(slice_regbp.T, roi_regbp) / roi_regbp.shape[0]\n z_slice_regbp = numpy.arctanh(r_slice_regbp) * numpy.sqrt(roi_regbp.shape[0]-3)\n #print( 'R %d,%d ranges %f,%f' % (r_slice_regbp.shape[0],r_slice_regbp.shape[1],\n # r_slice_regbp.min(),r_slice_regbp.max()) )\n r_slice_img = slice_masker.inverse_transform(r_slice_regbp.T)\n z_slice_img = slice_masker.inverse_transform(z_slice_regbp.T)\n\n # Put R back into image space slice by slice. Initialized to zero\n # and slices don't overlap, so we can just add one at a time\n if s==0:\n r_img = r_slice_img # Initialize\n z_img = z_slice_img\n else:\n r_img = nilearn.image.math_img(\"a+b\",a=r_img,b=r_slice_img)\n z_img = nilearn.image.math_img(\"a+b\",a=z_img,b=z_slice_img)\n\n\n# Save complete R,Z images to file\nfor k,horn in enumerate(roi_horns):\n nilearn.image.index_img(r_img,k).to_filename('fmri_R_%s_inslice.nii.gz' % horn)\n nilearn.image.index_img(z_img,k).to_filename('fmri_Z_%s_inslice.nii.gz' % horn)\n\nroiR.to_csv('R_inslice.csv',index=False)\nroiZ.to_csv('Z_inslice.csv',index=False)\n","repo_name":"baxpr/sct-singularity","sub_path":"fmri_pipeline/compute_connectivity_slice.py","file_name":"compute_connectivity_slice.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10525706695","text":"import torch\nimport torch.nn as nn\nfrom logger import log\n\n\nclass double_conv(nn.Module):\n def __init__(self, in_ch, out_ch, mid_ch=None):\n super(double_conv, self).__init__()\n if mid_ch is None:\n mid_ch = out_ch\n self.conv1 = nn.Conv2d(in_ch, mid_ch, 3, padding=1)\n self.bn1 = nn.BatchNorm2d(mid_ch)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(mid_ch, out_ch, 3, padding=1)\n self.bn2 = nn.BatchNorm2d(out_ch)\n self.relu = nn.ReLU(inplace=True)\n\n def layers_list(self):\n return [self.conv1,self.bn1,self.conv2,self.bn2]\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n return x\n \nclass down(nn.Module):\n def __init__(self, in_ch, out_ch, pool=True):\n super(down, self).__init__()\n self.pool = pool\n if pool:\n self.mp = nn.MaxPool2d(2)\n self.conv = double_conv(in_ch, out_ch)\n\n def layers_list(self):\n return self.conv.layers_list()\n\n def forward(self, x):\n if self.pool:\n x = self.mp(x)\n x = self.conv(x)\n return x\n \nclass up(nn.Module):\n def __init__(self, in_ch, out_ch, mid_ch):\n super(up, self).__init__()\n self.up = nn.Upsample(scale_factor=2, mode='nearest')\n self.conv = double_conv(in_ch, out_ch, mid_ch=mid_ch)\n\n def layers_list(self):\n return self.conv.layers_list()\n\n def forward(self, x1, x2=None, x3=None):\n x1 = self.up(x1)\n if x2 is None and x3 is None:\n x = x1\n elif x3 is None:\n x = torch.cat([x2, x1], dim=1)\n else:\n x = torch.cat([x2, x3, x1], dim=1)\n x = self.conv(x)\n return x\n\nclass mid(nn.Module):\n def __init__(self, in_ch, out_ch, small_ch=None):\n super(mid, self).__init__()\n self.mp = nn.MaxPool2d(2)\n self.conv1 = nn.Conv2d(in_ch, out_ch, 3, padding=1)\n if small_ch is None:\n self.conv2 = nn.Conv2d(out_ch, in_ch, 3, padding=1)\n else:\n self.conv2 = nn.Conv2d(out_ch, small_ch, 3, padding=1)\n\n def layers_list(self):\n return [self.conv1,self.conv2]\n\n def forward(self, x):\n x = self.mp(x)\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\nclass outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(outconv, self).__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, 1)\n self.sigm = nn.Sigmoid()\n\n def layers_list(self):\n return [self.conv]\n\n def forward(self, x):\n x = self.conv(x)\n return self.sigm(x)\n\nclass UNet(nn.Module):\n def __init__(self, n_classes=1, base=10):\n super(UNet, self).__init__()\n self.down1 = down(6, 2**base, pool=False)\n self.down2 = down(2**base, 2**(base+1))\n self.down3 = down(2**(base+1), 2**(base+2))\n self.down4 = down(2**(base+2), 2**(base+3))\n self.mid = mid(2**(base+3), 2**(base+4))\n self.up1 = up(2**(base+4), 2**(base+2), 2**(base+3))\n self.up2 = up(2**(base+3), 2**(base+1), 2**(base+2))\n self.up3 = up(2**(base+2), 2**base, 2**(base+1))\n self.up4 = up(2**(base+1), 2**base, 2**base)\n self.outc = outconv(2**base, n_classes)\n\n def forward(self, x):\n x1 = self.down1(x)\n x2 = self.down2(x1)\n x3 = self.down3(x2)\n x4 = self.down4(x3)\n x5 = self.mid(x4)\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n return x\n\n def backpropagation(self, prediction, target, optimizer):\n loss = self.dice_loss(prediction, target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss\n\n def dice_loss(self, inputs, targets, smooth=1):\n # comment out if your model contains a sigmoid or equivalent activation layer\n #inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum()\n dice = (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n\n return 1 - dice\n\n\nclass UNetRGBD(nn.Module):\n def __init__(self, n_classes=1, base=10):\n super(UNetRGBD, self).__init__()\n self.rgb_down1 = down(5, 2**base, pool=False)\n self.rgb_down2 = down(2**base, 2**(base + 1))\n self.rgb_down3 = down(2**(base + 1), 2**(base + 2))\n self.rgb_down4 = down(2**(base + 2), 2**(base + 3))\n self.depth_down1 = down(1, 2**base, pool=False)\n self.depth_down2 = down(2**base, 2**(base + 1))\n self.depth_down3 = down(2**(base + 1), 2**(base + 2))\n self.depth_down4 = down(2**(base + 2), 2**(base + 3))\n self.mid = mid(2**(base + 4), 2**(base + 5), 2**(base + 4))\n self.up1 = up(2**(base + 4), 2**(base + 3), 2**(base + 4))\n self.up2 = up(2**(base + 4), 2**(base + 2), 2**(base + 3))\n self.up3 = up(2**(base + 3), 2**(base + 1), 2**(base + 2))\n self.up4 = up(2**(base + 2), 2**(base + 1), 2**(base + 1))\n self.outc = outconv(2**(base + 1), n_classes)\n\n def forward(self, x):\n rgb, depth = x\n rgb1 = self.rgb_down1(rgb)\n rgb2 = self.rgb_down2(rgb1)\n rgb3 = self.rgb_down3(rgb2)\n rgb4 = self.rgb_down4(rgb3)\n \n depth1 = self.depth_down1(depth)\n depth2 = self.depth_down2(depth1)\n depth3 = self.depth_down3(depth2)\n depth4 = self.depth_down4(depth3)\n\n x5 = self.mid(torch.cat([rgb4, depth4], dim=1))\n x = self.up1(x5)\n x = self.up2(x, rgb3, depth3)\n x = self.up3(x, rgb2, depth2)\n x = self.up4(x, rgb1, depth1)\n x = self.outc(x)\n return x\n \n def backpropagation(self, prediction, target, optimizer):\n loss = self.dice_loss(prediction, target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return loss\n\n def dice_loss(self, inputs, targets, smooth=1):\n # comment out if your model contains a sigmoid or equivalent activation layer\n #inputs = torch.sigmoid(inputs)\n\n # flatten label and prediction tensors\n inputs = inputs.view(-1)\n targets = targets.view(-1)\n\n intersection = (inputs * targets).sum()\n dice = (2. * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)\n\n return 1 - dice","repo_name":"51n3D/Interactive-rgbd-segmentation","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2881334729","text":"# Define the graph as an adjacency list\r\ngraph = {\r\n 'Arad': ['Zerind', 'Sibiu', 'Timisoara'],\r\n 'Zerind': ['Arad', 'Oradea'],\r\n 'Oradea': ['Zerind', 'Sibiu'],\r\n 'Sibiu': ['Arad', 'Oradea', 'Fagaras', 'Rimnicu Vilcea'],\r\n 'Timisoara': ['Arad', 'Lugoj'],\r\n 'Lugoj': ['Timisoara', 'Mehadia'],\r\n 'Mehadia': ['Lugoj', 'Drobeta'],\r\n 'Drobeta': ['Mehadia', 'Craiova'],\r\n 'Craiova': ['Drobeta', 'Rimnicu Vilcea', 'Pitesti'],\r\n 'Rimnicu Vilcea': ['Sibiu', 'Craiova', 'Pitesti'],\r\n 'Fagaras': ['Sibiu', 'Bucharest'],\r\n 'Pitesti': ['Rimnicu Vilcea', 'Craiova', 'Bucharest'],\r\n 'Bucharest': ['Fagaras', 'Pitesti']\r\n}\r\n\r\n# Define edge weights (distances)\r\nweights = {\r\n ('Arad', 'Zerind'): 75,\r\n ('Arad', 'Sibiu'): 140,\r\n ('Arad', 'Timisoara'): 118,\r\n ('Zerind', 'Oradea'): 71,\r\n ('Oradea', 'Sibiu'): 151,\r\n ('Sibiu', 'Fagaras'): 99,\r\n ('Sibiu', 'Rimnicu Vilcea'): 80,\r\n ('Timisoara', 'Lugoj'): 111,\r\n ('Lugoj', 'Mehadia'): 70,\r\n ('Mehadia', 'Drobeta'): 75,\r\n ('Drobeta', 'Craiova'): 120,\r\n ('Craiova', 'Rimnicu Vilcea'): 146,\r\n ('Craiova', 'Pitesti'): 138,\r\n ('Rimnicu Vilcea', 'Pitesti'): 97,\r\n ('Fagaras', 'Bucharest'): 211,\r\n ('Pitesti', 'Bucharest'): 101\r\n}\r\n\r\ndef dfs_with_weights(graph, weights, start, goal, path=None, cost=0):\r\n if path is None:\r\n path = [start]\r\n\r\n if start == goal:\r\n return path, cost\r\n\r\n if start not in graph:\r\n return None\r\n\r\n shortest_path = None\r\n\r\n for neighbor in graph[start]:\r\n if neighbor not in path:\r\n new_result = dfs_with_weights(graph, weights, neighbor, goal, path + [neighbor], cost + weights.get((start, neighbor), 0))\r\n\r\n if new_result is not None:\r\n new_path, new_cost = new_result\r\n if shortest_path is None or new_cost < shortest_path[1]:\r\n shortest_path = (new_path, new_cost)\r\n\r\n return shortest_path\r\n\r\n\r\n# start_city = 'Arad'\r\n# goal_city = 'Bucharest'\r\nstart_city = input(\"Enter the start city: \")\r\ngoal_city = input(\"Enter the goal city: \")\r\nresult = dfs_with_weights(graph, weights, start_city, goal_city)\r\n\r\nif result:\r\n result_path, total_cost = result\r\n print(f\"Shortest Path from {start_city} to {goal_city}: {result_path}\")\r\n print(f\"Total Cost: {total_cost}\")\r\nelse:\r\n print(f\"No path found from {start_city} to {goal_city}\")","repo_name":"Taylor-Omondi-Odhiambo/A.I","sub_path":"df2.py","file_name":"df2.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"39156167733","text":"import os\n\ndirname = os.path.dirname(__file__)\nfile = open(dirname+\"/input_data.txt\", \"r\")\n\ninput_list = [line.strip() for line in file.readlines()]\n\ndef get_priority(letter):\n priority = ord(letter)-64\n if priority <= 26:\n # capitals\n priority += 26\n else:\n priority -= 32\n return priority\n\n\ngrouped_lines = []\nfor i in range(len(input_list)):\n if i % 3 == 0:\n current_group = []\n\n current_group.append(input_list[i])\n\n if i % 3 == 2:\n grouped_lines.append(current_group)\n\n\nsum = 0\nfor line_group in grouped_lines:\n rucksackA = line_group[0]\n rucksackB = line_group[1]\n rucksackC = line_group[2]\n\n common = list(set(rucksackA).intersection(rucksackB).intersection(rucksackC))\n priority = get_priority(common[0])\n sum += priority\n\n\nprint(\"The total priority of rucksacks is {}\".format(sum))\n\n","repo_name":"makah21803/Advent_of_code","sub_path":"2022/Day3/day3_extra.py","file_name":"day3_extra.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"5270762198","text":"import csv\nimport pandas as pd\nimport plotly.express as px\nimport numpy as np\n\ndf = pd.read_csv(\"data.csv\")\n\ntoefl = df[\"TOEFL Score\"].tolist()\nchances = df[\"Chance of Admit\"].tolist()\n\ntoefl2 = np.array(toefl)\nchances2 = np.array(chances)\n\nm,c = np.polyfit(toefl, chances2, 1)\nx = 250\ny = []\n\nline = (m*x) + c\nprint(\"Chances of getting admission on 250 TOEFL is: \" + str(line))\nfor x in toefl2:\n yvalue = (m*x)+c\n y.append(yvalue)\n\n\nfig = px.scatter(x=toefl2, y=chances2)\nfig.update_layout(shapes = [\n dict(\n type = \"line\",\n y0 = min(y),\n y1 = max(y),\n x0 = min(toefl2),\n x1 = max(toefl2),\n )\n])\nfig.show()","repo_name":"sjpokedoke/project114","sub_path":"toefl.py","file_name":"toefl.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37252395463","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nhttps://plotly.com/python/plotly-express/\r\n\r\n@author: Nick\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport plotly.express as px\r\nfrom plotly.offline import plot\r\n\r\ndf = pd.read_csv(\"Computers.csv\")\r\n\r\n# define the target feature and other meaningful features\r\ntargets = [\"price\"]\r\nfeatures = [\"speed\", \"hd\", \"ram\", \"screen\", \"ads\", \"trend\"]\r\ncolors = [\"cd\", \"multi\", \"premium\"]\r\n\r\n# plot a matrix of scatter plots to see the whole data set\r\nfig = px.scatter_matrix(df, \r\n dimensions=targets + features,\r\n color=colors[2],\r\n opacity=0.7)\r\nfig.update_traces(diagonal_visible=False)\r\nplot(fig)\r\n\r\n# these 3 variables have the strongest separation for the target\r\ngroup = [\"price\", \"ram\", \"hd\"]\r\n\r\n# plot the group of 3 variables across categories\r\nfig = px.scatter_3d(df, x=group[0], y=group[1], z=group[2],\r\n color=colors[2], opacity=0.7)\r\nplot(fig)\r\n\r\n# plot two variables across categories\r\nfig = px.density_contour(df, x=group[0], y=group[1], marginal_x=\"histogram\", \r\n marginal_y=\"box\", color=colors[2])\r\nplot(fig)\r\n\r\n# plot a singl variable across categories\r\nfig = px.strip(df, y=group[1], color=colors[2])\r\nplot(fig)\r\n","repo_name":"N-ickMorris/Supervised","sub_path":"computers_plots.py","file_name":"computers_plots.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70650918777","text":"import datetime\nfrom djangoAPI.models import *\nfrom project.models import *\nfrom djangoAPI.graphql.projects import *\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404\n\n\ndef user_projects(usr_id):\n \"\"\"\n Display all projects that are the user belongs to\n \"\"\"\n usr_obj = UserTbl.objects.get(pk=usr_id)\n usr_projs = []\n links = UserProjectLinkTbl.objects.filter(user_id=usr_id)\n # usr_role = DesignProjectHumanRoleTypeTbl.objects.get(pk=usr_obj.role_id)\n for link in links:\n proj = DesignProjectTbl.objects.get(pk=link.project_id)\n proj = UserProjects(id=proj.id, project_number=proj.id,\n project_name=proj.name, user_role_id=usr_obj.role_id)\n usr_projs.append(proj)\n return usr_projs\n\n\ndef project_details(proj_id):\n \"\"\"\n Display the Details of a Project\n \"\"\"\n proj = get_object_or_404(DesignProjectTbl, pk=proj_id)\n disp_proj_detail = ProjectDetails()\n disp_proj_detail.__dict__ = proj.__dict__.copy()\n disp_proj_detail.bus_unit_name = proj.op_bus_unit.name\n disp_proj_detail.start_date = proj.planned_date_range.lower\n disp_proj_detail.end_date = proj.planned_date_range.upper\n # put in placeholder data for contacts in case nothing is found\n disp_proj_detail.project_manager = 'Does Not Exist'\n disp_proj_detail.project_manager_email = 'example@example.ca'\n disp_proj_detail.key_business_unit_contact = 'Does Not Exist'\n disp_proj_detail.key_business_unit_contact_email = 'example@example.ca'\n disp_proj_detail.asset_data_steward = 'Does Not Exist'\n disp_proj_detail.asset_data_steward_email = 'example@example.ca'\n persons = get_list_or_404(UserTbl, role_id='b')\n for person in persons:\n try:\n links = UserProjectLinkTbl.objects.filter(user_id=person.pk)\n for link in links:\n if link.project_id == proj.id:\n disp_proj_detail.project_manager = person.get_full_name()\n disp_proj_detail.project_manager_email = person.auth_user.email\n break\n except Exception:\n pass\n persons = get_list_or_404(UserTbl, role_id='c')\n for person in persons:\n try:\n links = UserProjectLinkTbl.objects.filter(user_id=person.pk)\n for link in links:\n if link.project_id == proj.id:\n disp_proj_detail.key_business_unit_contact = person.get_full_name()\n disp_proj_detail.key_business_unit_contact_email = person.auth_user.email\n break\n except Exception:\n pass\n persons = get_list_or_404(UserTbl, role_id='d')\n for person in persons:\n try:\n links = UserProjectLinkTbl.objects.filter(user_id=person.pk)\n for link in links:\n if link.project_id == proj.id:\n disp_proj_detail.asset_data_steward = person.get_full_name()\n disp_proj_detail.asset_data_steward_email = person.auth_user.email\n break\n except Exception:\n pass\n return disp_proj_detail\n\n\ndef project_phases(project_id):\n \"\"\"\n Returns all construction phases associated with a design project\n \"\"\"\n result = []\n objs = list(ConstructionPhaseTbl.objects.filter(\n design_project=project_id))\n for obj in objs:\n new_obj = ConstructionPhases()\n new_obj.__dict__ = obj.__dict__.copy()\n new_obj.start_date = obj.planned_date_range.lower\n new_obj.end_date = obj.planned_date_range.upper\n result.append(new_obj)\n return result\n\n\ndef construction_phase(phase_id):\n \"\"\"\n Return one construction phase by id\n \"\"\"\n obj = ConstructionPhaseTbl.objects.get(pk=phase_id)\n new_obj = ConstructionPhases()\n new_obj.__dict__ = obj.__dict__.copy()\n new_obj.start_date = obj.planned_date_range.lower\n new_obj.end_date = obj.planned_date_range.upper\n return new_obj\n\n\ndef project_role(request, project_id):\n \"\"\"\n Returns one project for editing roles\n \"\"\"\n usr_id = request.user.id\n usr_obj = UserTbl.objects.get(pk=usr_id)\n link = UserProjectLinkTbl.objects.filter(user_id=usr_id).get(project_id=project_id)\n # usr_role = DesignProjectHumanRoleTypeTbl.objects.get(pk=usr_obj.role_id)\n proj = link.project\n proj = UserProjects(id=proj.id, project_number=proj.id,\n project_name=proj.name, user_role_id=usr_obj.role_id)\n return proj\n\n\ndef update_project_role(user_id, data):\n \"\"\"\n change the role of the user\n \"\"\"\n # Currently there is a AllHumanRoleType and a UserRole table,\n # changing the AllHumanRoleType changes the role for all projects\n # the user is a part of, while changing the UserRole would be project\n # specific there are no specs for UserRole\n # TODO\n\n\ndef update_design_project(user_id, new):\n \"\"\"\n sort through the data and updates as necessary\n compares against the old project details\n \"\"\"\n old = project_details(new.get('id'))\n for key, value in new.__dict__.items():\n if old.__dict__[key] != value:\n print(key, value)\n","repo_name":"CityofToronto/tw-front-back","sub_path":"django/project/commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34494204053","text":"from gtts import gTTS\nimport os\nfrom tkinter import *\n\ndef textToSpeech():\n text = entry.get()\n language = 'en'\n output = gTTS(text=text, lang=language, slow=False)\n output.save('output.mp3')\n os.system(\"start output.mp3\")\n\nroot = Tk()\nroot.title(\"Text To Speech Convertor\")\n\ncanvas = Canvas(root, width=400, height=300)\ncanvas.pack()\n\nlabel = Label(root, text=\"Enter text \")\ncanvas.create_window(200, 150, window=label)\n\nentry = Entry(root)\ncanvas.create_window(200, 180, window=entry)\n\nbutton = Button(text=\"Convert\", command=textToSpeech)\ncanvas.create_window(200, 230, window=button)\n\nroot.mainloop()","repo_name":"Sreenitti/Python-Projects","sub_path":"Text_to_Speech_Convertor.py","file_name":"Text_to_Speech_Convertor.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43315292230","text":"from ratelimit import ProtectedException\nfrom ratelimit import OverLimits\n\nimport time\nimport threading\nimport logging\nimport queue\n\nfrom util import twittertime as twittertime\n\nlogger = logging.getLogger(__name__)\n\nclass CompetitionTweetsScraperWorker(threading.Thread):\n def __init__(self, scrapeservice, rlapi, tweetservice, stats):\n threading.Thread.__init__(self)\n self.rlapi = rlapi\n self.tweetservice = tweetservice\n self.scrapeservice = scrapeservice\n self.stats = stats\n\n def run(self):\n try:\n job = self.scrapeservice.dequeue()\n if job is None:\n return\n\n (user_id, job_since_id) = (job[\"user_id\"], job[\"since_id\"])\n\n since_id = job_since_id\n\n last_tweets = self.tweetservice.tweets_where('user_id = %s', [user_id], 1, 'tweet_id desc')\n\n last_tweet_id = None\n\n if len(last_tweets):\n last_tweet_id = last_tweets[0]['tweet_id']\n\n if since_id is not None:\n since_id = max(since_id, last_tweet_id)\n else:\n since_id = last_tweet_id\n\n if since_id is None:\n logger.debug('Getting tweets for %s, starting with whenever', user_id)\n else:\n logger.debug('Getting tweets for %s, starting with %d', user_id, since_id)\n\n params = {'user_id': user_id, 'count': 200}\n\n if since_id is not None and since_id > 0:\n params['since_id'] = since_id\n\n try:\n resp = self.rlapi.request('statuses/user_timeline', params)\n except ProtectedException as e:\n logger.info('%s is protected', user_id)\n return\n except OverLimits:\n self.scrapeservice.enqueue({\n \"user_id\": user_id,\n \"since_id\": job_since_id\n })\n return\n\n for tweet in resp:\n self.tweetservice.queue_tweet(tweet, False)\n\n try:\n self.stats.log_point('tweet', twittertime(tweet['created_at']))\n except:\n pass\n\n if since_id is None:\n since_id = tweet['id']\n else:\n since_id = max(since_id, tweet['id'])\n\n tweets = len(resp)\n self.tweetservice.commit()\n\n if tweets > 0:\n self.scrapeservice.enqueue({\n \"user_id\": user_id,\n \"since_id\": since_id\n })\n\n except Exception as err:\n logger.exception('Caught error: %s' % (str(err)))\n\n","repo_name":"SMISC/logging","sub_path":"provision/src/scraper/competitiontweetsworker.py","file_name":"competitiontweetsworker.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12826985729","text":"#!/usr/bin/env python\n# coding: utf-8\n# @File Name: 11_multiple_sequences.py\n# @Author: Joshua Liu\n# @Email: liuchaozhen@neusoft.com\n# @Create Date: 2016-02-02 10:02:04\n# @Last Modified: 2016-02-02 11:02:03\n# @Description:\n'''\n## 问题\n\n如何同时遍历多个序列?\n\n## 方案\n\n使用内建的 zip() 函数,zip(a, b) 函数,内部创建一个 tuples(x, y),x是a中的元素,y是b中的元素,遍历以短的那个序列长度结束。\n'''\n\nxpts = [1, 5, 4, 2, 10, 7]\nypts = [101, 78, 37, 15, 62, 99]\nfor x, y in zip(xpts, ypts):\n print(x, y)\n\n'''\n输出如下:\n\n1 101\n5 78\n4 37\n2 15\n10 62\n7 99\n'''\n\na = [1, 2, 3]\nb = ['w', 'x', 'y', 'z']\nfor i in zip(a, b):\n print(i)\n\n'''\n输出如下:\n\n(1, 'w')\n(2, 'x')\n(3, 'y')\n\n如果想以长的那个序列作为遍历的依据,可以使用 zip_longest() 代替:\n'''\n\nfrom itertools import zip_longest\nfor i in zip_longest(a, b):\n print(i)\n\n'''\n输出如下:\n\n(1, 'w')\n(2, 'x')\n(3, 'y')\n(None, 'z')\n\n\n## 讨论\n\nzip() 函数一般用于数据配对,比如 zip(a, b)。zip() 还可以传入多个序列,比如\n'''\n\na = [1, 2, 3]\nb = ['a', 'b', 'c']\nc = ['A', 'B', 'C']\nfor i in zip(a, b, c):\n print(i)\n\n'''\n输出如下:\n\n(1, 'a', 'A')\n(2, 'b', 'B')\n(3, 'c', 'C')\n\n最后,别忘了,zip() 函数产生了一个迭代器,如果想保存到列表里,得用 list() 函数\n'''\n\nprint(zip(a, b))\nprint(list(zip(a, b)))\n'''\n\n[(1, 'a'), (2, 'b'), (3, 'c')]\n'''\n","repo_name":"fooyou/Exercise","sub_path":"python/3rd_cook_book/Chapter_4_Iterators_Generators/11_multiple_sequences.py","file_name":"11_multiple_sequences.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27647438427","text":"from contextlib import contextmanager\nfrom os import path\nimport re\nimport sys\n\nfrom invoke.vendor.lexicon import Lexicon\nfrom invoke import MockContext, Result, Config, Exit\nfrom docutils.utils import Reporter\nfrom unittest.mock import Mock, patch, call\nimport pytest\nfrom pytest import skip\nfrom pytest_relaxed import trap, raises\n\nfrom invocations.packaging.semantic_version_monkey import Version\nfrom invocations.packaging.release import (\n Changelog,\n Release,\n Tag,\n UndefinedReleaseType,\n VersionFile,\n _latest_and_next_version,\n _latest_feature_bucket,\n _release_and_issues,\n _release_line,\n all_,\n prepare,\n push,\n build,\n load_version,\n publish,\n status,\n upload,\n test_install as install_test_task, # to avoid pytest treating as test func\n ns as release_ns,\n)\n\n\nclass release_line_:\n def assumes_bugfix_if_release_branch(self):\n c = MockContext(run=Result(\"2.7\"))\n assert _release_line(c)[1] == Release.BUGFIX\n\n def assumes_feature_if_main(self):\n c = MockContext(run=Result(\"main\"))\n assert _release_line(c)[1] == Release.FEATURE\n\n def assumes_feature_if_master(self):\n c = MockContext(run=Result(\"master\"))\n assert _release_line(c)[1] == Release.FEATURE\n\n def is_undefined_if_arbitrary_branch_name(self):\n c = MockContext(run=Result(\"yea-whatever\"))\n assert _release_line(c)[1] == Release.UNDEFINED\n\n def is_undefined_if_specific_commit_checkout(self):\n # Just a sanity check; current logic doesn't differentiate between e.g.\n # 'gobbledygook' and 'HEAD'.\n c = MockContext(run=Result(\"HEAD\"))\n assert _release_line(c)[1] == Release.UNDEFINED\n\n\nclass latest_feature_bucket_:\n def base_case_of_single_release_family(self):\n bucket = _latest_feature_bucket(\n dict.fromkeys([\"unreleased_1_feature\"])\n )\n assert bucket == \"unreleased_1_feature\"\n\n def simple_ordering_by_bucket_number(self):\n bucket = _latest_feature_bucket(\n dict.fromkeys([\"unreleased_1_feature\", \"unreleased_2_feature\"])\n )\n assert bucket == \"unreleased_2_feature\"\n\n def ordering_goes_by_numeric_not_lexical_order(self):\n bucket = _latest_feature_bucket(\n dict.fromkeys(\n [\n \"unreleased_1_feature\",\n # Yes, releases like 10.x or 17.x are unlikely, but\n # definitely plausible - think modern Firefox for example.\n \"unreleased_10_feature\",\n \"unreleased_23_feature\",\n \"unreleased_202_feature\",\n \"unreleased_17_feature\",\n \"unreleased_2_feature\",\n ]\n )\n )\n assert bucket == \"unreleased_202_feature\"\n\n\nclass release_and_issues_:\n class bugfix:\n # TODO: factor out into setup() so each test has some excluded/ignored\n # data in it - helps avoid naive implementation returning x[0] etc.\n\n def no_unreleased(self):\n release, issues = _release_and_issues(\n changelog={\"1.1\": [], \"1.1.0\": [1, 2]},\n branch=\"1.1\",\n release_type=Release.BUGFIX,\n )\n assert release == \"1.1.0\"\n assert issues == []\n\n def has_unreleased(self):\n skip()\n\n class feature:\n def no_unreleased(self):\n # release is None, issues is empty list\n release, issues = _release_and_issues(\n changelog={\"1.0.1\": [1], \"unreleased_1_feature\": []},\n branch=\"main\",\n release_type=Release.FEATURE,\n )\n assert release is None\n assert issues == []\n\n def has_unreleased(self):\n # release is still None, issues is nonempty list\n release, issues = _release_and_issues(\n changelog={\"1.0.1\": [1], \"unreleased_1_feature\": [2, 3]},\n branch=\"main\",\n release_type=Release.FEATURE,\n )\n assert release is None\n assert issues == [2, 3]\n\n def undefined_always_returns_None_and_empty_list(self):\n skip()\n\n\nclass find_package_:\n def can_be_short_circuited_with_config_value(self):\n skip()\n\n def seeks_directories_with_init_py_in_em(self):\n skip()\n\n def blacklists_common_non_public_modules(self):\n skip()\n\n def errors_if_cannot_find_anything(self):\n skip()\n\n def errors_if_ambiguous_results(self):\n # I.e. >1 possible result\n skip()\n\n\nclass load_version_:\n def setup(self):\n sys.path.insert(0, support_dir)\n\n def teardown(self):\n sys.path.remove(support_dir)\n\n def _expect_version(self, expected, config_val=None):\n config = {\"package\": \"fakepackage\"}\n if config_val is not None:\n config[\"version_module\"] = config_val\n c = MockContext(Config(overrides={\"packaging\": config}))\n assert load_version(c) == expected\n\n # NOTE: these all also happen to test the Python bug re: a unicode value\n # given to `__import__(xxx, fromlist=['onoz'])`. No real point making\n # another one.\n\n def defaults_to_underscore_version(self):\n self._expect_version(\"1.0.0\")\n\n def can_configure_which_module_holds_version_data(self):\n self._expect_version(\"1.0.1\", config_val=\"otherversion\")\n\n @patch(\"invocations.packaging.release.sys.modules\", wraps=sys.modules)\n def reloads_version_in_case_edited_during_run(self, modules):\n # NOTE: mock doesn't mock/wrap dunder-attrs well (eg see python core\n # bug #25597) so we gotta rub some more on top, esp for eg\n # Python 3.8+ importlib which does additional setattrs and pops.\n # (but we still wraps= in @patch as it smooths over other bits we don't\n # care about mocking, at least under Python <3.8)\n even_faker_package = Mock(_version=Mock(__version__=\"1.0.0\"))\n modules.__getitem__.return_value = even_faker_package\n modules.get.return_value = even_faker_package\n self._expect_version(\"1.0.0\")\n # Expect our own internal pops (the stdlib ones, eg under 3.8+, don't\n # exactly match these - no 2nd arg - so we can be pretty sure this\n # won't incorrectly pass due to them)\n modules.pop.assert_any_call(\"fakepackage._version\", None)\n modules.pop.assert_any_call(\"fakepackage\", None)\n\n def errors_usefully_if_version_module_not_found(self):\n skip()\n\n\nclass latest_and_next_version_:\n def next_patch_of_bugfix_release(self):\n versions = _latest_and_next_version(\n Lexicon(\n {\n \"release_type\": Release.BUGFIX,\n \"latest_line_release\": Version(\"1.2.2\"),\n \"latest_overall_release\": Version(\"1.4.1\"), # realism!\n }\n )\n )\n assert versions == (Version(\"1.2.2\"), Version(\"1.2.3\"))\n\n def next_minor_of_feature_release(self):\n versions = _latest_and_next_version(\n Lexicon(\n {\n \"release_type\": Release.FEATURE,\n \"latest_line_release\": None, # realism!\n \"latest_overall_release\": Version(\"1.2.2\"),\n }\n )\n )\n assert versions == (Version(\"1.2.2\"), Version(\"1.3.0\"))\n\n\n# Multi-dimensional scenarios, in relatively arbitrary nesting order:\n# - what type of release we're talking about (based on branch name)\n# - whether there appear to be unreleased issues in the changelog\n# - comparison of version file contents w/ latest release in changelog\n# TODO: ... (pypi release, etc)\n\nsupport_dir = path.join(path.dirname(__file__), \"_support\")\n\n# Sentinel for targeted __import__ mocking. Is a string so that it can be\n# expected in tests about the version file, etc.\n# NOTE: needs to not shadow any real imported module name!\nFAKE_PACKAGE = \"fakey_mcfakerson_not_real_in_any_way\"\n\n# NOTE: can't easily slap this on the test class itself due to using inner\n# classes. If we can get the inner classes to not only copy attributes but also\n# decorators (seems unlikely?), we could organize more \"naturally\".\n# NOTE: OTOH, it's actually nice to use this in >1 top level class, so...meh?\n@contextmanager\ndef _mock_context(self):\n \"\"\"\n Context manager for a mocked Invoke context + other external patches.\n\n Specifically:\n\n - Examine test class attributes for configuration; this allows easy\n multidimensional test setup.\n - Where possible, the code under test relies on calling shell commands via\n the Context object, so we pass in a MockContext for that.\n - Where not possible (eg things which must be Python-level and not\n shell-level, such as version imports), mock with the 'mock' lib as usual.\n\n :yields:\n an `invoke.context.MockContext` created & modified as described above.\n \"\"\"\n #\n # Generate config & context from attrs\n #\n\n changelog_file = \"{}.rst\".format(self._changelog)\n config = Config(\n overrides={\n \"packaging\": {\n \"changelog_file\": path.join(support_dir, changelog_file),\n \"package\": FAKE_PACKAGE,\n }\n }\n )\n tag_output = \"\"\n if hasattr(self, \"_tags\"):\n tag_output = \"\\n\".join(self._tags) + \"\\n\"\n # NOTE: Result first posarg is stdout string data.\n run_results = {\n # Branch detection\n \"git rev-parse --abbrev-ref HEAD\": self._branch,\n # Changelog update action - just here so it can be called\n re.compile(r\"\\$EDITOR.*\"): True,\n # Git tags\n \"git tag\": tag_output,\n # Git status/commit/tagging\n re.compile(\"git tag .*\"): True,\n re.compile(\"git commit.*\"): True,\n # NOTE: some tests will need to override this, for now default to a\n # result that implies a commit is needed\n 'git status --porcelain | egrep -v \"^\\\\?\"': Result(\n \"M somefile\", exited=0\n ),\n }\n context = MockContext(config=config, run=run_results, repeat=True)\n\n #\n # Execute converge() inside a mock environment\n #\n\n # Allow targeted import mocking, leaving regular imports alone.\n real_import = __import__\n\n def fake_import(*args, **kwargs):\n if args[0] is not FAKE_PACKAGE:\n return real_import(*args, **kwargs)\n return Mock(_version=Mock(__version__=self._version))\n\n import_patcher = patch(\"builtins.__import__\", side_effect=fake_import)\n\n with import_patcher:\n yield context\n\n\ndef _mock_status(self):\n with _mock_context(self) as c:\n return status(c)\n\n\n@trap\ndef _expect_actions(self, *actions):\n _mock_status(self)\n stdout = sys.stdout.getvalue()\n for action in actions:\n # Check for action's text value in the table which gets printed.\n # (Actual table formatting is tested in an individual test.)\n err = \"Didn't find {} in stdout:\\n\\n{}\".format(action, stdout)\n assert action.value in stdout, err\n\n\nclass status_:\n class overall_behavior:\n _branch = \"1.1\"\n _changelog = \"unreleased_1.1_bugs\"\n _version = \"1.1.1\"\n _tags = (\"1.1.0\", \"1.1.1\")\n\n @trap\n def displays_expectations_and_component_statuses(self):\n _mock_status(self)\n\n # TODO: make things more organic/specific/less tabular:\n #\n # current git branch: xxx (implies type yyy)\n # changelog: xxx\n # so the next release would be: a.b.c (or: 'so the release we're\n # cutting/expecting is a.b.c')\n # version file: \n # git tag: (maybe including\n # latest that is found? that's extra logic...)\n # etc...\n\n parts = dict(\n changelog=Changelog.NEEDS_RELEASE.value,\n version=VersionFile.NEEDS_BUMP.value,\n tag=Tag.NEEDS_CUTTING.value,\n )\n for part in parts:\n parts[part] = re.escape(parts[part])\n parts[\"header_footer\"] = r\"-+( +-+)?\"\n # NOTE: forces impl to follow specific order, which is good\n regex = r\"\"\"\n{header_footer}\nChangelog +{changelog}\nVersion +{version}\nTag +{tag}\n{header_footer}\n\"\"\".format(\n **parts\n ).strip()\n output = sys.stdout.getvalue()\n err = \"Expected:\\n\\n{}\\n\\nGot:\\n\\n{}\".format(regex, output)\n err += \"\\n\\nRepr edition...\\n\\n\"\n err += \"Expected:\\n\\n{!r}\\n\\nGot:\\n\\n{!r}\".format(regex, output)\n assert re.match(regex, output) is not None, err\n\n @trap # just for cleaner test output\n def returns_lexica_for_reuse(self):\n actions = Lexicon(\n changelog=Changelog.NEEDS_RELEASE,\n version=VersionFile.NEEDS_BUMP,\n tag=Tag.NEEDS_CUTTING,\n all_okay=False,\n )\n found_actions, found_state = _mock_status(self)\n assert found_actions == actions\n # Spot check state, don't need to check whole thing...\n assert found_state.branch == self._branch\n assert found_state.latest_version == Version(\"1.1.1\")\n assert found_state.tags == [Version(x) for x in self._tags]\n\n # TODO: I got this attribute jazz working in pytest but see if there is a\n # 'native' pytest feature that works better (while still in conjunction\n # with nested tasks, ideally)\n class release_line_branch:\n _branch = \"1.1\"\n\n class unreleased_issues:\n _changelog = \"unreleased_1.1_bugs\"\n\n class file_version_equals_latest_in_changelog:\n _version = \"1.1.1\"\n\n class tags_only_exist_for_past_releases:\n _tags = (\"1.1.0\", \"1.1.1\")\n\n def changelog_release_version_update_tag_update(self):\n _expect_actions(\n self,\n Changelog.NEEDS_RELEASE,\n VersionFile.NEEDS_BUMP,\n Tag.NEEDS_CUTTING,\n )\n\n class version_file_is_newer:\n _version = \"1.1.2\"\n\n class tags_only_exist_for_past_releases:\n _tags = (\"1.1.0\", \"1.1.1\")\n\n def changelog_release_version_okay_tag_update(self):\n _expect_actions(\n self,\n Changelog.NEEDS_RELEASE,\n VersionFile.OKAY,\n Tag.NEEDS_CUTTING,\n )\n\n class changelog_version_is_newer:\n _version = \"1.1.0\"\n # Undefined situation - unsure how/whether to test\n\n class no_unreleased_issues:\n _changelog = \"no_unreleased_1.1_bugs\"\n\n class file_version_equals_latest_in_changelog:\n _version = \"1.1.2\"\n\n class tag_for_new_version_present:\n _tags = (\"1.1.0\", \"1.1.1\", \"1.1.2\")\n\n def no_updates_necessary(self):\n _expect_actions(\n self, Changelog.OKAY, VersionFile.OKAY, Tag.OKAY\n )\n\n class tag_for_new_version_missing:\n _tags = (\"1.1.0\", \"1.1.1\")\n\n def tag_needs_cutting_still(self):\n _expect_actions(\n self,\n Changelog.OKAY,\n VersionFile.OKAY,\n Tag.NEEDS_CUTTING,\n )\n\n class version_file_out_of_date:\n _version = \"1.1.1\"\n\n class tag_missing:\n _tags = (\"1.1.0\", \"1.1.1\") # no 1.1.2\n\n def changelog_okay_version_needs_bump_tag_needs_cut(self):\n _expect_actions(\n self,\n Changelog.OKAY,\n VersionFile.NEEDS_BUMP,\n Tag.NEEDS_CUTTING,\n )\n\n # TODO: as in other TODOs, tag can't be expected to exist/be up\n # to date if any other files are also not up to date. so tag\n # present but version file out of date, makes no sense, would\n # be an error.\n\n class version_file_is_newer:\n _version = \"1.1.3\"\n\n def both_technically_okay(self):\n skip() # see TODO below\n _expect_actions(\n self,\n # TODO: display a 'warning' state noting that your\n # version outpaces your changelog despite your\n # changelog having no unreleased stuff in it. Still\n # \"Okay\" (no action needed), not an error per se, but\n # still \"strange\".\n Changelog.OKAY,\n VersionFile.OKAY,\n )\n\n class main_branch:\n _branch = \"main\"\n\n class unreleased_issues:\n _changelog = \"unreleased_1.x_features\"\n\n class file_version_equals_latest_in_changelog:\n _version = \"1.0.1\"\n\n class latest_tag_same_as_file_version:\n _tags = (\"1.0.0\", \"1.0.1\")\n\n def changelog_release_version_update_tag_cut(self):\n # TODO: do we want some sort of \"and here's _what_ you\n # ought to be adding as the new release and/or version\n # value\" aspect to the actions? can leave up to user\n # for now, but, more automation is better.\n _expect_actions(\n self,\n Changelog.NEEDS_RELEASE,\n VersionFile.NEEDS_BUMP,\n Tag.NEEDS_CUTTING,\n )\n\n # TODO: if there's somehow a tag present for a release as yet\n # uncut...which makes no sense as changelog still has no\n # release. Would represent error state!\n\n # TODO: what if the version file is newer _but not what it needs to\n # be for the branch_? e.g. if it was 1.0.2 here (where latest\n # release is 1.0.1 but branch (main) implies desire is 1.1.0)?\n\n class version_file_is_newer:\n _version = \"1.1.0\"\n\n class new_tag_not_present:\n _tags = (\"1.0.1\",)\n\n def changelog_release_version_okay(self):\n _expect_actions(\n self,\n # TODO: same as above re: suggesting the release\n # value to the edit step\n Changelog.NEEDS_RELEASE,\n VersionFile.OKAY,\n Tag.NEEDS_CUTTING,\n )\n\n class changelog_version_is_newer:\n _version = \"1.2.0\"\n # TODO: as with bugfix branches, this is undefined, except here\n # it's even moreso because...well it's even more wacky. why\n # would we have anything >1.1.0 when the changelog itself only\n # even goes up to 1.0.x??\n\n class no_unreleased_issues:\n _changelog = \"no_unreleased_1.x_features\"\n\n class file_version_equals_latest_in_changelog:\n _version = \"1.1.0\"\n\n class tag_present:\n _tags = (\"1.0.2\", \"1.1.0\")\n\n def all_okay(self):\n _expect_actions(\n self, Changelog.OKAY, VersionFile.OKAY, Tag.OKAY\n )\n\n class tag_missing:\n _tags = \"1.0.2\"\n\n def changelog_and_version_okay_tag_needs_cut(self):\n _expect_actions(\n self,\n Changelog.OKAY,\n VersionFile.OKAY,\n Tag.NEEDS_CUTTING,\n )\n\n class undefined_branch:\n _branch = \"whatever\"\n _changelog = \"nah\"\n _tags = (\"nope\",)\n\n @raises(UndefinedReleaseType)\n def raises_exception(self):\n _mock_status(self)\n\n\ndef _confirm(which):\n path = \"invocations.packaging.release.confirm\"\n\n def _wrapper(f):\n return trap(patch(path, return_value=which)(f))\n\n return _wrapper\n\n\n_confirm_true = _confirm(True)\n_confirm_false = _confirm(False)\n\n\n# This is shit but I'm too tired and angry right now to give a fuck.\ndef _run_prepare(c, mute=True, **kwargs):\n try:\n return prepare(c, **kwargs)\n except Exit:\n if not mute:\n raise\n\n\nclass prepare_:\n\n # NOTE: mostly testing the base case of 'everything needs updating',\n # all the permutations are tested elsewhere.\n _branch = \"1.1\"\n _changelog = \"unreleased_1.1_bugs\"\n _version = \"1.1.1\"\n _tags = (\"1.1.0\",)\n\n @_confirm_false\n def displays_status_output(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n output = sys.stdout.getvalue()\n for action in (\n Changelog.NEEDS_RELEASE,\n VersionFile.NEEDS_BUMP,\n Tag.NEEDS_CUTTING,\n ):\n err = \"Didn't see '{}' text in status output!\".format(action.name)\n assert action.value in output, err\n\n @patch(\"invocations.packaging.release.status\")\n def short_circuits_when_no_work_to_do(self, status):\n status.return_value = Lexicon(all_okay=True), Lexicon()\n with _mock_context(self) as c:\n # True retval, one call to status(), and no barfing on lack of\n # run() mocking, all point to the short circuit happening\n assert _run_prepare(c) is True\n assert status.call_count == 1\n\n @trap\n @patch(\"invocations.console.input\", return_value=\"no\")\n def prompts_before_taking_action(self, mock_input):\n with _mock_context(self) as c:\n _run_prepare(c)\n assert mock_input.call_args[0][0] == \"Take the above actions? [Y/n] \"\n\n @_confirm_false\n def if_prompt_response_negative_no_action_taken(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n # TODO: move all action-y code into subroutines, then mock them and\n # assert they were never called?\n # Expect that only the status-y run() calls were made.\n assert c.run.call_count == 2\n commands = [x[0][0] for x in c.run.call_args_list]\n assert commands[0].startswith(\"git rev-parse\")\n assert commands[1].startswith(\"git tag\")\n\n @_confirm_true\n def opens_EDITOR_with_changelog_when_it_needs_update(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n # Grab changelog path from the context config, why not\n path = c.config.packaging.changelog_file\n # TODO: real code should probs expand EDITOR explicitly so it can\n # run w/o a shell wrap / require a full env?\n cmd = \"$EDITOR {}\".format(path)\n c.run.assert_any_call(cmd, pty=True, hide=False, dry=False)\n\n @_confirm_true\n def opens_EDITOR_with_version_file_when_it_needs_update(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n path = \"{}/_version.py\".format(FAKE_PACKAGE)\n # TODO: real code should probs expand EDITOR explicitly so it can\n # run w/o a shell wrap / require a full env?\n cmd = \"$EDITOR {}\".format(path)\n c.run.assert_any_call(cmd, pty=True, hide=False, dry=False)\n\n @_confirm_true\n def commits_and_adds_git_tag_when_needs_cutting(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n version = \"1.1.2\" # as changelog has issues & prev was 1.1.1\n # Ensure the commit necessity test happened. (Default mock_context\n # sets it up to result in a commit being necessary.)\n check = 'git status --porcelain | egrep -v \"^\\\\?\"'\n c.run.assert_any_call(check, hide=True, warn=True)\n commit = 'git commit -am \"Cut {}\"'.format(version)\n tag = 'git tag -a {} -m \"\"'.format(version)\n for cmd in (commit, tag):\n c.run.assert_any_call(cmd, hide=False, dry=False, echo=True)\n\n @_confirm_true\n def does_not_commit_if_no_commit_necessary(self, _):\n with _mock_context(self) as c:\n # Set up for a no-commit-necessary result to check command\n check = 'git status --porcelain | egrep -v \"^\\\\?\"'\n c.set_result_for(\"run\", check, Result(\"\", exited=1))\n _run_prepare(c)\n # Expect NO git commit\n commands = [x[0][0] for x in c.run.call_args_list]\n assert not any(x.startswith(\"git commit\") for x in commands)\n # Expect git tag\n c.run.assert_any_call(\n 'git tag -a 1.1.2 -m \"\"', hide=False, dry=False, echo=True\n )\n\n class final_status_check:\n @_confirm_true\n @patch(\"invocations.packaging.release.status\")\n def run_twice_when_not_short_circuiting(self, status, _):\n status.side_effect = [\n (\n Lexicon(\n changelog=Changelog.NEEDS_RELEASE,\n version=VersionFile.OKAY,\n tag=Tag.OKAY,\n all_okay=False,\n ),\n Lexicon(),\n ),\n (Lexicon(all_okay=True), Lexicon()),\n ]\n with _mock_context(self) as c:\n # Mute off - want kaboom if Exit raised\n _run_prepare(c, mute=False)\n assert status.call_count == 2\n\n @_confirm_true\n @patch(\"invocations.packaging.release.status\")\n def exits_if_still_not_all_okay(self, status, _):\n status.side_effect = [\n (\n Lexicon(\n changelog=Changelog.NEEDS_RELEASE,\n version=VersionFile.OKAY,\n tag=Tag.OKAY,\n all_okay=False,\n ),\n Lexicon(),\n ),\n (Lexicon(all_okay=False), Lexicon()),\n ]\n with _mock_context(self) as c:\n with pytest.raises(Exit, match=r\"Something went wrong\"):\n _run_prepare(c, mute=False)\n assert status.call_count == 2\n\n class dry_run_prepare:\n @patch(\"invocations.packaging.release.status\")\n def exits_early_like_non_dry_run_on_all_okay(self, status):\n status.return_value = Lexicon(all_okay=True), Lexicon()\n with _mock_context(self) as c:\n assert _run_prepare(c, dry_run=True) is True\n assert status.call_count == 1\n\n @patch(\"invocations.packaging.release.status\")\n def does_not_fail_fast_on_bad_release_type(self, status):\n status.side_effect = UndefinedReleaseType\n with _mock_context(self) as c:\n _run_prepare(c, dry_run=True)\n\n @patch(\"invocations.console.input\")\n def does_not_prompt_to_confirm(self, mock_input):\n with _mock_context(self) as c:\n _run_prepare(c, dry_run=True)\n assert not mock_input.called\n\n def dry_runs_all_prep_commands(self):\n # Reminder: default state of mocked context is \"everything needs\n # updates\"\n with _mock_context(self) as c:\n _run_prepare(c, dry_run=True)\n dry_runs = [\n x[1][0] for x in c.run.mock_calls if x[2].get(\"dry\", False)\n ]\n for pattern in (\n r\"\\$EDITOR .*\\.rst\",\n r\"\\$EDITOR .*_version\\.py\",\n r\"git commit.*\",\n r\"git tag -a.*\",\n ):\n assert any(re.match(pattern, x) for x in dry_runs)\n\n @patch(\"invocations.packaging.release.status\")\n def does_not_run_final_status_check(self, status):\n # Slight cheat: other actions all actually ok even tho all_okay is\n # false. means no needing to mock the run() calls etc.\n status.return_value = (\n Lexicon(\n changelog=Changelog.OKAY,\n version=VersionFile.OKAY,\n tag=Tag.OKAY,\n all_okay=False,\n ),\n Lexicon(),\n )\n with _mock_context(self) as c:\n _run_prepare(c, dry_run=True)\n # The end step was skipped\n assert status.call_count == 1\n\n # Don't want a full re-enactment of status_ test tree, but do want to spot\n # check that actions not needing to be taken, aren't...\n class lack_of_action:\n _changelog = \"no_unreleased_1.1_bugs\"\n\n @_confirm_true\n def no_changelog_update_needed_means_no_changelog_edit(self, _):\n with _mock_context(self) as c:\n _run_prepare(c)\n # TODO: as with the 'took no actions at all' test above,\n # proving a negative sucks - eventually make this subroutine\n # assert based. Meh.\n path = c.config.packaging.changelog_file\n cmd = \"$EDITOR {}\".format(path)\n err = \"Saw {!r} despite changelog not needing update!\".format(\n cmd\n )\n assert cmd not in [x[0][0] for x in c.run.call_args_list], err\n\n\n# NOTE: yea...this kinda pushes the limits of sane TDD...meh\n# NOTE: possible that the actual codes blessings emits differ based on\n# termcap/etc; consider sucking it up and just calling blessings directly in\n# that case, even though it makes the tests kinda tautological.\n# TODO: yes, when I personally went from TERM=xterm-256color to\n# TERM=screen-256color, that made these tests break! Updating test machinery to\n# account for now, but...not ideal!\nclass component_state_enums_contain_human_readable_values:\n class changelog:\n def okay(self):\n expected = \"\\x1b[32m\\u2714 no unreleased issues\\x1b(B\\x1b[m\"\n assert Changelog.OKAY.value == expected\n\n def needs_release(self):\n expected = \"\\x1b[31m\\u2718 needs :release: entry\\x1b(B\\x1b[m\"\n assert Changelog.NEEDS_RELEASE.value == expected\n\n class version_file:\n def okay(self):\n expected = \"\\x1b[32m\\u2714 version up to date\\x1b(B\\x1b[m\"\n assert VersionFile.OKAY.value == expected\n\n def needs_bump(self):\n expected = \"\\x1b[31m\\u2718 needs version bump\\x1b(B\\x1b[m\"\n assert VersionFile.NEEDS_BUMP.value == expected\n\n class tag:\n def okay(self):\n assert Tag.OKAY.value == \"\\x1b[32m\\u2714 all set\\x1b(B\\x1b[m\"\n\n def needs_cutting(self):\n expected = \"\\x1b[31m\\u2718 needs cutting\\x1b(B\\x1b[m\"\n assert Tag.NEEDS_CUTTING.value == expected\n\n\n@contextmanager\ndef _expect_setuppy(flags, python=\"python\", config=None, yield_rmtree=False):\n kwargs = dict(run=True)\n if config is not None:\n kwargs[\"config\"] = config\n c = MockContext(**kwargs)\n # Make sure we don't actually run rmtree regardless\n with patch(\"invocations.packaging.release.rmtree\") as rmtree:\n if yield_rmtree:\n yield c, rmtree\n else:\n yield c\n c.run.assert_called_once_with(\"{} setup.py {}\".format(python, flags))\n\n\nclass build_:\n _sdist_flags = \"sdist -d dist\"\n _wheel_flags = \"build -b build bdist_wheel -d dist\"\n _both_flags = \"sdist -d dist build -b build bdist_wheel -d dist\"\n _oh_dir = \"sdist -d {0} build -b {1} bdist_wheel -d {0}\".format(\n path.join(\"dir\", \"dist\"), path.join(\"dir\", \"build\")\n )\n\n class sdist:\n def indicates_sdist_builds(self):\n with _expect_setuppy(self._both_flags) as c:\n build(c, sdist=True)\n\n def on_by_default(self):\n with _expect_setuppy(self._both_flags) as c:\n build(c)\n\n def can_be_disabled_via_config(self):\n config = Config(dict(packaging=dict(sdist=False)))\n with _expect_setuppy(self._wheel_flags, config=config) as c:\n build(c)\n\n def kwarg_wins_over_config(self):\n config = Config(dict(packaging=dict(sdist=True)))\n with _expect_setuppy(self._wheel_flags, config=config) as c:\n build(c, sdist=False)\n\n class wheel:\n def indicates_explicit_build_and_wheel(self):\n with _expect_setuppy(self._wheel_flags) as c:\n build(c, sdist=False, wheel=True)\n\n def on_by_default(self):\n with _expect_setuppy(self._wheel_flags) as c:\n build(c, sdist=False)\n\n def can_be_disabled_via_config(self):\n config = Config(dict(packaging=dict(wheel=False)))\n with _expect_setuppy(self._sdist_flags, config=config) as c:\n build(c)\n\n def kwarg_wins_over_config(self):\n config = Config(dict(packaging=dict(wheel=True)))\n with _expect_setuppy(self._sdist_flags, config=config) as c:\n build(c, wheel=False)\n\n @raises(Exit)\n def kabooms_if_sdist_and_wheel_both_False(self):\n build(MockContext(), sdist=False, wheel=False)\n\n class directory:\n def defaults_to_blank_or_cwd(self):\n with _expect_setuppy(self._both_flags) as c:\n build(c)\n\n def if_given_affects_build_and_dist_dirs(self):\n with _expect_setuppy(self._oh_dir) as c:\n build(c, directory=\"dir\")\n\n def may_be_given_via_config(self):\n config = Config(dict(packaging=dict(directory=\"dir\")))\n with _expect_setuppy(self._oh_dir, config=config) as c:\n build(c)\n\n def kwarg_wins_over_config(self):\n config = Config(dict(packaging=dict(directory=\"NOTdir\")))\n with _expect_setuppy(self._oh_dir, config=config) as c:\n build(c, directory=\"dir\")\n\n class python:\n def defaults_to_python(self):\n with _expect_setuppy(self._both_flags, python=\"python\") as c:\n build(c, python=\"python\")\n\n def may_be_overridden(self):\n with _expect_setuppy(self._both_flags, python=\"fython\") as c:\n build(c, python=\"fython\")\n\n def can_be_given_via_config(self):\n config = Config(dict(packaging=dict(python=\"python17\")))\n with _expect_setuppy(\n self._both_flags, config=config, python=\"python17\"\n ) as c:\n build(c)\n\n def kwarg_wins_over_config(self):\n config = Config(dict(packaging=dict(python=\"python17\")))\n with _expect_setuppy(\n self._both_flags, config=config, python=\"python99\"\n ) as c:\n build(c, python=\"python99\")\n\n class clean:\n def _expect_with_rmtree(self):\n return _expect_setuppy(self._both_flags, yield_rmtree=True)\n\n def defaults_to_False_meaning_no_clean(self):\n with self._expect_with_rmtree() as (c, rmtree):\n build(c)\n assert not rmtree.called\n\n def True_means_clean_both_dirs(self):\n with self._expect_with_rmtree() as (c, rmtree):\n build(c, clean=True)\n rmtree.assert_any_call(\"dist\", ignore_errors=True)\n rmtree.assert_any_call(\"build\", ignore_errors=True)\n\n def understands_directory_option(self):\n with _expect_setuppy(self._oh_dir, yield_rmtree=True) as (\n c,\n rmtree,\n ):\n build(c, directory=\"dir\", clean=True)\n rmtree.assert_any_call(\n path.join(\"dir\", \"build\"), ignore_errors=True\n )\n rmtree.assert_any_call(\n path.join(\"dir\", \"dist\"), ignore_errors=True\n )\n\n def may_be_configured(self):\n config = Config(dict(packaging=dict(clean=True)))\n with _expect_setuppy(\n self._both_flags, yield_rmtree=True, config=config\n ) as (c, rmtree):\n build(c)\n rmtree.assert_any_call(\"dist\", ignore_errors=True)\n rmtree.assert_any_call(\"build\", ignore_errors=True)\n\n def kwarg_wins_over_config(self):\n config = Config(dict(packaging=dict(clean=True)))\n with _expect_setuppy(\n self._both_flags, yield_rmtree=True, config=config\n ) as (c, rmtree):\n build(c, clean=False)\n rmtree.assert_any_call(\"dist\", ignore_errors=True)\n rmtree.assert_any_call(\"build\", ignore_errors=True)\n\n\nclass upload_:\n def _check_upload(self, c, kwargs=None, flags=None, extra=None):\n \"\"\"\n Expect/call upload() with common environment and settings/mocks.\n\n Returns the full command constructed, typically for further\n examination.\n \"\"\"\n\n def mkpath(x):\n return path.join(\"somedir\", \"dist\", x)\n\n with patch(\"invocations.packaging.release.glob\") as glob:\n tgz, whl = mkpath(\"foo.tar.gz\"), mkpath(\"foo.whl\")\n glob.side_effect = lambda x: [tgz if x.endswith(\"gz\") else whl]\n # Do the thing!\n upload(c, \"somedir\", **(kwargs or {}))\n glob.assert_any_call(mkpath(\"*.tar.gz\"))\n glob.assert_any_call(mkpath(\"*.whl\"))\n self.files = \"{} {}\".format(whl, tgz)\n cmd = \"twine upload\"\n if flags:\n cmd += \" {}\".format(flags)\n cmd += \" {}\".format(self.files)\n if extra:\n cmd += \" {}\".format(extra)\n return cmd\n\n def twine_uploads_dist_contents_with_wheels_first(self):\n c = MockContext(run=True)\n c.run.assert_called_once_with(self._check_upload(c))\n\n def may_target_alternate_index(self):\n c = MockContext(run=True)\n cmd = self._check_upload(\n c, kwargs=dict(index=\"lol\"), flags=\"--repository lol\"\n )\n c.run.assert_called_once_with(cmd)\n\n @patch(\"builtins.print\")\n def dry_run_just_prints_and_ls(self, print):\n c = MockContext(run=True)\n cmd = self._check_upload(c, kwargs=dict(dry_run=True))\n print.assert_any_call(\"Would publish via: {}\".format(cmd))\n c.run.assert_called_once_with(\"ls -l {}\".format(self.files))\n\n @patch(\"invocations.packaging.release.getpass.getpass\")\n def allows_signing_via_gpg(self, getpass):\n c = MockContext(run=True, repeat=True)\n getpass.return_value = \"super sekrit\"\n twine_upload = self._check_upload(\n c, kwargs=dict(sign=True), extra=\"somedir/dist/*.asc\"\n )\n calls = c.run.mock_calls\n # Looked for gpg\n assert calls[0] == call(\"which gpg\", hide=True, warn=True)\n # Signed wheel\n flags = \"--detach-sign --armor --passphrase-fd=0 --batch --pinentry-mode=loopback\" # noqa\n template = \"gpg {} somedir/dist/foo.{{}}\".format(flags)\n assert calls[1][1][0] == template.format(\"whl\")\n # Spot check: did use in_stream to submit passphrase\n assert \"in_stream\" in calls[1][2]\n # Signed tgz\n assert calls[2][1][0] == template.format(\"tar.gz\")\n # Uploaded (and w/ asc's)\n c.run.assert_any_call(twine_upload)\n\n\nclass _Kaboom(Exception):\n pass\n\n\nclass publish_:\n class base_case:\n def does_all_the_things(self, fakepub):\n c, mocks = fakepub\n # Execution\n publish(c)\n # Unhides stdout\n assert c.config.run.hide is False\n # Build\n mocks.build.assert_called_once_with(\n c, sdist=True, wheel=True, directory=\"tmpdir\"\n )\n # Twine check\n splat = path.join(\"tmpdir\", \"dist\", \"*\")\n mocks.twine_check.assert_called_once_with(dists=[splat])\n # Install test\n mocks.test_install.assert_called_once_with(c, directory=\"tmpdir\")\n # Upload\n mocks.upload.assert_called_once_with(\n c, directory=\"tmpdir\", index=None, sign=False, dry_run=False\n )\n # Tmpdir cleaned up\n mocks.rmtree.assert_called_once_with(\"tmpdir\")\n\n def cleans_up_on_error(self, fakepub):\n c, mocks = fakepub\n mocks.build.side_effect = _Kaboom\n with pytest.raises(_Kaboom):\n publish(MockContext(run=True))\n mocks.rmtree.assert_called_once_with(mocks.mkdtemp.return_value)\n\n def monkeypatches_readme_renderer(self, fakepub):\n # Happens at module load time but is just a data structure change\n import readme_renderer.rst\n\n assert (\n readme_renderer.rst.SETTINGS[\"halt_level\"]\n == Reporter.INFO_LEVEL\n )\n assert (\n readme_renderer.rst.SETTINGS[\"report_level\"]\n == Reporter.INFO_LEVEL\n )\n\n class index:\n def passed_to_upload(self, fakepub):\n c, mocks = fakepub\n publish(c, index=\"dev\")\n assert mocks.upload.call_args[1][\"index\"] == \"dev\"\n\n def honors_config(self, fakepub):\n c, mocks = fakepub\n c.config.packaging = dict(index=\"prod\")\n publish(c)\n assert mocks.upload.call_args[1][\"index\"] == \"prod\"\n\n def kwarg_beats_config(self, fakepub):\n c, mocks = fakepub\n c.config.packaging = dict(index=\"prod\")\n publish(c, index=\"dev\")\n assert mocks.upload.call_args[1][\"index\"] == \"dev\"\n\n class sign:\n def passed_to_upload(self, fakepub):\n c, mocks = fakepub\n publish(c, sign=True)\n assert mocks.upload.call_args[1][\"sign\"] is True\n\n def honors_config(self, fakepub):\n c, mocks = fakepub\n c.config.packaging = dict(sign=True)\n publish(c)\n assert mocks.upload.call_args[1][\"sign\"] is True\n\n def kwarg_beats_config(self, fakepub):\n c, mocks = fakepub\n c.config.packaging = dict(sign=False)\n publish(c, sign=True)\n assert mocks.upload.call_args[1][\"sign\"] is True\n\n class sdist:\n def defaults_True_and_passed_to_build(self, fakepub):\n c, mocks = fakepub\n publish(c)\n assert mocks.build.call_args[1][\"sdist\"] is True\n\n def may_be_overridden(self, fakepub):\n c, mocks = fakepub\n publish(c, sdist=False)\n assert mocks.build.call_args[1][\"sdist\"] is False\n\n class wheel:\n def defaults_True_and_passed_to_build(self, fakepub):\n c, mocks = fakepub\n publish(c)\n assert mocks.build.call_args[1][\"wheel\"] is True\n\n def may_be_overridden(self, fakepub):\n c, mocks = fakepub\n publish(c, wheel=False)\n assert mocks.build.call_args[1][\"wheel\"] is False\n\n def directory_affects_tmpdir(self, fakepub):\n c, mocks = fakepub\n publish(c, directory=\"explicit\")\n assert not mocks.mkdtemp.called\n assert mocks.build.call_args[1][\"directory\"] == \"explicit\"\n\n class dry_run:\n def causes_tmpdir_cleanup_to_be_skipped(self, fakepub):\n c, mocks = fakepub\n publish(c, dry_run=True)\n assert not mocks.rmtree.called\n\n def causes_tmpdir_cleanup_to_be_skipped_on_exception(self, fakepub):\n c, mocks = fakepub\n mocks.build.side_effect = _Kaboom\n with pytest.raises(_Kaboom):\n publish(c, dry_run=True)\n assert not mocks.rmtree.called\n\n def passed_to_upload(self, fakepub):\n c, mocks = fakepub\n publish(c, dry_run=True)\n assert mocks.upload.call_args[1][\"dry_run\"] is True\n\n\nclass test_install_:\n def installs_all_archives_in_fresh_venv_with_matching_pip(self, install):\n c = install\n # Basic test, uses guts of fixture\n install_test_task(c, directory=\"whatever\")\n # Import attempt was made\n c.run.assert_any_call(\"tmpdir/bin/python -c 'import foo'\")\n\n def skips_import_test_when_asked_to(self, install):\n c = install\n install_test_task(c, directory=\"whatever\", skip_import=True)\n # No import attempt\n for unwanted in (call(\"tmpdir/bin/python -c 'import foo'\"),):\n assert unwanted not in c.run.mock_calls\n\n def does_mypy_import_when_py_typed_present(self, install):\n c = install\n # Mock out the pathlib exists call as positive (default is negative)\n c.set_exists(True)\n install_test_task(c, directory=\"whatever\")\n # Mypy installed and executed\n c.run.assert_any_call(\"tmpdir/bin/pip install mypy\")\n # NOTE: not actually the same 2 tmpdirs here but I'm already so sick of\n # all these mocks, jeez\n c.run.assert_any_call(\"cd tmpdir && tmpdir/bin/mypy -c 'import foo'\")\n\n def skips_mypy_import_when_no_py_typed(self, install):\n c = install\n # Mock out the pathlib exists call as explicitly false, why not\n c.set_exists(False)\n install_test_task(c, directory=\"whatever\")\n # Mypy NOT installed or executed\n for unwanted in (\n call(\"tmpdir/bin/pip install mypy\"),\n call(\"cd tmpdir && tmpdir/bin/mypy -c 'import foo'\"),\n ):\n assert unwanted not in c.run.mock_calls\n\n def skips_mypy_import_when_skipping_regular_import(self, install):\n c = install\n c.set_exists(True)\n install_test_task(c, directory=\"whatever\", skip_import=True)\n # Mypy NOT installed or executed\n for unwanted in (\n call(\"tmpdir/bin/python -c 'import foo'\"),\n call(\"tmpdir/bin/pip install mypy\"),\n call(\"cd tmpdir && tmpdir/bin/mypy -c 'import foo'\"),\n ):\n assert unwanted not in c.run.mock_calls\n\n\nclass push_:\n def pushes_with_follow_tags(self):\n \"git-pushes with --follow-tags\"\n c = MockContext(run=True)\n push(c)\n c.run.assert_called_once_with(\"git push --follow-tags --no-verify\")\n\n @trap\n @patch(\"invocations.environment.os.environ\", dict(CIRCLECI=\"\"))\n def honors_dry_run(self):\n c = MockContext(run=True)\n push(c, dry_run=True)\n c.run.assert_called_once_with(\n \"git push --follow-tags --no-verify --dry-run\", echo=True\n )\n\n @trap\n @patch(\"invocations.environment.os.environ\", dict(CIRCLECI=\"true\"))\n def dry_run_dry_runs_the_invocation_itself_if_in_ci(self):\n c = MockContext(run=True)\n push(c, dry_run=True)\n c.run.assert_called_once_with(\n \"git push --follow-tags --no-verify\", echo=True, dry=True\n )\n\n @trap\n @patch(\"invocations.environment.os.environ\", dict(CIRCLECI=\"true\"))\n def ci_check_only_applies_to_dry_run_behavior(self):\n # Yes, technically already covered by base tests, but...\n c = MockContext(run=True)\n push(c, dry_run=False)\n c.run.assert_called_once_with(\"git push --follow-tags --no-verify\")\n\n\nclass all_task:\n @patch(\"invocations.packaging.release.prepare\")\n @patch(\"invocations.packaging.release.publish\")\n @patch(\"invocations.packaging.release.push\")\n def runs_primary_workflow(self, push, publish, prepare):\n c = MockContext(run=True)\n all_(c)\n # TODO: this doesn't actually prove order of operations. not seeing an\n # unhairy way to do that, but not really that worried either...:P\n prepare.assert_called_once_with(c, dry_run=False)\n publish.assert_called_once_with(c, dry_run=False)\n push.assert_called_once_with(c, dry_run=False)\n\n @patch(\"invocations.packaging.release.prepare\")\n @patch(\"invocations.packaging.release.publish\")\n @patch(\"invocations.packaging.release.push\")\n def passes_through_dry_run_flag(self, push, publish, prepare):\n c = MockContext(run=True)\n all_(c, dry_run=True)\n prepare.assert_called_once_with(c, dry_run=True)\n publish.assert_called_once_with(c, dry_run=True)\n push.assert_called_once_with(c, dry_run=True)\n\n def bound_to_name_without_underscore(self):\n assert all_.name == \"all\"\n\n\nclass namespace:\n def contains_all_tasks(self):\n names = \"\"\"\n all\n build\n prepare\n publish\n push\n status\n test-install\n upload\n \"\"\".split()\n assert set(release_ns.task_names) == set(names)\n\n def all_is_default_task(self):\n assert release_ns.default == \"all\"\n\n def hides_stdout_by_default(self):\n assert release_ns.configuration()[\"run\"][\"hide\"] == \"stdout\"\n","repo_name":"pyinvoke/invocations","sub_path":"tests/packaging/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":49393,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"22"} +{"seq_id":"41072316515","text":"import sys\nimport os\nimport random\nimport datetime\n\nDROPBOX_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))\nsys.path.append(DROPBOX_DIR)\n\nfrom sMDT import db\n\ndef keyDay(record):\n return record.date.day\n\ndef getFirstTensionRecord(records):\n earliestTensionDate = datetime.datetime(year=datetime.MAXYEAR, month=1, day=1)\n for record in records:\n if record.date < earliestTensionDate:\n earliestTensionDate = record.date\n firstTensionRecord = None\n firstTensionDate = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)\n for record in records:\n if record.date.day == earliestTensionDate.day and record.date.month == earliestTensionDate.month:\n if record.date > firstTensionDate:\n firstTensionRecord = record\n firstTensionDate = record.date\n return firstTensionRecord\n\n\ndef getLastTension(records):\n lastRecordDate = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)\n lastRecord = None\n for record in records:\n if record.date > lastRecordDate:\n lastRecord = record\n lastRecordDate = lastRecord.date\n return lastRecord\n\ndef swageDateKey(tube):\n try:\n date = tube.swage.get_record().date\n if date == None:\n return datetime.datetime(year=datetime.MINYEAR, month=1, day=1)\n else:\n return date\n except:\n return datetime.datetime(year=datetime.MINYEAR, month=1, day=1)\n\ndatab = db.db()\ntubes = datab.get_tubes()\ntubes.sort(key=swageDateKey, reverse=True)\n\nf = open('database.csv','w')\nf.write('Barcode,Status [Pass/Incomplete/Fail],First Tension [g],First Frequency [Hz],First Tension Date [YYYY-MM-DD HH:MM:SS],')\nf.write('Last Tension [g],Last Frequency [Hz],Last Tension Date [YYYY-MM-DD HH:MM:SS],Leak Rate,')\nf.write('Leak Date [YYYY-MM-DD HH:MM:SS],Dark Current [nA],Dark Current Date,Raw Length [mm],')\nf.write('Swage Length [mm], Swage Date [YYYY-MM-DD HH:MM:SS]\\n')\n\nfor tube in tubes:\n barcode = tube.get_ID()\n status = tube.status().name\n ###### Tension Data\n # Returns the last tension record on the first day it was tensioned\n try:\n record = getFirstTensionRecord(tube.tension.get_record(mode='all'))\n first_tension = record.tension\n first_frequency = record.frequency\n first_tension_date = record.date \n except:\n first_tension = None\n first_frequency = None\n first_tension_date = None\n\n # Returns the last tension record, this is defined as the second tension\n try:\n lastRecord = getLastTension(tube.tension.get_record(mode='all'))\n last_tension = lastRecord.tension\n last_frequency = lastRecord.frequency\n last_tension_date = lastRecord.date\n except:\n last_tension = None\n last_frequency = None\n last_tension_date = None\n\n ###### Leak Data\n try:\n leak = tube.leak.get_record().leak_rate\n leak_date = tube.leak.get_record().date\n except:\n leak = None\n leak_date = None\n\n ###### Dark current\n try:\n current = tube.current.get_record().dark_current\n current_date = tube.current.get_record().date\n except:\n current = None\n current_date = None\n\n ###### Swage Info\n try:\n swage_date = tube.swage.get_record().date\n raw_length = tube.swage.get_record().raw_length\n swage_length = tube.swage.get_record().swage_length\n except:\n swage_date = None\n raw_length = None\n swage_length = None\n f.write(f'{barcode},{status},{first_tension},{first_frequency},{first_tension_date},{last_tension},{last_frequency},{last_tension_date},')\n f.write(f'{leak},{leak_date},{current},{current_date},{raw_length},{swage_length},{swage_date}\\n')\n\nf.close()","repo_name":"dravinflores/smdt","sub_path":"utilities/create_csv_file.py","file_name":"create_csv_file.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"20609787707","text":"from Console import Console\nfrom Constants import Constants\nimport http.server\nimport os\nimport os.path\n\nclass Webservice(http.server.BaseHTTPRequestHandler):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Webservice, self).__init__(*args, **kwargs)\n\n\tdef do_GET(self):\n\t\tif os.path.isfile(os.getcwd() + self.server.constants.root + self.path):\n\t\t\tending = self.path.split(\".\")[-1]\n\t\t\ttype = \"text/html\"\n\t\t\tif ending == \"html\":\n\t\t\t\ttype = \"text/html\"\n\t\t\telif ending == \"js\":\n\t\t\t\ttype = \"text/javascript\"\n\t\t\telif ending == \"css\":\n\t\t\t\ttype = \"text/css\"\n\t\t\tf = open(os.getcwd() + self.server.constants.root + self.path, 'rb')\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-Type', type)\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(f.read())\n\t\telse:\n\t\t\tself.send_response(404)\n\t\t\tself.send_header('Content-Type', 'text/html')\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(bytes('404 - Not Found

    404 - Page Not Found

    ', 'UTF-8'))\n\t\t\n\tdef log_message(self, format, *args):\n\t\tif self.command == \"GET\":\n\t\t\tself.server.console.request(self.path, self.client_address[0], self.client_address[1])\n\t\telse:\n\t\t\tself.server.console.unknown(args, self.client_address[0], self.client_address[1])","repo_name":"JosephGarrone/PyServer","sub_path":"Webservice.py","file_name":"Webservice.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1064688953","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\ndef g(t, sigma=1.0, delta=0.0):\n return np.exp(-(t - delta)**2 / (2.0 * sigma**2)) / ((2.0 * np.pi)**0.5 * sigma)\n\n\ndef sgnl(t, t_r, t_d):\n return (np.exp(-t/t_d) - np.exp(-t/t_r)) / (t_d - t_r)\n\n\nif __name__ == \"__main__\":\n tt = np.arange(0, 100, 0.01)\n sigma, delta = 0.5, 10.0\n t_r, t_d = 5.0, 50.0\n model_sgnl = signal.convolve(sgnl(tt, t_r, t_d),\n g(tt, sigma, delta),\n mode=\"same\") / sum(g(tt))\n t_r2, t_d2 = 3.0, 10.0\n model_sgnl2 = signal.convolve(sgnl(tt, t_r2, t_d2),\n g(tt, sigma, delta),\n mode=\"same\") / sum(g(tt))\n \n print(\"S(sgnl, {}, {}) = {:e}\\tS(model) = {:e}\\nS(sgnl, {}, {}) = {:e}\\tS(model) = {:e}\".\n format(t_r, t_d, np.sum(sgnl(tt, t_r, t_d)), np.sum(model_sgnl),\n t_r2, t_d2, np.sum(sgnl(tt, t_r2, t_d2)), np.sum(model_sgnl2)))\n \n plt.plot(sgnl(tt, t_r, t_d), label=\"1st sgnl\")\n plt.plot(sgnl(tt, t_r2, t_d2), label=\"2nd sgnl\")\n plt.plot(model_sgnl, label=\"Model sgnl\")\n plt.plot(model_sgnl2, label=\"Model sgnl2\")\n plt.legend(loc=\"upper right\")\n plt.show()\n \n #plt.plot(g(tt, sigma, delta))\n #plt.plot(sgnl(tt, t_r, t_d))\n #plt.plot(model_sgnl)\n #plt.show()\n\n \n \"\"\"\n fig, (ax_g, ax_signal, ax_filt) = plt.subplots(3, 1, sharex=True)\n ax_g.plot(g(tt))\n ax_g.set_title(\"Gauss IRF\")\n ax_signal.plot(sgnl(tt, t_r, t_d))\n ax_signal.set_title(\"Signal\")\n fig.tight_layout()\n fig.show()\n \"\"\"\n","repo_name":"F1sher/hpge_dsp","sub_path":"hpge_det/model_signal.py","file_name":"model_signal.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1529348882","text":"import json\nimport pickle\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\ndef get_mean_feats2(img_feats, tracks_ids):\n mean_gallery = []\n for k in tracks_ids:\n mean_gallery.append(img_feats[(k,)])\n mean_gallery = np.vstack(mean_gallery)\n mean_gallery = torch.from_numpy(mean_gallery)\n mean_gallery = F.normalize(mean_gallery, p=2, dim=-1).numpy()\n return mean_gallery\n\nclass SimilarityToMRR(object):\n def __init__(self,track_json_path):\n with open(track_json_path) as f:\n tracks = json.load(f)\n self.tracks = tracks\n self.tracks_ids = list(tracks.keys())\n self.img_feats = dict()\n self.nlp_feats = dict()\n\n def get_mean_img_feats(self, img_feats):\n mean_gallery = []\n for k in self.tracks_ids:\n tmp = []\n if k not in img_feats:\n continue\n for fid in img_feats[k]:\n tmp.append(img_feats[k][fid])\n tmp = np.vstack(tmp)\n tmp = np.mean(tmp, 0)\n mean_gallery.append(tmp)\n mean_gallery = np.vstack(mean_gallery)\n return mean_gallery\n\n def calculuate_similarity(self):\n # print(len(self.img_feats.keys()))\n nlp_feats = self.nlp_feats\n # print(\"img num: \", len(self.img_feats))\n # print(\"nlp num: \", len(self.nlp_feats))\n img_feats = [self.get_mean_img_feats(self.img_feats)]\n results = dict()\n weights = 1\n for query in self.tracks_ids:\n if query not in self.img_feats:\n continue\n score = 0.\n # for i in range(len(nlp_feats)):\n q = nlp_feats[query]\n score += np.mean(np.matmul(q, img_feats[0].T), 0)\n index = np.argsort(score)[::-1]\n results[query]=[]\n for i in index:\n results[query].append(self.tracks_ids[i])\n return results\n\n def calculate_mrr(self, results):\n recall_5 = 0.\n recall_10 = 0.\n mrr = 0.\n if len(results) == 0:\n return mrr, recall_5, recall_10\n for query in results:\n result = results[query]\n target = query\n try:\n rank = result.index(target)\n except ValueError:\n rank = len(results.keys())\n rank += 1\n if rank < 10:\n recall_10 += 1\n if rank < 5:\n recall_5 += 1\n mrr += 1.0 / rank\n recall_5 /= len(results)\n recall_10 /= len(results)\n mrr /= len(results)\n return mrr, recall_5, recall_10\n\n def get_mrr(self):\n return self.calculate_mrr(self.calculuate_similarity())\n\n \n def update_img_feats(self,visual_embeds):\n self.img_feats.update(visual_embeds)\n\n def update_nlp_feats(self,textual_embeds):\n self.nlp_feats.update(textual_embeds)\n\n def reset(self):\n self.img_feats = dict()\n self.nlp_feats = dict()\n ","repo_name":"zef1611/AIC23_NLRetrieval_HCMIU_CVIP","sub_path":"src/metrics/mrr.py","file_name":"mrr.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"22"} +{"seq_id":"29857824872","text":"import os\nimport io\nimport functools\n\nfrom .utils import inference_result\n\nASSETS_DIR = os.path.join(os.path.dirname(__file__), \"assets\")\n\n\n@functools.lru_cache(maxsize=None)\ndef _fallback_file() -> bytes:\n with open(os.path.join(ASSETS_DIR, \"dummy_file.glb\"), \"rb\") as f:\n file_data = f.read()\n return file_data\n\n\n@functools.lru_cache(maxsize=None)\ndef _fallback_image() -> bytes:\n with open(os.path.join(ASSETS_DIR, \"dummy_thumbnail.png\"), \"rb\") as f:\n thumbnail_data = f.read()\n return thumbnail_data\n\n\ndef fallback_inference() -> inference_result:\n file_data, thumbnail_data = _fallback_file(), _fallback_image()\n return inference_result(\n file=io.BytesIO(file_data), thumbnail=io.BytesIO(thumbnail_data),\n voxelized_file=io.BytesIO(file_data), voxelized_thumbnail=io.BytesIO(thumbnail_data)\n )\n","repo_name":"studio-YAIVERSE/Backend","sub_path":"studio_YAIVERSE/apps/main/pytorch/fallback.py","file_name":"fallback.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"40083061032","text":"import pygame\r\nimport random, time\r\nfrom collections import deque, namedtuple\r\nimport typing\r\nfrom typing import Self\r\nfrom functools import partial\r\n\r\npygame.init()\r\n\r\nFT_FONT_NAME = 'microsoftyahei'\r\nFT_font = {25:pygame.font.SysFont(FT_FONT_NAME, 25)}\r\nCOLOR = {\"red\":(250, 50, 30), \"blue\":(50, 150, 250), \"white\":(240, 240, 240), \"grey\":(100,100,100)}\r\ndef get_FTfont(size:int, fontname:str=None):\r\n if size in FT_font:\r\n return FT_font[size]\r\n else:\r\n FT_font[size] = pygame.font.SysFont(FT_FONT_NAME, size)\r\n return FT_font[size]\r\n\r\ndef fuc_none(*arg, **args):\r\n pass\r\n\r\nclass Scene():\r\n def __init__(self) -> None:\r\n self.gameobj = {}\r\n pass\r\n def getobj(self, name):\r\n if name in self.gameobj:\r\n return self.gameobj[name]\r\n def addobj(self, gameobject):\r\n if not gameobject.name in self.gameobj:\r\n self.gameobj[gameobject.name] = gameobject\r\n def addobjs(self, *gameobjects):\r\n for gameobject in gameobjects:\r\n self.addobj(gameobject)\r\n def popobj(self, name):\r\n if name in self.gameobj:\r\n return self.gameobj.pop(name)\r\n def draw(self, screen):\r\n for name in self.gameobj:\r\n self.gameobj[name].draw(screen)\r\n def eventact(self, event:pygame.event.Event):\r\n for name in self.gameobj:\r\n self.gameobj[name].eventact(event)\r\n \r\nclass FloatBox_Scene(Scene):\r\n def __init__(self) -> None:\r\n super().__init__()\r\n self.now = None\r\n def eventact(self, event: pygame.event.Event):\r\n for name in self.gameobj:\r\n self.gameobj[name].eventact(event)\r\n if event.type == pygame.MOUSEBUTTONDOWN and self.gameobj[name].rect.collidepoint(*event.pos):\r\n self.now = self.gameobj[name]\r\n\r\nclass UIBase():\r\n def __init__(self, name:str, scene:Scene, coord:list, anchor:list=(0,0)) -> None:\r\n self.name = name\r\n if scene != None:\r\n self.scene = scene\r\n self.scene.addobj(self)\r\n self.rect = pygame.Rect(0,0,0,0)\r\n self.anchor =anchor\r\n self.setcoord(coord)\r\n '''self.children = {}\r\n pass\r\n def addchild(self, child:Self):\r\n if not child.name in self.children:\r\n self.children[child.name] = child\r\n def addchildren(self, *children):\r\n for child in children:\r\n self.addchild(child)'''\r\n def setcoord(self, coord:list):\r\n self.coord = list(coord)\r\n self.rect = pygame.Rect(0,0,self.rect.width,self.rect.height)\r\n self.rect.x = self.coord[0]- self.anchor[0]*self.rect.w\r\n self.rect.y = self.coord[1]-self.anchor[1]*self.rect.h\r\n def setrect(self, rect:pygame.Rect):\r\n self.rect = rect.copy()\r\n self.width = self.rect.w\r\n self.height = self.rect.h\r\n self.rect.x = self.coord[0]- self.anchor[0]*self.rect.w\r\n self.rect.y = self.coord[1]-self.anchor[1]*self.rect.h\r\n def movecoord(self, rel):\r\n self.coord[0] += rel[0]\r\n self.coord[1] += rel[1]\r\n self.setcoord(self.coord)\r\n #for name in self.children:\r\n # self.children[name].movecoord(rel)\r\n def draw(self, screen:pygame.Surface):\r\n screen.blit(self.image, self.coord)\r\n def updata(self):\r\n pass\r\n def eventact(self, event:pygame.event.Event):\r\n pass\r\n\r\nclass ComposeBox(UIBase):\r\n def __init__(self, name: str, scene: Scene, coord: list, anchor: list=[0,0]) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.children = {}\r\n def addchild(self, child:UIBase):\r\n if not child.name in self.children:\r\n self.children[child.name] = child\r\n def addchildren(self, *children):\r\n for child in children:\r\n self.addchild(child)\r\n def getchild(self, name):\r\n if name in self.children:\r\n return self.children[name]\r\n def draw(self, screen: pygame.Surface):\r\n for name in self.children:\r\n self.children[name].draw(screen)\r\n def eventact(self, event: pygame.event.Event):\r\n for name in self.children:\r\n self.children[name].eventact(event)\r\n def setcoord(self, coord: tuple):\r\n super().setcoord(coord)\r\n def movecoord(self, rel):\r\n super().movecoord(rel)\r\n for name in self.children:\r\n self.children[name].movecoord(rel)\r\n \r\nclass RectBlock(UIBase):\r\n def __init__(self, name:str, scene:Scene, width:int, height:int, color:tuple, coord:list, anchor:list=[0,0], radius:int=0) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.color = color\r\n self.radius = radius\r\n self.rect = pygame.Rect(*self.coord,width, height)\r\n pass\r\n def draw(self, screen:pygame.Surface):\r\n pygame.draw.rect(screen, self.color, self.rect, 0, self.radius)\r\n\r\nclass IRectBlock(UIBase):\r\n def __init__(self, name:str, scene:Scene, width:int, height:int, color:tuple, coord:list, anchor:list=[0,0], radius:list=0) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.color = color\r\n self.radius = radius\r\n self.rect = pygame.Rect(*self.coord,width, height)\r\n self.image = pygame.Surface(self.rect.size, pygame.SRCALPHA)\r\n self.image.fill(self.color)\r\n\r\nclass ScaleBar(UIBase):\r\n def __init__(self, name:str, scene:Scene, length:int, height:int, color:tuple, keycolor:tuple, coord:list, fact=(1,0), anchor:list=[0,0]) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.color = color\r\n self.fact = fact\r\n self.fuc = fuc_none\r\n self.height = height\r\n self.length = length\r\n if fact[1] == 0:#横向\r\n self.height, self.length = self.length, self.height\r\n self.key = RectBlock(name+\"_key\", scene, length*0.1, height, keycolor, coord)\r\n else:\r\n self.key = RectBlock(name+\"_key\", scene, height, length*0.1, keycolor, coord)\r\n self.rect = pygame.Rect(*coord, self.height, self.length)\r\n self.setcoord(self.coord)\r\n self.setkeyrect(self.rect.copy())\r\n self.fl_down = False\r\n self.scalenum = 0.0\r\n\r\n pass\r\n def linkobj(self, obj):\r\n pass\r\n def linkfuc(self, fuc:fuc_none):\r\n self.fuc = fuc\r\n fuc(self.scalenum)\r\n def setkeyrect(self, pos:list):\r\n if self.fact[1] == 0:\r\n i = 0\r\n else:\r\n i = 1\r\n if pos[i] < self.rect[i]+self.rect[i+2]*0.95:\r\n if pos[i] > self.rect[i]+self.rect[i+2]*0.05:\r\n pass\r\n else:\r\n pos[i] = self.rect[i]+self.rect[i+2]*0.05\r\n else:\r\n pos[i] = self.rect[i]+self.rect[i+2]*0.95\r\n if self.fact[0] == 1 or self.fact[1] == 1:\r\n self.scalenum = (pos[i]-(self.rect[i]+self.rect[i+2]*0.05))/(self.rect[i+2]*0.9)\r\n \r\n else:\r\n self.scalenum = (self.rect[i+2]*0.95-pos[i]+self.rect[i])/(self.rect[i+2]*0.9)\r\n if self.fact[1] == 0:\r\n self.key.setcoord((pos[0]-self.rect[2]*0.05, self.rect[1]))\r\n else:\r\n self.key.setcoord((self.rect[0], pos[1]-self.rect[3]*0.05))\r\n self.fuc(self.scalenum)\r\n def eventact(self, event:pygame.event.Event):\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1 and self.rect.collidepoint(*event.pos):\r\n self.fl_down = True\r\n self.startpos = event.pos\r\n self.setkeyrect(list(event.pos))\r\n elif event.type == pygame.MOUSEMOTION:\r\n if self.fl_down == True:\r\n self.setkeyrect(list(event.pos))\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n if self.fl_down == True and event.button == 1:\r\n self.fl_down = False\r\n pass\r\n def draw(self, screen):\r\n pygame.draw.rect(screen, self.color, self.rect)\r\n self.key.draw(screen)\r\n\r\nclass SlideBox(ComposeBox):\r\n def __init__(self, name: str, scene: Scene, imgwidth:int, imgheight:int, area:pygame.Rect, coord:list, anchor:list=[0,0]) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.image = pygame.Surface((imgwidth,imgheight))\r\n self.image.fill(COLOR[\"blue\"])\r\n if type(area) != pygame.Rect:\r\n area = pygame.Rect(*area)\r\n self.imgheight = imgheight\r\n self.imgwidth = imgwidth\r\n self.scalenum = [0.0, 0.0]\r\n self.speed = 5\r\n self.area = area\r\n def eventact(self, event:pygame.event.Event):\r\n if event.type == pygame.MOUSEWHEEL and self.rect.collidepoint(*pygame.mouse.get_pos()):\r\n self.area.top += event.y * self.speed\r\n if self.area.bottom > self.imgheight:\r\n self.area.bottom = self.imgheight\r\n elif self.area.top < 0:\r\n self.area.top = 0\r\n self.scalenum[0] = self.area.top/(self.imgheight - self.area.height)\r\n def changescalenumx(self, scalenum:int):\r\n self.scalenum[0] = scalenum\r\n self.setarea()\r\n def changescalenumy(self, scalenum:int):\r\n self.scalenum[1] = scalenum\r\n self.setarea()\r\n def setarea(self):\r\n self.area.x = (self.imgwidth - self.area.w)*self.scalenum[0]\r\n self.area.top = (self.imgheight - self.area.h)*self.scalenum[1]\r\n def draw(self, screen: pygame.Surface):\r\n screen.blit(self.image, self.coord, self.area)\r\n\r\nclass FloatSlideBox(SlideBox):\r\n def __init__(self, name: str, scene: Scene, imgwidth:int, imgheight:int, area:pygame.Rect, coord:list, anchor:list=[0, 0]) -> None:\r\n super().__init__(name, scene, imgwidth, imgheight, area, coord, anchor)\r\n\r\ndef changeslidboxscalenum(obj:SlideBox, i, scalenum):\r\n obj.scalenum[i] = scalenum\r\n obj.setarea()\r\n\r\nclass Image(UIBase):\r\n def __init__(self, name:str, image:pygame.Surface, coord:list, anchor:list=[0,0]) -> None:\r\n self.image = image\r\n rect = self.image.get_rect()\r\n super().__init__(name, coord, anchor)\r\n pass\r\n\r\n\r\nclass Label(UIBase):\r\n def __init__(self, name:str, scene:Scene, font:pygame.font.Font, text:str, color:tuple, bgcolor:None, coord:list, anchor:list=[0,0]) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.font = font\r\n self.text = text\r\n self.color = color\r\n self.bgcolor = bgcolor\r\n self.prep()\r\n pass\r\n def reset(self, font:pygame.font.Font, text, color, coord:tuple, anchor=(0,0)):\r\n self.font = font\r\n self.text = text\r\n self.color = color\r\n self.coord = coord\r\n self.anchor = anchor\r\n self.prep()\r\n return self\r\n def change_text(self, text):\r\n self.text = text\r\n self.prep()\r\n def prep(self):\r\n self.image = self.font.render(self.text, True, self.color, self.bgcolor)\r\n\r\nclass Button(RectBlock):\r\n def __init__(self, name:str, scene:Scene, text:str, textcolor:tuple, font:pygame.font.Font, width:int, height:int, color:tuple, fuc:function, coord:list, anchor=[0,0], radius=0) -> None:\r\n super().__init__(name, scene, width, height, color, coord, anchor, radius)\r\n self.label = Label(name+\"_label\", scene, font, text, textcolor, self.rect.center, (0.5,0.5))\r\n self.fuc = fuc\r\n def eventact(self, event: pygame.event.Event):\r\n if event.type == pygame.MOUSEBUTTONDOWN and self.rect.collidepoint(*event.pos):\r\n self.fuc()\r\n def draw(self, screen: pygame.Surface):\r\n super().draw(screen)\r\n self.label.draw(screen)\r\n\r\nclass Entry(Label):\r\n def __init__(self, name:str, scene: Scene, font: pygame.font.Font, minwidth:int, text:str, color:tuple, bgcolor:tuple=None, coord:list=[0,0], anchor:list=[0,0], fuc:function=fuc_none) -> None:\r\n self.minwidth = minwidth\r\n super().__init__(name, scene, font, text, color, bgcolor, coord, anchor)\r\n self.f_focus = False\r\n self.fuc = fuc\r\n def eventact(self, event: pygame.event.Event):\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if self.rect.collidepoint(*event.pos):\r\n self.f_focus = True\r\n else:\r\n self.f_focus = False\r\n elif self.f_focus == True and event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_BACKSPACE:# and len(self.text) > 0:\r\n self.text = self.text[:-1]\r\n elif event.key == pygame.K_RETURN:\r\n self.fuc(self.text)\r\n else:\r\n self.text += event.unicode\r\n self.prep()\r\n def setrect(self, rect: pygame.Rect):\r\n super().setrect(rect)\r\n if self.rect.w < self.minwidth:\r\n self.rect.w = self.minwidth\r\n self.width = self.minwidth\r\n def draw(self, screen: pygame.Surface):\r\n super().draw(screen)\r\n pygame.draw.rect(screen, (50,50,50), self.rect, 1)\r\n\r\nclass EntrySelect(Entry):\r\n def __init__(self, name, scene: Scene, font: pygame.font.Font, minwidth: int, text: str, color, bgcolor: None, coord:list, anchor=[0,0], fuc=None) -> None:\r\n super().__init__(name, scene, font, minwidth, text, color, bgcolor, coord, anchor, fuc)\r\n\r\nclass MenuNode(Label):\r\n def __init__(self, name, font: pygame.font.Font, text: str, color, bgcolor: None, coord:list, anchor=[0,0], fuc=fuc_none) -> None:\r\n super().__init__(name, None, font, text, color, bgcolor, coord, anchor)\r\n self.fuc = fuc\r\n \r\n\r\nclass ContextMenu(UIBase):\r\n def __init__(self, name: str, scene: Scene, width: int, height: int, coord:list, anchor:list=[0,0]) -> None:\r\n super().__init__(name, scene, coord, anchor)\r\n self.active = False\r\n self.menu = {}\r\n def draw(self, screen: pygame.Surface):\r\n return super().draw(screen)\r\n\r\nclass FloatBox(RectBlock):\r\n def __init__(self, name: str, scene: Scene, width: int, height: int, color: tuple, coord: list, anchor: list = [0, 0], radius: int = 0) -> None:\r\n super().__init__(name, scene, width, height, color, coord, anchor, radius)\r\n self.flag = [0,0]\r\n self.f_move = False\r\n def eventact(self, event: pygame.event.Event):\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n pos = event.pos\r\n rect = self.rect\r\n n = 5\r\n if rect.x-n < pos[0] < rect.x+n:\r\n self.flag[0] = -1\r\n elif rect.right-n < pos[0] < rect.right+n:\r\n self.flag[0] = 1\r\n if rect.y-n < pos[1] < rect.y+n:\r\n self.flag[1] = -1\r\n elif rect.bottom-n < pos[1] < rect.bottom+n:\r\n self.flag[1] = 1\r\n elif rect.collidepoint(*pos):\r\n self.f_move = True\r\n elif event.type == pygame.MOUSEMOTION:\r\n if self.flag != [0,0]:\r\n pos = event.pos\r\n flag = self.flag\r\n if flag[0] == -1:\r\n self.width = self.rect.right - pos[0]\r\n self.coord[0] = pos[0]\r\n elif flag[0] == 1:\r\n self.width = pos[0] - self.rect.x\r\n if flag[1] == -1:\r\n self.height = self.rect.bottom - pos[1]\r\n self.coord[1] = pos[1]\r\n elif flag[1] == 1:\r\n self.height = pos[1] - self.rect.y\r\n self.setcoord(self.coord)\r\n elif self.f_move == True:\r\n self.movecoord(event.rel)\r\n elif event.type == pygame.MOUSEBUTTONUP and (self.flag != [0,0] or self.f_move == True):\r\n self.flag = [0,0]\r\n self.f_move = False\r\n def draw(self, screen: pygame.Surface):\r\n pygame.draw.rect(screen, self.color, self.rect, 0)\r\n pygame.draw.rect(screen, COLOR[\"grey\"], self.rect, 2)\r\n \r\n\r\n\r\ndef change_rect(obj:UIBase, text:str):\r\n l = text.split(\",\")\r\n obj.width = int(l[2])\r\n obj.height = int(l[3])\r\n obj.setcoord(( int(l[0]), int(l[1]) ))\r\n\r\ndef main():\r\n pygame.init()\r\n Height = 700\r\n Width = 1400\r\n screen = pygame.display.set_mode((Width, Height))\r\n screen_rect = screen.get_rect()\r\n pygame.display.set_caption(\"贪吃蛇\")\r\n ts = pygame.time.Clock()\r\n global SC_1\r\n SC_1 = Scene()\r\n floatSC = FloatBox_Scene()\r\n font14 = get_FTfont(14)\r\n mainwindow = ComposeBox(\"mainwidow\", SC_1, [0,0])\r\n mainwindowrect = pygame.Rect(0,0,Width-200, Height)\r\n mainwindow.addchildren(\r\n SlideBox(\"mianwindow\", SC_1, 2000, 2000, pygame.Rect(0,0,Width-200,Height), (0,0)),\r\n ScaleBar(\"mainwindow_scalebarx\", SC_1, mainwindowrect.width, 15, COLOR[\"white\"], COLOR[\"grey\"], (0, Height), (1,0), (0,1)),\r\n ScaleBar(\"mainwindow_scalebary\", SC_1, Height, 15, COLOR[\"white\"], COLOR[\"grey\"], (mainwindowrect.right,0), (0,1), (1,0))\r\n )\r\n mainwindowslidebox = SC_1.getobj(\"mianwindow\")\r\n SC_1.getobj(\"mainwindow_scalebarx\").linkfuc(SC_1.getobj(\"mianwindow\").changescalenumx)\r\n SC_1.getobj(\"mainwindow_scalebary\").linkfuc(SC_1.getobj(\"mianwindow\").changescalenumy)\r\n mainban = ComposeBox(\"mianban\", SC_1, [0,0], [1,0])\r\n mainban.addchildren(\r\n RectBlock(\"mianban_bg\", SC_1, 200, Height, COLOR[\"grey\"], [0,0], [0,0]),\r\n Label(\"mianban_name_k\", SC_1, font14, \"name:\", COLOR[\"white\"], None, (5,0)),\r\n Label(\"mianban_name_v\", SC_1, font14, \"name\", COLOR[\"white\"], None, (60,0)),\r\n Label(\"mianban_rect_k\", SC_1, font14, \"rect:\", COLOR[\"white\"], None, (5,20)),\r\n Entry(\"mianban_rect_v\", SC_1, font14, 20, \"rect\", COLOR[\"white\"], None, (50,20), (0,0), fuc_none)\r\n )\r\n mainban.movecoord((Width-200,0))\r\n Streeobj = ComposeBox(\"streeobj\", SC_1, [0,0])\r\n Streeobj.addchildren(\r\n mainwindow,\r\n mainban\r\n )\r\n mb_name = SC_1.getobj(\"mianban_name_v\")\r\n mb_rect = SC_1.getobj(\"mianban_rect_v\")\r\n floatbox = FloatBox(\"floatbox\", SC_1, 100, 100, COLOR[\"blue\"], screen_rect.center)\r\n floatSC.addobjs(floatbox)\r\n mb_rect_fuc = partial(change_rect, floatbox)\r\n mb_rect.fuc = mb_rect_fuc\r\n\r\n global keep_going\r\n keep_going = True\r\n active = True\r\n FPS = 60\r\n jgtime = 0.0\r\n while keep_going:\r\n screen.fill(COLOR[\"white\"])\r\n dt = ts.tick(FPS) / 1000 #帧率\r\n jgtime += dt\r\n \r\n for event in pygame.event.get():\r\n #print(event.type)\r\n floatSC.eventact(event)\r\n mb_rect.eventact(event)\r\n mainwindow.eventact(event)\r\n if event.type == pygame.QUIT: # 退出事件\r\n keep_going = False\r\n \r\n if jgtime > 0.2:\r\n jgtime = 0.0\r\n if floatSC.now != None:\r\n mb_name.change_text(floatSC.now.name)\r\n if not mb_rect.f_focus:\r\n mb_rect.change_text(str(floatSC.now.rect)[6:-2])\r\n\r\n floatSC.draw(mainwindowslidebox.image)\r\n Streeobj.draw(screen)\r\n pygame.display.update() # 刷新屏幕\r\n \r\n pygame.quit()\r\n return 0\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"qwe15526/My_pygame_test","sub_path":"small_ui.py","file_name":"small_ui.py","file_ext":"py","file_size_in_byte":19046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32912085757","text":"import logging\nimport socket\nimport telnetlib\nimport time\nfrom enum import Enum\nfrom typing import Final, List, Union\n\nlogger = logging.getLogger(__name__)\n\n\nclass PeSwitchState(str, Enum):\n ON = \"on\"\n OFF = \"off\"\n PENDING = \"pending\"\n\n\nclass Pe6108ava:\n _DEFAULT_USERNAME = \"teladmin\"\n _DEFAULT_PASSWORD = \"telpwd\"\n _DEFAULT_PORT = 23\n _DEFAULT_TIMEOUT: Final[float] = 1.0\n _BR = \"\\r\\n\"\n _PROMPT = \"> \"\n _DEFAULT_OFF_DURATION: Final[float] = 10.0\n _MINIMUM_OFF_DURATION: Final[float] = 5.0\n\n def __init__(\n self,\n hostname: str,\n port: int = _DEFAULT_PORT,\n timeout: float = _DEFAULT_TIMEOUT,\n username: str = _DEFAULT_USERNAME,\n password: str = _DEFAULT_PASSWORD,\n ):\n self._ipaddr = socket.gethostbyname(hostname)\n self._port = port\n self._timeout = timeout\n self._username = username\n self._password = password\n self._tn: Union[telnetlib.Telnet, None] = None\n\n def _parse_reply(self, data: bytes) -> List[str]:\n lines = [line for line in data.decode().split(self._BR) if len(line.strip()) > 0]\n return lines\n\n def _read_and_parse(self, tail: str) -> List[str]:\n if self._tn is None:\n raise AssertionError\n return self._parse_reply(self._tn.read_until(tail.encode(), timeout=self._timeout))\n\n def _read_until_prompt_and_parse(self) -> List[str]:\n rep = self._read_and_parse(self._BR + self._PROMPT)\n return rep\n\n def open(self) -> bool:\n self._tn = telnetlib.Telnet(host=self._ipaddr, port=self._port, timeout=self._timeout)\n for _ in range(3):\n rv = self._open()\n if rv:\n logger.info(f\"connection is established with {self._ipaddr}\")\n return rv\n else:\n self._tn.write(self._BR.encode())\n else:\n logger.info(f\"failed to established connection with {self._ipaddr}\")\n self._tn = None\n return False\n\n def _open(self) -> bool:\n if self._tn is None:\n raise AssertionError\n rep = self._read_and_parse(\"Login: \")\n if len(rep) == 0 or rep[-1] != \"Login: \":\n return False\n self._tn.write((self._username + self._BR).encode())\n rep = self._read_and_parse(\"Password: \")\n if len(rep) == 0 or rep[-1] != \"Password: \":\n return False\n self._tn.write(self._password.encode() + b\"\\r\\n\")\n rep = self._read_until_prompt_and_parse()\n if len(rep) == 0 or rep[-1] != self._PROMPT:\n return False\n return True\n\n def _exec_cmd_auto_open(self, cmd: str) -> List[str]:\n if self._tn is None:\n self.open()\n else:\n try:\n return self._exec_cmd(cmd)\n except BrokenPipeError:\n self.open()\n return self._exec_cmd(cmd)\n\n def _exec_cmd(self, cmd: str) -> List[str]:\n if self._tn is None:\n raise AssertionError\n self._tn.write((cmd + self._BR).encode())\n rep = self._read_until_prompt_and_parse()\n if len(rep) == 0:\n raise RuntimeError(\"no response\")\n elif rep[-1] != \"> \":\n raise RuntimeError(\"truncated output, no prompt is detected\")\n else:\n return rep[:-1]\n\n def _validate_switch_index(self, idx: int) -> None:\n if not 1 <= idx <= 8:\n raise ValueError(\"invalid index of switch: {idx}\")\n\n def check_switch(self, idx: int) -> PeSwitchState:\n self._validate_switch_index(idx)\n cmd = f\"read status o{idx:02d} simple\"\n reply = self._exec_cmd_auto_open(cmd)\n if len(reply) != 2 or reply[0] != cmd or reply[1] not in {\"on\", \"off\", \"pending\"}:\n msg = \"/\".join(reply)\n raise RuntimeError(f\"failed to read status of switch, unexpected reply '{msg}' is received\")\n return PeSwitchState(reply[1])\n\n def is_turned_on(self, idx: int) -> bool:\n return self.check_switch(idx) == PeSwitchState.ON\n\n def is_turned_off(self, idx: int) -> bool:\n return self.check_switch(idx) == PeSwitchState.OFF\n\n def turn_switch(self, idx: int, status: PeSwitchState, no_switch_ok: bool = False) -> None:\n self._validate_switch_index(idx)\n if status not in {PeSwitchState.ON, PeSwitchState.OFF}:\n raise ValueError(f\"invalid swtich state: '{status.value}'\")\n if self.check_switch(idx) == status:\n if not no_switch_ok:\n logger.warning(f\"switch {idx} is already {status.value}, no thing happens\")\n return\n\n cmd = f\"sw o{idx:02d} imme {status.value}\"\n reply = self._exec_cmd(cmd)\n if len(reply) != 2 or reply[1] != f\" Outlet<{idx:02d}> command is setting \":\n msg = \"/\".join(reply)\n raise RuntimeError(f\"failed to change the status of switch: unexpected reply '{msg}' is received\")\n\n t0 = time.perf_counter()\n for _ in range(10):\n time.sleep(1)\n if self.check_switch(idx) == status:\n break\n else:\n raise RuntimeError(\n f\"failed to turn {status.value} the switch for {int(time.perf_counter()-t0)} seconds, something wrong!\"\n )\n logger.info(f\"switch {self._ipaddr}:{idx} is turned {status.value}\")\n return\n\n def turn_switch_on(self, idx: int, no_switch_ok=True):\n self.turn_switch(idx, PeSwitchState.ON, no_switch_ok)\n\n def turn_switch_off(self, idx: int, no_switch_ok=True):\n self.turn_switch(idx, PeSwitchState.OFF, no_switch_ok)\n\n def powercycle_switch(\n self, idx: int, off_duration: float = _DEFAULT_OFF_DURATION, no_switch_ok: bool = False\n ) -> None:\n self._validate_switch_index(idx)\n if off_duration < self._MINIMUM_OFF_DURATION:\n raise ValueError(f\"too short off-time: {off_duration} seconds\")\n\n self.turn_switch(idx, PeSwitchState.OFF, no_switch_ok)\n time.sleep(off_duration)\n self.turn_switch(idx, PeSwitchState.ON)\n return\n","repo_name":"quel-inc/quelware","sub_path":"quel_inst_tool/quel_inst_tool/pe6108ava.py","file_name":"pe6108ava.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72956288375","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n\n\n\nTRAIN_DF = '../input/humpback-whale-identification/train.csv'\nSUB_Df = '../input/humpback-whale-identification/sample_submission.csv'\nTRAIN = '../input/humpback-whale-identification/train/'\nTEST = '../input/humpback-whale-identification/test/'\nP2H = '../input/metadata/p2h.pickle'\nP2SIZE = '../input/metadata/p2size.pickle'\nBB_DF = \"../input/metadata/bounding_boxes.csv\"\n\n\n\n\nget_ipython().system('pip install lap')\nfrom pandas import read_csv\ntagged = dict([(p, w) for _, p, w in read_csv(TRAIN_DF).to_records()])\nsubmit = [p for _, p, _ in read_csv(SUB_Df).to_records()]\njoin = list(tagged.keys()) + submit\nlen(tagged),len(submit),len(join),list(tagged.items())[:5],submit[:5]\n\n\n\n\nfrom os.path import isfile\nfrom PIL import Image as pil_image\nfrom tqdm import tqdm_notebook as tqdm\ndef expand_path(p):\n if isfile(TRAIN + p):\n return TRAIN + p\n if isfile(TEST + p):\n return TEST + p\n return p\n\n\n\n\nimport gzip\nimport pickle\nfrom lap import lapjv\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom imagehash import phash\nfrom keras.utils import Sequence\nimport time\n\n\n\n\nimport platform\nimport random\nimport keras\nimport sys\nfrom scipy.ndimage import affine_transform\nfrom keras import backend as K\nfrom keras.preprocessing.image import img_to_array,array_to_img\n\n\n\n\ndef show_whale(imgs, per_row=2):\n n = len(imgs)\n rows = (n + per_row - 1) // per_row\n cols = min(per_row, n)\n fig, axes = plt.subplots(rows, cols, figsize=(24 // per_row * cols, 24 // per_row * rows))\n for ax in axes.flatten(): ax.axis('off')\n for i, (img, ax) in enumerate(zip(imgs, axes.flatten())): ax.imshow(img.convert('RGB'))\n\n#returns the image we need\ndef read_raw_image(p):\n img = pil_image.open(expand_path(p))\n return img\n\n\n\n\nwith open(P2H, 'rb') as f:\n p2h = pickle.load(f)\nh2ps = {}\nfor p, h in p2h.items():\n if h not in h2ps: h2ps[h] = []\n if p not in h2ps[h]: h2ps[h].append(p)\nwith open(P2SIZE, 'rb') as f:\n p2size = pickle.load(f)\np2bb = pd.read_csv(BB_DF).set_index(\"Image\")\nold_stderr = sys.stderr\nsys.stderr = open('/dev/null' if platform.system() != 'Windows' else 'nul', 'w')\nsys.stderr = old_stderr\nimg_shape = (384, 384, 1)\nanisotropy = 2.15\ncrop_margin = 0.1 \n\n\n\n\ndef prefer(ps):\n if len(ps) == 1: return ps[0]\n best_p = ps[0]\n best_s = p2size[best_p]\n for i in range(1, len(ps)):\n p = ps[i]\n s = p2size[p]\n if s[0] * s[1] > best_s[0] * best_s[1]: \n best_p = p\n best_s = s\n return best_p\n\nh2p = {}\nfor h, ps in h2ps.items():\n h2p[h] = prefer(ps)\nlen(h2p), list(h2p.items())[:5]\n\n\n\n\ndef build_transform(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n\n rotation = np.deg2rad(rotation)\n shear = np.deg2rad(shear)\n rotation_matrix = np.array(\n [[np.cos(rotation), np.sin(rotation), 0], [-np.sin(rotation), np.cos(rotation), 0], [0, 0, 1]])\n shift_matrix = np.array([[1, 0, height_shift], [0, 1, width_shift], [0, 0, 1]])\n shear_matrix = np.array([[1, np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])\n zoom_matrix = np.array([[1.0 / height_zoom, 0, 0], [0, 1.0 / width_zoom, 0], [0, 0, 1]])\n shift_matrix = np.array([[1, 0, -height_shift], [0, 1, -width_shift], [0, 0, 1]])\n return np.dot(np.dot(rotation_matrix, shear_matrix), np.dot(zoom_matrix, shift_matrix))\n\n\n\n\ndef read_cropped_image(p, augment):\n\n # If an image id was given, convert to filename\n if p in h2p:\n p = h2p[p]\n size_x, size_y = p2size[p]\n\n # Determine the region of the original image we want to capture based on the bounding box.\n row = p2bb.loc[p]\n x0, y0, x1, y1 = row['x0'], row['y0'], row['x1'], row['y1']\n dx = x1 - x0\n dy = y1 - y0\n x0 -= dx * crop_margin\n x1 += dx * crop_margin + 1\n y0 -= dy * crop_margin\n y1 += dy * crop_margin + 1\n if x0 < 0:\n x0 = 0\n if x1 > size_x:\n x1 = size_x\n if y0 < 0:\n y0 = 0\n if y1 > size_y:\n y1 = size_y\n dx = x1 - x0\n dy = y1 - y0\n if dx > dy * anisotropy:\n dy = 0.5 * (dx / anisotropy - dy)\n y0 -= dy\n y1 += dy\n else:\n dx = 0.5 * (dy * anisotropy - dx)\n x0 -= dx\n x1 += dx\n\n # Generate the transformation matrix randomly\n trans = np.array([[1, 0, -0.5 * img_shape[0]], [0, 1, -0.5 * img_shape[1]], [0, 0, 1]])\n trans = np.dot(np.array([[(y1 - y0) / img_shape[0], 0, 0], [0, (x1 - x0) / img_shape[1], 0], [0, 0, 1]]), trans)\n if augment:\n trans = np.dot(build_transform(\n random.uniform(-5, 5),\n random.uniform(-5, 5),\n random.uniform(0.8, 1.0),\n random.uniform(0.8, 1.0),\n random.uniform(-0.05 * (y1 - y0), 0.05 * (y1 - y0)),\n random.uniform(-0.05 * (x1 - x0), 0.05 * (x1 - x0))\n ), trans)\n trans = np.dot(np.array([[1, 0, 0.5 * (y1 + y0)], [0, 1, 0.5 * (x1 + x0)], [0, 0, 1]]), trans)\n\n # Read the image, transform to black and white and convert to numpy array\n img = read_raw_image(p).convert('L')\n img = img_to_array(img)\n\n # Apply affine transformation\n matrix = trans[:2, :2]\n offset = trans[:2, 2]\n img = img.reshape(img.shape[:-1])\n img = affine_transform(img, matrix, offset, output_shape=img_shape[:-1], order=1, mode='constant',\n cval=np.average(img))\n img = img.reshape(img_shape)\n\n # Normalize to zero mean and unit variance\n img -= np.mean(img, keepdims=True)\n img /= np.std(img, keepdims=True) + K.epsilon()\n return img\n\n\n\n\nfrom keras import regularizers\nfrom keras.engine.topology import Input\nfrom keras.layers import Activation, Add, BatchNormalization, Concatenate, Conv2D, Dense, Flatten, GlobalMaxPooling2D, Lambda, MaxPooling2D, Reshape\nfrom keras.models import Model\nfrom keras.optimizers import Adam\n\n\n\n\ndef subblock(x, filter, **kwargs):\n x = BatchNormalization()(x)\n y = x\n y = Conv2D(filter, (1, 1), activation='relu', **kwargs)(y) # Reduce the number of features to 'filter'\n y = BatchNormalization()(y)\n y = Conv2D(filter, (3, 3), activation='relu', **kwargs)(y) # Extend the feature field\n y = BatchNormalization()(y)\n y = Conv2D(K.int_shape(x)[-1], (1, 1), **kwargs)(y) # no activation # Restore the number of original features\n y = Add()([x, y]) # Add the bypass connection\n y = Activation('relu')(y)\n return y\n\n#The branch model of the Siamese Nueral Network is a normal CNN\n#We perform the CNN to get the images into vectors so that they can be compared during the head branch\ndef build_model(lr, l2, activation='sigmoid'):\n \n # BRANCH MODEL\n \n regul = regularizers.l2(l2)\n optim = Adam(lr=lr)\n kwargs = {'padding': 'same', 'kernel_regularizer': regul}\n\n inp = Input(shape=img_shape) \n x = Conv2D(64, (9, 9), strides=2, activation='relu', **kwargs)(inp)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x) \n for _ in range(2):\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', **kwargs)(x)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x) \n x = BatchNormalization()(x)\n x = Conv2D(128, (1, 1), activation='relu', **kwargs)(x) \n for _ in range(4):\n x = subblock(x, 64, **kwargs)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x) \n x = BatchNormalization()(x)\n x = Conv2D(256, (1, 1), activation='relu', **kwargs)(x) \n for _ in range(4):\n x = subblock(x, 64, **kwargs)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x) \n x = BatchNormalization()(x)\n x = Conv2D(384, (1, 1), activation='relu', **kwargs)(x) \n for _ in range(4):\n x = subblock(x, 96, **kwargs)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x) \n x = BatchNormalization()(x)\n x = Conv2D(512, (1, 1), activation='relu', **kwargs)(x) \n for _ in range(4):\n x = subblock(x, 128, **kwargs)\n\n x = GlobalMaxPooling2D()(x) # 512\n branch_model = Model(inp, x)\n\n \n # HEAD MODEL\n \n mid = 32\n xa_inp = Input(shape=branch_model.output_shape[1:])\n xb_inp = Input(shape=branch_model.output_shape[1:])\n x1 = Lambda(lambda x: x[0] * x[1])([xa_inp, xb_inp])\n x2 = Lambda(lambda x: x[0] + x[1])([xa_inp, xb_inp])\n x3 = Lambda(lambda x: K.abs(x[0] - x[1]))([xa_inp, xb_inp])\n x4 = Lambda(lambda x: K.square(x))(x3)\n x = Concatenate()([x1, x2, x3, x4])\n x = Reshape((4, branch_model.output_shape[1], 1), name='reshape1')(x)\n\n # Per feature NN with shared weight is implemented using CONV2D with appropriate stride.\n x = Conv2D(mid, (4, 1), activation='relu', padding='valid')(x)\n x = Reshape((branch_model.output_shape[1], mid, 1))(x)\n x = Conv2D(1, (1, mid), activation='linear', padding='valid')(x)\n x = Flatten(name='flatten')(x)\n\n # Weighted sum implemented as a Dense layer.\n x = Dense(1, use_bias=True, activation=activation, name='weighted-average')(x)\n head_model = Model([xa_inp, xb_inp], x, name='head')\n \n \n # SIAMESE NEURAL NETWORK\n \n img_a = Input(shape=img_shape)\n img_b = Input(shape=img_shape)\n xa = branch_model(img_a) \n xb = branch_model(img_b) \n x = head_model([xa, xb]) \n model = Model([img_a, img_b], x)\n model.compile(optim, loss='binary_crossentropy', metrics=['binary_crossentropy', 'acc'])\n return model, branch_model, head_model\n\n\nmodel, branch_model, head_model = build_model(64e-5, 0)\nhead_model.summary()\n\n\n\n\nfrom keras.utils import plot_model\nplot_model(head_model, to_file='head-model.png')\npil_image.open('head-model.png')\n\n\n\n\nh2ws = {}\nnew_whale = 'new_whale'\nfor p, w in tagged.items():\n if w != new_whale: \n h = p2h[p]\n if h not in h2ws: h2ws[h] = []\n if w not in h2ws[h]: h2ws[h].append(w)\nfor h, ws in h2ws.items():\n if len(ws) > 1:\n h2ws[h] = sorted(ws)\n\nw2hs = {}\nfor h, ws in h2ws.items():\n if len(ws) == 1: # Use only unambiguous pictures\n w = ws[0]\n if w not in w2hs: w2hs[w] = []\n if h not in w2hs[w]: w2hs[w].append(h)\nfor w, hs in w2hs.items():\n if len(hs) > 1:\n w2hs[w] = sorted(hs)\n\n\n\n\ntrain = [] # A list of training image ids\nfor hs in w2hs.values():\n if len(hs) > 1:\n train += hs\nrandom.shuffle(train)\ntrain_set = set(train)\n\nw2ts = {} # Associate the image ids from train to each whale id.\nfor w, hs in w2hs.items():\n for h in hs:\n if h in train_set:\n if w not in w2ts:\n w2ts[w] = []\n if h not in w2ts[w]:\n w2ts[w].append(h)\nfor w, ts in w2ts.items():\n w2ts[w] = np.array(ts)\n\nt2i = {} # The position in train of each training image id\nfor i, t in enumerate(train):\n t2i[t] = i\n\n\n\n\ntry:\n from lap import lapjv\n segment = False\nexcept ImportError:\n print('Module lap not found, emulating with much slower scipy.optimize.linear_sum_assignment')\n segment = True\n from scipy.optimize import linear_sum_assignment\nclass TrainingData(Sequence):\n def __init__(self, score, steps=1000, batch_size=32):\n \"\"\"\n @param score the cost matrix for the picture matching\n @param steps the number of epoch we are planning with this score matrix\n \"\"\"\n super(TrainingData, self).__init__()\n self.score = -score # Maximizing the score is the same as minimuzing -score.\n self.steps = steps\n self.batch_size = batch_size\n for ts in w2ts.values():\n idxs = [t2i[t] for t in ts]\n for i in idxs:\n for j in idxs:\n self.score[\n i, j] = 10000.0 # Set a large value for matching whales -- eliminates this potential pairing\n self.on_epoch_end()\n\n def __getitem__(self, index):\n start = self.batch_size * index\n end = min(start + self.batch_size, len(self.match) + len(self.unmatch))\n size = end - start\n assert size > 0\n a = np.zeros((size,) + img_shape, dtype=K.floatx())\n b = np.zeros((size,) + img_shape, dtype=K.floatx())\n c = np.zeros((size, 1), dtype=K.floatx())\n j = start // 2\n for i in range(0, size, 2):\n a[i, :, :, :] = read_for_training(self.match[j][0])\n b[i, :, :, :] = read_for_training(self.match[j][1])\n c[i, 0] = 1 # This is a match\n a[i + 1, :, :, :] = read_for_training(self.unmatch[j][0])\n b[i + 1, :, :, :] = read_for_training(self.unmatch[j][1])\n c[i + 1, 0] = 0 # Different whales\n j += 1\n return [a, b], c\n\n def on_epoch_end(self):\n if self.steps <= 0: return # Skip this on the last epoch.\n self.steps -= 1\n self.match = []\n self.unmatch = []\n _, _, x = lapjv(self.score) # Solve the linear assignment problem\n y = np.arange(len(x), dtype=np.int32)\n\n # Compute a derangement for matching whales\n for ts in w2ts.values():\n d = ts.copy()\n while True:\n random.shuffle(d)\n if not np.any(ts == d): break\n for ab in zip(ts, d): self.match.append(ab)\n\n # Construct unmatched whale pairs from the LAP solution.\n for i, j in zip(x, y):\n if i == j:\n print(self.score)\n print(x)\n print(y)\n print(i, j)\n assert i != j\n self.unmatch.append((train[i], train[j]))\n\n # Force a different choice for an eventual next epoch.\n self.score[x, y] = 10000.0\n self.score[y, x] = 10000.0\n random.shuffle(self.match)\n random.shuffle(self.unmatch)\n # print(len(self.match), len(train), len(self.unmatch), len(train))\n assert len(self.match) == len(train) and len(self.unmatch) == len(train)\n\n def __len__(self):\n return (len(self.match) + len(self.unmatch) + self.batch_size - 1) // self.batch_size\n\n\n\n\ndef read_for_training(p):\n \"\"\"\n Read and preprocess an image with data augmentation (random transform).\n \"\"\"\n return read_cropped_image(p, True)\n\ndef read_for_validation(p):\n \"\"\"\n Read and preprocess an image without data augmentation (use for testing).\n \"\"\"\n return read_cropped_image(p, False)\n\n\n\n\nscore = np.random.random_sample(size=(len(train), len(train)))\ndata = TrainingData(score)\n(a, b), c = data[0]\na.shape, b.shape, c.shape\n\n\n\n\n# First pair is for matching whale\nimgs = [array_to_img(a[2]), array_to_img(b[2])]\nshow_whale(imgs, per_row=2)\n\n\n\n\n# First pair is for not matching whale\nimgs = [array_to_img(a[3]), array_to_img(b[3])]\nshow_whale(imgs, per_row=2)\n\n\n\n\n#These are used to calculate the scores of the CNN on the training set\n# A Keras generator to evaluate only the BRANCH MODEL\nclass FeatureGen(Sequence):\n def __init__(self, data, batch_size=64, verbose=1):\n super(FeatureGen, self).__init__()\n self.data = data\n self.batch_size = batch_size\n self.verbose = verbose\n if self.verbose > 0: self.progress = tqdm(total=len(self), desc='Features')\n\n def __getitem__(self, index):\n start = self.batch_size * index\n size = min(len(self.data) - start, self.batch_size)\n a = np.zeros((size,) + img_shape, dtype=K.floatx())\n for i in range(size): a[i, :, :, :] = read_for_validation(self.data[start + i])\n if self.verbose > 0:\n self.progress.update()\n if self.progress.n >= len(self): self.progress.close()\n return a\n\n def __len__(self):\n return (len(self.data) + self.batch_size - 1) // self.batch_size\n \n# A Keras generator to evaluate on the HEAD MODEL on features already pre-computed.\n# It computes only the upper triangular matrix of the cost matrix if y is None.\nclass ScoreGen(Sequence):\n def __init__(self, x, y=None, batch_size=2048, verbose=1):\n super(ScoreGen, self).__init__()\n self.x = x\n self.y = y\n self.batch_size = batch_size\n self.verbose = verbose\n if y is None:\n self.y = self.x\n self.ix, self.iy = np.triu_indices(x.shape[0], 1)\n else:\n self.iy, self.ix = np.indices((y.shape[0], x.shape[0]))\n self.ix = self.ix.reshape((self.ix.size,))\n self.iy = self.iy.reshape((self.iy.size,))\n self.subbatch = (len(self.x) + self.batch_size - 1) // self.batch_size\n if self.verbose > 0:\n self.progress = tqdm(total=len(self), desc='Scores')\n\n def __getitem__(self, index):\n start = index * self.batch_size\n end = min(start + self.batch_size, len(self.ix))\n a = self.y[self.iy[start:end], :]\n b = self.x[self.ix[start:end], :]\n if self.verbose > 0:\n self.progress.update()\n if self.progress.n >= len(self): self.progress.close()\n return [a, b]\n\n def __len__(self):\n return (len(self.ix) + self.batch_size - 1) // self.batch_size\n\n\n\n\ndef set_lr(model, lr):\n K.set_value(model.optimizer.lr, float(lr))\n\ndef get_lr(model):\n return K.get_value(model.optimizer.lr)\n\n\n\n\n#converting the upper triangular matrix into a square matrix\ndef score_reshape(score, x, y=None):\n\n if y is None:\n # When y is None, score is a packed upper triangular matrix.\n # Unpack, and transpose to form the symmetrical lower triangular matrix.\n m = np.zeros((x.shape[0], x.shape[0]), dtype=K.floatx())\n m[np.triu_indices(x.shape[0], 1)] = score.squeeze()\n m += m.transpose()\n else:\n m = np.zeros((y.shape[0], x.shape[0]), dtype=K.floatx())\n iy, ix = np.indices((y.shape[0], x.shape[0]))\n ix = ix.reshape((ix.size,))\n iy = iy.reshape((iy.size,))\n m[iy, ix] = score.squeeze()\n return m\n\n\n\n\n#use the FeatureGen and the ScoreGen to calculate the score for our trained models\ndef compute_score(verbose=1):\n\n features = branch_model.predict_generator(FeatureGen(train, verbose=verbose), max_queue_size=12, workers=6,\n verbose=0)\n score = head_model.predict_generator(ScoreGen(features, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = score_reshape(score, features)\n return features, score\n\n\n\n\ndef make_steps(step, ampl):\n global w2ts, t2i, steps, features, score, histories\n random.shuffle(train)\n\n w2ts = {}\n for w, hs in w2hs.items():\n for h in hs:\n if h in train_set:\n if w not in w2ts: w2ts[w] = []\n if h not in w2ts[w]: w2ts[w].append(h)\n for w, ts in w2ts.items(): w2ts[w] = np.array(ts)\n\n # Map training picture hash value to index in 'train' array \n t2i = {}\n for i, t in enumerate(train): t2i[t] = i\n\n # Compute the match score for each picture pair\n features, score = compute_score()\n\n # Train the model for 'step' epochs\n history = model.fit_generator(\n TrainingData(score + ampl * np.random.random_sample(size=score.shape), steps=step, batch_size=32),\n initial_epoch=steps, epochs=steps + step, max_queue_size=12, workers=6, verbose=1).history\n steps += step\n\n # Collect history data\n history['epochs'] = steps\n history['ms'] = np.mean(score)\n history['lr'] = get_lr(model)\n print(history['epochs'], history['lr'], history['ms'])\n histories.append(history)\n\n\n\n\nhistories = []\nsteps = 0\ntmp = keras.models.load_model('../input/piotte/mpiotte-standard.model')\nmodel.set_weights(tmp.get_weights())\nmodel.summary()\n\n\n\n\ndef prepare_submission(threshold, filename):\n\n vtop = 0\n vhigh = 0\n pos = [0, 0, 0, 0, 0, 0]\n with open(filename, 'wt', newline='\\n') as f:\n f.write('Image,Id\\n')\n for i, p in enumerate(tqdm(submit)):\n t = []\n s = set()\n a = score[i, :]\n for j in list(reversed(np.argsort(a))):\n h = known[j]\n if a[j] < threshold and new_whale not in s:\n pos[len(t)] += 1\n s.add(new_whale)\n t.append(new_whale)\n if len(t) == 5: break;\n for w in h2ws[h]:\n assert w != new_whale\n if w not in s:\n if a[j] > 1.0:\n vtop += 1\n elif a[j] >= threshold:\n vhigh += 1\n s.add(w)\n t.append(w)\n if len(t) == 5: break;\n if len(t) == 5: break;\n if new_whale not in s: pos[5] += 1\n assert len(t) == 5 and len(s) == 5\n f.write(p + ',' + ' '.join(t[:5]) + '\\n')\n return vtop, vhigh, pos\n\ntic = time.time()\nh2ws = {}\nfor p, w in tagged.items():\n if w != new_whale: # Use only identified whales\n h = p2h[p]\n if h not in h2ws: h2ws[h] = []\n if w not in h2ws[h]: h2ws[h].append(w)\nknown = sorted(list(h2ws.keys()))\n\n# Dictionary of picture indices\nh2i = {}\nfor i, h in enumerate(known): h2i[h] = i\n\n# Evaluate the model.\nfknown = branch_model.predict_generator(FeatureGen(known), max_queue_size=20, workers=10, verbose=0)\nfsubmit = branch_model.predict_generator(FeatureGen(submit), max_queue_size=20, workers=10, verbose=0)\nscore = head_model.predict_generator(ScoreGen(fknown, fsubmit), max_queue_size=20, workers=10, verbose=0)\nscore = score_reshape(score, fknown, fsubmit)\n\n\nprepare_submission(0.99, 'submission.csv')\ntoc = time.time()\n\n","repo_name":"aorursy/lost-nb","sub_path":"cpe695yimeng_fifth.py","file_name":"cpe695yimeng_fifth.py","file_ext":"py","file_size_in_byte":22032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29139204910","text":"import rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSDurabilityPolicy\nfrom rclpy.qos import QoSHistoryPolicy\nfrom rclpy.qos import QoSProfile\nfrom rclpy.qos import QoSReliabilityPolicy\nfrom rclpy.callback_groups import ReentrantCallbackGroup\nfrom std_msgs.msg import Int16\nimport RPi.GPIO as GPIO\n\nclass FanPwmControl(Node):\n \n def __init__(self,channel):\n super().__init__('FanPwm')\n self.channel = channel\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.channel,GPIO.OUT)\n self.pwm = GPIO.PWM(self.channel,2)\n self.pwm.start(10)\n self.gas_grade = 0\n \n QOS_RKL10V = QoSProfile(\n reliability=QoSReliabilityPolicy.RELIABLE,\n history=QoSHistoryPolicy.KEEP_LAST,\n depth=10,\n durability=QoSDurabilityPolicy.VOLATILE)\n \n self.subscribe_pollu_grade = self.create_subscription(\n Int16,\n 'pollu_grade',\n self.get_pollu_grade,\n QOS_RKL10V\n )\n \n self.subscribe_gas_grade = self.create_subscription(\n Int16,\n 'gas_sensor_pub',\n self.get_gas_grade,\n QOS_RKL10V \n )\n def get_gas_grade(self,msg):\n self.gas_grade = msg.data\n \n def get_pollu_grade(self,msg): \n self.pollu_grade = msg.data\n \n if self.gas_grade == 2:\n self.pwm.ChangeDutyCycle(100)\n self.get_logger().info('유해가스가 감지되어 팬이 터보모드로 작동합니다.')\n elif self.gas_grade < 2:\n \n if self.pollu_grade == 0:\n self.pwm.ChangeDutyCycle(30)\n self.get_logger().info('팬이 저속모드 입니다.')\n elif self.pollu_grade == 1:\n self.pwm.ChangeDutyCycle(50)\n self.get_logger().info('팬이 중속모드 입니다.')\n elif self.pollu_grade == 2:\n self.pwm.ChangeDutyCycle(80)\n self.get_logger().info('팬이 고속모드 입니다.')\n else:\n self.pwm.ChangeDutyCycle(100)\n self.get_logger().info('팬이 터보모드 입니다.') \n \n\n \n\n\ndef main(args=None):\n rclpy.init(args=args)\n try:\n channel = 18\n fan_mode = FanPwmControl(channel)\n try:\n rclpy.spin(fan_mode)\n except KeyboardInterrupt:\n fan_mode.get_logger().info('Keyboard Interrupt (SIGINT)')\n GPIO.cleanup()\n finally:\n fan_mode.destroy_node()\n finally:\n GPIO.cleanup()\n rclpy.shutdown()\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Hyounjun-Oh/Hanium_PURIBOT","sub_path":"src/fan_pwm/fan_pwm/fan_pwm.py","file_name":"fan_pwm.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"19504200397","text":"import os\nfrom time import sleep\nimport requests\nimport re\n\n# readme url\nURL = \"https://dash.readme.com/api/v1/docs\"\n# category id for API reference\nCATEGORY_ID = \"63e4e160c60b2e001dd1cc4e\"\n\n\ndef checkIfDocIsPresent(title, headers):\n\n check_url = URL + \"/\" + title\n response = requests.get(check_url, headers=headers)\n\n if response.status_code == 200:\n return True\n else:\n return False\n\n\ndef publishDoc(title, body, order):\n key = os.environ.get(\"README_API_KEY\")\n payload = {\n \"title\": title,\n \"type\": \"basic\",\n \"body\": body,\n \"category\": CATEGORY_ID,\n \"hidden\": False,\n \"order\": order,\n }\n headers = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\",\n \"authorization\": \"Basic \" + key,\n }\n\n isDocPresent = checkIfDocIsPresent(title, headers)\n if isDocPresent:\n # update doc\n update_url = URL + \"/\" + title # title == slug\n response = requests.put(update_url, json=payload, headers=headers)\n if response.status_code != 200:\n print(response.text)\n else:\n print(\"Updated \", title)\n else:\n # create doc\n response = requests.post(URL, json=payload, headers=headers)\n if response.status_code != 201:\n print(response.text)\n else:\n print(\"Created \", title)\n\n\ndef extract_rpc_commands(rst_content):\n manpages_block = re.search(\n r\"\\.\\. block_start manpages(.*?)\" r\"\\.\\. block_end manpages\",\n rst_content,\n re.DOTALL,\n )\n if manpages_block:\n commands = re.findall(\n r\"\\b([a-zA-Z0-9_-]+)\" r\"\\s+<([^>]+)>\\n\", manpages_block.group(1)\n )\n return commands\n return []\n\n\ndef main():\n # path to the rst file from where we fetch all the RPC commands\n path_to_rst = \"doc/index.rst\"\n with open(path_to_rst, \"r\") as file:\n rst_content = file.read()\n\n commands = extract_rpc_commands(rst_content)\n if commands:\n order = 0\n for name, file in commands:\n print(f\"{name}\\t\\t{file}\")\n with open(\"doc/\" + file) as f:\n body = f.read()\n publishDoc(name, body, order)\n order = order + 1\n sleep(3)\n else:\n print(\"No commands found in the Manpages block.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ElementsProject/lightning","sub_path":".github/scripts/sync-rpc-cmds.py","file_name":"sync-rpc-cmds.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":2670,"dataset":"github-code","pt":"22"} +{"seq_id":"27030707588","text":"from django.core.management.base import BaseCommand, CommandError\nimport scraper\n\nclass Command(BaseCommand):\n args = ''\n help = 'Scraps the data from the url'\n\n def handle(self, *args, **options):\n try:\n scraper.scrap_hindi_songs()\n scraper.scrap_eng_songs()\n except:\n raise CommandError('Oh Shoot')\n\n self.stdout.write('Successfully fetched data')\n","repo_name":"shubhendusaurabh/top10songs","sub_path":"charts/management/commands/runscraper.py","file_name":"runscraper.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25006191988","text":"\"\"\"\r\nCreated by yfDong on 10/7/17.\r\n\"\"\"\r\nimport os\r\nimport math\r\nimport re\r\nimport jieba\r\nimport numpy as np\r\nimport feature_selection\r\n\r\nfrom sklearn.datasets import load_files\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\n\r\ndef translate(bytesstr):\r\n line = bytesstr.strip().decode('gbk', 'ignore') # .decode('utf-8', 'ignore') default Unicode\r\n p2 = re.compile(u'[^\\u4e00-\\u9fa5]') # 中文的编码范围是:\\u4e00到\\u9fa5\r\n zh = \"\".join(p2.split(line)).strip()\r\n zh = \"\".join(zh.split())\r\n outStr = zh # 经过相关处理后得到中文的文本\r\n return outStr\r\n\r\n\r\ndef classifier():\r\n print('Loading dataset, 80% for training, 20% for testing...')\r\n dataset_dir_name = \"F://Projects/PycharmProjects/Naive Bayes classifier/Chinese documents collection/documents collection\"\r\n\r\n movie_reviews = load_files(dataset_dir_name)\r\n #中文分词\r\n movie_reviews.data = [\" \".join(jieba.cut(translate(doc_str), cut_all=False)) for doc_str in movie_reviews.data]\r\n #划分测试集和训练集\r\n doc_str_list_train, doc_str_list_test, doc_class_list_train, doc_class_list_test = \\\r\n train_test_split(movie_reviews.data, movie_reviews.target,\r\n test_size=0.2, random_state=0)\r\n\r\n vectorizer = CountVectorizer()\r\n word_tokenizer = vectorizer.build_tokenizer()\r\n doc_terms_list_train = [word_tokenizer(doc_str) for doc_str in doc_str_list_train]\r\n doc_terms_list_test = [word_tokenizer(doc_str) for doc_str in doc_str_list_test]\r\n # 用互信息提取特征\r\n #feature_selection.feature_selection_MI(doc_terms_list_train, doc_class_list_train, movie_reviews.target_names,150)\r\n\r\n terms_test_list = feature_selection.get_terms_list(doc_terms_list_test)\r\n\r\n #将提取的特征文件以列表形式存储\r\n class_features = []\r\n\r\n f=open('F://Projects/PycharmProjects/Naive Bayes classifier/output/feature_selection.txt','r')\r\n for line in f.readlines():\r\n temp = []\r\n for i in line.split(\" \"):\r\n if i != \"\\n\":\r\n temp.append(i)\r\n class_features.append(temp)\r\n\r\n #朴素贝叶斯\r\n def bayes(doc_test, class_test):\r\n word_num = {}\r\n D = len(doc_terms_list_train)\r\n len1 = len(doc_test)\r\n N = len(movie_reviews.target_names)\r\n y = 0\r\n b=0\r\n for clas in doc_class_list_train:#计算训练语料库中类别y包含的文档总数\r\n if (clas == class_test):\r\n y += 1\r\n a = float(math.log(((y + 1) /( D + N)))) #平滑,拉普拉斯修正\r\n\r\n for i in range(0, len1):\r\n if (doc_test[i] in class_features[class_test]):\r\n word_num[doc_test[i]] = word_num.get(doc_test[i],0) + 1\r\n\r\n\r\n for i in class_features[class_test]:\r\n b += float(math.log((word_num.get(i,0) + 1)/(y + 12650))) #平滑,拉普拉斯修正\r\n return a+b\r\n\r\n i = 0\r\n m = 0\r\n\r\n for doc in doc_terms_list_test:\r\n tem = 0\r\n max = bayes(doc, 0)\r\n\r\n for k in range(1,len(movie_reviews.target_names)):\r\n if(max < bayes(doc,k)):\r\n max = bayes(doc,k)\r\n tem = k\r\n\r\n if(doc_class_list_test[i] == tem):\r\n m += 1\r\n print(movie_reviews.target_names[doc_class_list_test[i]], movie_reviews.target_names[tem])\r\n i += 1\r\n\r\n print(float(m/len(doc_terms_list_test)))\r\n\r\nclassifier()","repo_name":"DongYunfeng/Navie_Bayes_Classifier","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15104877732","text":"import torch\nimport chromadb\nfrom uuid import uuid4 as uuid\nimport streamlit as st\nfrom pypdf import PdfReader\nfrom ctransformers import AutoModelForCausalLM\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nmodel_dir = 'models/'\nmodel_name = \"ggml-model-gpt4all-falcon-q4_0.bin\"\n\nqa_model_name = \"google/flan-t5-base\"\n\n# Create a database connection\n@st.cache_resource\ndef db():\n return chromadb.Client()\n\ndatabase = db()\n\n# Create a collection\n@st.cache_resource\ndef get_collection():\n return database.create_collection(\"pdfs\")\n\ncollection = get_collection()\n\n# Create the generation model\n@st.cache_resource\ndef get_gen_model():\n return AutoModelForCausalLM.from_pretrained(\n model_path_or_repo_id=f\"{model_dir}{model_name}\", model_type=\"falcon\"\n )\n\ngen_model = get_gen_model()\n\n# Create the qa model\n@st.cache_resource\ndef get_qa_model():\n return T5ForConditionalGeneration.from_pretrained(qa_model_name, device_map=\"auto\")\n\nqa_model = get_qa_model()\n\n\n# Create the qa tokenizer\n@st.cache_resource\ndef get_qa_tokenizer():\n return T5Tokenizer.from_pretrained(qa_model_name, legacy=False)\n\nqa_tokenizer = get_qa_tokenizer()\n\n\n# Chunk the text\ndef get_chunks(seq, size, overlap):\n if size < 1 or overlap < 0:\n raise ValueError('size must be >= 1 and overlap >= 0')\n\n for i in range(0, len(seq) - overlap, size - overlap):\n yield seq[i:i + size]\n\n\n# Split the text into paragraphs\ndef to_paragraphs(text):\n return list(filter(lambda x : x != '', text.split('\\n\\n')))\n\n\n# Add a pdf to Chroma\n@st.cache_resource\ndef add_pdf(pdf_file):\n pdf = PdfReader(pdf_file)\n\n for page in pdf.pages:\n\n # Create chunks of the pdf on paragraph to keep a maximum of context\n paragraphs = to_paragraphs(page.extract_text())\n\n # if the paragraph is too long, split it into chunks\n chunks = []\n for paragraph in paragraphs:\n chunks.extend(list(get_chunks(paragraph, 1000, 100)))\n\n for chunk in chunks:\n\n # skip the small chunks with less than 10 characters\n # this is to avoid adding data with no context\n if len(chunk) < 10:\n continue\n\n # Add the chunks to the db\n collection.add(documents=[chunk], metadatas=[{\"source\": pdf_file.name, \"page\": page.page_number}], ids=[str(uuid())])\n\n\n# Generate the response\n@st.cache_data\ndef generate(query, documents):\n global qa_model, qa_tokenizer, gen_model\n\n # Create an empty container for the response stream\n container = st.empty()\n\n # Use the qa model to extract the answer from the documents\n qa_prompt = f\"Answer the following question: '{query}' with the context: {documents}\"\n qa_inputs = qa_tokenizer(qa_prompt, return_tensors=\"pt\").input_ids.to(device)\n\n qa_generation = qa_model.generate(qa_inputs, max_new_tokens=300)\n qa_result = qa_tokenizer.decode(qa_generation[0], skip_special_tokens=True)\n\n # Use the generation model to generate the response\n gen_prompt = f\"Generate an answer to the question: '{query}' by using the response: '{qa_result}'\"\n text = \"\"\n\n print(gen_prompt)\n\n for token in gen_model(gen_prompt, max_new_tokens=600, stream=True):\n container.empty()\n text += token\n container.write(text)\n\n\ndef main():\n global collection\n\n st.title(\"Pdf QA\")\n\n # Load a PDF\n pdf_file = st.file_uploader(\"Upload a PDF\")\n\n # Add the PDF to the collection\n if pdf_file:\n add_pdf(pdf_file)\n\n query = st.text_input(\"Ask a question\")\n\n if st.button(\"Ask\") and query != \"\":\n\n # Query the database for results\n result = collection.query(query_texts=query, n_results=2)\n\n if result is None:\n st.write(\"No results found\")\n return\n\n # Generate the response\n generate(query, result['documents'])\n\n st.write(\"Sources:\")\n for i in range(2):\n with st.expander(f\"Source {i}\"):\n\n st.write(f\"Source: {result['metadatas'][0][i]['source']}, Page: {result['metadatas'][0][i]['page']}\")\n st.write(\"Text:\")\n st.write(result[\"documents\"][0][i])\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"Romainlg29/llms","sub_path":"question-answering/pdf-qa-with-t5-and-falcon/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30659832151","text":"#######################################\r\n# IMPORTS #\r\n#######################################\r\nimport os\r\nimport random\r\nfrom time import monotonic\r\nimport gc\r\n\r\n# import adafruit_imageload\r\nimport analogio\r\nimport audiocore\r\nimport audiopwmio\r\nimport board\r\nimport displayio\r\nimport storage\r\nimport sdcardio\r\n\r\nfrom adafruit_crickit import crickit\r\nfrom adafruit_ssd1351 import SSD1351\r\n\r\n\r\n#######################################\r\n# INIT #\r\n#######################################\r\n## Seesaw\r\nss = crickit.seesaw\r\n\r\n## Servos\r\ncrickit.servo_1.set_pulse_width_range(min_pulse=500, max_pulse=2200)\r\ncrickit.servo_2.set_pulse_width_range(min_pulse=500, max_pulse=2200)\r\nDELAY = 0.006\r\ncrickit.servo_1.angle = 70 # UP - DOWN\r\ncrickit.servo_2.angle = 90 # LEFT - RIGHT\r\n\r\n## Release any resources currently in use for the displays\r\ndisplayio.release_displays()\r\n\r\n## SPI connection\r\nspi = board.SPI()\r\ntft_cs = board.D9\r\ntft_dc = board.D5\r\nrst = board.D6\r\n# sd_cs = DigitalInOut(board.D10)\r\n\r\n## Random\r\nseed = analogio.AnalogIn(board.A1)\r\nrandom.seed(seed.value)\r\nseed.deinit()\r\n\r\n## Display\r\ndisplay_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs,\r\n reset=rst, baudrate=16000000)\r\ndisplay = SSD1351(display_bus, width=128, height=128, rotation=180)\r\n\r\n## SD card\r\nsdcard = sdcardio.SDCard(spi, board.D10)\r\nvfs = storage.VfsFat(sdcard)\r\nstorage.mount(vfs, '/sd')\r\n\r\n## Audio output\r\ngc_audio = audiopwmio.PWMAudioOut(board.A0)\r\naudio_file = None\r\n\r\n## Motor\r\nmotor1 = crickit.dc_motor_1\r\nmotor2 = crickit.dc_motor_2\r\n\r\n## Buttons\r\nBUTTON_1 = crickit.SIGNAL1\r\nBUTTON_2 = crickit.SIGNAL2\r\nss.pin_mode(BUTTON_1, ss.INPUT_PULLUP)\r\nss.pin_mode(BUTTON_2, ss.INPUT_PULLUP)\r\n\r\n\r\n#######################################\r\n# FUNCTIONS #\r\n#######################################\r\ndef wait(wait):\r\n now = monotonic()\r\n while (monotonic() - now) < wait :\r\n pass\r\n\r\n\r\ndef attitude(image) :\r\n # bitmap, palette = adafruit_imageload.load(f\"/sd/images/{image}.bmp\",\r\n # bitmap=displayio.Bitmap,\r\n # palette=displayio.Palette)\r\n # tile_grid = displayio.TileGrid(bitmap, pixel_shader=palette)\r\n bitmap = displayio.OnDiskBitmap(f\"/sd/images/{image}.bmp\")\r\n tile_grid = displayio.TileGrid(bitmap, pixel_shader=bitmap.pixel_shader)\r\n group = displayio.Group()\r\n group.append(tile_grid)\r\n display.show(group)\r\n gc.collect()\r\n\r\ndef color(color = 0x000000) :\r\n color_bitmap = displayio.Bitmap(128, 128, 1)\r\n color_palette = displayio.Palette(1)\r\n color_palette[0] = color\r\n\r\n tile_grid = displayio.TileGrid(color_bitmap,\r\n pixel_shader=color_palette,\r\n x=0, y=0)\r\n group = displayio.Group()\r\n group.append(tile_grid)\r\n display.show(group)\r\n\r\ndef play_file(filename):\r\n audio_path = \"/sd/sounds/{}\".format(filename)\r\n files = [file for file in os.listdir(audio_path)]\r\n global audio_file # pylint: disable=global-statement\r\n\r\n if gc_audio.playing:\r\n gc_audio.stop()\r\n if audio_file:\r\n audio_file.close()\r\n\r\n audio_file = open(\"{}/{}\".format(audio_path, random.choice(files)), \"rb\")\r\n wav = audiocore.WaveFile(audio_file)\r\n gc_audio.play(wav)\r\n while gc_audio.playing:\r\n pass\r\n gc_audio.stop()\r\n audio_file.close()\r\n gc.collect()\r\n\r\ndef control(servo, start, end, delay=DELAY, increment=1):\r\n if int(end) < int(start) :\r\n increment = -increment\r\n for angle in range(int(start), int(end), increment): # min to max degrees\r\n servo.angle = angle\r\n wait(delay)\r\n\r\n#######################################\r\n# BEHAVIOR #\r\n#######################################\r\ndef hello(): \r\n attitude(\"nice\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n attitude(\"nice\")\r\n play_file(\"hello\")\r\n attitude(\"happy\")\r\n wait(1)\r\n control(crickit.servo_1, crickit.servo_1.angle, 70, DELAY)\r\n\r\ndef grumpy():\r\n attitude(\"what\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n\r\n play_file(\"hey\")\r\n attitude(\"doubt\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n wait(1)\r\n control(crickit.servo_2, crickit.servo_2.angle, 50, DELAY)\r\n play_file(\"ah\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 70, DELAY)\r\n wait(1)\r\n control(crickit.servo_2, crickit.servo_2.angle, 90, DELAY)\r\n\r\ndef love():\r\n attitude(\"happy\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n play_file(\"heyho\")\r\n wait(0.5)\r\n attitude(\"love\")\r\n play_file(\"hooo\")\r\n wait(1)\r\n attitude(\"happy\")\r\n play_file(\"haha\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 70, DELAY)\r\n\r\ndef dead(): \r\n attitude(\"dead\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n play_file(\"ok\")\r\n wait(1)\r\n control(crickit.servo_1, crickit.servo_1.angle, 70, DELAY)\r\n\r\ndef wtf(): \r\n attitude(\"small\")\r\n control(crickit.servo_1, crickit.servo_1.angle, 40, DELAY)\r\n play_file(\"wow\")\r\n wait(1)\r\n control(crickit.servo_1, crickit.servo_1.angle, 70, DELAY)\r\n\r\ndef hide():\r\n wait(2)\r\n attitude(\"small\")\r\n wait(1)\r\n attitude(\"ninja\")\r\n play_file(\"hum\")\r\n wait(1)\r\n motor1.throttle = -1\r\n motor2.throttle = 1\r\n wait(0.5)\r\n motor1.throttle = 0\r\n motor2.throttle = 0\r\n wait(0.5)\r\n play_file(\"hum\")\r\n motor1.throttle = 1\r\n motor2.throttle = -1\r\n wait(0.5)\r\n motor1.throttle = 0\r\n motor2.throttle = 0\r\n\r\ndef forward():\r\n motor1.throttle = -1\r\n motor2.throttle = 1\r\n wait(0.5)\r\n stop()\r\n\r\ndef backward():\r\n motor1.throttle = 1\r\n motor2.throttle = -1\r\n wait(0.5)\r\n stop()\r\n\r\ndef stop():\r\n motor1.throttle = 0\r\n motor2.throttle = 0\r\n\r\ndef idle():\r\n attitude(\"open\")\r\n ","repo_name":"PictorSomni/Wilson","sub_path":"wilson.py","file_name":"wilson.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"39062423746","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nTime : 2021/2/15 13:13\r\nAuthor : snail\r\nSoftware :PyCharm\r\nE-mail : zh1289732630@gmail.com\r\nCSDN bolg : https://blog.csdn.net/snail9610\r\n\"\"\"\r\n\"\"\"\r\n逐字法、排序法、蛮力法、计数法\r\n\"\"\"\r\n\r\n\r\ndef anagramSepar(s1, s2):\r\n \"\"\"使用逐字法检测异序词\"\"\"\r\n alist2 = list(s2)\r\n pos1 = 0\r\n stillOK = True\r\n while stillOK and pos1 < len(s1):\r\n found = False\r\n pos2 = 0 # 每次s2从头开始\r\n while pos2 < len(alist2) and not found:\r\n \"\"\"寻找相同的字母\"\"\"\r\n if s1[pos1] == alist2[pos2]:\r\n found = True # 发现相同的字母,则退出循环\r\n else:\r\n pos2 += 1 # 没有发现,则检测s2下一个字母\r\n\r\n # 发现后标记为None,循环继续;否则返回False,程序结束\r\n if found:\r\n alist2[pos2] = None\r\n else:\r\n stillOK = False\r\n\r\n pos1 += 1\r\n\r\n return stillOK\r\n\r\n\r\ndef anagramSort(s1, s2):\r\n \"\"\"使用排序法检测异序词\"\"\"\r\n alist1 = list(s1)\r\n alist2 = list(s2)\r\n\r\n alist1.sort()\r\n alist2.sort()\r\n pos = 0\r\n stillOK = True\r\n while pos < len(alist1) and stillOK:\r\n if alist1[pos] == alist2[pos]:\r\n stillOK = True\r\n pos += 1\r\n else:\r\n stillOK = False\r\n\r\n return stillOK\r\n\r\n\r\ndef anagramCount(s1, s2):\r\n \"\"\"使用计数法\"\"\"\r\n count1 = [0]*26\r\n count2 = [0]*26\r\n\r\n for i in range(len(s1)):\r\n \"\"\"统计s1的字母\"\"\"\r\n pos = ord(s1[i]) - ord('a')\r\n count1[pos] += 1\r\n\r\n for i in range(len(s2)):\r\n \"\"\"统计s2的字母\"\"\"\r\n pos = ord(s2[i]) - ord('a')\r\n count2[pos] += 1\r\n\r\n pos = 0\r\n stillOK = True\r\n while pos < 26 and stillOK:\r\n \"\"\"匹配\"\"\"\r\n if count1[pos] == count2[pos]:\r\n pos += 1\r\n else:\r\n stillOK = False\r\n\r\n return stillOK\r\n\r\nif __name__ == '__main__':\r\n s1 = 'python'\r\n s2 = 'typhon'\r\n s3 = 'abc'\r\n s4 = 'def'\r\n\r\n print(anagramCount(s1, s2))\r\n print(anagramCount(s3, s4))","repo_name":"snail9611/Data-Structures-and-Algorithms","sub_path":"01 异序词检测.py","file_name":"01 异序词检测.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36274873759","text":"import base64\nimport wave\n\nimport gevent\nimport socketio\nfrom locust import HttpUser\n\n\nclass SpeechTranslationUser(HttpUser):\n # Backend service to connect to\n host = \"http://localhost:8000\"\n\n abstract = True\n\n def on_start(self):\n super().on_start()\n # Initialize websocket\n self.socket = socketio.Client(ssl_verify=False)\n self.socket.connect(SpeechTranslationUser.host)\n self.tasks_completed = 0\n\n def on_stop(self):\n super().on_stop()\n self.tasks_completed = 0\n gevent.sleep(0.5)\n self.socket.disconnect()\n\n def send_wav(self, wav_path, user_id, room_id):\n chunk_size = 1600\n sample_rate_hertz = 16000\n\n with wave.open(wav_path) as wav_file:\n i = 0\n\n while i < wav_file.getnframes():\n chunk = wav_file.readframes(chunk_size)\n data = base64.b64encode(chunk)\n\n self.socket.emit(\n \"/audio/stream\",\n {\"userId\": user_id, \"roomId\": room_id, \"data\": data},\n )\n i += chunk_size\n gevent.sleep(chunk_size / sample_rate_hertz)\n","repo_name":"didi/MeetDot","sub_path":"backend/tests/locust/users/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"31076887233","text":"array = [1, 2, 5, 3, 4]\n\n# Example 1 - O(log n) - Binary Search\ndef binarySearch(list, item):\n first = 0\n last = len(list) - 1\n found = False\n\n while first <= last and not found: # O(n)\n midpoint = (first + last) // 2 # O(log n)\n if list[midpoint] == item: # O(1)\n found = True\n else: # O(1)\n if item < list[midpoint]: # O(1)\n last = midpoint-1\n else: # O(1)\n first = midpoint+1\n return found\n\n# Example 2 - O(1)\ndef findItem():\n print('Hi Lorena') # O(1)\n if(1 == array[0]): # O(1)\n return 1\n\n# Example 3 - O(n)\ndef linear(n):\n for iterator in range(n): # O(n)\n print(iterator)\n\n# Example 4 - O(n²)\ndef quadratic():\n for i in n: # O(n)\n for j in n: # O(n)\n print(i, j)\n print('---') \n\n# Example 5 - O(n!)\ndef fibonacci_of(n):\n if n in {0, 1}: # O(1)\n return n\n return fibonacci_of(n - 1) + fibonacci_of(n - 2) # O (n!)\n\n\n# Example 6 - Combination O(n)\n# O(1) + O(5) + O(n) + O(n) + O(3)\n# O(9) + O(2n) -> O(n)\n\ndef combination(n):\n # O(1)\n print(n[0])\n\n # O(5)\n for i in range(5):\n print('test ', i)\n\n # O(n)\n for i in n:\n print(i)\n\n # O(n)\n for i in n:\n print(i)\n\n # O(3)\n print('Python')\n print('Python')\n print('Python')","repo_name":"RodrigoRVSN/buda","sub_path":"docs/data_structure/big-o/big-o-notation.py","file_name":"big-o-notation.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"7576759402","text":"def fatorial(n, show=False):\n \"\"\"\n -> Calcula o fatorial de um número.\n :param n: o número a ser calculado\n :param show: (opcional) mostrar ou não a conta.\n :return: o valor do fatorial de um numero n.\n \"\"\"\n lst = list(range(1, n))\n lst.reverse()\n tmp = n\n for numb in lst:\n tmp = tmp * numb\n if show:\n print(f'{n}', end='')\n for numb in lst:\n print(f' x {numb}', end='')\n print(f' = {tmp}')\n else:\n return tmp\n\n\nfatorial(7, True)\nhelp(fatorial)\n","repo_name":"Alex4gtx/estudos","sub_path":"python/cursoemvideo-python/03-mundo-3/funções/função para fatorial.py","file_name":"função para fatorial.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"17620242841","text":"#!/usr/bin/python3\n\"\"\"define a class Rectangle\"\"\"\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\" class Square \"\"\"\n\n def __init__(self, size, x=0, y=0, id=None):\n \"\"\" construct \"\"\"\n super().__init__(size, size, x, y, id)\n\n @property\n def size(self):\n \"\"\" size \"\"\"\n return self.width\n\n @size.setter\n def size(self, val):\n \"\"\" setter \"\"\"\n self.width = val\n self.height = val\n\n def __str__(self):\n \"\"\" Print the method \"\"\"\n st = \"[Square] ({:d}) {:d}/{:d} - {:d}\"\n st = st.format(self.id, self.x, self.y, self.width)\n return st\n\n def update(self, *args, **kwargs):\n \"\"\" Updating the Attributes \"\"\"\n atr = ['id', 'size', 'x', 'y']\n if args and 0 < len(args) <= 4:\n for i, arg in enumerate(args):\n if i == 0:\n super().update(arg)\n else:\n self.__setattr__(atr[i], arg)\n elif kwargs and 0 < len(kwargs) <= 4:\n for k, v in kwargs.items():\n if k == 'id':\n super().update(id=v)\n elif k in atr:\n self.__setattr__(k, v)\n\n def to_dictionary(self):\n \"\"\" Return Repr \"\"\"\n return {'id': self.id,\n 'size': self.width,\n 'x': self.x,\n 'y': self.y}\n","repo_name":"danielj32/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39578320193","text":"class Node:\n def __init__ (self,key):\n self.left=None\n self.right=None\n self.data=key\n\ndef C(root):\n if root is None:\n return 0\n if root.left is None and root.right is None:\n return 1\n else:\n return C(root.left)+C(root.right)\n \n \nroot=Node(1)\nroot.left=Node(2)\nroot.right=Node(3)\nroot.left.right=Node(4)\nroot.right.right=Node(5)\nroot.right.left=Node(7)\nprint(C(root))\n","repo_name":"mkhanw01/DataStructure","sub_path":"tree/count_leaf.py","file_name":"count_leaf.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7027388685","text":"#!/bin/python\n\nfrom constans import *\n\n\ndef is_integer(n):\n try:\n int(n, 0)\n return True\n except ValueError:\n return False\n\n\ncommands = [['halt', 0, 0],\n ['add', 2, 2],\n ['sub', 2, 2],\n ['mul', 2, 2],\n ['div', 2, 2],\n ['mod', 2, 2],\n ['mov', 2, 2],\n ['sv', 2, 1],\n ['ld', 2, 1],\n ['test', 1, 0],\n ['jmp', 1, 1],\n ['jz', 1, 1],\n ['jn', 1, 1],\n ['jc', 1, 1],\n ['jv', 1, 1],\n ['clz', 0, 0],\n ['cln', 0, 0],\n ['clv', 0, 0],\n ['clc', 0, 0],\n ['push', 1, 0],\n ['pop', 1, 0],\n ['call', 1, 1],\n ['ret', 0, 0]]\n\n\ndef compile_error(line_num, message, code):\n print(f'Строка: {line_num}: {message}')\n exit(code)\n\n\ndef from_str_to_reg_or_op_arg(str: str, num):\n if str == 'r1':\n return 0, 1\n if str == 'r2':\n return 0, 2\n if str == 'r3':\n return 0, 3\n if str == 'r4':\n return 0, 4\n\n if str[0] == 'o':\n if not is_integer(str[1:]):\n compile_error(num, 'Ошибка, операнд не число', 5)\n return 1, int(str[1:], 0)\n\n if not is_integer(str):\n compile_error(num, 'Адресс не число', 6)\n\n return 2, int(str, 0)\n\n\ndef from_str_to_op_arg(str: str, num):\n if str[0] == 'o':\n if not is_integer(str[1:]):\n compile_error(num, 'Ошибка, операнд не число', 5)\n return 1, int(str[1:], 0)\n\n if not is_integer(str):\n compile_error(num, 'Адресс не число', 6)\n\n return 2, int(str, 0)\n\n\ndef from_str_to_reg_arg(str: str, num):\n if str == 'r1':\n return 0, 1\n if str == 'r2':\n return 0, 2\n if str == 'r3':\n return 0, 3\n if str == 'r4':\n return 0, 4\n\n compile_error(num, 'Указан не регистр', 7)\n\n\ndef from_str_to_addr_arg(str: str, labels, num):\n if is_integer(str):\n return int(str, 0)\n\n for lab in labels:\n if lab[0] == str:\n return lab[1]\n\n compile_error(num, 'Ошибочный адрес или метка', 8)\n\n\ndef compile(input_file: str, output_file: str):\n lines = []\n\n with open(input_file, 'r') as inp_f:\n for line in inp_f.readlines():\n lines.append(line)\n\n line_without_comments = []\n for line in lines:\n line_without_comments.append(line.split(';')[0])\n\n counted_lines = []\n for n in range(len(line_without_comments)):\n counted_lines.append([n + 1, line_without_comments[n].strip()])\n\n labels = []\n coms = []\n current_addr = 0\n for num, str in counted_lines:\n splited_str = str.split()\n for n in range(len(splited_str)):\n if splited_str[n][-1] == ':':\n if len(splited_str[n]) == 1:\n if n == 0:\n compile_error(num, 'Пропущено название метки', 1)\n\n labels.append([splited_str[n - 1], current_addr])\n else:\n labels.append([splited_str[n][0:-1], current_addr])\n\n for com in commands:\n if splited_str[n].lower() == com[0]:\n if len(splited_str) <= n + com[1]:\n compile_error(num, 'Недостаточно аргументов', 2)\n\n if com[1] == 0:\n coms.append([num, current_addr, com[0]])\n current_addr += 1\n elif com[1] == 1:\n coms.append([num, current_addr, com[0], splited_str[n + 1]])\n current_addr += 1 + com[2]\n elif com[1] == 2:\n coms.append([num, current_addr, com[0], splited_str[n + 1], splited_str[n + 2]])\n current_addr += 1\n current_addr += 1 if from_str_to_reg_or_op_arg(splited_str[n + 1], num)[0] != 0 else 0\n current_addr += 1 if from_str_to_reg_or_op_arg(splited_str[n + 2], num)[0] != 0 else 0\n break\n\n if splited_str[n].lower() == 'org':\n if len(splited_str) <= n + 1:\n compile_error(num, 'ОRG неуказан адресс', 3)\n\n if not is_integer(splited_str[n + 1]):\n compile_error(num, 'адресс не номер', 4)\n\n current_addr = int(splited_str[n + 1], 0)\n\n mem = [0 for i in range(MEMORY_SIZE)]\n\n for com in coms:\n if com[2] == 'halt':\n mem[com[1]] = 0\n elif com[2] == 'add':\n mem[com[1]] = 1 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_or_op_arg(com[4], com[0])\n if code == 0:\n mem[com[1]] |= data\n elif code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n elif code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n\n elif com[2] == 'sub':\n mem[com[1]] = 2 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_or_op_arg(com[4], com[0])\n if code == 0:\n mem[com[1]] |= data\n elif code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n elif code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n elif com[2] == 'mul':\n mem[com[1]] = 3 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_or_op_arg(com[4], com[0])\n if code == 0:\n mem[com[1]] |= data\n elif code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n elif code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n elif com[2] == 'div':\n mem[com[1]] = 4 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_or_op_arg(com[4], com[0])\n if code == 0:\n mem[com[1]] |= data\n elif code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n elif code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n elif com[2] == 'mod':\n mem[com[1]] = 5 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_or_op_arg(com[4], com[0])\n if code == 0:\n mem[com[1]] |= data\n elif code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n elif code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n elif com[2] == 'mov':\n mem[com[1]] = 6 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_reg_arg(com[4], com[0])\n mem[com[1]] |= data\n elif com[2] == 'sv':\n mem[com[1]] = 7 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data\n data = from_str_to_addr_arg(com[4], [], com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'ld':\n mem[com[1]] = 8 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n code, data = from_str_to_op_arg(com[4], com[0])\n if code == 1:\n mem[com[1]] |= 5\n mem[com[1] + 1] = data\n if code == 2:\n mem[com[1]] |= 6\n mem[com[1] + 1] = data\n elif com[2] == 'test':\n mem[com[1]] = 9 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data\n elif com[2] == 'jmp':\n mem[com[1]] = 10 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'jz':\n mem[com[1]] = 11 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'jn':\n mem[com[1]] = 12 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'jc':\n mem[com[1]] = 13 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'jv':\n mem[com[1]] = 14 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'clz':\n mem[com[1]] = 15 << 16\n elif com[2] == 'cln':\n mem[com[1]] = 16 << 16\n elif com[2] == 'clv':\n mem[com[1]] = 17 << 16\n elif com[2] == 'clc':\n mem[com[1]] = 18 << 16\n elif com[2] == 'push':\n mem[com[1]] = 20 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data\n elif com[2] == 'pop':\n mem[com[1]] = 21 << 16\n code, data = from_str_to_reg_arg(com[3], com[0])\n mem[com[1]] |= data << 8\n elif com[2] == 'call':\n mem[com[1]] = 22 << 16\n data = from_str_to_addr_arg(com[3], labels, com[0])\n mem[com[1] + 1] = data\n elif com[2] == 'ret':\n mem[com[1]] = 23 << 16\n\n with open(output_file, 'wb') as f:\n for i in mem:\n byt = [0, 0, 0, 0]\n byt[0] = (i >> 24) & 0xFF\n byt[1] = (i >> 16) & 0xFF\n byt[2] = (i >> 8) & 0xFF\n byt[3] = i & 0xFF\n f.write(bytes(byt))\n\n\nif __name__ == '__main__':\n import argparse\n\n argParser = argparse.ArgumentParser()\n argParser.add_argument(\"source\", help=\"source asm file\")\n argParser.add_argument(\"compiled\", help=\"result file\")\n\n args = argParser.parse_args()\n\n compile(args.source, args.compiled)\n","repo_name":"YOILLO/CSA_lab3","sub_path":"compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":10407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21396244180","text":"'''\r\nThe Following Program we will make the guess paradox (Monty Hall) \r\nAuthor: ADITYA JAMWAL\r\n'''\r\n\r\nimport random, sys\r\n\r\nALL_CLOSED = \"\"\"\r\n+------+ +------+ +------+\r\n| | | | | |\r\n| 1 | | 2 | | 3 |\r\n| | | | | |\r\n| | | | | |\r\n| | | | | |\r\n+------+ +------+ +------+\"\"\"\r\n\r\nFIRST_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| (( | | | | |\r\n| oo | | 2 | | 3 |\r\n| /_/|_| | | | |\r\n| | | | | | |\r\n|GOAT||| | | | |\r\n+------+ +------+ +------+\"\"\"\r\n\r\nSECOND_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| | | (( | | |\r\n| 1 | | oo | | 3 |\r\n| | | /_/|_| | |\r\n| | | | | | |\r\n| | |GOAT||| | |\r\n+------+ +------+ +------+\"\"\"\r\n\r\nTHIRD_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| | | | | (( |\r\n| 1 | | 2 | | oo |\r\n| | | | | /_/|_|\r\n| | | | | | |\r\n| | | | |GOAT|||\r\n+------+ +------+ +------+\"\"\"\r\n\r\nFIRST_CAR_OTHERS_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| CAR! | | (( | | (( |\r\n| __| | oo | | oo |\r\n| _/ | | /_/|_| | /_/|_|\r\n| /_ __| | | | | | |\r\n| O | |GOAT||| |GOAT|||\r\n+------+ +------+ +------+\"\"\"\r\n\r\nSECOND_CAR_OTHERS_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| (( | | CAR! | | (( |\r\n| oo | | __| | oo |\r\n| /_/|_| | _/ | | /_/|_|\r\n| | | | /_ __| | | |\r\n|GOAT||| | O | |GOAT|||\r\n+------+ +------+ +------+\"\"\"\r\n\r\nTHIRD_CAR_OTHERS_GOAT = \"\"\"\r\n+------+ +------+ +------+\r\n| (( | | (( | | CAR! |\r\n| oo | | oo | | __|\r\n| /_/|_| | /_/|_| | _/ |\r\n| | | | | | | /_ __|\r\n|GOAT||| |GOAT||| | O |\r\n+------+ +------+ +------+\"\"\"\r\n\r\nswapwin=0\r\nswaplose=0\r\nogwin=0\r\noglose=0\r\n\r\nwhile True:\r\n doorwithcar=random.randint(1,3)\r\n print(ALL_CLOSED)\r\n while True:\r\n userinput=int(input(\"\\n Pick your door\"))\r\n if userinput in range(1,4):\r\n break\r\n elif userinput==0:\r\n print(\"\\n Thanks for playing\")\r\n sys.exit()\r\n else:\r\n print(\"\\n Enter Valid Input\")\r\n\r\n if doorwithcar==1 and userinput==1:\r\n doorshown=random.choice([2,3])\r\n if doorshown==2:\r\n print(SECOND_GOAT)\r\n else:\r\n print(THIRD_GOAT)\r\n elif doorwithcar==2 and userinput==1:\r\n doorshown=3\r\n print(THIRD_GOAT)\r\n elif doorwithcar==3 and userinput==1:\r\n doorshown=2\r\n print(SECOND_GOAT)\r\n elif doorwithcar==1 and userinput==2:\r\n doorshown=3\r\n print(THIRD_GOAT)\r\n elif doorwithcar==2 and userinput==2:\r\n doorshown=random.choice([1,3])\r\n if doorshown==1:\r\n print(FIRST_GOAT)\r\n else:\r\n print(THIRD_GOAT)\r\n elif doorwithcar==3 and userinput==2:\r\n doorshown=1\r\n print(FIRST_GOAT)\r\n elif doorwithcar==1 and userinput==3:\r\n doorshown=2\r\n print(SECOND_GOAT)\r\n elif doorwithcar==2 and userinput==3:\r\n doorshown=1\r\n print(FIRST_GOAT)\r\n elif doorwithcar==3 and userinput==3:\r\n doorshown=random.choice([1,2])\r\n if doorshown==1:\r\n print(FIRST_GOAT)\r\n else:\r\n print(SECOND_GOAT)\r\n else:\r\n print(\"Error\")\r\n\r\n\r\n swap=input(\"\\n Want to Swap? (Y/N) \")\r\n if swap.upper()=='Y':\r\n if userinput==1 and doorshown==2:\r\n userinput=3\r\n elif userinput==1 and doorshown==3:\r\n userinput=2\r\n elif userinput==2 and doorshown==1:\r\n userinput=3\r\n elif userinput==2 and doorshown==3:\r\n userinput=1\r\n elif userinput==3 and doorshown==1:\r\n userinput=2\r\n elif userinput==3 and doorshown==2:\r\n userinput=1\r\n else:\r\n print(\"\\n Error\")\r\n\r\n \r\n # DOOR WITH CAR \r\n\r\n if doorwithcar==1:\r\n print(FIRST_CAR_OTHERS_GOAT)\r\n elif doorwithcar==2:\r\n print(SECOND_CAR_OTHERS_GOAT)\r\n else:\r\n print(THIRD_CAR_OTHERS_GOAT)\r\n\r\n\r\n # SCORING LADDER\r\n\r\n if userinput==doorwithcar and swap.upper()=='Y':\r\n swapwin+=1\r\n print(\"\\n COngrats, You won!\")\r\n elif userinput==doorwithcar and swap.upper()=='N':\r\n ogwin+=1\r\n print(\"\\n Congrats, You won!\")\r\n elif userinput==doorwithcar and swap.upper()=='Y':\r\n swaplose+=1\r\n print(\"\\n Better luck next time\")\r\n else:\r\n oglose+=1\r\n print(\"\\n Better luck next time\")\r\n\r\n # GAME\r\n\r\n print(\"\"\" \r\n Wins after Swapping: {}\r\n Wins without Swapping: {}\r\n Losses after Swapping: {}\r\n Losses without Swapping: {}\"\"\".format(swapwin,ogwin,swaplose,oglose))\r\n\r\n successrateswap=0\r\n sucessrateog=0\r\n try:\r\n successrateswap=round((swapwin/swaplose+swapwin)*100,1)\r\n except ZeroDivisionError:\r\n successrateswap=0\r\n try:\r\n successrateog=round((ogwin/oglose+ogwin)*100,1)\r\n except:\r\n successrateog=0\r\n\r\n print(\"\"\"\\n Success Rate if you swap: {} Success rate if you dont: {} \"\"\".format(successrateswap, sucessrateog))\r\n\r\n\r\n","repo_name":"adityajamwal02/PythonProjects","sub_path":"Paradox.py","file_name":"Paradox.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"25815720497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 25 11:44:42 2020\n\n@author: Admin\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score,confusion_matrix\n\nTrain_Data= pd.read_csv('CrashTest_TrainData.csv')\nTest_Data=pd.read_csv('CrashTest_TestData.csv')\nTrain_Data.describe()\n\n###Q13 What is the difference between third quartile values of the\n# variable ManBI from Train_Data and Test_Data?\nTrain_Data['ManBI'].describe()\nTest_Data['ManBI'].describe()\n#75% 3.417500\n#75% 2.50000\n# ans=(3.417500-2.50000=0.9175)\n###################################################\n##Q14 How many distinct car types are there in the Train_Data?\npd.crosstab(Train_Data['CarType'],columns= 'count')\n#col_0 count\n#CarType \n#Hatchback 50\n#SUV 30\n#Ans=2\n############################################\n#Q15 How many missing values are there in Train_Data?\nTrain_Data.isnull().sum()\nTest_Data.isnull().sum()\n#Ans=3\n##############################################\n#Q16What is the proportion of car types in the Test_Data?\npd.crosstab(Test_Data['CarType'],columns= 'count')\n#Ans=50-50\n#########################################\n\ntrain_data=Train_Data.dropna(axis=0)\ntrain_x1=train_data.drop(['CarID','CarType'],axis=1,inplace=False)\ntrain_y1=train_data['CarType']\ntrain_y1=train_y1.map({'Hatchback':0,'SUV':1})\n\ntest_data=Test_Data.dropna(axis=0)\ntest_x1=test_data.drop(['CarID','CarType'],axis=1,inplace=False)\ntest_y1=test_data['CarType']\ntest_y1=test_y1.map({'Hatchback':0,'SUV':1})\n\nmodel1=KNeighborsClassifier(n_neighbors=3)\nmodel1_KNN=model1.fit(train_x1,train_y1)\nprediction_model1=model1.predict(test_x1)\naccuracy_score_model1=accuracy_score(test_y1,prediction_model1)\nmisclassified_sample=np.where(prediction_model1 != test_y1)\n\nprint(\"misclassified sample: %d\" %(prediction_model1!=test_y1).sum())\n\n######################33\nmodel2=KNeighborsClassifier(n_neighbors=2)\nmodel2_KNN=model2.fit(train_x1,train_y1)\nprediction_model2=model2.predict(test_x1)\naccuracy_score_model2=accuracy_score(test_y1,prediction_model2)\n\n\n#################################3\nfrom sklearn.linear_model import LogisticRegression\nlgr=LogisticRegression()\nlgr.fit(train_x1,train_y1)\npredict_lgr=lgr.predict(test_x1)\naccuracy_lgr=accuracy_score(test_y1,predict_lgr)\n","repo_name":"notbhuvangab/Programs","sub_path":"Assignment 4.py","file_name":"Assignment 4.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33159178488","text":"import os\nimport numpy as np\nimport distance\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom dateutil import rrule\nfrom datetime import datetime\nimport time\n\n\npred_dir = './mathmatical-expressions-recognition/evaluation/submit_results'\ntarget_file_path = os.path.join('./mathmatical-expressions-recognition/evaluation/submit_results','ground_truth.txt')\n\ntest_file_dir = './images/datasets/test/labels'\ntest_ids_path = './images/datasets/test_ids.txt'\n\nsummary_dir = './mathmatical-expressions-recognition/evaluation/evaluate_summary'\n\ndef main():\n # create target file\n if not os.path.isfile(target_file_path):\n with open(target_file_path,'w') as fout, open(test_ids_path,'r') as fin:\n for one_line in fin.readlines():\n one_ids = int(one_line)\n with open(os.path.join(test_file_dir,str(one_ids)+'.txt'),'r') as f:\n one_label = str(f.readlines()[0]).strip()\n fout.write(one_label+'\\n')\n\n # evaluate each summited files\n summary_dict = {\n '1': # model 1\n { \n # group_id: {}\n }, \n '2': # model 2\n {\n # group_id: {}\n } \n }\n for pred_file in os.listdir(pred_dir):\n if pred_file == 'ground_truth.txt':\n continue\n pred_file_path = os.path.join(pred_dir,pred_file)\n group_id, model_id = pred_file[:-4].split('-')[0], pred_file[:-4].split('-')[1]\n with open(pred_file_path,'r') as f_pred, open(target_file_path,'r') as f_gt:\n pred_lst = []\n gt_lst = []\n pred_lines = f_pred.readlines()\n gt_lines = f_gt.readlines()\n if len(pred_lines)!=len(gt_lines):\n print('Lengths are not equal! len(pred_lines)=%d; len(gt_lines)=%d'%(len(pred_lines),len(gt_lines)))\n # bleu4, Edit_Distance, Exact_Match = 0.0, 0.0, 0.0\n\n for pred_line, gt_line in zip(pred_lines, gt_lines):\n pred_line = pred_line.strip().replace(' ','').replace('\\t','').replace('\\r','').replace('\\n','')\n gt_line = gt_line.strip().replace(' ','').replace('\\t','').replace('\\r','').replace('\\n','')\n if gt_line == 'errormathpix':\n continue\n pred_lst.append(pred_line)\n gt_lst.append(gt_line)\n bleu4, Edit_Distance, Exact_Match = evaluate(pred_lst, gt_lst)\n\n print('%s : bleu4=%.4f; Edit_Distance=%.4f; Exact_Match=%.4f'%(pred_file,bleu4,Edit_Distance,Exact_Match))\n summary_dict[model_id][group_id] = {'bleu_score':bleu4,'edit_distance_score':Edit_Distance,'exact_match':Exact_Match}\n\n # write summary results\n summary_file = os.path.join(summary_dir, '%s.txt'%(time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime())))\n with open(summary_file,'w') as f_summary:\n f_summary.write('='*120+'\\n')\n f_summary.write('{:^200}\\n'.format('Model 1 Leaderboard'))\n f_summary.write('='*120+'\\n')\n f_summary.write('# {:<30}{:<30}{:<30}{:<30}{:<30}{:<30}\\n'.format('Rank','Group_ID','OVERALL_SCORE','(1)BLEU_SCORE','(2)EDIT_DISTANCE_SCORE','(3)Exact_Match'))\n sorted_group_id = sorted([(k,v) for k,v in summary_dict['1'].items()], key=lambda x: (x[1]['bleu_score']+x[1]['edit_distance_score']+x[1]['exact_match'])/3, reverse=True)\n rank_id = 1\n group_cnt = len(sorted_group_id)\n for group_id, scores_dict in sorted_group_id:\n f_summary.write('# {:<35}{:<35}{:<39}{:<42}{:<39}{:<33}\\n'.format(\n '%2s/%2s'%(rank_id,group_cnt),\n '%2s'%(group_id),\n '%7.4f'%((scores_dict['bleu_score']+scores_dict['edit_distance_score']+scores_dict['exact_match'])/3),\n '%7.4f'%(scores_dict['bleu_score']),\n '%7.4f'%(scores_dict['edit_distance_score']),\n '%7.4f'%(scores_dict['exact_match'])\n )\n )\n rank_id+=1\n f_summary.write('='*120+'\\n')\n f_summary.write('{:^200}\\n'.format('END'))\n f_summary.write('='*120+'\\n\\n\\n')\n\n f_summary.write('='*120+'\\n')\n f_summary.write('{:^200}\\n'.format('Model 2 Leaderboard'))\n f_summary.write('='*120+'\\n')\n f_summary.write('# {:<30}{:<30}{:<30}{:<30}{:<30}{:<30}\\n'.format('Rank','Group_ID','OVERALL_SCORE','(1)BLEU_SCORE','(2)EDIT_DISTANCE_SCORE','(3)Exact_Match'))\n sorted_group_id = sorted([(k,v) for k,v in summary_dict['2'].items()], key=lambda x: (x[1]['bleu_score']+x[1]['edit_distance_score']+x[1]['exact_match'])/3, reverse=True)\n rank_id = 1\n group_cnt = len(sorted_group_id)\n for group_id, scores_dict in sorted_group_id:\n f_summary.write('# {:<35}{:<35}{:<39}{:<42}{:<39}{:<33}\\n'.format(\n '%2s/%2s'%(rank_id,group_cnt),\n '%2s'%(group_id),\n '%7.4f'%((scores_dict['bleu_score']+scores_dict['edit_distance_score']+scores_dict['exact_match'])/3),\n '%7.4f'%(scores_dict['bleu_score']),\n '%7.4f'%(scores_dict['edit_distance_score']),\n '%7.4f'%(scores_dict['exact_match'])\n )\n )\n rank_id+=1\n f_summary.write('='*120+'\\n')\n f_summary.write('{:^200}\\n'.format('END'))\n f_summary.write('='*120+'\\n\\n\\n')\n\ndef evaluate(references, hypotheses):\n #用于在验证集上计算各种评价指标指导模型早停\n # Calculate scores\n bleu4 = 0.0\n for i,j in zip(references,hypotheses):\n bleu4 += max(sentence_bleu([i],j),0.01)\n bleu4 = bleu4/len(references)\n bleu4 = bleu4*100\n Edit_Distance = edit_distance(references, hypotheses)\n Exact_Match = np.mean([1.0 if r==h else 0.0 for r,h in zip(references, hypotheses)])*100\n return bleu4, Edit_Distance, Exact_Match\n\ndef edit_distance(references, hypotheses):\n \"\"\"Computes Levenshtein distance between two sequences.\n Args:\n references: list of list of token (one hypothesis)\n hypotheses: list of list of token (one hypothesis)\n Returns:\n 1 - levenshtein distance: (higher is better, 1 is perfect)\n \"\"\"\n d_leven, len_tot = 0, 0\n for ref, hypo in zip(references, hypotheses):\n d_leven += distance.levenshtein(ref, hypo)\n len_tot += float(max(len(ref), len(hypo)))\n\n return (1. - d_leven / len_tot)*100\n \n# pred = ['A D^{2}=P C^{2}+A C^{2}-2 D C \\cdot A c \\cdot \\cos 30^{\\circ}']\n# tgt = ['A D^{2}=P C^{2} + A C^{2}-2 D C \\cdot A c \\cdot \\cos 30^{\\circ}']\n# print(evaluate(pred,tgt))\n\nmain()","repo_name":"zzz47zzz/deep-learning-course-documents","sub_path":"mathmatical-expressions-recognition/evaluation/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6605,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"7964794595","text":"\"\"\"\nnats.py\nNATS message subscribers and message handlers\n\"\"\"\nimport json\nimport logging\nimport connect.workflows.core as core\nimport ssl\nfrom asyncio import get_running_loop\nfrom nats.aio.client import Client as NatsClient, Msg\nfrom connect.clients.kafka import get_kafka_producer, KafkaCallback\nfrom connect.config import get_settings, nats_sync_subject, kafka_sync_topic, TRACE\nfrom connect.support.encoding import decode_to_dict\nfrom typing import Callable, List, Optional\n\n\nlogger = logging.getLogger(__name__)\nnats_client = None\nnats_clients = []\n\n\nasync def create_nats_subscribers():\n \"\"\"\n Create NATS subscribers. Add additional subscribers as needed.\n \"\"\"\n await start_sync_event_subscribers()\n\n\nasync def start_sync_event_subscribers():\n \"\"\"\n Create a NATS subscriber for 'nats_sync_subject' for the local NATS server/cluster and\n for each NATS server defined by 'nats_sync_subscribers' in config.py.\n \"\"\"\n settings = get_settings()\n\n # subscribe to nats_sync_subject from the local NATS server or cluster\n nats_client = await get_nats_client()\n await subscribe(\n nats_client,\n nats_sync_subject,\n nats_sync_event_handler,\n \"\".join(settings.nats_servers),\n )\n\n # subscribe to nats_sync_subject from any additional NATS servers\n for server in settings.nats_sync_subscribers:\n nats_client = await create_nats_client(server)\n await subscribe(nats_client, nats_sync_subject, nats_sync_event_handler, server)\n\n\nasync def subscribe(client: NatsClient, subject: str, callback: Callable, servers: str):\n \"\"\"\n Subscribe a NATS client to a subject.\n\n :param client: a connected NATS client\n :param subject: the NATS subject to subscribe to\n :param callback: the callback to call when a message is received on the subscription\n \"\"\"\n await client.subscribe(subject, cb=callback)\n nats_clients.append(client)\n logger.debug(f\"Subscribed {servers} to NATS subject {subject}\")\n\n\nasync def nats_sync_event_handler(msg: Msg):\n \"\"\"\n Callback for NATS 'nats_sync_subject' messages\n \"\"\"\n subject = msg.subject\n reply = msg.reply\n data = msg.data.decode()\n logger.log(\n TRACE, f\"nats_sync_event_handler: received a message on {subject} {reply}\"\n )\n\n # if the message is from our local LFH, don't store in kafka\n message = json.loads(data)\n if get_settings().connect_lfh_id == message[\"lfh_id\"]:\n logger.log(\n TRACE,\n \"nats_sync_event_handler: detected local LFH message, not storing in kafka\",\n )\n return\n\n # store the message in kafka\n kafka_producer = get_kafka_producer()\n kafka_cb = KafkaCallback()\n await kafka_producer.produce_with_callback(\n kafka_sync_topic, data, on_delivery=kafka_cb.get_kafka_result\n )\n logger.log(\n TRACE,\n f\"nats_sync_event_handler: stored msg in kafka topic {kafka_sync_topic} at {kafka_cb.kafka_result}\",\n )\n\n # process the message into the local store\n settings = get_settings()\n msg_data = decode_to_dict(message[\"data\"])\n workflow = core.CoreWorkflow(\n message=msg_data,\n origin_url=message[\"consuming_endpoint_url\"],\n certificate_verify=settings.certificate_verify,\n lfh_id=message[\"lfh_id\"],\n data_format=message[\"data_format\"],\n transmit_server=None,\n do_sync=False,\n )\n\n result = await workflow.run(None)\n location = result[\"data_record_location\"]\n logger.log(\n TRACE,\n f\"nats_sync_event_handler: replayed nats sync message, data record location = {location}\",\n )\n\n\nasync def stop_nats_clients():\n \"\"\"\n Gracefully stop all NATS clients prior to shutdown, including\n unsubscribing from all subscriptions.\n \"\"\"\n for client in nats_clients:\n await client.close()\n\n\nasync def get_nats_client() -> Optional[NatsClient]:\n \"\"\"\n Create or return a NATS client connected to the local\n NATS server or cluster defined by 'nats_servers' in config.py.\n\n :return: a connected NATS client instance\n \"\"\"\n global nats_client\n\n if not nats_client:\n settings = get_settings()\n nats_client = await create_nats_client(settings.nats_servers)\n nats_clients.append(nats_client)\n\n return nats_client\n\n\nasync def create_nats_client(servers: List[str]) -> Optional[NatsClient]:\n \"\"\"\n Create a NATS client for any NATS server or NATS cluster configured to accept this installation's NKey.\n\n :param servers: List of one or more NATS servers. If multiple servers are\n provided, they should be in the same NATS cluster.\n :return: a connected NATS client instance\n \"\"\"\n settings = get_settings()\n\n ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n ssl_ctx.load_verify_locations(settings.nats_rootCA_file)\n\n nats_client = NatsClient()\n await nats_client.connect(\n servers=servers,\n nkeys_seed=settings.nats_nk_file,\n loop=get_running_loop(),\n tls=ssl_ctx,\n allow_reconnect=settings.nats_allow_reconnect,\n max_reconnect_attempts=settings.nats_max_reconnect_attempts,\n )\n logger.debug(f\"Created NATS client for servers = {servers}\")\n\n return nats_client\n","repo_name":"kidrecursive/connect-1","sub_path":"connect/clients/nats.py","file_name":"nats.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"69948431418","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport matplotlib.pyplot as plt\n#%matplotlib inline\n\nimport seaborn as sns\nsns.set()\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\ntrain = pd.read_csv('../input/train.csv',index_col='AnimalID').fillna('-1 value')\n\n# **Note: all these functions can be implemented using \"map\" functions that could be easily \n# implemented in PySpark. \n# Wishing to see the implementation of map functios in pandas too, since I am not \n# experienced with them!**\n\nprint(train.head())\n\n\n# **I am assuming that the Name column will not provide information**\n# \n# **I still not understanding the value of OutcomeSubtype!!! I will remove it**\ntrain.drop('Name', axis=1, inplace=True)\ntrain.drop('OutcomeSubtype', axis=1, inplace=True)\n# **the \"sexuponOutcome\" is a combination of the real sex of the animal \n# and the state of the sex such Neutered,Spayed or Intact.\n# I will split these two attributes in two columns of the dataframe**\n# splitting sex type and the animal state and removing the original columns\n# splitting sex type and the animal state and removing the original columns\ntrain[\"sex\"] = [x[1] if len(x)==2 else \"Unknown\" for x in [ x.split(\" \") for x in train['SexuponOutcome'].values] ]\ntrain[\"sex_state\"] = [x[0] if len(x)==2 else \"Unknown\" for x in [ x.split(\" \") for x in train['SexuponOutcome'].values] ]\ntrain.drop('SexuponOutcome', axis=1, inplace=True)\n# **The Color appears like a composite column. \n# I will split this column in Primary Color and Secondary Color**\n# color can be mostly splitted between primary color and secondary color \ntrain[\"primary color\"] = [x[1] if len(x)==2 else x[0].split(\" \")[0] for x in [ x.split(\"/\") for x in train['Color'].values] ]\ntrain[\"secondary color\"] = [x[0] if len(x)==2 else x[0].split(\" \")[0] for x in [ x.split(\"/\") for x in train['Color'].values] ]\ntrain.drop('Color', axis=1, inplace=True)\n# **The Breed attribute also appears like a composite column. \n# Since the animal can be a mix of several breeds, I add a further column is the animal\n# is a mix.\n# I will further split the Breed column in a firtBreedAttribute and a secondaryBreedAttribute.\n# Finally add a 3rd column if the breed has more than 3 attributed**\n# \n# *Note: I am not happy with this split since I see I am lost some important words about the real \n# breed of the animal. Help is appreciated!*\n# check if the animal is a mix \ntrain[\"isMix\"] = [x[-1] == \"Mix\" for x in [ x.split(\" \") for x in train['Breed'].values] ]\ntrain[\"firtBreedAttribute\"] = [x[0] if len(x)>1 else x[0] for x in [ x.split(\" \") for x in train['Breed'].values] ]\ntrain[\"secondaryBreedAttribute\"] = [x[1] if len(x)>2 else x[0] for x in [ x.split(\" \") for x in train['Breed'].values] ]\ntrain[\"hasMoreBreedAttributes\"] = [len(x)>3 if x[-1] == \"Mix\" else len(x)>2 for x in [ x.split(\" \") for x in train['Breed'].values] ]\n\ntrain.drop('Breed', axis=1, inplace=True)\n# **The age upon outcome is mixed between years, months and weeks. \n# I will normalized this column considering only the age in weeks**\n# normalized age in weeks \nnumOfWeeksPerYear = 52\nnumOfWeeksPerMonth = 4\n# int(x[0])*numOfWeeksPerYear if x[1].startswith('year') elseif x[0]\ndef getNormalizedWeeks(x):\n if x[1].startswith('year'):\n nw = int(x[0])*52\n elif x[1].startswith('month'):\n nw = int(x[0])*4\n else:\n nw = int(x[0])\n return nw\n\ntrain[\"normalizedAgeuponOutcome\"] = [ getNormalizedWeeks(x) for x in [ x.split(\" \") for x in train['AgeuponOutcome'].values]]\ntrain.drop('AgeuponOutcome', axis=1, inplace=True)\n# **As last, I assume here that the information about the month when the animal\n# has been outcome could bring some information. Instead of the full date, I will\n# create a column that will bring only the information of the month.**\ntrain[\"OutcomeMonth\"] = [x[0].split(\"-\")[1] for x in [ x.split(\" \") for x in train['DateTime'].values] ]\ntrain.drop('DateTime', axis=1, inplace=True)\n# ### A final data format looks like this\nprint(train.head(20))\n\n","repo_name":"aorursy/new-nb-5","sub_path":"piecurus_usable-format-with-separated-attributes.py","file_name":"piecurus_usable-format-with-separated-attributes.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72613323256","text":"# coding=utf-8\n# helper function for deformable conv\nimport tensorflow as tf\n\ndef _to_bc_h_w(x, x_shape):\n\t\t\"\"\"(b, h, w, c) -> (b*c, h, w)\"\"\"\n\t\tx = tf.transpose(x, [0, 3, 1, 2])\n\t\tx = tf.reshape(x, (-1, x_shape[1], x_shape[2]))\n\t\treturn x\n\ndef _to_b_h_w_n_c(x, x_shape):\n\t\"\"\"(b*c, h, w, n) -> (b, h, w, n, c)\"\"\"\n\tx = tf.reshape(x, (-1, x_shape[4], x_shape[1], x_shape[2], x_shape[3]))\n\tx = tf.transpose(x, [0, 2, 3, 4, 1])\n\treturn x\n\ndef tf_flatten(a):\n\t\"\"\"Flatten tensor\"\"\"\n\treturn tf.reshape(a, [-1])\n\ndef _get_vals_by_coords(inputs, coords, idx, out_shape):\n\tindices = tf.stack(\n\t\t[idx, tf_flatten(coords[:, :, :, :, 0]),\n\t\t tf_flatten(coords[:, :, :, :, 1])], axis=-1\n\t)\n\tvals = tf.gather_nd(inputs, indices)\n\tvals = tf.reshape(vals, out_shape)\n\treturn vals\n\ndef _tf_repeat(a, repeats):\n\t\"\"\"Tensorflow version of np.repeat for 1D\"\"\"\n\t# https://github.com/tensorflow/tensorflow/issues/8521\n\n\tif len(a.get_shape()) != 1:\n\t\traise AssertionError(\"This is not a 1D Tensor\")\n\n\ta = tf.expand_dims(a, -1)\n\ta = tf.tile(a, [1, repeats])\n\ta = tf_flatten(a)\n\treturn a\n\ndef _tf_batch_map_coordinates(inputs, coords):\n\t\"\"\"Batch version of tf_map_coordinates\n\n\tOnly supports 2D feature maps\n\n\tParameters\n\t----------\n\tinputs : ``tf.Tensor``\n\t\tshape = (b*c, h, w)\n\tcoords : ``tf.Tensor``\n\t\tshape = (b*c, h, w, n, 2)\n\n\tReturns\n\t-------\n\t``tf.Tensor``\n\t\tA Tensor with the shape as (b*c, h, w, n)\n\n\t\"\"\"\n\tinput_shape = inputs.get_shape()\n\tcoords_shape = coords.get_shape()\n\tbatch_channel = tf.shape(inputs)[0]\n\tinput_h = tf.shape(inputs)[1]\n\tinput_w = tf.shape(inputs)[2]\n\tkernel_n = int(coords_shape[3])\n\tn_coords = input_h * input_w * kernel_n\n\n\tcoords_lt = tf.cast(tf.floor(coords), 'int32')\n\tcoords_rb = tf.cast(tf.ceil(coords), 'int32')\n\tcoords_lb = tf.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1)\n\tcoords_rt = tf.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1)\n\n\tidx = _tf_repeat(tf.range(batch_channel), n_coords)\n\n\tvals_lt = _get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n))\n\tvals_rb = _get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n))\n\tvals_lb = _get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n))\n\tvals_rt = _get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n))\n\n\tcoords_offset_lt = coords - tf.cast(coords_lt, 'float32')\n\n\tvals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0]\n\tvals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0]\n\tmapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1]\n\n\treturn mapped_vals\n\ndef _tf_batch_map_offsets(inputs, offsets, grid_offset):\n\t\"\"\"Batch map offsets into input\n\n\tParameters\n\t------------\n\tinputs : ``tf.Tensor``\n\t\tshape = (b, h, w, c)\n\toffsets: ``tf.Tensor``\n\t\tshape = (b, h, w, 2*n)\n\tgrid_offset: `tf.Tensor``\n\t\tOffset grids shape = (h, w, n, 2)\n\n\tReturns\n\t-------\n\t``tf.Tensor``\n\t\tA Tensor with the shape as (b, h, w, c)\n\n\t\"\"\"\n\tinput_shape = inputs.get_shape()\n\tbatch_size = tf.shape(inputs)[0]\n\tkernel_n = int(int(offsets.get_shape()[3]) / 2)\n\tinput_h = tf.shape(inputs)[1]\n\tinput_w = tf.shape(inputs)[2]\n\tchannel = input_shape[3]\n\n\t# inputs (b, h, w, c) --> (b*c, h, w)\n\tinputs = _to_bc_h_w(inputs, tf.shape(inputs))\n\n\t# offsets (b, h, w, 2*n) --> (b, h, w, n, 2)\n\toffsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2))\n\t# offsets (b, h, w, n, 2) --> (b*c, h, w, n, 2)\n\t# offsets = tf.tile(offsets, [channel, 1, 1, 1, 1])\n\n\tcoords = tf.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2)\n\tcoords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2)\n\n\t# clip out of bound\n\tcoords = tf.stack(\n\t\t[\n\t\t\ttf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')),\n\t\t\ttf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32'))\n\t\t], axis=-1\n\t)\n\tcoords = tf.tile(coords, [channel, 1, 1, 1, 1])\n\n\tmapped_vals = _tf_batch_map_coordinates(inputs, coords)\n\t# (b*c, h, w, n) --> (b, h, w, n, c)\n\tmapped_vals = _to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel])\n\n\treturn mapped_vals","repo_name":"JunweiLiang/Object_Detection_Tracking","sub_path":"deformable_helper.py","file_name":"deformable_helper.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","stars":429,"dataset":"github-code","pt":"22"} +{"seq_id":"43900830082","text":"def makingAnagrams(str_one, str_two) -> int:\n \"\"\"Determine number of deletions required to create anagrams\"\"\"\n counter = {}\n num_deletions = 0\n\n for char in str_one:\n try:\n has_occurred = counter[char]\n if has_occurred:\n continue\n except KeyError:\n str_one_count = str_one.count(char)\n str_two_count = str_two.count(char)\n diff = abs(str_one_count - str_two_count)\n if diff != 0:\n num_deletions += diff\n counter[char] = True\n\n for char in str_two:\n try:\n has_occurred = counter[char]\n if has_occurred:\n continue\n except KeyError:\n char_count = str_two.count(char)\n num_deletions += char_count\n counter[char] = True\n\n return num_deletions\n\n\nprint(makingAnagrams(\"abc\", \"amnop\"))\nprint(makingAnagrams(\"cde\", \"abc\"))\n# print(anagram(\"ab\"))\n# print(anagram(\"abc\"))\n# print(anagram(\"mnop\"))\n# print(anagram(\"xyyx\"))\n# print(anagram(\"xaxbbbxx\"))\n# print(\"==============\")\n# print(anagram(\"hhpddlnnsjfoyxpciioigvjqzfbpllssuj\"))\n# print(anagram(\"xulkowreuowzxgnhmiqekxhzistdocbnyozmnqthhpievvlj\"))\n# print(anagram(\"dnqaurlplofnrtmh\"))\n","repo_name":"CoderFemi/AlgorithmsDataStructures","sub_path":"practice_challenges/python/making_anagrams.py","file_name":"making_anagrams.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11078579708","text":"\"\"\"\nUnit and regression test for the kissim.api.subset module.\n\"\"\"\n\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nfrom opencadd.databases.klifs import setup_local\n\nfrom kissim.utils import enter_temp_directory\nfrom kissim.api import subset\nfrom kissim.api.subset import _subset_fingerprint_generator_data\nfrom kissim.encoding import FingerprintGenerator\nfrom kissim.definitions import KLIFS_POCKET_RESIDUE_SUBSET\n\nPATH_TEST_DATA = Path(__name__).parent / \"kissim\" / \"tests\" / \"data\"\nLOCAL = setup_local(PATH_TEST_DATA / \"KLIFS_download\")\n\n\n@pytest.mark.parametrize(\n \"fingerprints_path, klifs_pocket_residue_subset_type, fingerprints_subset_path, klifs_pocket_residue_subset\",\n [\n (\n (PATH_TEST_DATA / \"fingerprints.json\").absolute(),\n \"dfg_all\",\n None,\n KLIFS_POCKET_RESIDUE_SUBSET,\n ),\n (\n (PATH_TEST_DATA / \"fingerprints.json\").absolute(),\n \"dfg_in\",\n (PATH_TEST_DATA / \"fingerprints_subset.json\").absolute(),\n KLIFS_POCKET_RESIDUE_SUBSET,\n ),\n (\n (PATH_TEST_DATA / \"fingerprints.json\").absolute(),\n \"dfg_out\",\n None,\n KLIFS_POCKET_RESIDUE_SUBSET,\n ),\n ],\n)\ndef test_subset(\n fingerprints_path,\n klifs_pocket_residue_subset_type,\n fingerprints_subset_path,\n klifs_pocket_residue_subset,\n):\n with enter_temp_directory():\n # Generate regular fingerprints\n fingerprints_path = Path(fingerprints_path)\n fingerprint_generator = FingerprintGenerator.from_structure_klifs_ids([12347, 3835])\n fingerprint_generator.to_json(fingerprints_path)\n\n # Generate subset fingerprints\n fingerprint_generator_subset = subset(\n fingerprints_path,\n klifs_pocket_residue_subset_type,\n fingerprints_subset_path,\n )\n\n # Test FingerprintGenerator object\n assert isinstance(fingerprint_generator_subset, FingerprintGenerator)\n assert (\n fingerprint_generator_subset.structure_klifs_ids\n == fingerprint_generator.structure_klifs_ids\n )\n # Attribute `klifs_session` is set to None\n assert fingerprint_generator_subset.klifs_session is None\n\n # Test Fingerprint objects\n for fingerprint_id, fingerprint_subset in fingerprint_generator_subset.data.items():\n # Original fingerprint\n fingerprint = fingerprint_generator.data[fingerprint_id]\n\n # Is bit length correct\n n_residues = len(klifs_pocket_residue_subset[klifs_pocket_residue_subset_type])\n n_bits = len(fingerprint_subset.values_array())\n n_bits_theory = 8 * n_residues + 4 * n_residues + 12\n assert n_bits == n_bits_theory\n\n # Are lists of residues correct?\n assert (\n fingerprint_subset.residue_ixs\n == klifs_pocket_residue_subset[klifs_pocket_residue_subset_type]\n )\n assert len(fingerprint_subset.residue_ids) == n_residues\n\n # Is structure and kinase the same as in original fingerprint\n assert fingerprint_subset.structure_klifs_id == fingerprint.structure_klifs_id\n assert fingerprint_subset.kinase_name == fingerprint.kinase_name\n\n if fingerprints_subset_path is not None:\n fingerprints_subset_path = Path(fingerprints_subset_path)\n assert fingerprints_subset_path.exists()\n fingerprints_subset_path.unlink()\n\n fingerprints_path.unlink()\n\n\n@pytest.mark.parametrize(\n \"structure_klifs_id, klifs_session, subset_residue_ids, fp_subset_sum\",\n [\n (110, LOCAL, [1, 2, 3], 250.981),\n (118, LOCAL, [10, 20, 30], 252.108),\n ],\n)\ndef test_subset_fingerprint_generator_data(\n structure_klifs_id, klifs_session, subset_residue_ids, fp_subset_sum\n):\n fingerprint_generator = FingerprintGenerator.from_structure_klifs_ids(\n [structure_klifs_id], klifs_session\n )\n fingerprint_generator_data = _subset_fingerprint_generator_data(\n fingerprint_generator, subset_residue_ids\n )\n fp_subset_sum_calculated = np.nansum(\n fingerprint_generator_data[structure_klifs_id].values_array()\n )\n assert pytest.approx(fp_subset_sum_calculated, abs=1e-3) == fp_subset_sum\n","repo_name":"volkamerlab/kissim","sub_path":"kissim/tests/api/test_api_subset.py","file_name":"test_api_subset.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"22"} +{"seq_id":"13140793060","text":"import os\nimport random\nimport pandas as pd\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader, Dataset\nfrom params import param\n\n\ndef read_data(file_path_dataset):\n return pd.read_csv(file_path_dataset, delimiter='\\t')\n\n\ndef make_cuda(tensor):\n \"\"\"Use CUDA if it's available.\"\"\"\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return tensor\n\n\ndef init_random_seed(manual_seed):\n \"\"\"Init random seed.\"\"\"\n if manual_seed is None:\n seed = random.randint(1, 10000)\n else:\n seed = manual_seed\n print(\"use random seed: {}\".format(seed))\n random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n\ndef init_model(net, restore=None):\n\n # restore model weights\n if restore is not None and os.path.exists(restore):\n net.load_state_dict(torch.load(restore))\n print(\"Restore model from: {}\".format(os.path.abspath(restore)))\n\n # check if cuda is available\n if torch.cuda.is_available():\n cudnn.benchmark = True\n net.cuda()\n return net\n\n\ndef save_model(net, filename):\n \"\"\"Save trained model.\"\"\"\n if not os.path.exists(param.model_root):\n os.makedirs(param.model_root)\n torch.save(net.state_dict(),\n os.path.join(param.model_root, filename))\n print(\"save pretrained model to: {}\".format(os.path.join(param.model_root,\n filename)))\n\n\ndef get_data_loader(sequences, labels, maxlen=None):\n # dataset and data loader\n text_dataset = TextDataset(sequences, labels, maxlen)\n\n text_data_loader = DataLoader(\n dataset=text_dataset,\n batch_size=param.batch_size,\n shuffle=True)\n\n return text_data_loader\n\n\nclass TextDataset(Dataset):\n def __init__(self, sequences, labels, maxlen):\n\n seqlen = max([len(sequence) for sequence in sequences])\n\n if maxlen is None or maxlen > seqlen:\n maxlen = seqlen\n\n seq_data = list()\n for sequence in sequences:\n sequence.insert(0, 101) # insert [CLS] token\n seqlen = len(sequence)\n if seqlen < maxlen:\n sequence.extend([0] * (maxlen-seqlen))\n else:\n sequence = sequence[:maxlen]\n seq_data.append(sequence)\n\n self.data = torch.LongTensor(seq_data).cuda()\n self.labels = torch.LongTensor(labels).cuda()\n self.dataset_size = len(self.data)\n\n def __getitem__(self, index):\n review, label = self.data[index], self.labels[index]\n return review, label\n\n def __len__(self):\n return self.dataset_size\n","repo_name":"deep0learning/text-adda","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"70722838775","text":"import glob\nimport os\n\n# Find input files\nFILES = glob.glob(\"*_R1.fq.gz\") #Files must be in working directory\n\n#Using os.system,\n#open the zipped file\n#Take every 4th line, starting from the 1st (name lines in fastq)\n#Remove the first character (@ in fastq)\n#Cut everything after the space \n#Print to new file, retaining sample name\n\nfor file in FILES:\n\tname = file.split(\"_\")[1]\n\tcmd = f\"zcat {file} | sed -n '1~4p' | cut -c2- | cut -d ' ' -f 1 > {name}_names.txt\"\n\tprint(cmd)\n\tos.system(cmd)\n","repo_name":"LMBradford/salmdetectpipeline","sub_path":"scripts/printseqnames.py","file_name":"printseqnames.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5473609713","text":"import os\n\nfrom charmhelpers.core.hookenv import (\n log,\n DEBUG,\n WARNING,\n)\n\ntry:\n from jinja2 import FileSystemLoader, Environment\nexcept ImportError:\n from charmhelpers.fetch import apt_install\n from charmhelpers.fetch import apt_update\n apt_update(fatal=True)\n apt_install('python-jinja2', fatal=True)\n from jinja2 import FileSystemLoader, Environment\n\n\n# NOTE: function separated from main rendering code to facilitate easier\n# mocking in unit tests.\ndef write(path, data):\n with open(path, 'wb') as out:\n out.write(data)\n\n\ndef get_template_path(template_dir, path):\n \"\"\"Returns the template file which would be used to render the path.\n\n The path to the template file is returned.\n :param template_dir: the directory the templates are located in\n :param path: the file path to be written to.\n :returns: path to the template file\n \"\"\"\n return os.path.join(template_dir, os.path.basename(path))\n\n\ndef render_and_write(template_dir, path, context):\n \"\"\"Renders the specified template into the file.\n\n :param template_dir: the directory to load the template from\n :param path: the path to write the templated contents to\n :param context: the parameters to pass to the rendering engine\n \"\"\"\n env = Environment(loader=FileSystemLoader(template_dir))\n template_file = os.path.basename(path)\n template = env.get_template(template_file)\n log('Rendering from template: %s' % template.name, level=DEBUG)\n rendered_content = template.render(context)\n if not rendered_content:\n log(\"Render returned None - skipping '%s'\" % path,\n level=WARNING)\n return\n\n write(path, rendered_content.encode('utf-8').strip())\n log('Wrote template %s' % path, level=DEBUG)\n","repo_name":"ChrisMacNaughton/charms.hardening","sub_path":"charms_hardening/templating.py","file_name":"templating.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26367122249","text":"import math\nimport functools\nimport random\nimport matplotlib.pyplot as plt\n\nrandom.seed(42)\n\nESP = 1e-8\n\ndef cal_crossdot(p, q):\n return p[0] * q[1] - p[1] * q[0]\n\ndef cal_dis(p, q):\n return math.sqrt((p[0] - q[0]) * (p[0] - q[0]) + (p[1] - q[1]) * (p[1] - q[1]))\n\n#以a1为原点,计算a2 X a3,如果|a||b|sin大于0,说明左转 (==0 共线,尤其和p0共线且成为一条边时需要小心,\n# 如果不是和p0共线,那么可以根据极角区分访问的正确顺序)\ndef left_rotate(a1, a2, a3):\n tmp = cal_crossdot((a2[0] - a1[0], a2[1] - a1[1]), (a3[0] - a1[0], a3[1] - a1[1]))\n if tmp > 0:\n return 1\n elif tmp < 0:\n return -1\n return 0\n\n\n#大���则返回1,小于则返回-1,等于则返回0\ndef comp(centerp, x1, x2):\n if x1 is None:\n return 1\n if x2 is None:\n return -1\n tmp = left_rotate(centerp, x1, x2) #>0左转返回-1\n if tmp > 0:\n return -1\n elif tmp < 0:\n return 1\n elif cal_dis(centerp, x1) < cal_dis(centerp, x2):\n return -1\n else:\n return 1\n \ndef sort_theta(Q, centerp):\n #使用comp函数比较的前提是 规定一个方向(逆时针/顺时针),那么任意两点都在(0,180)内,否则3者大小顺序不确定, 但一定是逆时针\n return sorted(Q,key=functools.cmp_to_key(lambda x,y:comp(centerp, x, y)))\n\ndef isInseg(a, st, end):\n dis1 = cal_dis(a, st) + cal_dis(a, end)\n dis2 = cal_dis(st, end)\n if dis1 < dis2 + ESP and dis1 > dis2 - ESP:\n return True\n return False\n\n\n#判断p点是否在三角形ABC内\ndef isInside(p, a, b, c):\n if left_rotate(a, b, c) == 0: #如果不构成三角形,判断p是否在线段上\n return isInseg(p, a, b) or isInseg(p, a, c) or isInseg(p, b, c)\n # 如果pa在bc同侧, pb在ac同侧, pc在ab同侧,那么p点在ABC内或边/顶点上,return True\n pa_valid = left_rotate(b, c, p) + left_rotate(b, c, a)\n if abs(pa_valid) == 0: # -1, 1 异侧\n return False\n pb_valid = left_rotate(a, c, p) + left_rotate(a, c, b)\n if abs(pb_valid) == 0:\n return False\n pc_valid = left_rotate(a, b, p) + left_rotate(a, b, c)\n if abs(pc_valid) == 0:\n return False\n return True\n\n#分治算法求第K小\ndef getKsmall(X, k):\n if len(X) < 5:\n return sorted(X)[k-1]\n while(len(X) % 5 != 0):\n X.append(float(\"inf\"))\n medians = []\n for i in range(int(len(X)/5)):\n medians.append(sorted(X[i * 5: (i+1) * 5])[2])\n # print(len(X), k, len(medians), int((len(medians) + 1) / 2))\n mm = getKsmall(medians, int((len(medians) + 1) / 2))\n S = [[], [], []]\n for x in X:\n if x < mm:\n S[0].append(x)\n elif x == mm:\n S[1].append(x)\n else:\n S[2].append(x)\n # print(len(S[0]), len(S[1]), len(S[2]))\n if k <= len(S[0]):\n return getKsmall(S[0], k)\n elif k > len(S[0]) + len(S[1]):\n return getKsmall(S[2], k - len(S[0]) - len(S[1]))\n else:\n return mm\n\n\ndef random_points(r, N):\n Q = []\n for i in range(N):\n Q.append([random.random() * r, random.random() * r])\n return Q\n\ndef plotCH(Q, CH):\n plt.figure()\n for i in range(len(Q)):\n q = Q[i]\n plt.scatter(q[0], q[1], s=5, marker='o', color='green') # display all the points\n# plt.annotate(str(i), xy=q)\n for i in range(len(CH)):\n p = CH[i]\n q = CH[(i + 1) % len(CH)]\n plt.plot([p[0], q[0]], [p[1], q[1]], color='red') # display lines\n plt.scatter(p[0], p[1], s=10, marker='o', color='blue')\n plt.show()\n","repo_name":"mtt1998/algorithm","sub_path":"ConvexHull/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70294010296","text":"import csv\nimport os\nfrom typing import Dict, Optional, Union\nimport pandas as pd\nfrom chartalist.datasets.chartalist_dataset import ChartaListDataset\n\n\nclass EthereumTransactionNetworkDataset(ChartaListDataset):\n \"\"\"\n Ethereum dataset.\n \"\"\"\n\n _NOT_IN_DATASET: int = -1\n _data_frame = pd.DataFrame()\n _dataset_name: str = \"ethereum\"\n _versions_dict: Dict[str, Dict[str, Union[str, int]]] = {\n\n \"trans_net_bancor\": {\n \"download_url\": \"https://chartalist.org/files/networkbancor.txt\",\n \"compressed_size\": 4_841_472,\n \"file_name\": \"networkbancor.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"type_prediction_trans\": {\n \"download_url\": \"https://chartalist.org/data/ethTypePrediction/token_transfers_full.csv\",\n \"compressed_size\": 2_300_300,\n \"file_name\": \"token_transfers_full.csv\",\n \"labels\": [\"token_address\", \"from_address\", \"to_address\", \"value\", \"transaction_hash\", \"log_index\",\n \"block_number\"],\n \"sep\": \",\",\n },\n \"type_prediction_labels\": {\n \"download_url\": \"https://chartalist.org/data/ethTypePrediction/exchangeLabels.csv\",\n \"compressed_size\": 170_300,\n \"file_name\": \"exchangeLabels.csv\",\n \"labels\": [\"type\", \"address\", \"name\"],\n \"sep\": \",\",\n },\n \"multilayer_bytom\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networkbytom.txt\",\n \"compressed_size\": 458_472,\n \"file_name\": \"networkbytom.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"multilayer_cybermiles\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networkcybermiles.txt\",\n \"compressed_size\": 314_480,\n \"file_name\": \"networkcybermiles.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"multilayer_decentraland\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networkdecentraland.txt\",\n \"compressed_size\": 680_800,\n \"file_name\": \"networkdecentraland.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"multilayer_tierion\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networktierion.txt\",\n \"compressed_size\": 397_670,\n \"file_name\": \"networktierion.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n\n \"multilayer_vechain\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networkvechain.txt\",\n \"compressed_size\": 532_630,\n \"file_name\": \"networkvechain.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n\n \"multilayer_zrx\": {\n \"download_url\": \"https://chartalist.org/data/ethMultilayerData/networkzrx.txt\",\n \"compressed_size\": 1_000_300,\n \"file_name\": \"networkzrx.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"stablecoin_erc20\": {\n \"download_url\": \"https://chartalist.org/data/stablecoinERC20/token_transfers.csv\",\n \"compressed_size\": 1_700_300,\n \"file_name\": \"token_transfers.csv\",\n \"labels\": [\"block_number\", \"transaction_index\", \"from_address\", \"to_address\", \"time_stamp\",\n \"contract_address\", \"value\"],\n \"sep\": \",\",\n },\n\n \"price_prediction_vechain\": {\n \"download_url\": \"https://chartalist.org/data/ethPricePrediction/networkvechainTX.txt\",\n \"compressed_size\": 11_700_300,\n \"file_name\": \"networkvechainTX.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"price_prediction_zrx\": {\n \"download_url\": \"https://chartalist.org/data/ethPricePrediction/networkzrxTX.txt\",\n \"compressed_size\": 13_260_300,\n \"file_name\": \"networkzrxTX.txt\",\n \"labels\": [\"fromAddress \", \"toAddress\", \"time\", \"amount\"],\n \"sep\": \" \",\n },\n \"anomaly_detection_ether_delta_trades\": {\n \"download_url\": \"https://chartalist.org/data/ethAnomalyDetection/EtherDeltaTrades.csv\",\n \"compressed_size\": 2_900_300,\n \"file_name\": \"EtherDeltaTrades.csv\",\n \"labels\": [\"transaction_hash\",\t\"block_number\",\t\"timestamp\",\t\"tokenGet\",\t\"amountGet\",\t\"tokenGive\",\t\"amountGive\",\t\"get\",\t\"give\"],\n \"sep\": \",\",\n },\n \"anomaly_detection_ether_dollar_price\": {\n \"download_url\": \"https://chartalist.org/data/ethAnomalyDetection/EtherDollarPrice.csv\",\n \"compressed_size\": 60_300,\n \"file_name\": \"EtherDollarPrice.csv\",\n \"labels\": [\"Date(UTC)\", \"UnixTimeStamp\", \"Value\"],\n \"sep\": \",\",\n },\n \"anomaly_detection_idex\": {\n \"download_url\": \"https://chartalist.org/data/ethAnomalyDetection/IDEXTrades.csv\",\n \"compressed_size\": 3_700_300,\n \"file_name\": \"IDEXTrades.csv\",\n \"labels\": [\"transaction_hash\", \"status\", \"block_number\", \"gas\", \"gas_price\", \"timestamp\", \"amountBuy\",\n \"amountSell\", \"expires\", \"nonce\", \"amount\", \"tradeNonce\", \"feeMake\", \"feeTake\", \"tokenBuy\",\n \"tokenSell\", \"maker\", \"taker\"],\n \"sep\": \",\",\n },\n\n }\n\n def __init__(\n self,\n version: str = None,\n root_dir: str = \"data\",\n download: bool = False,\n split_scheme: str = \"official\",\n ):\n # Dataset information\n self._version: Optional[str] = version\n # The official split is to split by users\n self._split_scheme: str = \"official\"\n # Path of the dataset\n self._data_dir: str = self.initialize_data_dir(root_dir, download,\n self._versions_dict[self.version][\"file_name\"])\n\n print(\"The Ethereum sample data downloaded successfully and stored on your local disk -> {} \\n\"\n \"---- For more information and downloading full datasets visit https://www.Chartalist.org ---- \\n\".format(\n self.version))\n # Load data\n data_df: pd.DataFrame = pd.read_csv(\n os.path.join(self.data_dir, self._versions_dict[self.version][\"file_name\"]),\n names=self._versions_dict[self.version][\"labels\"],\n keep_default_na=False,\n usecols=range(len(self._versions_dict[self.version][\"labels\"])),\n sep=self._versions_dict[self.version][\"sep\"],\n na_values=[],\n quoting=csv.QUOTE_NONNUMERIC,\n )\n self._data_frame = data_df\n super().__init__(root_dir, download, self._split_scheme)\n","repo_name":"cakcora/chartalist","sub_path":"chartalist/datasets/ethereum_transaction_network_dataset.py","file_name":"ethereum_transaction_network_dataset.py","file_ext":"py","file_size_in_byte":7128,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"22"} +{"seq_id":"38543815958","text":"# from stable_baselines3.common.env_checker import check_env\n# import torch as th\nfrom RobitEnv2 import RobitEnvironment\nfrom stable_baselines3 import SAC as alg\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnNoModelImprovement, StopTrainingOnRewardThreshold\nfrom stable_baselines3.common.noise import NormalActionNoise\nimport os\nimport torch as th\nimport numpy as np\n\n# env = RobitEnvironment(gui=True)\n\nenv = make_vec_env(RobitEnvironment, n_envs=1)\n\n# stop_train_callback = StopTrainingOnNoModelImprovement(max_no_improvement_evals=5, min_evals=10, verbose=1)\n# eval_callback = EvalCallback(env, eval_freq=10000, callback_after_eval=stop_train_callback, verbose=1)\n# callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=103, verbose=1)\n# eval_callback = EvalCallback(env, callback_on_new_best=callback_on_best, verbose=1)\neval_callback = EvalCallback(env, best_model_save_path=\"./logs/\",\n log_path=\"./logs/\", eval_freq=5000,\n deterministic=True, render=False)\n\nMODEL_NAME = \"KrabbelTest016\"\nLEARING_TIMESTEPS = 1_000_000\nUSE_OLD_MODEL = False\nlogdir = \"logs\"\nif not os.path.exists(logdir):\n os.makedirs(logdir)\n\nif not USE_OLD_MODEL:\n model = alg(\"MultiInputPolicy\",\n env,\n # seed=10,\n learning_starts=5_000,\n # gamma=1-0.00016,\n # learning_rate=0.0005,\n ent_coef=0.0005,\n # policy_kwargs=dict(\n # activation_fn=th.nn.ReLU,\n # net_arch=[128, 128]),\n # train_freq=(1, \"step\"),\n use_sde=True,\n verbose=1,\n device=\"auto\",\n tensorboard_log=logdir)\nelse:\n print(f\"Using old Model {MODEL_NAME}\")\n model = alg.load(f\"{MODEL_NAME}\")\n model.set_env(env)\n\nmodel.learn(total_timesteps=LEARING_TIMESTEPS, callback=eval_callback, progress_bar=True)\nmodel.save(MODEL_NAME)\n","repo_name":"Nopedi/KrabbelDing","sub_path":"ModelTrainer.py","file_name":"ModelTrainer.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37205858382","text":"#!/usr/bin/env python3\n# -- coding: utf8 --\n# :author: nvagus\n# :time: 11/6/17-8:42 PM\n# :package: tforce.test\n\nimport code\nimport tforce as t4\n\n\nclass A(t4.Widget, name='A', parent='Scope', k=1):\n def __init__(self):\n super(A, self).__init__()\n print(f'init {self._name} in A')\n\n def _build(self):\n print(f'building {self._name}')\n\n\nclass B(A, name='B', parent='A'):\n def __init__(self):\n print(f'init {self._name} in B')\n super(B, self).__init__()\n\n\nif __name__ == '__main__':\n a = A()\n b = A(name='b')\n c = B()\n code.interact(local=locals())\n","repo_name":"nvagus/tforce","sub_path":"test/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70357185337","text":"from airflow import DAG \nfrom airflow.operators.python import PythonOperator\nfrom datetime import datetime\nfrom pull_youtube_data import pull_data\nfrom run_query_in_athena import run_query\n\n\n# pip install apache-airflow\n# pip install pandas\n# pip install boto3\n# pip install --upgrade google-api-python-client\n# pip install --upgrade google-auth-oauthlib google-auth-httplib2\n\n\n# create dag\nwith DAG(\n dag_id='pulling_youtube_data',\n schedule='@daily',\n start_date=datetime(year=2023, month=4, day=7), \n catchup=False,\n tags=[\"youtube\"]\n) as dag:\n\n # pull Brazil video data\n pull_data_BR = PythonOperator(task_id='pull_video_data_BR',\n python_callable=pull_data,\n op_kwargs={'region_code': 'BR'})\n \n # pull India video data\n pull_data_IN = PythonOperator(task_id='pull_video_data_IN',\n python_callable=pull_data,\n op_kwargs={'region_code': 'IN'})\n \n # pull Indonesia video data\n pull_data_ID = PythonOperator(task_id='pull_video_data_ID',\n python_callable=pull_data,\n op_kwargs={'region_code': 'ID'})\n \n # pull Mexico video data\n pull_data_MX = PythonOperator(task_id='pull_video_data_MX',\n python_callable=pull_data,\n op_kwargs={'region_code': 'MX'})\n \n # pull United States video data\n pull_data_US = PythonOperator(task_id='pull_video_data_US',\n python_callable=pull_data,\n op_kwargs={'region_code': 'US'})\n \n # run query on all collected data\n run_query = PythonOperator(task_id='run_sql_query',\n python_callable=run_query)\n \n # pull all data before running the query\n [pull_data_BR, pull_data_ID, pull_data_IN, pull_data_MX, pull_data_US] >> run_query\n\n\n","repo_name":"anair123/Building-a-Youtube-Data-Pipeline-With-AWS-and-Airflow","sub_path":"youtube_dag.py","file_name":"youtube_dag.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"2198376095","text":"import os\nimport os.path as osp\nimport ants\nfrom options.test_options import TestOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import save_images\nfrom util import html\nfrom PIL import Image\nimport numpy as np\nimport torch\nfrom time import time\nimport h5py\n\n\n\nimport torch\ntorch.backends.cuda.matmul.allow_tf32 = False\ntorch.backends.cudnn.benchmark = True\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.allow_tf32 = True\ndata = torch.randn([4, 1, 262, 262, 38], dtype=torch.float, device='cuda', requires_grad=True)\nnet = torch.nn.Conv3d(1, 64, kernel_size=[7, 7, 7], padding=[0, 0, 0], stride=[1, 1, 1], dilation=[1, 1, 1], groups=1)\nnet = net.cuda().float()\nout = net(data)\nout.backward(torch.randn_like(out))\ntorch.cuda.synchronize()\n\n\n\n\n\n\n\n\nfrom tqdm import tqdm\nfrom utility import *\nfrom glob import glob\nfrom monai.inferers import sliding_window_inference\nfrom monai.transforms import (\n AddChanneld,\n Compose,\n ScaleIntensityd,\n ToTensord,\n LoadImaged,\n RandSpatialCropd,\n RandAdjustContrastd,\n CropForegroundd,\n RandZoomd,\n RandAffined,\n RandCropByPosNegLabeld,\n RandBiasFieldd,\n RandShiftIntensityd,\n NormalizeIntensityd,\n ScaleIntensityRangePercentilesd,\n)\nimport gzip\n'''\ndef decompress_nifti_gz_folder(input_folder, output_folder):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder) # 确保输出文件夹存在\n for filename in os.listdir(input_folder):\n if filename.endswith('.nii.gz'):\n input_file = os.path.join(input_folder, filename)\n output_file = os.path.join(output_folder, filename[:-3])\n decompress_nifti_gz(input_file, output_file)\ndef decompress_nifti_gz(nifti_gz_file, output_nifti_file):\n with gzip.open(nifti_gz_file, 'rb') as gz_file:\n data = gz_file.read()\n with open(output_nifti_file, 'wb') as nifti_file:\n nifti_file.write(data)\n'''\ndef mkdir(folder):\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n\ndef load_model(opt):\n opt.num_threads = 0 \n opt.batch_size = 1 \n opt.serial_batches = True \n opt.no_flip = True \n model = create_model(opt) \n model.setup(opt) \n if opt.eval:\n model.eval()\n return model\n\n\ndef mr_to_ct(\n img_fp: str,\n model: torch.nn.Module, \n transform, \n info: list, \n save_fp: str=None,\n overlap_ratio: float=0.6,\n ) -> None:\n\n data = transform({'A': img_fp})[\"A\"]\n start = time()\n\n print('Performing sliding window inference.....')\n with torch.no_grad():\n output = sliding_window_inference(\n inputs=data.unsqueeze(0),\n roi_size=(256, 256, 32),\n sw_batch_size=4,\n predictor=model,\n overlap=overlap_ratio) \n\n output = output.squeeze(0).squeeze(0).cpu().detach().numpy()\n\n print('Done...')\n print(f'Time elapsed: {time()-start:.3f} seconds')\n output = output * 2047.5 + 1023.5 # map to CT hounsfield units\n ants.image_write(ants.from_numpy(output, origin=info[0], spacing=info[1], direction=info[2]), save_fp)\n return output\n\n\nif __name__ == '__main__':\n\n opt = TestOptions().parse()\n # override the arguments for our trained model\n opt.model = \"han_pix2pix\"\n opt.input_nc = 1\n opt.output_nc = 1\n opt.direction = 'AtoB'\n opt.netG = 'resnet_9blocks' ###resnet_9blocks\n opt.name = 'experiment_name'\n opt.epoch = 'best'\n model = load_model(opt).netG\n\n # check input and output directories\n mkdir(opt.output_dir)\n assert osp.exists(opt.input_dir)\n mr_paths = sorted(glob(opt.input_dir + '/*'))\n assert len(mr_paths) > 0, 'At least one input image is required.'\n test_files = []\n test_files = mr_paths[-80:]\n # input data transform\n transform = Compose([\n LoadImaged(keys=\"A\"),\n AddChanneld(keys=\"A\"),\n NormalizeIntensityd(keys=\"A\", nonzero=True),\n ScaleIntensityRangePercentilesd(keys=\"A\", lower=0.01, upper=99.9, b_min=-1.0, b_max=1.0, clip=True, relative=False),\n ToTensord(keys=\"A\"),\n ])\n\n for mr_path in test_files:\n pid = os.path.basename(mr_path).split('.')[0]\n mr = ants.image_read(mr_path)\n info = [mr.origin, mr.spacing, mr.direction]\n output_path = os.path.join(opt.output_dir, f'{pid}_sCT.nii.gz')\n f_ct = mr_to_ct(mr_path, model, transform, info, output_path, opt.overlap_ratio)\n\n ","repo_name":"fighthhhh/MRI-to-CT","sub_path":"run_inference.py","file_name":"run_inference.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9511296942","text":"t = [9,7,3,6,2,8,5,4,5,8,7,10,4]\n\n# lmax[i] = lungimea maxima a unui subsir care se termina cu t[i]\n# pred[i] = pozitia elementului in fata caruia a fost alipit t[i] sau -1 daca nu poate fi alipit\nn = len(t)\nlmax = [1] * n\npred = [-1 for i in range(n)]\n\nfor i in range(1,n):\n for j in range(i):\n if t[j] <= t[i] and lmax[i] < 1 + lmax[j]:\n lmax[i] = 1 + lmax[j]\n pred[i] = j\n\n# pozitia elementului t[i] - elem cu care se termina cel mai lung subsir maximal\npmax = lmax.index(max(lmax))\n\n# reconstituim sirul de la coada la cap, apoi il afisam reversed\nsol = []\ni = pmax\nwhile i != -1:\n sol.append(t[i])\n i = pred[i]\n\nsol.reverse()\nprint(\"Lungimea maxima a unui subsir crescator:\", lmax[pmax])\nprint(\"Un subsir crescator maximal:\")\nprint(sol)\n","repo_name":"ana-rosu/ProgrAlgo","sub_path":"Seminarii/Seminar7/ProgramareDinamica/subsir_crescator_maximal_inapoi.py","file_name":"subsir_crescator_maximal_inapoi.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30873334763","text":"#WIP / TO DO LIST\n#include timer delays / multiproc the requests\n#include withdraw / transaction fees\n\nimport itertools\nimport requests\nimport get_market_prices\nimport time\n\nBUY_FEES_DICT = {'bitfinex': .002,\n 'bitstamp': .0025,\n 'btce': .005,\n 'cexio': .002,\n 'kraken': .0026} #fees represented as a decimal\n\nSELL_FEES_DICT = {'bitfinex': .002,\n 'bitstamp': .0025,\n 'btce': .005,\n 'cexio': .002,\n 'kraken': .0026} #fees represented as a decimal\n\n#The function calculates the arbritrage including fees from the exchanges\n#marketPriceDict - a dictionary that maps exchange name to current market price\n#exchange1 - an exchange that user is BUYING BTC on\n#exchange2 - an exchange that user is SELLING BTC on\n#!!!CURRENTLY ASSUMES FEE IS APPLIED TO CURRENCY RECIEVED (RECIEVE LESS THAN EXPECTED)\ndef arbitrage_difference_withfees(marketPriceDict, exchange1, exchange2):\n fee1 = BUY_FEES_DICT[exchange1]\n fee2 = SELL_FEES_DICT[exchange2]\n\n price1 = marketPriceDict[exchange1]\n price2 = marketPriceDict[exchange2]\n\n #%profit difference = (price sold - price bought) / price bought\n if (price1 < price2):\n #price1 is buy\n price1 *= (1.0+fee1)\n price2 *= (1.0-fee2)\n return (exchange1, exchange2, ((price2 - price1) / price1))\n else:\n #price2 is buy\n price1 *= (1.0-fee1)\n price2 *= (1.0+fee2)\n return (exchange2, exchange1, ((price1 - price2) / price2))\n\n\n#The handler to find potential arbitrage opportunities using a list of exchanges to check\n#listOfExchanges - a list of exchanges (strings) to check for opportunity\n#listOfExchanges is like [\"bitfinex\", \"btce\"] (all lowercase)\n#threshold - a DECIMAL specifying the minimum arbitrage desired\ndef handler(listOfExchanges):\n marketPriceDict = {} #dictionary that maps exchange name to current market price\n\n #populating market prices\n for exch, name in listOfExchanges.iteritems():\n successful_crawl = 0\n attempt_count = 0\n while(successful_crawl == 0 and attempt_count < 3):\n attempt_count+=1\n try:\n marketPriceDict[exch] = get_market_prices.handler(exch)\n successful_crawl = 1\n except:\n time.sleep(1)\n pass\n\n #obtain possible combinations\n arbitrage_opportunities = []\n combos = itertools.combinations(marketPriceDict.keys(), 2) #pick every possible combination of a pair of exchanges\n for exchange in combos: #iterate through the combinations\n arbitrage = arbitrage_difference_withfees(marketPriceDict, exchange[0], exchange[1])\n arbitrage_opportunities.append(arbitrage)\n\n return arbitrage_opportunities\n","repo_name":"BDANG/hold_em_or_fold","sub_path":"prototypes/check_arbitrage.py","file_name":"check_arbitrage.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"74988794616","text":"###QUESTION 1\n\nlyst = [1, 2, 3, 4, 2, 5, 2, 8, 21414]\n\n###function takes a list as argument\ndef oddEvenLister(list):\n #make empty lists and dict for use in the loop\n odds = []\n evens = []\n dict_e = {}\n #this loop simply check if the key in argument list is even or odd, then appends it to\n #its list\n for key in list:\n if key % 2 == 0:\n evens.append(key)\n else:\n odds.append(key)\n #setting dictionary values\n dict_e[\"Odd\"] = odds\n dict_e[\"Even\"] = evens\n return dict_e\n\n#call and print the results\nprint(oddEvenLister(lyst))\n\n#==================================================================================\n\n###QUESTION 2\n#importing random and counter, which counts the elements in a list\nimport random\nfrom collections import Counter\n\n#list gen counter generates 2 lists, then counts how many values\n#there is in each list\ndef listGenCounter():\n list_1 = []\n list_2 = []\n list_3 = []\n dictionary_e = {}\n for x in range(0, 200):\n list_1_numb = random.randint(1, 100)\n list_2_numb = random.randint(1, 100)\n #appending to each individual list, and then the third list\n #is both\n list_1.append(list_1_numb)\n list_2.append(list_2_numb)\n list_3.append(list_1_numb)\n list_3.append(list_2_numb)\n #counts the amount of values in each list\n list_1_count = Counter(list_1)\n list_2_count = Counter(list_2)\n list_3_count = Counter(list_3)\n #adds the dictionaries generated from counter into one big dictionary\n dictionary_e[\"LIST 1 COUNTS\"] = list_1_count\n dictionary_e[\"LIST 2 COUNTS\"] = list_2_count\n dictionary_e[\"COMBINED LIST COUNTS\"] = list_3_count\n #writes to a file, RESULTS4.txt\n with open(\"RESULTS4.txt\", \"w\") as file:\n file.write(\"LIST 1 COUNTS\" + \"\\n\" + str(dictionary_e[\"LIST 1 COUNTS\"]) + \"\\n\" + \"LIST 2 COUNTS\" + \"\\n\" + str(dictionary_e[\"LIST 2 COUNTS\"]) + \"\\n\" + \"COMBINED LIST COUNTS\" + \"\\n\" + str(dictionary_e[\"COMBINED LIST COUNTS\"]))\n #if needed\n return dictionary_e\nlistGenCounter()\n\n#==================================================================================\n#QUESTION 3\nimport statistics\n\ndef stepAvg(file):\n #month lists\n month_list = []\n jan = []\n feb = []\n mar = []\n apr = []\n may = []\n jun = []\n jul = []\n aug = []\n sep = []\n oct = []\n nov = []\n dec = []\n#open and read file\n file_open = open(file, 'r')\n file_read = file_open.read()\n file_split = file_read.splitlines()\n print(file_split)\n #grab the keys in the ranges of the split file, then adds it to each lists\n for key in range(334, 365):\n dec.append(key)\n for key in range(304, 334):\n nov.append(key)\n for key in range(273, 304):\n oct.append(key)\n for key in range(243, 273):\n sep.append(key)\n for key in range(212, 243):\n aug.append(key)\n for key in range(182 ,212):\n jul.append(key)\n for key in range(151 ,182):\n jun.append(key)\n for key in range(121, 151):\n may.append(key)\n for key in range(90, 121):\n apr.append(key)\n for key in range(60 ,90):\n mar.append(key)\n for key in range(32, 60):\n feb.append(key)\n for key in range(0, 32):\n jan.append(key)\n\n #calculates means\n jan_mean = statistics.mean(jan)\n feb_mean = statistics.mean(feb)\n mar_mean = statistics.mean(mar)\n apr_mean = statistics.mean(apr)\n may_mean = statistics.mean(may)\n jun_mean = statistics.mean(jun)\n jul_mean = statistics.mean(jul)\n aug_mean = statistics.mean(aug)\n sep_mean = statistics.mean(sep)\n oct_mean = statistics.mean(oct)\n nov_mean = statistics.mean(nov)\n dec_mean = statistics.mean(dec)\n\n #makes a table\n table = (\"MONTH\".ljust(12) + \"|\" + \"AVERAGE\".rjust(12) + \"\\n\" + \"January\".ljust(12) + \"|\" + str(jan_mean).rjust(12) + \"\\n\" + \"Febuary\".ljust(12) + \"|\" + str(feb_mean).rjust(12) + \"\\n\" + \"March\".ljust(12) + \"|\" + str(mar_mean).rjust(12) + \"\\n\" + \"March\".ljust(12) + \"|\" + str(mar_mean).rjust(12) + \"\\n\" + \"April\".ljust(12) + \"|\" + str(apr_mean).rjust(12) + \"\\n\" + \"May\".ljust(12) + \"|\" + str(may_mean).rjust(12) + \"\\n\" + \"June\".ljust(12) + \"|\" + str(jun_mean).rjust(12) + \"\\n\" + \"July\".ljust(12) + \"|\" + str(jul_mean).rjust(12) + \"\\n\" + \"August\".ljust(12) + \"|\" + str(aug_mean).rjust(12) + \"\\n\" + \"September\".ljust(12) + \"|\" + str(sep_mean).rjust(12) + \"\\n\" + \"October\".ljust(12) + \"|\" + str(oct_mean).rjust(12) + \"\\n\" + \"November\".ljust(12) + \"|\" + str(nov_mean).rjust(12) + \"\\n\" + \"December\".ljust(12) + \"|\" + str(dec_mean).rjust(12))\n #puts it in a new file called month.txt\n with open(\"month.txt\", \"w\") as file:\n file.write(table)\n\nstepAvg(\"steps.txt\")","repo_name":"georgequank/CIS-121","sub_path":"exam 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28240279450","text":"# Dice Rolling Simulator\n# Guess the absolute value of the two rolls\n\nfrom random import randint\ndef diceroller():\n# Die 1: take user's guess, then choose \n print(\"Let's roll the dice. Can you guess the numbers between 1 and 6?\")\n x = int(input(\"Die #1: \"))\n a = (randint(1,6))\n\n# Die 2: take user's guess, then choose\n w = int(input(\"Die #2: \"))\n b = (randint(1,6))\n\n# Total the guesses and the rolled numbers.\n z = (x+w)\n c = (a+b)\n\n# Show the results\n print(\"Fate chose \"+str(a)+\" and \"+str(b))\n if abs(z-c)==0:\n print(\"How did you do that?!\")\n if abs(z-c)!=0:\n print(\"You were off by \"+str(abs(z-c)))\n\n# Play again?\nreplay=True\n#No=False\n#Yes=True\nwhile replay:\n diceroller()\n again=input(\"Play again? Y/N: \")\n if again==\"N\":\n replay=False\n else:\n diceroller()\n","repo_name":"stokelycw/ltc","sub_path":"dro.py","file_name":"dro.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37357219302","text":"from calendar import THURSDAY, WEDNESDAY\nfrom cmath import sqrt\nfrom pickle import FALSE\n\n#%%\n#Conditional Statements \n#prompt the user for a day of the week, print out whether the day is Monday or not\nuser_input = input('What day of the week is it?').lower()\n\nif user_input == 'monday': \n print(\"It is monday\")\nelse:\n print(\"It is not monday\")\n\n\n#%%\n#prompt the user for a day of the week, print out whether the day is a weekday or a weekend\nuser_input = input('What day of the week is it?').lower()\n\nif user_input == 'monday'or'tuesday'or'wednesday'or'thursday'or'friday':\n print('it is a weekday')\n\nif user_input == 'saturday'or'sunday':\n print('it the weekend')\n\n# %%\n#pay calulator w/overtime\nuser_input = float(input('how hours worked in one week'))\n\nif user_input:\n rate = 33.38 \n if user_input <= 40:\n grosspay = rate*user_input\n print('Gross Pay: $', grosspay)\n else:\n regularpay = rate*40\n overtime = user_input-40\n overtimerate = rate*1.5\n overtimepay = overtimerate*overtime\n grosspay = regularpay+overtimepay\n print('Gross Pay: $', grosspay)\n \n\n# %%\n#2 Loop Basics \ni = 5\nwhile i <= 15:\n print(i)\n i += 1\n\n# %%\ni = 0\nwhile i <= 100:\n print(i)\n i += 2\n#%% \ni = 100\nwhile i >= -10:\n print(i)\n i -= 5\n\n# %%\ni = 2\nwhile i <= 1000000:\n print(i)\n i = i**2\n# %%\ni = 100\nwhile i >= 5:\n print(i)\n i -= 5\n# %%\n#For Loops\n#i\nUser_Input = int(input('multiplication table up through 10'))\nif int(User_Input) > 0:\n for i in range(1, 11):\n print(User_Input, '*', i, '=', (User_Input*i))\n# %%\nnum = 9\nfor i in range (1, num+1):\n for j in range(1, i+1):\n print(i, end=\"\")\n print()\n \n# %%\n# break and continue\nwhile True:\n User_Input = input('enter odd number between 1 & 50')\n if User_Input.isdigit():\n if int(User_Input) % 2 == 1 and int(User_Input) <= 50:\n break\nUser_Input = int(User_Input)\nfor i in range(1, 50, 2):\n print(i)\n if i == User_Input:\n print('skip odd number:', i)\n else:\n print('here is an odd number')\n#%% \n# user to enter a positive number and write \n# a loop that counts from 0 to that number\nwhile True:\n user_input = input('enter positive number')\n if user_input.isdigit():\n if int(user_input) > 0:\n break\nuser_input = int(user_input)\nfor i in range(1000):\n print(i)\n if i == user_input:\n break\n#%%\nwhile True:\n user_input = input('enter positive integer')\n if user_input.isdigit():\n if int(user_input) > 0:\n break\n\n\nuser_input = int(user_input)\nfor i in range(user_input, 0, -1):\n print(i)\n \n# %%\n\nfor i in range(1, 101):\n if i % 3 == 0 and i % 5 == 0:\n print('Fizzbuzz')\n elif i % 3 == 0:\n print('fizz')\n elif i % 5 == 0:\n print('buzz')\n else:\n print(i)\n# %%\nwhile True:\n user_input = input('enter an integer')\n if user_input.isdigit():\n if int(user_input) > 0:\n break\nuser_input_cont = input('do you want to continue & print table')\nif user_input_cont.lower().startswith('y'):\n user_input = int(user_input)\n print()\n print('number | squared | cubed')\n print('------ | ------- | -----')\n for i in range(1, user_input + 1):\n i_squared = i ** 2\n i_cubed = i ** 3\n print(f'{i: <6} | {i_squared: ^7} | {i_cubed: 5}')\n \n \n# %%\n","repo_name":"sgtcurran/python-exercises","sub_path":"control_structures_exercises.py","file_name":"control_structures_exercises.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23316204862","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(80), unique=False, nullable=False)\n\n favorites = db.relationship('Favorites', backref='user', uselist=True)\n\n def __repr__(self):\n return f\"\"\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"email\": self.email,\n # do not serialize the password, its a security breach\n }\n\n\nclass Favorites(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n \n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n name = db.Column(db.String(60), nullable=False)\n nature = db.Column(db.String(50), nullable=False)\n nature_id = db.Column(db.Integer, nullable=False)\n __table_args__ = (db.UniqueConstraint(\n 'user_id',\n 'name',\n name=\"dont_repeat_favorites\"\n ),)\n\n def __repr__(self):\n return f\"\"\n\n def serialize(self):\n return {\n \"user_id\": self.user_id,\n \"name\": self.name,\n \"nature\": self.nature,\n \"nature_id\": self.nature_id\n }\n\n\nclass People(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n name = db.Column(db.String(50), nullable=False)\n height = db.Column(db.Integer, nullable=False)\n mass = db.Column(db.Integer, nullable=False)\n hair_color = db.Column(db.String(50), nullable=False)\n skin_color = db.Column(db.String(50), nullable=False)\n eye_color = db.Column(db.String(50), nullable=False)\n birth_year = db.Column(db.String(50), nullable=False)\n gender = db.Column(db.String(50), nullable=False)\n\n\n def __repr__(self):\n return f\"\"\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"height\": self.height,\n \"mass\": self.mass,\n \"hair_color\": self.hair_color,\n \"skin_color\": self.skin_color,\n \"eye_color\": self.eye_color,\n \"birth_year\": self.birth_year,\n \"gender\": self.gender\n }\n\n def __init__(self, *args, **kwargs):\n\n for (key, value) in kwargs.items():\n if hasattr(self, key):\n attr_type = getattr(self.__class__, key).type\n\n try:\n attr_type.python_type(value)\n setattr(self, key, value)\n except Exception as error:\n print(f\"ignore the other values: {error.args}\")\n\n @classmethod\n def create(cls, data):\n instance = cls(**data)\n if (not isinstance(instance, cls)):\n print(\"Something failed\")\n return None\n db.session.add(instance)\n try:\n db.session.commit()\n print(f\"Created: {instance.name}\")\n return instance\n except Exception as error:\n db.session.rollback()\n print(error.args)\n\n\nclass Planets(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n name = db.Column(db.String(50), unique=True, nullable=False)\n diameter = db.Column(db.Float, nullable=False)\n climate = db.Column(db.String(50), nullable=False)\n gravity = db.Column(db.String(50), nullable=False)\n terrain = db.Column(db.String(50), nullable=False)\n surface_water = db.Column(db.String(50), nullable=False)\n population = db.Column(db.String(100))\n __table_args__ = (db.UniqueConstraint(\n 'diameter',\n 'name',\n name=\"dont_repeat_planets\"\n ),)\n\n \n def __repr__(self):\n return f\"\"\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"diameter\": self.diameter,\n \"climate\": self.climate,\n \"gravity\": self.gravity,\n \"terrain\": self.terrain,\n \"surface_water\": self.surface_water,\n \"population\": self.population\n }\n\n def __init__(self, *args, **kwargs):\n\n for (key, value) in kwargs.items():\n if hasattr(self, key):\n attr_type = getattr(self.__class__, key).type\n\n try:\n attr_type.python_type(value)\n setattr(self, key, value)\n except Exception as error:\n print(f\"ignore the other values: {error.args}\")\n\n @classmethod\n def create(cls, data):\n instance = cls(**data)\n if (not isinstance(instance, cls)):\n print(\"Something failed\")\n return None\n db.session.add(instance)\n try:\n db.session.commit()\n print(f\"Created: {instance.name}\")\n return instance\n except Exception as error:\n db.session.rollback()\n print(error.args)","repo_name":"Jdvd01/star-wars-api","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18729216868","text":"\ndef isPalindrome(str):\n rev = ''.join(reversed(str))\n if (str == rev):\n return print(\"Полиндром.\")\n return print(\"Не полиндром\")\n\ndef creditka(card):\n return '*' * len(card[:-4]) + card[-4:]\n\nclass Tomato:\n states = {0: 'Отсутствует', 1: 'Росток', 2: 'Зеленый', 3: 'Зрелый'}\n\n def __init__(self, index):\n self.index = index\n self.state = 0\n\n def grow(self):\n if self.state < 3:\n self.state += 1\n self.print_state()\n\n def is_ripe(self):\n if self.state == 3:\n return True\n return False\n\n def print_state(self):\n print('Томат {} сейчас {}'.format(self.index, Tomato.states[self.state]))\n\nclass TomatoBush:\n\n def __init__(self, count):\n self.tomates = [Tomato(index) for index in range (1, count + 1)]\n\n def are_all_ripe(self):\n if not all([i_tomato.is_ripe() for i_tomato in self.tomates]):\n print('Томаты еще не созрели! \\n')\n else:\n print('Все томаты созрели! Можно собирать. \\n')\n\n def grow_all(self):\n print('Томаты растут!')\n for i_tomates in self.tomates:\n i_tomates.grow()\n\nclass Gardener:\n def __init__(self, name, collected_tomates):\n self.name, self.collected_tomates = name, collected_tomates\n\n def gardener_info(self):\n print('Имя садовника: {}\\nСколько собрал томатов: {}\\n'.format(self.name, self.collected_tomates))\n\n def tend(worker, my_garden):\n if all([i_tomates.is_ripe() for i_tomates in my_garden.tomates]):\n question = int(input('Собрать томаты? \\n1 - да, 2 - нет\\n'))\n if question == 1:\n tomato_count = 0\n for i_potato in my_garden.tomates:\n worker.collected_tomates += 1\n tomato_count += 1\n i_potato.state = 0\n\n print('{} собрал {} томатов!'.format(worker.name, tomato_count))\n worker.gardener_info()\n else:\n question = int(input('Отправить {}а ухаживать за томатами? \\n 1 - да, 2 - нет\\n'.format(worker.name)))\n if question == 1:\n my_garden.grow_all()\n my_garden.are_all_ripe()\n\n @staticmethod\n def knowledge_base():\n print('''Класс Tomato:\nстатическое свойство states - все стадии созревания помидора\nметод __init__()определены два динамических protected свойства: 1) _index - передается параметром и 2) _state - принимает первое\nзначение из словаря states\nметод grow() переводит томат на следующую стадию созревания\nis_ripe()проверяtn, что томат созрел (достиг последней стадии созревания)\nКласс TomatoBush\nметод __init__()принимать в качестве параметра количество томатов и на его основе будет создавать список объектов класса\nTomato. Данный список будет храниться внутри динамического свойства tomatoes.\nметод grow_all(), который переводит все объекты из списка томатов на следующий этап созревания\nметод all_are_ripe() возвращаtn True, если все томаты из\nсписка стали спелыми\nметод give_away_all() будет чистить список томатов после сбора урожая\nКласс Gardener\nметод __init__(), внутри которого определены два динамических\nсвойства: 1) name - передается параметром, является публичным и 2) _plant -\nпринимает объект класса Tomato, является protected\nwork(), который зас��авляет садовника работать, что позволяет\nрастению становиться более зрелым\nharvest(), который проверяет, все ли плоды созрели. Если все -\nсадовник собирает урожай. Если нет - метод печатает предупреждение.\nстатический метод knowledge_base(), который выведет в консоль справку\nпо садоводству.''')\n\ndef main(index=None):\n while True:\n vvod = int(input(\"1.Задание №1. \\n2.Задание №2. \\n3.Задание №3.\\n4.Выход.\\n\"))\n if vvod == 1:\n card = input(\"Введите номер кредитной карты: \")\n print(creditka(card))\n elif vvod == 2:\n str = input(\"Введите слово: \")\n isPalindrome(str)\n elif vvod == 3:\n while True:\n vvod = int(input(\"1.Посадить томаты и выращивать. \\n2.Справка.\\n3.Выход.\\n\"))\n if vvod == 1:\n index = int(input(\"Введите количество томатов: \"))\n my_garden = TomatoBush(index)\n worker = Gardener('Иван', 0)\n\n while True:\n Gardener.tend(worker, my_garden)\n\n elif vvod == 2:\n Gardener.knowledge_base()\n\n elif vvod == 3:\n break\n elif vvod == 4:\n print(\"Выход.\")\n break\n else:\n print(\"Нет такого номера задания.\"\n \"Введите номер из списка.\")\n\nmain()","repo_name":"BlackRain91/Overone_Test","sub_path":"Экзамен_3.py","file_name":"Экзамен_3.py","file_ext":"py","file_size_in_byte":6023,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30802391390","text":"#!usr/bin/python\n\nfrom __future__ import print_function\nimport argparse\nimport os, sys\n\ndef to_bool(x):\n if x.lower() in ['true','t']:\n return True\n elif x.lower() in ['false','f']:\n return False\n else:\n raise argparse.ArgumentTypeError('Bool 값을 넣으세요')\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', type=to_bool, default=False,\n metavar='runserver',\n help='if you want to run server only'\n )\n\n args = parser.parse_args()\n\n\n cmds = ['./django_dev', 'pip install -r requirements.txt', 'python manage.py migrate', 'python manage.py runserver']\n\n if args.r :\n os.chdir('./django_dev')\n os.system('python manage.py runserver')\n else :\n for cmd in cmds:\n if 'django' in cmd:\n os.chdir(cmd)\n else :\n os.system(cmd)\n\nif __name__ == '__main__':\n main()","repo_name":"Development-On-Saturday/AIFOODIE_PROJECT","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"23102457463","text":"import random\nkolvo=0\nik1=0\nik0=0\nn=0\nf=0\ns=''\ni=0\nm=''\nsrednee=0\nalist=['','','','','','','','','','']\nwhile(n!=11):\n while(f==1):\n a=random.randint(0, 1)\n if (a==1):\n s=s+'T'\n else:\n s=s+'H'\n kolvo+=1\n if (a==1):\n ik1+=1\n ik0=0\n else:\n ik0+=1\n ik1=0\n if (ik1==3 or ik0==3):\n f=0\n srednee+=kolvo\n m=str(kolvo)\n alist[i]=s+' '+m\n i+=1\n m='' \n \n n+=1\n f=1\n ik1=0\n ik0=0\n s=''\n kolvo=0\nfor i in range(10):\n print(alist[i])\nprint(srednee/10)\n","repo_name":"rozalinaag/Python_1-course","sub_path":"laba2Python.py","file_name":"laba2Python.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37235803455","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\n\n\"\"\"One-Hot Encoding:\n\n For nominal features (t-shirt color), converting\n the strings to just numbers is a bad idea because\n the learning algorithm will believe that one is\n larger than the other (this is entirely wrong).\n\n A common way to work around this is using\n One-Hot Encoding, which returns a sparse matrix\n and converts the strings to \"binary\" values. For\n example:\n\n blue | green | red\n _____|_______|_____\n 0 | 1 | 0\n 0 | 0 | 1\n 1 | 0 | 0\n\n One-hot encoding can be done using both skikit-learn\n and Pandas. The first part of the code is the same\n as before, but skip down for the One-hot Encoding part:\n\n\"\"\"\n\n# Dataframe with both types of features:\ndf = pd.DataFrame([\n ['green', 'M', 10.1, 'class1'],\n ['red', 'L', 13.5, 'class2'],\n ['blue', 'XL', 15.3, 'class1']\n])\n\n# Set the column names of the dataframe\ndf.columns = ['color', 'size', 'price', 'classlabel']\n\n# Create mapping for shirt sized\nsize_mapping = {\n 'XL': 3,\n 'L': 2,\n 'M': 1\n}\n\n# Map the values inside the df using size_mapping\ndf['size'] = df['size'].map(size_mapping)\n\n# To do a reverse mapping of size, use the following:\ninv_size_mapping = {v: k for k, v in size_mapping.items()}\n\n# Enumerate labels (Order doesn't matter!)\nclass_mappings = {\n label: idx for idx, label in enumerate(np.unique(df['classlabel']))\n}\n# Now use the mapping dictionary:\ndf['classlabel'] = df['classlabel'].map(class_mappings)\n\n\n\"\"\"------ One-hot Encoding -------\"\"\"\n# For One-hot encoding to work, you must first encode the\n# values into numbers like we did before:\nX = df[['color', 'size', 'price']].values\ncolor_le = LabelEncoder()\nX[:, 0] = color_le.fit_transform(X[:, 0])\n\n# Put categorical_features = 0 to let it know that you\n# want to encode just the first column (color).\nohe = OneHotEncoder(categorical_features=[0])\n\nprint('\\nAfter doing One-hot Encoding, the dataframe is:')\nprint(ohe.fit_transform(X).toarray())\n\n\n# This can also be done in Pandas using get_dummies\n# pd.get_dummies(df[['price', 'color', 'size']])\n","repo_name":"CSwithJC/PythonMachineLearning","sub_path":"chapter_04_data_preprocessing/04_one_hot_encoding.py","file_name":"04_one_hot_encoding.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74229364216","text":"import QCNN_circuit\r\nimport pennylane as qml\r\nfrom pennylane import numpy as np\r\nimport autograd.numpy as anp\r\nfrom tqdm import tqdm\r\n\r\ndef square_loss(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n loss = loss + (l - p) ** 2\r\n loss = loss / len(labels)\r\n return loss\r\n\r\ndef cross_entropy(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n c_entropy = l * (anp.log(p[l])) + (1 - l) * anp.log(1 - p[1 - l])\r\n loss = loss + c_entropy\r\n return -1 * loss\r\n\r\ndef cost(params, X, Y, U, U_params, embedding_type, circuit, cost_fn):\r\n predictions = [QCNN_circuit.QCNN(x, params, U, U_params, embedding_type, cost_fn=cost_fn) for x in X]\r\n if cost_fn == 'mse':\r\n loss = square_loss(Y, predictions)\r\n elif cost_fn == 'cross_entropy':\r\n loss = cross_entropy(Y, predictions)\r\n return loss\r\n\r\n# Circuit training parameters\r\nsteps = 1460\r\nlearning_rate = 0.01\r\nbatch_size = 64 \r\ndef circuit_training(X_train, Y_train, U, U_params, embedding_type, circuit, cost_fn):\r\n if U == 'U_SU4_no_pooling' or U == 'U_SU4_1D' or U == 'U_9_1D':\r\n total_params = U_params * 3\r\n elif U =='U_SU4_1D_double':\r\n total_params = 63\r\n elif U =='U_SU4_1D_tf':\r\n total_params = 75\r\n else:\r\n total_params = U_params * 3 + 2 * 3\r\n \r\n params = np.random.randn(total_params, requires_grad=True)\r\n #opt = qml.NesterovMomentumOptimizer(stepsize=learning_rate)\r\n opt = qml.AdamOptimizer(stepsize=learning_rate)\r\n loss_history = []\r\n\r\n for it in tqdm(range(0,steps)):\r\n batch_index = np.random.randint(0, len(X_train), (batch_size,))\r\n X_batch = [X_train[i] for i in batch_index]\r\n Y_batch = [Y_train[i] for i in batch_index]\r\n params, cost_new = opt.step_and_cost(lambda v: cost(v, X_batch, Y_batch, U, U_params, embedding_type, circuit, cost_fn),params)\r\n loss_history.append(cost_new)\r\n if it % 10 == 0:\r\n print(\"iteration: \", it, \" cost: \", cost_new)\r\n \r\n print('loss_history:', loss_history)\r\n print(\"---------------------------\")\r\n print('training params:',params)\r\n with open(r'C:\\Users\\charu\\Desktop\\Projects\\MNIST_QCNN\\Result_BRATS\\loss_history.txt', \"a\") as f:\r\n f.write(str(loss_history))\r\n f.write('\\n')\r\n f.close()\r\n with open(r'C:\\Users\\charu\\Desktop\\Projects\\MNIST_QCNN\\Result_BRATS\\training_params.txt', \"a\") as f:\r\n f.write(str(params))\r\n f.write('\\n')\r\n f.close()\r\n \r\n return loss_history, params\r\n\r\n","repo_name":"CharulataJain/Quantum-Dropout","sub_path":"Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28886616433","text":"# Convert lowercase in uppercase in input()\ndata = input().upper()\n\nres = \"\"\ncurrent_string = \"\"\ncurrent_number = \"\"\n\nfor index, char in enumerate(data):\n if char.isnumeric():\n current_number += char\n if index + 1 < len(data) and data[index + 1].isnumeric():\n current_number += data[index + 1]\n res += int(current_number) * current_string\n current_string = \"\"\n current_number = \"\"\n else:\n current_string += char\n\n# Get len() from unique symbols in res\nsymbol_counter = len(set([x for x in res]))\n\nprint(f\"Unique symbols used: {symbol_counter}\")\nprint(res)\n","repo_name":"sasho132/softuni-courses","sub_path":"python-fundamentals-jan-2022/text_processing/14_rage_quit.py","file_name":"14_rage_quit.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29051551174","text":"import json\nimport re\nimport time\n\nfrom datetime import datetime\nfrom typing import Optional\n\nimport kubernetes\nimport kubernetes as k8s\n\n\ndef timeout_reached(start_time: float, timeout_seconds: int):\n return time.time() > start_time + timeout_seconds\n\n\nclass K8sUtils:\n \"\"\"\n Utility class for interacting with a Kubernetes deployment\n\n Sets up basic kubernetes API clients and helper methods\n \"\"\"\n\n batch_client = None\n core_client = None\n network_client = None\n\n def __init__(self):\n k8s.config.load_kube_config()\n self.app_client = k8s.client.AppsV1Api()\n self.batch_client = k8s.client.BatchV1Api()\n self.core_client = k8s.client.CoreV1Api()\n self.network_client = k8s.client.NetworkingV1Api()\n\n def get_endpoint(self, substring: str) -> str:\n response = self.network_client.list_ingress_for_all_namespaces(\n _preload_content=False\n )\n routes = json.loads(response.data)\n hostname = next(\n (\n route[\"spec\"][\"rules\"][0][\"host\"]\n for route in routes[\"items\"]\n if substring in route[\"spec\"][\"rules\"][0][\"host\"]\n ),\n None,\n )\n return f\"http://{hostname}\"\n\n def run_job(self, name: str, wait: bool = True) -> k8s.client.V1Job:\n cron_jobs = self.batch_client.list_cron_job_for_all_namespaces()\n try:\n job_body, job_namespace = next(\n (cron_job.spec.job_template, cron_job.metadata.namespace)\n for cron_job in cron_jobs.items\n if cron_job.metadata.name == name\n )\n except StopIteration:\n raise ValueError(f\"No cron job named '{name}' found\")\n\n curr_time = datetime.now().strftime(\"%Y%m%d%H%M%S.%f\")\n job_body.metadata.name = f\"{name}-test-{curr_time}\"\n job = self.batch_client.create_namespaced_job(\n body=job_body, namespace=job_namespace\n )\n\n if wait:\n self.wait_for_job_complete(job_body.metadata.name, job_namespace)\n\n return job\n\n def wait_for_job_complete(self, name: str, namespace: str):\n watch = k8s.watch.Watch()\n for event in watch.stream(\n func=self.core_client.list_namespaced_pod,\n namespace=namespace,\n timeout_seconds=60,\n ):\n if event[\"object\"].metadata.name.startswith(name) and event[\n \"object\"\n ].status.phase in [\"Succeeded\", \"Failed\"]:\n watch.stop()\n return\n\n def get_deployment_pod_names(self, label: str, namespace: str):\n res = self.core_client.list_namespaced_pod(\n namespace=namespace, label_selector=label\n )\n return [pod.metadata.name for pod in res.items]\n\n def kill_pods(self, label: str, namespace: str, timeout_seconds: int = 300):\n pod_names = self.get_deployment_pod_names(label=label, namespace=namespace)\n for pod_name in pod_names:\n self.core_client.delete_namespaced_pod(\n name=pod_name,\n namespace=namespace,\n body=k8s.client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n # Wait for the pods to be deleted\n start_time = time.time()\n for pod_name in pod_names:\n while not timeout_reached(start_time, timeout_seconds):\n try:\n self.core_client.read_namespaced_pod_status(pod_name, namespace)\n except k8s.client.exceptions.ApiException as e:\n # The pod has been deleted\n if e.status == 404:\n break\n\n def wait_for_pod_running(self, label: str, namespace: str):\n watch = k8s.watch.Watch()\n for event in watch.stream(\n func=self.core_client.list_namespaced_pod,\n namespace=namespace,\n label_selector=label,\n timeout_seconds=60,\n ):\n if event[\"object\"].status.phase == \"Running\":\n watch.stop()\n return\n\n def get_latest_pod_logs(\n self, pod_name: str, container_name: str, pod_namespace: str, log_lines: int\n ):\n pod_logs = self.core_client.read_namespaced_pod_log(\n name=pod_name,\n container=container_name,\n namespace=pod_namespace,\n tail_lines=int(log_lines),\n )\n pod_logs = pod_logs.splitlines()\n return pod_logs\n\n def wait_for_pod_log(\n self,\n pod_name: str,\n namespace: str,\n log_message: str,\n timeout_seconds: int = 120,\n ):\n \"\"\"\n Wait until a message has been logged or timeout reached\n :param pod_name: Pod name to watch the logs\n :param namespace: Namespace of pod name\n :param log_message: Log message to wait for\n :param timeout_seconds: Timeout seconds\n \"\"\"\n pods = self.core_client.list_namespaced_pod(namespace)\n try:\n pod = next(\n pod.metadata.name\n for pod in pods.items\n if pod.metadata.name.startswith(pod_name)\n )\n except StopIteration as err:\n print(f\"Pod '{pod_name}' not found. {err}\")\n raise\n\n watch = k8s.watch.Watch()\n watch_start = time.time()\n for event in watch.stream(\n func=self.core_client.read_namespaced_pod_log,\n namespace=namespace,\n name=pod,\n ):\n if log_message in event or timeout_reached(\n start_time=watch_start, timeout_seconds=timeout_seconds\n ):\n watch.stop()\n return\n\n def get_namespace_names(self):\n namespaces = self.core_client.list_namespace()\n return [ns.metadata.name for ns in namespaces.items]\n\n def get_first_matching_pod_name(self, namespace: str, label: str) -> str:\n try:\n return next(\n name\n for name in self.get_deployment_pod_names(label, namespace)\n )\n except StopIteration:\n print(\n f\"Pod not found for label {label} in namespace {namespace}\"\n )\n return \"\"\n\n def get_namespaced_secret(\n self, name: str, namespace: str\n ) -> Optional[kubernetes.client.V1Secret]:\n \"\"\"\n Get a secret from a namespace\n :param name: Name of the secret\n :param namespace: Namespace of the secret\n :return: V1Secret object\n \"\"\"\n secrets = self.core_client.list_namespaced_secret(namespace)\n return next(\n (\n secret\n for secret in secrets.items\n if secret.metadata.name.startswith(name)\n ),\n None,\n )\n\n def get_pod_env_vars(self, namespace: str, label: str) -> [str]:\n \"\"\"\n Exec into a pod and get the environment variables\n\n :param namespace: Namespace of pod\n :param label: Pod label\n :return: List of environment variables\n \"\"\"\n pod_name = self.get_first_matching_pod_name(namespace, label)\n if not pod_name:\n return []\n return self.exec_command(namespace, pod_name, [\"env\"]).split(\"\\n\")\n\n def run_python_script_in_pod(\n self, namespace: str, label: str, script_path: str\n ) -> [str]:\n pod_name = self.get_first_matching_pod_name(namespace, label)\n if not pod_name:\n return []\n return self.exec_command(namespace, pod_name, [\"python\", script_path]).split(\n \"\\n\"\n )\n\n def exec_command(self, namespace: str, pod_name: str, command: [str]) -> str:\n \"\"\"\n Execute a command in a running pod\n\n :param namespace: Namespace of pod\n :param pod_name: Pod name\n :param command: Command to run in pod\n :return: response\n \"\"\"\n try:\n res = k8s.stream.stream(\n self.core_client.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n command=command,\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False,\n )\n return res\n except k8s.client.exceptions.ApiException as err:\n print(f\"Unable to exec in pod {pod_name}. {err}\")\n return \"\"\n\n def get_configmap_values(self, namespace: str, configmap_name: str) -> {str: str}:\n try:\n res = self.core_client.read_namespaced_config_map(configmap_name, namespace)\n return res.data\n except k8s.client.exceptions.ApiException as err:\n print(\n f\"Unable to get values for configmap {configmap_name} in namespace {namespace}. {err}\"\n )\n return {}\n","repo_name":"oleksii-kalinin/ping-cloud-base","sub_path":"tests/python-utils/k8s_utils.py","file_name":"k8s_utils.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22326782603","text":"from Outputs import Output\nimport numpy as np\nfrom PIL.Image import Resampling\ntry:\n import cv2\nexcept:\n pass\n\nclass CvOutput(Output.Output):\n \n def __init__(self, name, **kwargs):\n super().__init__(name, **kwargs)\n\n def getName(self):\n return \"Tk output\"\n \n def Input(self, frame):\n frame = frame.resize((frame.width*self.scale, frame.height*self.scale), resample=Resampling.NEAREST)\n cv2.imshow(self.name, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)) # type: ignore\n cv2.waitKey(1) # type: ignore\n \n def getArgs(self):\n return {\n \"scale\": {\n \"types\": [float, int],\n \"default\": 1\n }\n }","repo_name":"DrekkCuga/ProtoCore","sub_path":"Outputs/CvOutput.py","file_name":"CvOutput.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6474109569","text":"#白\n#连接数据库\n#依据数据库的时间填补离去时间\n\nimport pypyodbc\nimport io\nimport sys,os\nimport time\nfrom dateutil import parser\nfrom datetime import datetime, timedelta\n\n#把数据弄干净一点\ndef add_two_day(object):\n if object is not None:\n object = parser.parse(object)\n object += timedelta(days=2)\n object = object.strftime('%Y-%m-%d %H:%M')\n return object\n return object\n\ndef add_clearance(box_no):\n print(\"补全\"+box_no+\"的清关数据\")\n #获取irid和wpos和npos和cdate\n irid = 0\n wpos = 0\n npos = 0\n cdate = \"1990-01-01 00:00\"\n cplace =\"中国\"\n cinfo =\"正在清关\"\n sqlcommand = \"\"\"select cr.irid,max(cd.wpos) \n from client_rec cr left join check_detail cd on cr.irid = cd.irid \n where cr.cnum = '\"\"\"\n sqlcommand += box_no\n sqlcommand += \"' group by cr.irid;\"\n\n newCursor.execute(sqlcommand)\n \n for row in newCursor:\n irid = row[0]\n wpos = row[1]\n\n wpos += 1\n\n sqlcommand = \"\"\"select cr.irid,max(cd.npos) from client_rec cr left join check_detail cd on cr.irid = cd.irid \n where cr.cnum = '\"\"\"\n sqlcommand +=box_no+\"' and cd.npos <100 group by cr.irid;\"\n newCursor.execute(sqlcommand)\n for row in newCursor:\n npos = row[1] \n sqlcommand =\"\"\"select cdate from check_detail where irid = '\"\"\"\n sqlcommand+=str(irid)+\"\"\"' and npos = '\"\"\"\n sqlcommand+=str(npos)+\"';\"\n newCursor.execute(sqlcommand)\n for row in newCursor:\n cdate = row[0]\n npos += 1\n \n\n #插入数据\n sqlcommand =\"\"\"insert into check_detail(irid,wpos,npos,cplace,cdate,cinfo,cinput,cextra) VALUES('\"\"\"\n sqlcommand+= str(irid) + \"','\"\n sqlcommand+= str(wpos) + \"','\"\n sqlcommand+= str(npos) + \"','\"\n sqlcommand+= str(cplace) + \"','\"\n sqlcommand+= add_two_day(cdate) + \"','\"\n sqlcommand+= str(cinfo)+\"','张晟','TXT');\"\n\n newCursor.execute(sqlcommand)\n newCursor.commit();\n\n \n\n#解决输出\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')\n\n\n","repo_name":"mengyangbai/ex_work_script","sub_path":"Practise Scripts/add_clearance.py","file_name":"add_clearance.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11133025309","text":"#Write a Python function that takes a list of strings as input and\r\n# returns a new list containing only the strings that start with the\r\n# letter 'A' (case-insensitive). For example, if the input list is\r\n# ['apple', 'banana', 'Avocado', 'orange'], the function should return ['apple', 'Avocado'].\r\ndef fruit(list):\r\n a=[]\r\n for i in list:\r\n if i[0].lower()==\"a\":\r\n a.append(i)\r\n return a\r\nlist=['apple', 'banana', 'Avocado', 'orange']\r\nprint(fruit(list))","repo_name":"aarticode/Python-Questions","sub_path":"list_of_string.py","file_name":"list_of_string.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71711031416","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.insert(0,'/Users/SRG/Documents/GitHub/SSNMR/functions')\nimport numpy as np\nimport nmrglue as ng\nimport functions as proc\nimport simpson as simproc\nimport wavelet_denoise as wave\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport time\nimport NUS as nus\nfrom tabulate import tabulate\n\nstart_time = time.time()\n\ncwd = os.getcwd()\n\n##1. Params:\n#Pick just two figures to compare\naa = 2 #Spectrum index\nbase = 0.5 #base contour %'age\nnc = 40 #number of contours\ns = 0 #switch on sum (0) or skyline (1) projection or both (2)\n\n# Params:\nnzf1 = 512\nnzf2 = 4096\ngb1 = 30 #GB on F1\ngb2 = 0 #GB on F2\n\nph0 = [243 + 10 -6, 797420 + 140, 0, 0] #phases for F2\n# ph0 = [-148, 796480, 0, 0] #phases for F2\n\na1 = 196; a2 = 340; a3 = 356 #F1 indices for the 2nd order spectra\n# a3 = simproc.nearest(fiso,-26.6504)\n# a2 = simproc.nearest(fiso,-27.43)\n# a1 = simproc.nearest(fiso,-33.82)\n\n##2. Obtain all exp spec over a loop\nm = 3\nexp = [20, 30, 31, 32]\nSSIMin1 = np.zeros(m); SSIMin2 = np.zeros(m); SSIMin3 = np.zeros(m); SSIMout1 = np.zeros(m);\nSSIMout2 = np.zeros(m); SSIMout3 = np.zeros(m); snrF1in = np.zeros(m); snrF2in = np.zeros(m);\nsnrF2out = np.zeros(m); snrF1out = np.zeros(m)\nspecrecon = np.zeros((nzf1,nzf2,m),dtype='complex')\nspecin = np.zeros((nzf1,nzf2,m),dtype='complex')\n\nfor j in range(m):\n if j == 0:\n fid = np.load('128t1_ref_FID_EXP20.npy')\n os.chdir(cwd + '\\\\' + str(exp[j]))\n else:\n os.chdir(cwd + '\\\\' + str(exp[j]))\n dct, data = ng.bruker.read(cwd+ '\\\\' + str(exp[j])) #load FID\n ##NUS fcn\n f1nus = nus.prep(data, nzf2, ph0)\n \n #IST recon\n spec = nus.IST_D(f1nus, nzf1, threshold = 0.99,max_iter=30) #IST_S or _D\n \n spec = np.fliplr(spec)\n \n fid = np.fft.ifft2(spec)\n fid = np.fft.fftshift(np.fft.fft(fid, axis = 1 ),axes = 1)\n fid = np.fft.ifft(fid, axis = 1)\n ##end \n\n ## Shear\n spec = proc.mqproc(fid, SH = 7/9,zf1=nzf1, zf2=nzf2, lb1 = gb1, lb2 = gb2) \n \n #Phase\n if j == 0:\n # ph = [243 + 10, 797420 + 140, 0, 0]\n ph = ph0\n specin[:,:,j] = proc.phase(spec,ph,ax=1)\n else:\n # proc.mphase(spec, fine = 100, ax=1) #determine initial phases \n #Need a shift and flips n such\n spec = np.fft.fftshift(spec,axes=0)\n spec = np.fliplr(spec)\n spec = np.flipud(spec)\n #Phase\n # proc.mphase(spec, fine = 100, ax=1) #determine initial phases \n ph = [180+3, 0, 0, 0]\n # ph = [0, 0, 0, 0]\n specin[:,:,j] = proc.phase(spec,ph,ax=1)\n \n #Denoise 2D SWT\n fin_reg = wave.region_spec2(np.real(spec), thresh = 22, wndw = 8)\n specrecon[:,:,j], coeffin, coeffs = wave.wavelet_denoise2(2, np.real(specin[:,:,j]), fin_reg)\n\n #PCA Denoise\n # specrecon[:,:,j] = proc.PCA(specrecon[:,:,j],10)\n # plt.close()\n \n #Normalize\n specin[:,:,j] = np.real(specin[:,:,j]) / np.max(np.real(specin[:,:,j]))\n specrecon[:,:,j] = np.real(specrecon[:,:,j]) / np.max(np.real(specrecon[:,:,j]))\n \n #SNRs\n a = np.unravel_index(spec.argmax(), spec.shape)\n snrF2in[j] = proc.snr(specin[a[0],:,j],1000)\n snrF1in[j] = proc.snr(specin[:,a[1],j],150)\n snrF2out[j] = proc.snr(specrecon[a[0],:,j],1000)\n snrF1out[j] = proc.snr(specrecon[:,a[1],j],150)\n \n ##SSIMs for the 2nd order patterns\n SSIMin1[j] = simproc.ssim(specin[a1,:,0],specin[a1,:,j])\n SSIMin2[j] = simproc.ssim(specin[a2,:,0],specin[a2,:,j]) \n SSIMin3[j] = simproc.ssim(specin[a3,:,0],specin[a3,:,j])\n \n SSIMout1[j] = simproc.ssim(specin[a1,:,0],specrecon[a1,:,j])\n SSIMout2[j] = simproc.ssim(specin[a2,:,0],specrecon[a2,:,j])\n SSIMout3[j] = simproc.ssim(specin[a3,:,0],specrecon[a3,:,j])\n \n print('%.0f Percent Done' %(100*(j+1)/m))\n\nc = simproc.ssim(specin[a1,:,0],specin[a1,:,0]) - 1\nSSIMin1 = SSIMin1 - c; SSIMin2 = SSIMin2 - c; SSIMin3 = SSIMin3 - c; \nSSIMout1 = SSIMout1 - c; SSIMout2 = SSIMout2 - c; SSIMout3 = SSIMout3 - c; \n\n#############Plotting Stuff\n#Plotting Stuff\nmpl.rcParams['font.family'] = \"arial\"\nmpl.rcParams['font.size'] = 14\nmpl.rcParams['pdf.fonttype'] = 42\n\nfreq2 = proc.freqaxis(spec[0,:],unit='ppm')\nfiso = proc.fiso(spec[:,0],unit='ppm')\n\nh = np.max(np.real(specin[:,:,0]))\nlvls = np.linspace((base*1e-2)*h,h,nc)\n\n#############First Fig\nfig = plt.figure(figsize=(12, 8)) # figure size w x h\ngrid = plt.GridSpec(4, 5, hspace=0.3, wspace=0.6) #4x5 grid of subplots #spacings for h and w\nmain_ax = fig.add_subplot(grid[1:, 1:4]) \n\nyplot = fig.add_subplot(grid[1:, 0], yticklabels=[])\nxplot = fig.add_subplot(grid[0, 1:4], yticklabels=[], sharex=main_ax)\n\nmain_ax.contour(freq2,fiso,(np.real(specin[:,:,0])),lvls,cmap='jet')\nmain_ax.set_xlabel('F$_{2}$ (ppm)')#, fontfamily = 'Arial')\n#main_ax.set_ylabel('F1_iso Frequency (ppm)')#, fontfamily = 'Arial')\nmain_ax.set_ylabel(\"F$_{iso}$ (ppm)\",labelpad=-429)\nmain_ax.invert_yaxis()\nmain_ax.invert_xaxis()\nmain_ax.set_xlim(-24, -40) ##CHK BACK\nmain_ax.set_ylim(-22, -39) ##CHK BACK\nmain_ax.tick_params(right = True,left = False,labelleft = False, \n labelright=True, which = 'both')\nmain_ax.minorticks_on()\n\nif s == 0:\n xplot.plot(freq2,(np.sum(np.real(specin[:,:,0]),0)),'k') #sum\n yplot.plot(np.real(np.sum(specin[:,:,aa],1)),fiso,'k')\nelif s==1:\n xplot.plot(freq2,(np.max(np.real(specin[:,:,0]),0)),'k') #skyline\n yplot.plot(np.real(np.max(specin[:,:,aa],1)),fiso,'k') #Skyline\nelse:\n xplot.plot(freq2,(np.max(np.real(specin[:,:,0]),0) / np.max(np.max(np.real(specin[:,:,0]),0)) )+0.5,'k') #both\n xplot.plot(freq2,(np.sum(np.real(specin[:,:,0]),0) / np.max(np.sum(np.real(specin[:,:,0]),0))),'r') #\n yplot.plot(np.real(np.max(specin[:,:,aa],1) / np.max(np.max(specin[:,:,aa],1)))+0.5,fiso,'k') \n yplot.plot(np.real(np.sum(specin[:,:,aa],1) / np.max(np.sum(specin[:,:,aa],1))),fiso,'r')\n\n# xplot.plot(freq2,(np.sum(np.real(specin[:,:,aa]),0)),'k')\n# yplot.plot(np.real(np.sum(specin[:,:,aa],1)),fiso,'k')\nyplot.invert_xaxis()\nyplot.invert_yaxis()\nyplot.set_ylim(-22, -39) ##CHK BACK\n\n#Plot the sub-spectra\ns1 = fig.add_subplot(grid[1, 4], yticklabels=[])\ns2 = fig.add_subplot(grid[2, 4], yticklabels=[],sharex=s1)\ns3 = fig.add_subplot(grid[3, 4], yticklabels=[],sharex=s1)\ns1.invert_xaxis()\n#s1.minorticks_on()\ns1.set_xlim(-24,-40) ##CHK BACK\ns3.set_xlabel('F$_{2}$ (ppm)')\n\ns1.plot(freq2,(np.real(specin[a1,:,0])),'c')\ns2.plot(freq2,(np.real(specin[a2,:,0])),'b')\ns3.plot(freq2,(np.real(specin[a3,:,0])),'m')\n\nyplot.axis('off')\nxplot.axis('off')\n####################Second fig\nh = np.max(np.real(specrecon[:,:,aa]))\nlvls = np.linspace((base*1e-2)*h,h,nc)\n\nfig = plt.figure(figsize=(12, 8)) # figure size w x h\ngrid = plt.GridSpec(4, 5, hspace=0.3, wspace=0.6) #4x5 grid of subplots #spacings for h and w\nmain_ax = fig.add_subplot(grid[1:, 1:4]) \n\nyplot = fig.add_subplot(grid[1:, 0], yticklabels=[])\nxplot = fig.add_subplot(grid[0, 1:4], yticklabels=[], sharex=main_ax)\n\nmain_ax.contour(freq2,fiso,(np.real(specrecon[:,:,aa])),lvls,cmap='jet')\nmain_ax.set_xlabel('F$_{2}$ (ppm)')#, fontfamily = 'Arial')\n#main_ax.set_ylabel('F1_iso Frequency (ppm)')#, fontfamily = 'Arial')\nmain_ax.set_ylabel(\"F$_{iso}$ (ppm)\",labelpad=-429)\nmain_ax.invert_yaxis()\nmain_ax.invert_xaxis()\nmain_ax.set_xlim(-24, -40) ##CHK BACK\nmain_ax.set_ylim(-22, -39) ##CHK BACK\nmain_ax.tick_params(right = True,left = False,labelleft = False, \n labelright=True, which = 'both')\nmain_ax.minorticks_on()\n\nif s == 0:\n xplot.plot(freq2,(np.sum(np.real(specrecon[:,:,aa]),0)),'k') #sum\n yplot.plot(np.real(np.sum(specrecon[:,:,aa],1)),fiso,'k')\nelif s==1:\n xplot.plot(freq2,(np.max(np.real(specrecon[:,:,aa]),0)),'k') #skyline\n yplot.plot(np.real(np.max(specrecon[:,:,aa],1)),fiso,'k') #Skyline\nelse:\n xplot.plot(freq2,(np.max(np.real(specrecon[:,:,aa]),0) / np.max(np.max(np.real(specin[:,:,aa]),0)) )+0.5,'k') #both\n xplot.plot(freq2,(np.sum(np.real(specrecon[:,:,aa]),0) / np.max(np.sum(np.real(specin[:,:,aa]),0))),'r') #\n yplot.plot(np.real(np.max(specrecon[:,:,aa],1) / np.max(np.max(specrecon[:,:,aa],1)))+0.5,fiso,'k') \n yplot.plot(np.real(np.sum(specrecon[:,:,aa],1) / np.max(np.sum(specrecon[:,:,aa],1))),fiso,'r')\n \n\n# xplot.plot(freq2,(np.sum(np.real(specrecon[:,:,aa]),0)),'k')\n# yplot.plot(np.real(np.sum(specrecon[:,:,aa],1)),fiso,'k')\n\nyplot.invert_xaxis()\nyplot.invert_yaxis()\nyplot.set_ylim(-22, -39) ##CHK BACK\n\n#Plot the sub-spectra\ns1 = fig.add_subplot(grid[1, 4], yticklabels=[])\ns2 = fig.add_subplot(grid[2, 4], yticklabels=[],sharex=s1)\ns3 = fig.add_subplot(grid[3, 4], yticklabels=[],sharex=s1)\ns1.invert_xaxis()\n#s1.minorticks_on()\ns1.set_xlim(-24,-40) ##CHK BACK\ns3.set_xlabel('F$_{2}$ (ppm)')\n\ns1.plot(freq2,(np.real(specrecon[a1,:,aa])),'c')\ns2.plot(freq2,(np.real(specrecon[a2,:,aa])),'b')\ns3.plot(freq2,(np.real(specrecon[a3,:,aa])),'m')\n\nyplot.axis('off')\nxplot.axis('off')\n##Table of results\n##N.B.: dont want SNR measure and want SSIM against no NUS\n# ns = ['ns=384', 'ns=192', 'ns=96', '75 kHz', 'No SPAM']\nns = [100,60,40,20]\ndata=[]\nfor i in range(len(SSIMin1)):\n data.append( [ns[i],\"%.1f\"%snrF1in[i],\"%.1e\"%snrF1out[i], \"%.1e\"%snrF2in[i],\n \"%.1e\"%snrF2out[i], \"%.4f\"%SSIMin1[i], \"%.4f\"%SSIMout1[i],\n \"%.4f\"%SSIMin2[i], \"%.4f\"%SSIMout2[i], \"%.4f\"%SSIMin3[i],\n \"%.4f\"%SSIMout3[i] ])\n# create header\nhead = ['NUS %','SNRF1_in','SNRF1_out', 'SNRF2_in','SNRF2_out', 'SSIM_in1', 'SSIM_out1', \n 'SSIM_in2', 'SSIM_out2', 'SSIM_in3', 'SSIM_out3']\n# display table\nprint(tabulate(data, headers=head, tablefmt=\"pretty\", floatfmt=\"5.4f\"))\n\nos.chdir(cwd)\n\na = np.asarray(data,dtype='float64')\nnp.savetxt(\"8_MQ_exp.csv\",a, fmt='%5.4f', delimiter=',')\n\nprint('Finished!')\nprint(\"-- %5.5f s Run Time --\" % (time.time() - start_time))","repo_name":"rschurko/DESPERATE","sub_path":"Experimental Datasets/RbNO3_MQMAS_Nov9_2021/MQMAS_NUSbench.py","file_name":"MQMAS_NUSbench.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"69813687735","text":"from rest_framework import serializers\nfrom django.db.models import F\n\nfrom .models import Order, ReciverInfo\nfrom carts.models import Cart\nfrom carts.serializers import CartSerializer\nfrom products.models import Product\n\n\nclass ReciverInfoSerializer(serializers.ModelSerializer):\n class Meta:\n model = ReciverInfo\n fields = '__all__'\n\n\nclass OrderListSerializer(serializers.ModelSerializer):\n total_price = serializers.SerializerMethodField()\n items_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Order\n fields = ('id', 'created_at', 'total_price', 'code',\n 'items_count', 'shipping_status', 'purchase_invoice')\n\n def get_total_price(self, obj):\n total_price = 0\n for item in obj.cart.items.all():\n total_price += item.total_price\n return total_price\n\n def get_items_count(self, obj):\n return obj.cart.items.all().count()\n\n\nclass OrderDetailSerializer(serializers.ModelSerializer):\n cart = CartSerializer()\n reciver = ReciverInfoSerializer()\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass CreateOrderSerializer(serializers.ModelSerializer):\n reciver = ReciverInfoSerializer()\n\n class Meta:\n model = Order\n exclude = ('code',)\n read_only_fields = (\n 'shipping_status', 'cart', 'user',\n 'shipping_method',\n )\n\n def create(self, data):\n user = self.context.get('request').user\n cart = user.carts.get(ordered=False)\n # Validate cart\n if cart.items.all().exists() == False:\n raise serializers.ValidationError(\"Cart must not be empty\")\n # Update products sale count\n for item in cart.items.all():\n Product.objects.filter(id=item.product.id).update(\n sale_count=F('sale_count') + item.quantity\n )\n # Create reaciver info model\n reciver_info = ReciverInfo.objects.create(**data.get('reciver'))\n # Create order model\n cart.ordered = True\n cart.save()\n order = Order.objects.create(\n user=user, cart=cart, reciver=reciver_info,\n purchase_invoice=data.get('purchase_invoice'), shipping_status=\"Preparation\"\n )\n # Create another cart model with ordered=False\n Cart.objects.create(user=user)\n return order\n","repo_name":"fullstack0516/django-react-ecommerce","sub_path":"orders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"3108195786","text":"from __future__ import print_function, absolute_import\nimport json\nimport sys\nimport os\nfrom datetime import datetime\nimport requests\nfrom isCached import doCache\nfrom importlib import reload\nfrom dl_logo import parent_folder_logo\nimport check_dark_mode\nreload(sys)\n\ndef get_data_json():\n url = 'https://api-football-standings.azharimm.site/leagues/'\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'} # use user-agent to prevent from blocked\n response = requests.request(\"GET\", url, headers=headers, verify=False)\n posts = response.json()\n return posts\n\n\ndef time_now():\n now = datetime.now()\n\n # dd/mm/YY H:M:S\n timeNow = now.strftime(\"%d %B, %Y %H:%M:%S\")\n return timeNow\n\ndef parse_season(seasonName=None):\n return seasonName.replace(' ', '_').lower();\n\ndef check_season_number(seasonName=None):\n parse = seasonName.split(' ')[1] or 25\n return int(parse)\n\n\ndef get_rank_symbol(rank=None):\n switcher={\n 1 : '🥇',\n 2 : '🥈',\n 3 : '🥉'\n }\n return switcher.get(rank,'#{}'.format(rank))\n \ndef get_status_lastMatched(point=None):\n num = float(point)\n if num > 0:\n return '✅'\n elif num == 0:\n return '✴️'\n else:\n return '🔴'\ndef light_dark():\n env_dark_mode = os.environ['adaptive_dark_mode_league_icon'] or True\n if (env_dark_mode):\n if (check_dark_mode.check_appearance()):\n return '_dark'\n else:\n return '_light'\n else:\n return '_light' \n\ndef football(search=None, division=None):\n be = doCache()\n\n if (be.compare_time('leagues', 2)):\n data_out = get_data_json() # get from internet\n else:\n data_out = data_object() # get from cache file\n \n projects = data_out['data']\n \n result = []\n for project in projects:\n if search is not None and project['name'].lower().find(search.lower()) == -1:\n continue\n \n result.append({\n 'title': f\"{project['name']}\",\n 'arg': f\"{project['id']}\",\n 'valid' : True,\n 'icon': {\n 'path': (f\"{parent_folder_logo}{project['id']}/{project['id']}{light_dark()}.png\") if os.path.exists(f\"{parent_folder_logo}{project['id']}/{project['id']}{light_dark()}.png\") else (f\"{parent_folder_logo}/no-logo.png\") # check icon if empty\n },\n # \"action\": {\n # \"text\": project['team']['displayName'],\n # },\n # 'quicklookurl' : 'w'\n # 'text': {\n # # \"copy\": project['url'],\n # \"largetype\": f\"{division}\\n{project['team']['abbreviation']}\\n{project['team']['displayName'].lower()}\"\n # },\n 'mods': {\n 'alt': {\n 'valid': False,\n # 'arg': project['id'],\n 'subtitle': f\"League code : {project['id']}\"\n },\n # 'ctrl': {\n # 'valid': True,\n # # add argument project finished to Dialog Conditional\n # 'arg': '{}:{}'.format(project['id'],project['finished']),\n # 'subtitle' : '{}'.format('🍿Mark Unwatched' if project['finished'] == True else '☑️Mark Watched'),\n # },\n # 'cmd': {\n # 'valid': True,\n # 'arg': 'Season ',\n # 'subtitle': '➕Add season',\n # },\n }\n })\n if search == 'set' or search == '!' and len(search) > 0:\n result.append({\n 'title': f\"Settings\",\n 'subtitle': f\"Football Info Configuration\",\n 'arg': (f\"settings\"),\n 'valid' : True,\n 'icon': {\n 'path': (f\"src/settings.png\")\n },\n }) \n \n \n return result\n\n\"\"\"Run Script Filter.\"\"\"\ndef main():\n SEARCH = sys.argv[1] if len(sys.argv) >= 2 else None\n division = os.getenv('fav_league')\n posts = football(search=SEARCH, division=division)\n data = json.dumps({ \"items\": posts }, indent=4)\n print(data)\n \n\ndef data_object():\n fileJson = f\"Cache/leagues.json\"\n f = open(fileJson)\n posts = json.load(f)\n return posts\n\n\nif __name__ == '__main__':\n # default load filter\n main()","repo_name":"afridho/alfred-football-info","sub_path":"alfred_football.py","file_name":"alfred_football.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38396239952","text":"import argparse\nimport hashlib\nimport json\nimport os\nimport shutil\nimport sys\nfrom os.path import join\nfrom random import randrange\nimport torch\n\nbase_dir = \"./\"\nsys.path.insert(0, base_dir)\n\nfrom chad_score.chad_score import ChadScoreModel\nfrom stable_diffusion.utils_backend import get_device, get_memory_status\nfrom stable_diffusion.utils_image import to_pil\nfrom stable_diffusion.model.clip_text_embedder import CLIPTextEmbedder\nfrom stable_diffusion.model.clip_image_encoder import CLIPImageEncoder\nfrom stable_diffusion import StableDiffusion\nfrom stable_diffusion.model_paths import IODirectoryTree\nfrom configs.model_config import ModelPathConfig\n\nEMBEDDED_PROMPTS_DIR = os.path.abspath(\"./input/embedded_prompts/\")\nOUTPUT_DIR = \"./output/data/\"\nFEATURES_DIR = join(OUTPUT_DIR, \"features/\")\nIMAGES_DIR = join(OUTPUT_DIR, \"images/\")\n# SCORER_CHECKPOINT_PATH = os.path.abspath(\"./input/model/aesthetic_scorer/sac+logos+ava1-l14-linearMSE.pth\")\nSCORER_CHECKPOINT_PATH = os.path.abspath(\"./input/model/aesthetic_scorer/chadscorer.pth\")\n\n# DEVICE = input(\"Set device: 'cuda:i' or 'cpu'\")\n\n\nparser = argparse.ArgumentParser(\"Embed prompts using CLIP\")\nparser.add_argument(\n \"--prompt\",\n type=str,\n default='A woman with flowers in her hair in a courtyard, in the style of Frank Frazetta',\n help=\"The prompt to embed. Defaults to 'A woman with flowers in her hair in a courtyard, in the style of Frank Frazetta'\",\n)\nparser.add_argument(\n \"--save_embeddings\",\n type=bool,\n default=False,\n help=\"If True, the disturbed embeddings will be saved to disk. Defaults to False.\",\n)\nparser.add_argument(\n \"--embedded_prompts_dir\",\n type=str,\n default=EMBEDDED_PROMPTS_DIR,\n help=\"The path to the directory containing the embedded prompts tensors. Defaults to a constant EMBEDDED_PROMPTS_DIR, which is expected to be './input/embedded_prompts/'\",\n)\nparser.add_argument(\n \"--num_iterations\",\n type=int,\n default=8,\n help=\"The number of iterations to batch-generate images. Defaults to 8.\",\n)\n\n# parser.add_argument(\n# \"--batch_size\",\n# type=str,\n# default=1,\n# help=\"The number of images to generate per batch. Defaults to 1.\",\n# )\n\nparser.add_argument(\n \"--seed\",\n type=str,\n default='',\n help=\"The noise seed used to generate the images. Defaults to random int from 0 to 2^24\",\n)\nparser.add_argument(\n \"--noise_multiplier\",\n type=float,\n default=0.01,\n help=\"The multiplier for the amount of noise used to disturb the prompt embedding. Defaults to 0.01.\",\n)\nparser.add_argument(\n \"--cuda_device\",\n type=str,\n default=\"cuda:0\",\n help=\"The cuda device to use. Defaults to 'cuda:0'.\",\n)\nparser.add_argument(\n \"--clear_output_dir\",\n type=bool,\n default=False,\n help=\"Avoid. If True, the output directory will be cleared before generating images. Defaults to False.\",\n)\n\nparser.add_argument(\n \"--random_walk\",\n type=bool,\n default=False,\n help=\"Random walk on the embedding space, with the prompt embedding as origin. Defaults to False.\",\n)\nargs = parser.parse_args()\n\nNULL_PROMPT = \"\"\nPROMPT = args.prompt\nNUM_ITERATIONS = args.num_iterations\n\nif args.seed == '':\n SEED = randrange(0, 2 ** 24)\nelse:\n SEED = int(args.seed)\n\nNOISE_MULTIPLIER = args.noise_multiplier\nDEVICE = get_device(args.cuda_device)\n# BATCH_SIZE = args.batch_size\nBATCH_SIZE = 1\nSAVE_EMBEDDINGS = args.save_embeddings\nCLEAR_OUTPUT_DIR = args.clear_output_dir\nRANDOM_WALK = args.random_walk\nos.makedirs(EMBEDDED_PROMPTS_DIR, exist_ok=True)\n\nmodel_config = ModelPathConfig()\npt = IODirectoryTree(model_config)\n\ntry:\n shutil.rmtree(OUTPUT_DIR)\nexcept Exception as e:\n print(e, \"\\n\", \"Creating the paths...\")\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n os.makedirs(FEATURES_DIR, exist_ok=True)\n os.makedirs(IMAGES_DIR, exist_ok=True)\nelse:\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n os.makedirs(FEATURES_DIR, exist_ok=True)\n os.makedirs(IMAGES_DIR, exist_ok=True)\n\n\ndef init_stable_diffusion(device, path_tree: IODirectoryTree, sampler_name=\"ddim\", n_steps=20, ddim_eta=0.0):\n device = get_device(device)\n\n stable_diffusion = StableDiffusion(\n device=device, sampler_name=sampler_name, n_steps=n_steps, ddim_eta=ddim_eta\n )\n\n stable_diffusion.quick_initialize()\n stable_diffusion.model.load_unet(path_tree.unet['unet'])\n autoencoder = stable_diffusion.model.load_autoencoder(path_tree.autoencoder['autoencoder'])\n autoencoder.load_decoder(path_tree.decoder['decoder'])\n\n return stable_diffusion\n\n\ndef embed_and_save_prompts(prompt: str, null_prompt=NULL_PROMPT):\n null_prompt = null_prompt\n prompt = prompt\n\n clip_text_embedder = CLIPTextEmbedder(device=get_device(DEVICE))\n clip_text_embedder.load_submodels()\n\n null_cond = clip_text_embedder(null_prompt)\n torch.save(null_cond, join(EMBEDDED_PROMPTS_DIR, \"null_cond.pt\"))\n print(\n \"Null prompt embedding saved at: \",\n f\"{join(EMBEDDED_PROMPTS_DIR, 'null_cond.pt')}\",\n )\n\n embedded_prompts = clip_text_embedder(prompt)\n torch.save(embedded_prompts, join(EMBEDDED_PROMPTS_DIR, \"embedded_prompts.pt\"))\n\n print(\n \"Prompts embeddings saved at: \",\n f\"{join(EMBEDDED_PROMPTS_DIR, 'embedded_prompts.pt')}\",\n )\n\n get_memory_status()\n clip_text_embedder.to(\"cpu\")\n del clip_text_embedder\n torch.cuda.empty_cache()\n get_memory_status()\n return embedded_prompts, null_cond\n\n\ndef generate_images_from_disturbed_embeddings(\n sd: StableDiffusion,\n embedded_prompt: torch.Tensor,\n null_prompt: torch.Tensor,\n device=DEVICE,\n seed=SEED,\n num_iterations=NUM_ITERATIONS,\n noise_multiplier=NOISE_MULTIPLIER,\n batch_size=BATCH_SIZE\n):\n dist = torch.distributions.normal.Normal(\n loc=embedded_prompt.mean(dim=2), scale=embedded_prompt.std(dim=2)\n )\n\n if not RANDOM_WALK:\n for i in range(0, num_iterations):\n j = num_iterations - i\n\n noise_i = (\n dist.sample(sample_shape=torch.Size([768])).permute(1, 0, 2).permute(0, 2, 1)\n ).to(device)\n noise_j = (\n dist.sample(sample_shape=torch.Size([768])).permute(1, 0, 2).permute(0, 2, 1)\n ).to(device)\n embedding_e = embedded_prompt + ((i * noise_multiplier) * noise_i + (j * noise_multiplier) * noise_j) / (\n 2 * num_iterations)\n\n latent = sd.generate_images_latent_from_embeddings(\n seed=seed,\n embedded_prompt=embedding_e,\n null_prompt=null_prompt,\n batch_size=batch_size\n )\n\n image_e = sd.get_image_from_latent(latent)\n\n yield (image_e, embedding_e)\n else:\n\n noise_t = torch.zeros_like(embedded_prompt).to(device)\n\n for i in range(0, num_iterations):\n noise_i = (\n dist.sample(sample_shape=torch.Size([768])).permute(1, 0, 2).permute(0, 2, 1)\n ).to(device)\n # noise_t = noise_t + noise_i\n # embedding_e = embedded_prompt + (noise_multiplier * noise_t)\n\n noise_t += (noise_multiplier * noise_i)\n embedding_e = embedded_prompt + noise_t\n\n latent = sd.generate_images_latent_from_embeddings(\n seed=seed,\n embedded_prompt=embedding_e,\n null_prompt=null_prompt,\n batch_size=batch_size\n )\n\n image_e = sd.get_image_from_latent(latent)\n\n yield (image_e, embedding_e)\n\n\ndef calculate_sha256(tensor):\n if tensor.device == \"cpu\":\n tensor_bytes = tensor.numpy().tobytes() # Convert tensor to a byte array\n else:\n tensor_bytes = tensor.cpu().numpy().tobytes() # Convert tensor to a byte array\n sha256_hash = hashlib.sha256(tensor_bytes)\n return sha256_hash.hexdigest()\n\n\ndef get_image_features(\n image, model, preprocess, device=DEVICE,\n):\n image = preprocess(image).unsqueeze(0).to(device)\n with torch.no_grad():\n image_features = model.encode_image(image)\n # l2 normalize\n image_features /= image_features.norm(dim=-1, keepdim=True)\n\n image_features_cpu = image_features.cpu()\n\n # free gpu memory\n image_features = image_features.detach()\n del image_features\n torch.cuda.empty_cache()\n\n return image_features_cpu.numpy()\n\n\ndef main():\n model_config = ModelPathConfig()\n pt = IODirectoryTree(model_config)\n\n embedded_prompts, null_prompt = embed_and_save_prompts(PROMPT)\n sd = init_stable_diffusion(DEVICE, pt)\n\n images = generate_images_from_disturbed_embeddings(sd, embedded_prompts, null_prompt, batch_size=BATCH_SIZE)\n\n image_encoder = CLIPImageEncoder(device=DEVICE)\n image_encoder.load_submodels(image_processor_path=pt.image_processor, vision_model_path=pt.vision_model)\n\n loaded_model = torch.load(SCORER_CHECKPOINT_PATH)\n predictor = ChadScoreModel(768, device=DEVICE)\n predictor.load_state_dict(loaded_model)\n predictor.eval()\n\n json_output = []\n scores = []\n manifest = []\n json_output_path = join(FEATURES_DIR, \"features.json\")\n manifest_path = join(OUTPUT_DIR, \"manifest.json\")\n scores_path = join(OUTPUT_DIR, \"scores.json\")\n # images_tensors = []\n\n for i, (image, embedding) in enumerate(images):\n # images_tensors.append(image.cpu().detach())\n get_memory_status()\n # compute hash\n img_hash = calculate_sha256(image.squeeze())\n pil_image = to_pil(image.squeeze())\n # compute aesthetic score\n image_features = image_encoder(pil_image, do_preprocess=True)\n image_features /= image_features.norm(dim=-1, keepdim=True)\n score = predictor(image_features.to(DEVICE).float()).cpu()\n img_file_name = f\"image_{i:06d}.png\"\n img_path = join(IMAGES_DIR, img_file_name)\n pil_image.save(img_path)\n print(f\"Image saved at: {img_path}\")\n\n if SAVE_EMBEDDINGS:\n embedding_file_name = f\"embedding_{i:06d}.pt\"\n embedding_path = join(FEATURES_DIR, embedding_file_name)\n torch.save(embedding, embedding_path)\n print(f\"Embedding saved at: {embedding_path}\")\n\n manifest_i = {\n \"file-name\": img_file_name,\n \"file-hash\": img_hash,\n \"file-path\": img_path,\n }\n manifest.append(manifest_i)\n\n scores_i = manifest_i.copy()\n scores_i[\"score\"] = score.item()\n scores.append(scores_i)\n\n json_output_i = manifest_i.copy()\n json_output_i[\"initial-prompt\"] = PROMPT\n json_output_i[\"score\"] = score.item()\n json_output_i[\"embedding-tensor\"] = embedding.tolist()\n json_output_i[\"clip-vector\"] = image_features.tolist()\n json_output.append(json_output_i)\n\n if i % 64 == 0:\n json.dump(json_output, open(json_output_path, \"w\"), indent=4)\n print(f\"features.json saved at: {json_output_path}\")\n\n json.dump(scores, open(scores_path, \"w\"), indent=4)\n print(f\"scores.json saved at: {scores_path}\")\n\n json.dump(manifest, open(manifest_path, \"w\"), indent=4)\n print(f\"manifest.json saved at: {manifest_path}\")\n\n json.dump(json_output, open(json_output_path, \"w\"), indent=4)\n print(f\"features.json saved at: {json_output_path}\")\n json.dump(scores, open(scores_path, \"w\"), indent=4)\n print(f\"scores.json saved at: {scores_path}\")\n json.dump(manifest, open(manifest_path, \"w\"), indent=4)\n print(f\"manifest.json saved at: {manifest_path}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rematchka/kcg-ml-sd1p4","sub_path":"scripts/disturb_embeddings_and_score.py","file_name":"disturb_embeddings_and_score.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"17680385861","text":"import base64\nimport os\nfrom flask import request\nimport flask\n\napp = flask.Flask(__name__)\n\n@app.route('/', methods = ['GET'])\ndef index():\n\n response = flask.render_template('index.html')\n return response, 200\n\n@app.route('/sandbox/', methods = ['GET', 'POST'])\ndef sandbox():\n\n all = {}\n\n if request.method == 'POST':\n response = flask.render_template('sandbox.html')\n\n #We search through all submitted forms and insert them in our \"all\" dict which will be used as response headers\n #if request.method == 'POST': for key, value in request.args.get.iteritems(): all.update({key: str(value)})\n for key, value in request.form.iteritems(): all.update({key: str(value)})\n\n #We seek after not set Headers and remove them if they are empty.\n for x in list(all.keys()):\n if all[x] == '':\n del all[x]\n all.pop(\"payload\", None) #We need to manually remove the HTML-key from the headers, otherwise we would have the HTML in the headers. We do not want that\n\n #Here we decode the HTML-data that was sent via POST. We will insert this in the response body.\n response += base64.b64decode(request.form['payload'].encode('utf-8'))\n response += str('\\n\\n')\n return response, 200, all\n\n@app.route('/examples/', methods = ['GET'])\ndef examples():\n response = flask.render_template('examples.html')\n return response, 200, {'server': 'chloe'}\n\n@app.route('/info/', methods = ['GET'])\ndef info():\n response = flask.render_template('info.html')\n return response, 200, {'Content-Security-Policy': \"default-src 'none' ; script-src 'none' ; style-src https://fonts.googleapis.com:443/css 'self'; img-src data: ; font-src https://fonts.gstatic.com:443/s/worksans/v2/ https://fonts.googleapis.com:443/css; connect-src 'none' ; media-src 'none' ; object-src 'none' ; child-src 'none' ; frame-ancestors 'none' ; form-action 'none' ; manifest-src 'none' ; referrer no-referrer;\"}\n\nport = int(os.environ.get('PORT', '1337'))\napp.run(host='0.0.0.0', port=port, debug=False)\n","repo_name":"intchloe/HeaderSandbox","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"75571409015","text":"import unittest\n\nfrom terminal import Terminal\n\nclass TestTerminal(unittest.TestCase):\n def test_init(self):\n terminal = Terminal(\"test\")\n self.assertEqual(terminal.symbol, \"test\")\n\n def test_equals(self):\n terminal1 = Terminal(\"test\")\n terminal2 = Terminal(\"test\")\n terminal3 = Terminal(\"123\")\n self.assertIsNot(terminal1, terminal2)\n self.assertEqual(terminal1, terminal1)\n self.assertEqual(terminal1, terminal2)\n self.assertNotEqual(terminal1, terminal3)\n\n\n def test_str(self):\n terminal = Terminal(\"test\")\n\n self.assertEqual(str(terminal), 'Terminal (\"test\")')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"fkropfhamer/parser","sub_path":"test_terminal.py","file_name":"test_terminal.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30540468778","text":"from __future__ import absolute_import\n\nimport calendar\nfrom datetime import timedelta, datetime\nimport json\nimport pytest\nimport requests\nimport six\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom sentry.models import GroupHash, EventUser\nfrom sentry.tagstore.exceptions import (\n GroupTagKeyNotFound,\n GroupTagValueNotFound,\n TagKeyNotFound,\n TagValueNotFound,\n)\nfrom sentry.tagstore.snuba.backend import SnubaTagStorage, cache_suffix_time\nfrom sentry.testutils import SnubaTestCase, TestCase\n\n\nclass TagStorageTest(TestCase, SnubaTestCase):\n def setUp(self):\n super(TagStorageTest, self).setUp()\n\n self.ts = SnubaTagStorage()\n\n self.proj1 = self.create_project()\n self.proj1env1 = self.create_environment(project=self.proj1, name=\"test\")\n self.proj1env2 = self.create_environment(project=self.proj1, name=\"test2\")\n\n self.proj1group1 = self.create_group(self.proj1)\n self.proj1group2 = self.create_group(self.proj1)\n\n hash1 = \"1\" * 32\n hash2 = \"2\" * 32\n GroupHash.objects.create(project=self.proj1, group=self.proj1group1, hash=hash1)\n GroupHash.objects.create(project=self.proj1, group=self.proj1group2, hash=hash2)\n\n self.now = timezone.now().replace(microsecond=0)\n data = json.dumps(\n [\n {\n \"event_id\": six.text_type(r) * 32,\n \"primary_hash\": hash1,\n \"group_id\": self.proj1group1.id,\n \"project_id\": self.proj1.id,\n \"message\": \"message 1\",\n \"platform\": \"python\",\n \"datetime\": (self.now - timedelta(seconds=r)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"data\": {\n \"received\": calendar.timegm(self.now.timetuple()) - r,\n \"tags\": {\n \"foo\": \"bar\",\n \"baz\": \"quux\",\n \"environment\": self.proj1env1.name,\n \"sentry:release\": 100 * r,\n \"sentry:user\": u\"id:user{}\".format(r),\n },\n \"user\": {\"id\": u\"user{}\".format(r), \"email\": u\"user{}@sentry.io\".format(r)},\n \"exception\": {\"values\": [{\"stacktrace\": {\"frames\": [{\"lineno\": 29}]}}]},\n },\n }\n for r in [1, 2]\n ]\n + [\n {\n \"event_id\": \"3\" * 32,\n \"primary_hash\": hash2,\n \"group_id\": self.proj1group2.id,\n \"project_id\": self.proj1.id,\n \"message\": \"message 2\",\n \"platform\": \"python\",\n \"datetime\": (self.now - timedelta(seconds=2)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"data\": {\n \"received\": calendar.timegm(self.now.timetuple()) - 2,\n \"tags\": {\n \"browser\": \"chrome\",\n \"environment\": self.proj1env1.name,\n \"sentry:user\": \"id:user1\",\n },\n \"user\": {\"id\": \"user1\"},\n },\n }\n ]\n + [\n {\n \"event_id\": \"4\" * 32,\n \"primary_hash\": hash2,\n \"group_id\": self.proj1group1.id,\n \"project_id\": self.proj1.id,\n \"message\": \"message 2\",\n \"platform\": \"python\",\n \"datetime\": (self.now - timedelta(seconds=2)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"data\": {\n \"received\": calendar.timegm(self.now.timetuple()) - 2,\n \"tags\": {\"foo\": \"bar\", \"environment\": self.proj1env2.name},\n \"user\": {\"id\": \"user1\"},\n },\n }\n ]\n )\n\n assert (\n requests.post(settings.SENTRY_SNUBA + \"/tests/events/insert\", data=data).status_code\n == 200\n )\n\n def test_get_group_tag_keys_and_top_values(self):\n result = list(\n self.ts.get_group_tag_keys_and_top_values(\n self.proj1.id, self.proj1group1.id, [self.proj1env1.id]\n )\n )\n tags = [r.key for r in result]\n assert set(tags) == set([\"foo\", \"baz\", \"environment\", \"sentry:release\", \"sentry:user\"])\n\n result.sort(key=lambda r: r.key)\n assert result[0].key == \"baz\"\n assert result[0].top_values[0].value == \"quux\"\n assert result[0].count == 2\n\n assert result[3].key == \"sentry:release\"\n assert result[3].count == 2\n top_release_values = result[3].top_values\n assert len(top_release_values) == 2\n assert set(v.value for v in top_release_values) == set([\"100\", \"200\"])\n assert all(v.times_seen == 1 for v in top_release_values)\n\n # Now with only a specific set of keys,\n result = list(\n self.ts.get_group_tag_keys_and_top_values(\n self.proj1.id,\n self.proj1group1.id,\n [self.proj1env1.id],\n keys=[\"environment\", \"sentry:release\"],\n )\n )\n tags = [r.key for r in result]\n assert set(tags) == set([\"environment\", \"sentry:release\"])\n\n result.sort(key=lambda r: r.key)\n assert result[0].key == \"environment\"\n assert result[0].top_values[0].value == \"test\"\n\n assert result[1].key == \"sentry:release\"\n top_release_values = result[1].top_values\n assert len(top_release_values) == 2\n assert set(v.value for v in top_release_values) == set([\"100\", \"200\"])\n assert all(v.times_seen == 1 for v in top_release_values)\n\n def test_get_top_group_tag_values(self):\n resp = self.ts.get_top_group_tag_values(\n self.proj1.id, self.proj1group1.id, self.proj1env1.id, \"foo\", 1\n )\n assert len(resp) == 1\n assert resp[0].times_seen == 2\n assert resp[0].key == \"foo\"\n assert resp[0].value == \"bar\"\n assert resp[0].group_id == self.proj1group1.id\n\n def test_get_group_tag_value_count(self):\n assert (\n self.ts.get_group_tag_value_count(\n self.proj1.id, self.proj1group1.id, self.proj1env1.id, \"foo\"\n )\n == 2\n )\n\n def test_get_tag_keys(self):\n expected_keys = set(\n [\"baz\", \"browser\", \"environment\", \"foo\", \"sentry:release\", \"sentry:user\"]\n )\n keys = {\n k.key: k\n for k in self.ts.get_tag_keys(\n project_id=self.proj1.id, environment_id=self.proj1env1.id\n )\n }\n assert set(keys) == expected_keys\n keys = {\n k.key: k\n for k in self.ts.get_tag_keys(\n project_id=self.proj1.id, environment_id=self.proj1env1.id, include_values_seen=True\n )\n }\n assert set(keys) == expected_keys\n\n def test_get_group_tag_key(self):\n with pytest.raises(GroupTagKeyNotFound):\n self.ts.get_group_tag_key(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"notreal\",\n )\n\n assert (\n self.ts.get_group_tag_key(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"foo\",\n ).key\n == \"foo\"\n )\n\n keys = {\n k.key: k\n for k in self.ts.get_group_tag_keys(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_ids=[self.proj1env1.id],\n )\n }\n assert set(keys) == set([\"baz\", \"environment\", \"foo\", \"sentry:release\", \"sentry:user\"])\n\n def test_get_group_tag_value(self):\n with pytest.raises(GroupTagValueNotFound):\n self.ts.get_group_tag_value(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"foo\",\n value=\"notreal\",\n )\n\n assert self.ts.get_group_tag_values(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"notreal\",\n ) == set([])\n\n assert (\n list(\n self.ts.get_group_tag_values(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"foo\",\n )\n )[0].value\n == \"bar\"\n )\n\n assert (\n self.ts.get_group_tag_value(\n project_id=self.proj1.id,\n group_id=self.proj1group1.id,\n environment_id=self.proj1env1.id,\n key=\"foo\",\n value=\"bar\",\n ).value\n == \"bar\"\n )\n\n def test_get_tag_key(self):\n with pytest.raises(TagKeyNotFound):\n self.ts.get_tag_key(\n project_id=self.proj1.id, environment_id=self.proj1env1.id, key=\"notreal\"\n )\n\n def test_get_tag_value(self):\n with pytest.raises(TagValueNotFound):\n self.ts.get_tag_value(\n project_id=self.proj1.id,\n environment_id=self.proj1env1.id,\n key=\"foo\",\n value=\"notreal\",\n )\n\n def test_get_tag_value_label(self):\n assert self.ts.get_tag_value_label(\"foo\", \"notreal\") == \"notreal\"\n assert self.ts.get_tag_value_label(\"sentry:user\", None) is None\n assert self.ts.get_tag_value_label(\"sentry:user\", \"id:stuff\") == \"stuff\"\n assert self.ts.get_tag_value_label(\"sentry:user\", \"email:stuff\") == \"stuff\"\n assert self.ts.get_tag_value_label(\"sentry:user\", \"username:stuff\") == \"stuff\"\n assert self.ts.get_tag_value_label(\"sentry:user\", \"ip:stuff\") == \"stuff\"\n\n def test_get_groups_user_counts(self):\n assert self.ts.get_groups_user_counts(\n project_ids=[self.proj1.id],\n group_ids=[self.proj1group1.id, self.proj1group2.id],\n environment_ids=[self.proj1env1.id],\n ) == {self.proj1group1.id: 2, self.proj1group2.id: 1}\n\n # test filtering by date range where there shouldn't be results\n assert (\n self.ts.get_groups_user_counts(\n project_ids=[self.proj1.id],\n group_ids=[self.proj1group1.id, self.proj1group2.id],\n environment_ids=[self.proj1env1.id],\n start=self.now - timedelta(days=5),\n end=self.now - timedelta(days=4),\n )\n == {}\n )\n\n def test_get_releases(self):\n assert (\n self.ts.get_first_release(project_id=self.proj1.id, group_id=self.proj1group1.id)\n == \"200\"\n )\n\n assert (\n self.ts.get_first_release(project_id=self.proj1.id, group_id=self.proj1group2.id)\n is None\n )\n\n assert (\n self.ts.get_last_release(project_id=self.proj1.id, group_id=self.proj1group1.id)\n == \"100\"\n )\n\n assert (\n self.ts.get_last_release(project_id=self.proj1.id, group_id=self.proj1group2.id) is None\n )\n\n def test_get_group_ids_for_users(self):\n assert self.ts.get_group_ids_for_users(\n [self.proj1.id], [EventUser(project_id=self.proj1.id, ident=\"user1\")]\n ) == set([self.proj1group1.id, self.proj1group2.id])\n\n assert self.ts.get_group_ids_for_users(\n [self.proj1.id], [EventUser(project_id=self.proj1.id, ident=\"user2\")]\n ) == set([self.proj1group1.id])\n\n def test_get_group_tag_values_for_users(self):\n result = self.ts.get_group_tag_values_for_users(\n [EventUser(project_id=self.proj1.id, ident=\"user1\")]\n )\n assert len(result) == 2\n assert set(v.group_id for v in result) == set([self.proj1group1.id, self.proj1group2.id])\n assert set(v.last_seen for v in result) == set(\n [self.now - timedelta(seconds=1), self.now - timedelta(seconds=2)]\n )\n result.sort(key=lambda x: x.last_seen)\n assert result[0].last_seen == self.now - timedelta(seconds=2)\n assert result[1].last_seen == self.now - timedelta(seconds=1)\n for v in result:\n assert v.value == \"user1\"\n\n result = self.ts.get_group_tag_values_for_users(\n [EventUser(project_id=self.proj1.id, ident=\"user2\")]\n )\n assert len(result) == 1\n assert result[0].value == \"user2\"\n assert result[0].last_seen == self.now - timedelta(seconds=2)\n\n def test_get_release_tags(self):\n tags = list(self.ts.get_release_tags([self.proj1.id], None, [\"100\"]))\n\n assert len(tags) == 1\n one_second_ago = self.now - timedelta(seconds=1)\n assert tags[0].last_seen == one_second_ago\n assert tags[0].first_seen == one_second_ago\n assert tags[0].times_seen == 1\n assert tags[0].key == \"sentry:release\"\n\n def test_get_group_event_filter(self):\n assert self.ts.get_group_event_filter(\n self.proj1.id, self.proj1group1.id, [self.proj1env1.id], {\"foo\": \"bar\"}, None, None\n ) == {\"event_id__in\": set([\"1\" * 32, \"2\" * 32])}\n\n assert self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group1.id,\n [self.proj1env1.id],\n {\"foo\": \"bar\"},\n (self.now - timedelta(seconds=1)),\n None,\n ) == {\"event_id__in\": set([\"1\" * 32])}\n\n assert self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group1.id,\n [self.proj1env1.id],\n {\"foo\": \"bar\"},\n None,\n (self.now - timedelta(seconds=1)),\n ) == {\"event_id__in\": set([\"2\" * 32])}\n\n assert self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group1.id,\n [self.proj1env1.id, self.proj1env2.id],\n {\"foo\": \"bar\"},\n None,\n None,\n ) == {\"event_id__in\": set([\"1\" * 32, \"2\" * 32, \"4\" * 32])}\n\n assert self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group1.id,\n [self.proj1env1.id],\n {\"foo\": \"bar\", \"sentry:release\": \"200\"}, # AND\n None,\n None,\n ) == {\"event_id__in\": set([\"2\" * 32])}\n\n assert self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group2.id,\n [self.proj1env1.id],\n {\"browser\": \"chrome\"},\n None,\n None,\n ) == {\"event_id__in\": set([\"3\" * 32])}\n\n assert (\n self.ts.get_group_event_filter(\n self.proj1.id,\n self.proj1group2.id,\n [self.proj1env1.id],\n {\"browser\": \"ie\"},\n None,\n None,\n )\n is None\n )\n\n def test_get_tag_value_paginator(self):\n from sentry.tagstore.types import TagValue\n\n assert list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"sentry:user\"\n ).get_result(10)\n ) == [\n TagValue(\n key=\"sentry:user\",\n value=\"id:user1\",\n times_seen=2,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=1),\n ),\n TagValue(\n key=\"sentry:user\",\n value=\"id:user2\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=2),\n ),\n ]\n\n assert list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"sentry:user\", query=\"user1\"\n ).get_result(10)\n ) == [\n TagValue(\n key=\"sentry:user\",\n value=\"id:user1\",\n times_seen=2,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=1),\n )\n ]\n\n def test_get_tag_value_paginator_with_dates(self):\n from sentry.tagstore.types import TagValue\n\n day_ago = self.now - timedelta(days=1)\n two_days_ago = self.now - timedelta(days=2)\n assert list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"sentry:user\", start=day_ago, end=self.now\n ).get_result(10)\n ) == [\n TagValue(\n key=\"sentry:user\",\n value=\"id:user1\",\n times_seen=2,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=1),\n ),\n TagValue(\n key=\"sentry:user\",\n value=\"id:user2\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=2),\n ),\n ]\n\n day_ago = self.now - timedelta(days=1)\n assert (\n list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"sentry:user\", start=two_days_ago, end=day_ago\n ).get_result(10)\n )\n == []\n )\n\n def test_numeric_tag_value_paginator(self):\n from sentry.tagstore.types import TagValue\n\n assert list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"stack.lineno\"\n ).get_result(10)\n ) == [\n TagValue(\n key=\"stack.lineno\",\n value=\"29\",\n times_seen=2,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=1),\n )\n ]\n\n assert list(\n self.ts.get_tag_value_paginator(\n self.proj1.id, self.proj1env1.id, \"stack.lineno\", query=\"30\"\n ).get_result(10)\n ) == [\n TagValue(\n key=\"stack.lineno\",\n value=\"29\",\n times_seen=2,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=1),\n )\n ]\n\n def test_get_group_tag_value_iter(self):\n from sentry.tagstore.types import GroupTagValue\n\n assert list(\n self.ts.get_group_tag_value_iter(\n self.proj1.id, self.proj1group1.id, self.proj1env1.id, \"sentry:user\"\n )\n ) == [\n GroupTagValue(\n group_id=self.proj1group1.id,\n key=\"sentry:user\",\n value=\"id:user1\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=1),\n last_seen=self.now - timedelta(seconds=1),\n ),\n GroupTagValue(\n group_id=self.proj1group1.id,\n key=\"sentry:user\",\n value=\"id:user2\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=2),\n ),\n ]\n\n def test_get_group_tag_value_paginator(self):\n from sentry.tagstore.types import GroupTagValue\n\n assert list(\n self.ts.get_group_tag_value_paginator(\n self.proj1.id, self.proj1group1.id, self.proj1env1.id, \"sentry:user\"\n ).get_result(10)\n ) == [\n GroupTagValue(\n group_id=self.proj1group1.id,\n key=\"sentry:user\",\n value=\"id:user1\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=1),\n last_seen=self.now - timedelta(seconds=1),\n ),\n GroupTagValue(\n group_id=self.proj1group1.id,\n key=\"sentry:user\",\n value=\"id:user2\",\n times_seen=1,\n first_seen=self.now - timedelta(seconds=2),\n last_seen=self.now - timedelta(seconds=2),\n ),\n ]\n\n def test_get_group_seen_values_for_environments(self):\n assert self.ts.get_group_seen_values_for_environments(\n [self.proj1.id], [self.proj1group1.id], [self.proj1env1.id]\n ) == {\n self.proj1group1.id: {\n \"first_seen\": self.now - timedelta(seconds=2),\n \"last_seen\": self.now - timedelta(seconds=1),\n \"times_seen\": 2,\n }\n }\n\n # test where there should be no results because of time filters\n assert (\n self.ts.get_group_seen_values_for_environments(\n [self.proj1.id],\n [self.proj1group1.id],\n [self.proj1env1.id],\n start=self.now - timedelta(hours=5),\n end=self.now - timedelta(hours=4),\n )\n == {}\n )\n\n def test_cache_suffix_time(self):\n starting_key = cache_suffix_time(self.now, 0)\n finishing_key = cache_suffix_time(self.now + timedelta(seconds=300), 0)\n\n assert starting_key != finishing_key\n\n def test_cache_suffix_hour_edges(self):\n \"\"\" a suffix should still behave correctly around the end of the hour\n\n At a duration of 10 only one key between 0-10 should flip on the hour, the other 9\n should flip at different times.\n \"\"\"\n before = datetime(2019, 9, 5, 17, 59, 59)\n on_hour = datetime(2019, 9, 5, 18, 0, 0)\n changed_on_hour = 0\n # Check multiple keyhashes so that this test doesn't depend on implementation\n for key_hash in range(10):\n before_key = cache_suffix_time(before, key_hash, duration=10)\n on_key = cache_suffix_time(on_hour, key_hash, duration=10)\n if before_key != on_key:\n changed_on_hour += 1\n\n assert changed_on_hour == 1\n\n def test_cache_suffix_day_edges(self):\n \"\"\" a suffix should still behave correctly around the end of a day\n\n This test is nearly identical to test_cache_suffix_hour_edges, but is to confirm that date changes don't\n cause a different behaviour\n \"\"\"\n before = datetime(2019, 9, 5, 23, 59, 59)\n next_day = datetime(2019, 9, 6, 0, 0, 0)\n changed_on_hour = 0\n for key_hash in range(10):\n before_key = cache_suffix_time(before, key_hash, duration=10)\n next_key = cache_suffix_time(next_day, key_hash, duration=10)\n if before_key != next_key:\n changed_on_hour += 1\n\n assert changed_on_hour == 1\n\n def test_cache_suffix_time_matches_duration(self):\n \"\"\" The number of seconds between keys changing should match duration \"\"\"\n previous_key = cache_suffix_time(self.now, 0, duration=10)\n changes = []\n for i in range(21):\n current_time = self.now + timedelta(seconds=i)\n current_key = cache_suffix_time(current_time, 0, duration=10)\n if current_key != previous_key:\n changes.append(current_time)\n previous_key = current_key\n\n assert len(changes) == 2\n assert (changes[1] - changes[0]).total_seconds() == 10\n\n def test_cache_suffix_time_jitter(self):\n \"\"\" Different key hashes should change keys at different times\n\n While starting_key and other_key might begin as the same values they should change at different times\n \"\"\"\n starting_key = cache_suffix_time(self.now, 0, duration=10)\n for i in range(11):\n current_key = cache_suffix_time(self.now + timedelta(seconds=i), 0, duration=10)\n if current_key != starting_key:\n break\n\n other_key = cache_suffix_time(self.now, 5, duration=10)\n for j in range(11):\n current_key = cache_suffix_time(self.now + timedelta(seconds=j), 5, duration=10)\n if current_key != other_key:\n break\n\n assert i != j\n","repo_name":"lizardkinggg/sentry-cicd","sub_path":"tests/snuba/tagstore/test_tagstore_backend.py","file_name":"test_tagstore_backend.py","file_ext":"py","file_size_in_byte":24563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"23886492937","text":"# ConnectionAndAuthExample.py\n\n\"\"\"This example shows how to configure the library to establish connections\nusing different host and ports, with a session identity.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom argparse import ArgumentParser, Action\nimport blpapi\nfrom blpapi import AuthOptions, AuthUser\n\nNONE = \"none\"\nUSER = \"user\"\nAPP = \"app\"\nUSERAPP = \"userapp\"\nDIR = \"dir\"\n\n\nclass AuthOptionsAction(Action): # pylint: disable=too-few-public-methods\n \"\"\"Parse authorization args from user input\"\"\"\n\n def __call__(self, parser, args, values, option_string=None):\n vals = values.split(\"=\", 1)\n\n auth = None\n if vals[0] == NONE:\n auth = AuthOptions.createDefault()\n elif vals[0] == USER:\n user = AuthUser.createWithLogonName()\n auth = AuthOptions.createWithUser(user)\n elif vals[0] == APP and len(vals) == 2:\n appName = vals[1]\n auth = AuthOptions.createWithApp(appName)\n elif vals[0] == USERAPP and len(vals) == 2:\n appName = vals[1]\n user = AuthUser.createWithLogonName()\n auth = AuthOptions.createWithUserAndApp(user, appName)\n elif vals[0] == DIR and len(vals) == 2:\n dirProperty = vals[1]\n user = AuthUser.createWithActiveDirectoryProperty(dirProperty)\n auth = AuthOptions.createWithUser(user)\n elif vals[0] == \"manual\":\n parts = []\n if len(vals) == 2:\n parts = vals[1].split(\",\")\n\n if len(parts) != 3:\n raise ValueError(\"Invalid auth option '%s'\" % values)\n\n appName, ipAddress, userId = parts\n user = AuthUser.createWithManualOptions(userId, ipAddress)\n auth = AuthOptions.createWithUserAndApp(user, appName)\n else:\n raise ValueError(\"Invalid auth option '%s'\" % values)\n\n setattr(args, self.dest, auth)\n\n\nclass HostAction(Action): # pylint: disable=too-few-public-methods\n \"\"\" Helper class to parse host arguments \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n host = values.split(\":\")\n if len(host) != 2:\n raise ValueError(\"Invalid host:port '%s'\" % values)\n host[1] = int(host[1])\n hosts = getattr(namespace, self.dest)\n if not hosts:\n setattr(namespace, self.dest, [host])\n else:\n hosts.append(host)\n\n\ndef getTlsOptions(args):\n \"\"\"Create TlsOptions from user input\"\"\"\n\n if (args.tls_client_credentials is None or\n args.tls_trust_material is None):\n return None\n\n print(\"TlsOptions enabled\")\n if args.read_certificate_files:\n credential_blob = None\n trust_blob = None\n with open(args.tls_client_credentials, 'rb') as credentialfile:\n credential_blob = credentialfile.read()\n with open(args.tls_trust_material, 'rb') as trustfile:\n trust_blob = trustfile.read()\n return blpapi.TlsOptions.createFromBlobs(\n credential_blob,\n args.tls_client_credentials_password,\n trust_blob)\n\n return blpapi.TlsOptions.createFromFiles(\n args.tls_client_credentials,\n args.tls_client_credentials_password,\n args.tls_trust_material)\n\n\ndef parseCmdLine():\n \"\"\"Parse command line arguments\"\"\"\n\n parser = ArgumentParser(description=\"Connection and Auth example\")\n\n defaultUser = AuthUser.createWithLogonName()\n defaultAuthOptions = AuthOptions.createWithUser(defaultUser)\n\n parser.add_argument(\"--auth\",\n dest=\"auth\",\n help=\"authentication option: \"\n \"user|none|app=|userapp=|dir=|\"\n \"manual=\"\n \" (default: user)\",\n metavar=\"option\",\n action=AuthOptionsAction,\n default=defaultAuthOptions)\n\n parser.add_argument(\"--host\",\n dest=\"host\",\n help=\"server name or IP, and port \"\n \"(default 'localhost:8194')\",\n metavar=\"\",\n action=HostAction)\n\n parser.add_argument(\"--retries\",\n dest=\"retries\",\n help=\"number of connection retries \"\n \"(default: number of hosts)\",\n metavar=\"option\",\n type=int,\n action=AuthOptionsAction)\n\n # TLS Options\n parser.add_argument(\"--tls-client-credentials\",\n dest=\"tls_client_credentials\",\n help=\"name a PKCS#12 file to use as a source of \"\n \"client credentials\",\n metavar=\"option\")\n parser.add_argument(\"--tls-client-credentials-password\",\n dest=\"tls_client_credentials_password\",\n help=\"specify password for accessing\"\n \" client credentials\",\n metavar=\"option\",\n default=\"\")\n parser.add_argument(\"--tls-trust-material\",\n dest=\"tls_trust_material\",\n help=\"name a PKCS#7 file to use as a source of trusted\"\n \" certificates\",\n metavar=\"option\")\n parser.add_argument(\"--read-certificate-files\",\n dest=\"read_certificate_files\",\n help=\"(optional) read the TLS files and pass the blobs\",\n action=\"store_true\")\n\n args = parser.parse_args()\n\n args.tlsOptions = getTlsOptions(args)\n\n if not args.host:\n args.host = [[\"localhost\", 8194]]\n\n if args.retries is None:\n args.retries = len(args.host)\n\n return args\n\n\nclass ConnectionAndAuthExample: # pylint: disable=too-few-public-methods\n \"\"\"This example shows how to configure the library to establish\n connections using different host and ports, with a session identity.\n \"\"\"\n\n def __init__(self, options):\n self.config = options\n\n def run(self):\n \"\"\" Execute the example \"\"\"\n\n sessionOptions = blpapi.SessionOptions()\n for i, host in enumerate(self.config.host):\n sessionOptions.setServerAddress(host[0], host[1], i)\n\n sessionOptions.setSessionIdentityOptions(self.config.auth)\n sessionOptions.setAutoRestartOnDisconnection(True)\n sessionOptions.setNumStartAttempts(self.config.retries)\n\n if self.config.tlsOptions:\n sessionOptions.setTlsOptions(self.config.tlsOptions)\n\n session = blpapi.Session(sessionOptions)\n if not session.start():\n print(\"Failed to start session.\")\n return\n print(\"Session started\")\n\n while True:\n event = session.nextEvent(1000)\n if event:\n for message in event:\n print(message)\n\n\ndef main():\n \"\"\" Main function. \"\"\"\n print(\"ConnectionAndAuthExample. Press Ctrl+C to stop.\")\n\n options = parseCmdLine()\n\n example = ConnectionAndAuthExample(options)\n\n try:\n example.run()\n except Exception as err: # pylint: disable=W0703\n print(\"Exception caught: {}\".format(err))\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Ctrl+C pressed. Stopping...\")\n\n__copyright__ = \"\"\"\nCopyright 2020. Bloomberg Finance L.P.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions: The above\ncopyright notice and this permission notice shall be included in all copies\nor substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\"\"\"\n","repo_name":"pbadenski/blpapi-python","sub_path":"examples/ConnectionAndAuthExample.py","file_name":"ConnectionAndAuthExample.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"23753372908","text":"import pickle\nimport random\nfrom pathlib import Path\n\nfrom curriculum import Curriculum\nfrom exceptions import FatalError\nfrom file_handling import unpickle, load_json_from_file, load_csv_from_file\nfrom question_asker import QuestionAsker\nfrom question_setter import QuestionSetter\nfrom record_entry import calculate_soonest_due_date, RecordEntry\nimport argparse\n\nfrom constants import (\n CURRICULA_PATH_SEGMENT,\n DATA_PATH_SEGMENT,\n DEFAULT_CURRICULUM_FILENAME,\n RECORDS_PATH_SEGMENT,\n QUESTION_WORDINGS_PATH_SEGMENT,\n COLUMN_RENAMINGS_FILENAME,\n RECORD_FILENAME_TEMPLATE,\n TEMPLATES_FILENAME\n)\nfrom verb_data import VerbData\n\ndef main(curriculum_filename=DEFAULT_CURRICULUM_FILENAME):\n column_renamings_file_path = Path(QUESTION_WORDINGS_PATH_SEGMENT, COLUMN_RENAMINGS_FILENAME)\n renamings = load_json_from_file(column_renamings_file_path)\n question_templates_file_path = Path(QUESTION_WORDINGS_PATH_SEGMENT, TEMPLATES_FILENAME)\n question_templates = load_json_from_file(question_templates_file_path)\n curriculum_file_path = Path(CURRICULA_PATH_SEGMENT, curriculum_filename)\n curriculum = Curriculum.from_dict(load_json_from_file(curriculum_file_path))\n\n record_filename = RECORD_FILENAME_TEMPLATE.format(\n base=curriculum_file_path.stem,\n hash=curriculum.replicable_hash()\n )\n record_file_path = Path(RECORDS_PATH_SEGMENT, record_filename)\n if record_file_path.is_file():\n record_entries = unpickle(record_file_path)\n else:\n data_file_path = Path(DATA_PATH_SEGMENT, curriculum.data_filename)\n verb_metadata, verb_data = load_csv_from_file(data_file_path)\n verb_data = VerbData(verb_metadata, verb_data)\n question_setter = QuestionSetter(renamings=renamings, templates=question_templates)\n questions = question_setter.set_questions(verb_data, curriculum)\n record_entries = [\n RecordEntry(question) for question in questions\n ]\n\n record_entries_due = [re for re in record_entries if re.is_due()]\n random.shuffle(record_entries_due)\n question_asker = QuestionAsker(question_templates)\n for qr in record_entries_due:\n performance = question_asker.ask(qr.question)\n qr.reschedule_according_to_performance(performance)\n\n soonest_due_date = calculate_soonest_due_date(record_entries)\n print(f\"The soonest date a question falls due is {soonest_due_date}\")\n with open(record_file_path, \"wb\") as record_file:\n pickle.dump(record_entries, record_file)\n print(\"Wrote record for this curriculum.\")\n\n\nparser = argparse.ArgumentParser(\n prog=\"python3 futur.py\",\n description=\"Spaced repetition verb endings quiz\",\n epilog=\"You just read the help text.\"\n)\nparser.add_argument(\n \"curriculum_filename\",\n nargs=\"?\",\n default=DEFAULT_CURRICULUM_FILENAME,\n help=f\"Enter the filename (no path) of a curriculum file present in {CURRICULA_PATH_SEGMENT}\"\n)\nargs = parser.parse_args()\n\ntry:\n main(curriculum_filename=args.curriculum_filename)\nexcept FatalError as fe:\n print(str(fe))\n exit(1)\n","repo_name":"KevinCHiggins/futur","sub_path":"futur.py","file_name":"futur.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8033233485","text":"\n\n\n\n\n\nimport os\nimport shutil\n\n\nimport CifFile as pycif\n\n\n\nfrom ..read.cifs.cifdata import CifData\nfrom ..vols.sphv.sphericalvol import SphericalVol\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ..utils.utils import strerr2floaterrr, verbose_dec\nimport time\n\n\n\n\nclass AlgoHandlerPostRecon:\n\n\n\n\n\n def run_shelx(self, sub_tag, count=None, skip_targ=True):\n\n ##make shelx folder\n if not os.path.exists(f'{self.path}/{sub_tag}/shelx/'):\n os.mkdir(f'{self.path}/{sub_tag}/shelx/')\n\n\n\n if count is None:\n shutil.copyfile(f'{self.hkl_count_path(sub_tag)}', f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}.hkl')\n shutil.copyfile(f'{self.path}/{self.tag}.ins', f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}.ins')\n elif count=='targ':\n cif_targ = CifData(self.cif_targ_path(), rotk=self.rotk, rottheta= self.rottheta)\n cif_targ.save_hkl(f'{self.path}/{sub_tag}/shelx/{self.tag}_targ.hkl')\n shutil.copyfile(f'{self.path}/{self.tag}.ins', f'{self.path}/{sub_tag}/shelx/{self.tag}_targ.ins')\n\n elif count >= 0:\n shutil.copyfile(f'{self.path}/{self.tag}.ins', f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}_count_{count}.ins')\n shutil.copyfile(f'{self.hkl_count_path(sub_tag, count)}', f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}_count_{count}.hkl')\n\n\n\n\n cwd = os.getcwd()\n os.chdir(f'{self.path}/{sub_tag}/shelx/')\n if count is None:\n os.system(f'shelxl {self.tag}_{sub_tag} > shelxl.log')\n elif count=='targ':\n os.system(f'shelxl {self.tag}_targ > shelxl.log')\n else:\n os.system(f'shelxl {self.tag}_{sub_tag}_count_{count} > shelxl.log')\n\n os.chdir(f'{cwd}')\n\n# @verbose_dec\n # def get_intensity(self, sub_tag, count=None, z=None, n_scats=10000, verbose=0):\n\n # print('## algo.get_intensity: Getting target cif')\n # cif_targ = CifData(self.cif_targ_path(), rotk=self.rotk, rottheta= self.rottheta)\n # print('## algo.get_intensity: Getting final cif')\n # cif_final = CifData(self.cif_final_path(sub_tag), rotk=self.rotk, rottheta= self.rottheta)\n\n # if count is not None:\n # print('## algo.get_intensity: filling hkl')\n # cif_final.fill_from_hkl(self.hkl_count_path(sub_tag, count=count), qmax=self.qmax)\n\n\n # if n_scats==-1 or n_scats >cif_targ.scat_bragg.shape[0]:\n # n_scats = cif_targ.scat_bragg.shape[0]\n # loc = np.zeros(n_scats)\n\n # scats = list(cif_targ.scat_bragg)\n # np.random.shuffle(scats)\n\n\n # print('## algo.get_intensity: Finding bragg points')\n # for i, (h, k, l, I) in enumerate(scats[:n_scats]):\n # print(f'{i}/{n_scats}', end='\\r')\n # bragg_loc =np.where( (cif_final.scat_bragg[:,:-1]==[h,k,l]).all(axis=1))[0][0]\n # loc[i] = bragg_loc\n # loc = loc.astype(int)\n\n # It = np.array(scats[:n_scats])[:,-1]\n # If = cif_final.scat_bragg[loc, -1]\n\n\n # if z=='q':\n # z = cif_final.scat_sph[loc,0]\n # elif z=='theta':\n # z = cif_final.scat_sph[loc,1]\n # elif z=='phi':\n # z = cif_final.scat_sph[loc,2]\n\n # return It, If, z\n\n @verbose_dec\n def get_intensity(self, sub_tag, count=None, n_scats=-1, verbose=0):\n\n print('## algo.get_intensity: Getting target cif')\n cif_targ = CifData(self.cif_targ_path(), rotk=self.rotk, rottheta= self.rottheta)\n print('## algo.get_intensity: Getting final cif')\n cif_final = CifData(self.cif_final_path(sub_tag), rotk=self.rotk, rottheta= self.rottheta)\n\n if count is not None:\n print('## algo.get_intensity: filling hkl')\n cif_final.fill_from_hkl(self.hkl_count_path(sub_tag, count=count), qmax=self.qmax)\n\n\n if n_scats==-1 or n_scats >cif_targ.scat_bragg.shape[0]:\n n_scats = cif_targ.scat_bragg.shape[0]\n loc = np.zeros(n_scats)\n\n scats = list(cif_targ.scat_bragg)\n\n\n print('## algo.get_intensity: Finding bragg points')\n for i, (h, k, l, I) in enumerate(cif_targ.scat_bragg):\n # print(f'{i}/{n_scats}', end='\\r')\n bragg_loc =np.where( (cif_final.scat_bragg[:,:-1]==[h,k,l]).all(axis=1))[0][0]\n loc[i] = bragg_loc\n loc = loc.astype(int)\n\n It = np.array(scats[:n_scats])[:,-1]\n If = cif_final.scat_bragg[loc, -1]\n\n # It = cif_targ.scat_bragg[:, -1]\n # If = cif_targ.scat_bragg[:, -1]\n\n\n\n\n return It, If\n\n\n\n\n\n\n\n\n def get_targ_final_scat_eq_loc(self, sub_tag):\n\n cif_targ = CifData(self.cif_targ_path(), rotk=self.rotk, rottheta= self.rottheta)\n cif_final = CifData(self.cif_final_path(sub_tag), rotk=self.rotk, rottheta= self.rottheta)\n\n\n loc = np.zeros(cif_targ.scat_bragg.shape[0])\n\n\n for i, hkli_targ in enumerate(cif_targ.scat_bragg):\n bragg_loc =np.where( (cif_final.scat_bragg[:,:-1]==hkli_targ[:-1]).all(axis=1))[0][0]\n loc[i] = bragg_loc\n\n return loc.astype(int)\n\n\n\n\n\n\n\n\n def get_geometry_vals(self, sub_tag, count=None, geometry='distances'):\n\n\n assert geometry =='distances' or 'angles', 'geometry must be \"distances\" or \"angles\"'\n\n if count is None:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}.cif')\n elif count == 'targ':\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_targ.cif')\n else:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}_count_{count}.cif')\n\n if geometry=='angles':\n key = '_geom_angle'\n else:\n key = '_geom_bond_distance'\n\n vk = cif.visible_keys[0]\n x = dict(cif[vk])[key]\n vals, errs = np.zeros(len(x)), np.zeros(len(x))\n\n for i, xi in enumerate(x):\n val, err = strerr2floaterrr(xi)\n\n vals[i] = val\n errs[i] = err\n\n return vals, errs\n\n\n def get_xyzs(self, sub_tag, count=None):\n\n if count is None:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}.cif')\n elif count == 'targ':\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_targ.cif')\n else:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}_count_{count}.cif')\n\n vk = cif.visible_keys[0]\n\n a = strerr2floaterrr(dict(cif[vk])['_cell_length_a'])[0]\n b = strerr2floaterrr(dict(cif[vk])['_cell_length_a'])[0]\n c = strerr2floaterrr(dict(cif[vk])['_cell_length_b'])[0]\n\n xstrs = dict(cif[vk])['_atom_site_fract_x']\n ystrs = dict(cif[vk])['_atom_site_fract_y']\n zstrs = dict(cif[vk])['_atom_site_fract_z']\n\n xyzs = np.zeros( (len(xstrs), 3))\n\n for i, (fracx, fracy, fracz) in enumerate( zip(xstrs, ystrs, zstrs)):\n x = strerr2floaterrr(fracx)[0]*a\n y = strerr2floaterrr(fracy)[0]*b\n z = strerr2floaterrr(fracz)[0]*c\n\n xyzs[i, :] = [x,y,z]\n\n return xyzs\n\n\n\n\n\n\n\n\n\n\n def get_shelx_rf(self, sub_tag, count=None):\n\n if count is None:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}.cif')\n elif count == 'targ':\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_targ.cif')\n else:\n cif = pycif.ReadCif(f'{self.path}/{sub_tag}/shelx/{self.tag}_{sub_tag}_count_{count}.cif')\n\n\n vk = cif.visible_keys[0]\n\n # _refine_ls_r_factor_all, _refine_ls_wr_factor_ref, _refine_ls_wr_factor_gt\n rf = dict(cif[vk])['_refine_ls_r_factor_all']\n\n return float(rf)\n\n\n def get_inten_rf(self, sub_tag, count=None):\n\n\n It, If = self.get_intensity(sub_tag, count=count)\n\n It /=np.sum(It)\n If /=np.sum(If)\n\n rf = np.sum(np.abs(It - If))/np.sum(np.abs(If))\n\n return rf\n\n\n\n def get_It_If_loc(self, sub_tag):\n\n #it is missing some intensities\n It_cif = CifData(self.cif_targ_path())\n #if has too many brag peaks\n If_hkli = np.genfromtxt(self.hkl_count_path(sub_tag, count=0),delimiter=(4,4,4,8), skip_footer=1, usecols=(0,1,2,3))\n\n loc = np.zeros(It_cif.scat_bragg.shape[0])\n print(f'checking for missing bragg points ({It_cif.scat_bragg.shape[0]})')\n for i, (h, k, l, I) in enumerate(It_cif.scat_bragg):\n print(f'{i}', end='\\r')\n bragg_loc =np.where( (If_hkli[:,:-1]==[h,k,l]).all(axis=1))[0][0]\n loc[i] = bragg_loc\n print()\n print('Done')\n\n #an array that \n loc = loc.astype(int)\n \n return loc\n\n\n @verbose_dec\n def get_inten_quick(self, sub_tag, counts, loc=None, verbose=0):\n\n\n # #it is missing some intensities\n # It_cif = CifData(self.cif_targ_path())\n #if has too many brag peaks\n If_hkli = np.genfromtxt(self.hkl_count_path(sub_tag, count=0),delimiter=(4,4,4,8), skip_footer=1, usecols=(0,1,2,3))\n\n if loc is None:\n print('loc is none, gettting loc')\n loc = self.get_It_If_loc(sub_tag)\n\n intens = np.zeros((len(counts), loc.shape[0])) \n\n # It = It_cif.scat_bragg[:,-1]\n # It /=np.sum(It)\n for i, count in enumerate(counts):\n print(i)\n\n If = np.genfromtxt(self.hkl_count_path(sub_tag, count=count),delimiter=(4, 4, 4, 8 ), skip_footer=1, usecols=3)\n\n If = If[loc]\n\n\n intens[i] = If\n\n return intens\n\n\n\n def get_targ_inten(self):\n It_cif = CifData(self.cif_targ_path())\n It = It_cif.scat_bragg[:,-1]\n return It\n\n\n def get_inten_rfs_quick(self, sub_tag, n, loc=None, It=None):\n\n #it is missing some intensities\n If_hkli = np.genfromtxt(self.hkl_count_path(sub_tag, count=0),delimiter=(4,4,4,8), skip_footer=1, usecols=(0,1,2,3))\n\n if loc is None:\n\n loc = self.get_It_If_loc(sub_tag)\n\n\n\n\n rfs = np.zeros(n) \n if It is None:\n It_cif = CifData(self.cif_targ_path())\n It = It_cif.scat_bragg[:,-1]\n\n It /=np.sum(It)\n for i in range(n):\n print(i)\n\n If = np.genfromtxt(self.hkl_count_path(sub_tag, count=i),delimiter=(4, 4, 4, 8 ), skip_footer=1, usecols=3)\n\n If = If[loc]\n\n If /=np.sum(If)\n\n rf = np.sum(np.abs(It - If))/np.sum(np.abs(If))\n rfs[i] = rf\n\n return rfs\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n @verbose_dec\n def save_dxyzs(self, sub_tag, verbose=0):\n print(f'Saving dxyz: {self.tag} {sub_tag}')\n print(f'Started: {time.asctime()}')\n\n\n ncounts = len(os.listdir(self.hkls_path(sub_tag)))\n\n dxyzs = np.zeros(ncounts)\n mindxyzs = np.zeros(ncounts)\n maxdxyzs = np.zeros(ncounts)\n stddxyzs = np.zeros(ncounts)\n\n targ_xyz = self.get_xyzs(sub_tag, count='targ')\n\n\n\n for count in range(ncounts):\n\n print(count, end='\\r')\n count_xyz = self.get_xyzs(sub_tag, count=count)\n\n count_dxyz = np.abs( targ_xyz - count_xyz)\n\n count_dxyz_norm = np.linalg.norm(count_dxyz, axis=1)\n\n dxyzs[count] = np.mean(count_dxyz_norm)\n mindxyzs[count] = np.min(count_dxyz_norm)\n maxdxyzs[count] = np.max(count_dxyz_norm)\n stddxyzs[count] = np.std(count_dxyz_norm)\n\n np.save(self.mean_dxyzs_path(sub_tag), dxyzs)\n np.save(self.min_dxyzs_path(sub_tag), mindxyzs)\n np.save(self.max_dxyzs_path(sub_tag), maxdxyzs)\n np.save(self.std_dxyzs_path(sub_tag), stddxyzs)\n\n\n print(f'Finished: {time.asctime()}')\n\n\n @verbose_dec\n def save_dgeom(self, sub_tag,geometry='distances', verbose=0):\n print(f'Saving mean bond {geometry}: {self.tag} {sub_tag}')\n print(f'Started: {time.asctime()}')\n\n\n ncounts = len(os.listdir(self.hkls_path(sub_tag)))\n\n mean_geom = np.zeros(ncounts)\n min_geom = np.zeros(ncounts)\n max_geom = np.zeros(ncounts)\n std_geom = np.zeros(ncounts)\n\n targ_xs, targ_errs = self.get_geometry_vals(sub_tag, count='targ', geometry=geometry)\n\n\n\n\n for count in range(ncounts):\n\n print(count, end='\\r')\n count_xs, count_errs = self.get_geometry_vals(sub_tag, count=count, geometry=geometry)\n\n\n count_dxs = np.abs(targ_xs- count_xs)\n mean_geom[count] = np.mean(count_dxs)\n min_geom[count] = np.min(count_dxs)\n max_geom[count] = np.max(count_dxs)\n std_geom[count] = np.std(count_dxs)\n\n\n\n if geometry=='distances':\n np.save(self.mean_bond_distances_path(sub_tag), mean_geom)\n np.save(self.min_bond_distances_path(sub_tag), min_geom)\n np.save(self.max_bond_distances_path(sub_tag), max_geom)\n np.save(self.std_bond_distances_path(sub_tag), std_geom)\n elif geometry=='angles':\n np.save(self.mean_bond_angles_path(sub_tag), mean_geom)\n np.save(self.min_bond_angles_path(sub_tag), min_geom)\n np.save(self.max_bond_angles_path(sub_tag), max_geom)\n np.save(self.std_bond_angles_path(sub_tag), std_geom)\n\n\n print(f'Finished: {time.asctime()}')\n\n\n\n\n @verbose_dec\n def save_rfs(self, sub_tag, verbose=0):\n print(f'Saving R factors: {self.tag} {sub_tag}')\n print(f'Started: {time.asctime()}')\n\n\n ncounts = len(os.listdir(self.hkls_path(sub_tag)))\n\n rfs_shelx = np.zeros(ncounts)\n\n for count in range(ncounts):\n print(count, end='\\r')\n rf_shelx = self.get_shelx_rf(sub_tag, count=count)\n rfs_shelx[count] = rf_shelx\n\n np.save(self.rfs_shelx_path(sub_tag), rfs_shelx)\n\n\n print(f'Finished: {time.asctime()}')\n\n\n\n\n\n","repo_name":"YellowSub17/scorpy-pkg","sub_path":"scorpy/algo/algohandler_postrecon.py","file_name":"algohandler_postrecon.py","file_ext":"py","file_size_in_byte":13857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"31094431409","text":"from xlwt import Workbook\nimport sys\n\ndef kodneTabele(file_name):\n \"\"\"\n Funkcija kot argument sprejme ime excel datoteke, kamor se izpisejo vse kodne zamenjave.\n \"\"\"\n\n # Crke, za katere zelimo dobiti kodne zamenjave\n txt = \"ČŠŽčšž\" \n # Datoteka excell kamor shranimo rezultate\n file_name = file_name+\".xls\"\n \n wb = Workbook()\n sheet1 = wb.add_sheet(\"Sheet 1\")\n \n sheet1.write(1, 0, \"ZNAK\") \n # Vnesi crke v prvi stolpec \n for c in range(len(txt)): \n sheet1.write(2+c, 0, txt[c])\n\n # Lista tabel\n tables_list = [\"cp852\", \"iso8859_2\", \"cp1250\", \"mac_latin2\", \"utf_8\", \"utf_16_le\", \"utf_16_be\"]\n\n # Vnesi vrednosti v celice v tabeli\n for i in range(len(tables_list)):\n # Vnesi ime kodirne tabele\n sheet1.write(0, 1+i*3, tables_list[i]) \n sheet1.write(1, 1+i*3, \"BIN\")\n sheet1.write(1, 2+i*3, \"DEC\")\n sheet1.write(1, 3+i*3, \"HEX\")\n for k in range(len(txt)):\n # Lista binarnih vrednosti za en znak\n list = [format(b, '08b') for b in txt[k].encode(tables_list[i])]\n # Zdruzi binarne vrednosti za en znak v string \n list_to_string = ''.join(map(str, list[:])) \n sheet1.write(2+k, 1+i*3, list_to_string)\n \n\n # Lista decimalnih vrednosti za en znak \n list = [str(b) for b in txt[k].encode(tables_list[i])]\n # Zdruzi decimalne vrednosti za en znak v string \n list_to_string = ' '.join(map(str, list[:]))\n sheet1.write(2+k, 2+i*3, list_to_string)\n\n # Lista hexadecimalnih vrednosti za en znak \n list = [format(b, '02x') for b in txt[k].encode(tables_list[i])]\n # Zdruzi hexadecimalne vrednosti za en znak v string\n list_to_string = ''.join(map(str, list[:])) \n sheet1.write(2+k, 3+i*3, list_to_string)\n\n wb.save(file_name)\n\n \n print(\"Kodne zamenjave za črke: \", txt)\n print(20*\"-\")\n print(\"Kodne tabele so v datoteki: \" , file_name)\n \n\ndef kodiranjeDekodiranje(file_in, file_out, file_name):\n \"\"\"\n Funkcija kot argument sprejme vhodno .txt datoteko, ime izhodne .txt datoteke in ime .xls datoteke.\n \"\"\"\n #file_in = \"kodne to¦Źke.txt\"\n #file_out = \"dekodirano_besedilo.txt\"\n\n f_in = open(file_in)\n txt_data = f_in.read()\n # Loci vrednosti - naredi seznam\n list_data = txt_data.split(\", \") \n f_in.close()\n\n tekst = \"\"\n vrednost = \"\"\n\n for i in range(len(list_data)):\n number = (int(list_data[i]))\n # Pretvori decimalno vrednost v string bitov npr. \"01010011\"\n strng = format(number, '08b') \n if len(strng) == 8:\n # Zapis v enem bajtu \n vrednost = strng\n\n if len(strng) > 8 and len(strng) < 12:\n # Zapis v dveh bajtih\n d=11-len(strng)\n vrednost = \"110\"\n vrednost += d*\"0\"\n vrednost += strng[:-6] \n vrednost += \"10\"\n vrednost += strng[-6:]\n\n if len(strng) > 11 and len(strng) < 17:\n # Zapis v treh bajtih \n d=16-len(strng)\n vrednost = \"1110\"\n vrednost += d*\"0\"\n vrednost += strng[:-12] \n vrednost += \"10\"\n vrednost += strng[-12:-6]\n vrednost += \"10\"\n vrednost += strng[-6:]\n\n if len(strng) > 16:\n # Zapis v stirih bajtih \n d=21-len(strng)\n vrednost = \"11110\"\n vrednost += d*\"0\"\n vrednost += strng[:-18] \n vrednost += \"10\"\n vrednost += strng[-18:-12]\n vrednost += \"10\"\n vrednost += strng[-12:-6]\n vrednost += \"10\"\n vrednost += strng[-6:]\n # Dvojiska base \n bajti = int(vrednost,2).to_bytes(8,'big') \n # Dekodiraj\n znak = bajti.decode(\"utf_8\") \n # Odrezi drugace v .txt file pred vsak znak zapisuje presledke... \n znak=znak[-1:]\n tekst += znak\n\n # Zapisi rezultat v .txt datoteko\n f_out = open(file_out, \"w\", encoding=\"utf_8\")\n f_out.write(tekst)\n f_out.close()\n\n # Dobi unikatne znake in jih soritraj po velikosti\n unikatni = ''.join(sorted(set(tekst))) \n\n # Datoteka excell kamor shranimo rezultate\n file_name = file_name+\".xls\"\n \n wb = Workbook()\n sheet1 = wb.add_sheet(\"Sheet 1\")\n \n # Vnesi crke v celice \n for c in range(len(unikatni)): \n sheet1.write(1+c, 0, unikatni[c])\n\n # Vnesi \"glavo\" tabele\n sheet1.write(0, 0, \"ZNAK\")\n sheet1.write(0, 1, \"BIN\")\n sheet1.write(0, 2, \"DEC\")\n sheet1.write(0, 3, \"HEX\")\n\n # Vnesi vrednosti v celice v tabeli\n for k in range(len(unikatni)):\n # Lista binarnih vrednosti za en znak\n list = [format(b, '08b') for b in unikatni[k].encode(\"utf_8\")] \n list_to_string = ''.join(map(str, list[:])) \n # Zdruzi binarne vrednosti za en znak v string\n sheet1.write(1+k, 1, list_to_string)\n\n list = [str(b) for b in unikatni[k].encode(\"utf_8\")] \n # Lista decimalnih vrednosti za en znak\n list_to_string = ' '.join(map(str, list[:])) \n # Zdruzi decimalne vrednosti za en znak v string\n sheet1.write(1+k, 2, list_to_string)\n\n list = [format(b, '02x') for b in unikatni[k].encode(\"utf_8\")] \n # Lista hexadecimalnih vrednosti za en znak\n list_to_string = ''.join(map(str, list[:])) \n # Zdruzi hexadecimalne vrednosti za en znak v string\n sheet1.write(1+k, 3, list_to_string)\n\n wb.save(file_name)\n\n\n print(\"Dekodirano besedilo je v datoteki: \" , file_out)\n print(20*\"-\")\n print(\"Unikatni znaki so v datoteki: \" , file_name)\n\n\n\nif __name__ == \"__main__\":\n try:\n filename_xls_1_out = sys.argv[1]\n filename_txt_in = sys.argv[2]\n filename_txt_out = sys.argv[3]\n filename_xls_2_out = sys.argv[4]\n\n\n except IndexError:\n print(\"Usage: python vaja2.py <.xls output file name without extension>\")\n print(\"Primer klica: python vaja2.py kodne_zamenjave_1 kodne_tocke.txt besedilo.txt tabela_unikatnih\")\n sys.exit(1)\n\n print(40*\"-\")\n print(\"Naloga 1: \")\n kodneTabele(filename_xls_1_out)\n print(40*\"-\")\n\n print(40*\"-\")\n print(\"Naloga 2: \")\n kodiranjeDekodiranje(filename_txt_in,filename_txt_out,filename_xls_2_out)\n print(40*\"-\")","repo_name":"spehj/informacija-in-kodi","sub_path":"vaja2/vaja2.py","file_name":"vaja2.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4008151601","text":"height_map = []\nbasin_map = []\n\ndef add_neighbors(_row, _col):\n neighbor_list = set()\n\n if _row > 0:\n neighbor_list.add((_row-1, _col))\n if _row < len(height_map) - 1:\n neighbor_list.add((_row+1, _col))\n if _col > 0:\n neighbor_list.add((_row, _col-1))\n if _col < len(height_map[0]) - 1:\n neighbor_list.add((_row, _col+1))\n\n return neighbor_list\n\ndef crawl_map(row_start, col_start, curr_zone): \n count = 0\n\n to_visit = set()\n visited = set()\n\n to_visit = to_visit.union(add_neighbors(row_start, col_start))\n\n while len(to_visit) > 0:\n next_loc = list(to_visit)[0]\n visited.add(next_loc)\n to_visit.remove(next_loc)\n next_loc_val = basin_map[next_loc[0]][next_loc[1]]\n\n if next_loc_val != 99999 and next_loc_val < 0:\n basin_map[next_loc[0]][next_loc[1]] = curr_zone\n count += 1\n\n neighbors = add_neighbors(next_loc[0], next_loc[1])\n\n for n in neighbors:\n if n not in visited:\n to_visit.add(n)\n\n # print(f\"For {row_start}, {col_start} checking locations {to_visit}\")\n # print(f\"Current Value {height_map[row_start][col_start]}: against: \", end='')\n return count\n\n\nfile = open(\"day_9_input.txt\", \"r\")\ndata = file.read().strip().split(\"\\n\")\n\nfor line in data:\n casted_line = [int(i) for i in line.strip()]\n height_map.append(casted_line)\n\n basin_line = []\n for item in casted_line:\n if item == 9:\n basin_line.append(99999)\n else:\n basin_line.append(-1)\n basin_map.append(basin_line)\n \nrisk_sum = 0\nzone_count = 0\n\nsizes = []\n\nfor r in range(len(height_map)):\n for c in range(len(height_map[0])):\n if basin_map[r][c] < 0:\n sizes.append(crawl_map(r, c, zone_count))\n zone_count += 1\n\nsizes.sort(reverse=True)\nprint(sizes[0] * sizes[1] * sizes[2])\n\nfile.close()","repo_name":"alex-roos/aoc21","sub_path":"day_09/day9-2.py","file_name":"day9-2.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7830598475","text":"pen_count = int(input())\nmarker_count = int(input())\ndetergent_liters = int(input())\ndiscount_percent = int(input())\n\nprice_pen = pen_count * 5.80\nprice_markers = marker_count * 7.20\nprice_detergent = detergent_liters * 1.20\n\ntotal_price = price_pen + price_markers + price_detergent\ndiscount = total_price * (discount_percent / 100)\nresult = total_price - discount\n\nprint(result)\n","repo_name":"bacesimo/Programming-Basics-Python-July-2022","sub_path":"exercise_pb/supplies_for_school.py","file_name":"supplies_for_school.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3837497914","text":"#!/usr/bin/env python3\nfrom bluepy.btle import DefaultDelegate, Peripheral\nimport bluepy\n\nfrom SimpleLogger import SimpleLogger\n\nfrom datetime import datetime\nimport queue\n\ntimestampFormat ='%d/%m/%Y-%H:%M:%S'\n\nclass LYWSD03MMC_reader:\n def __init__(self, mac, name):\n self.mac = mac\n self.name = name\n self.logger = SimpleLogger(verbose = True, loggerName = f\"Sensor-{name}-{mac}\")\n\n self.retryCounter = 0\n self.maxRetryCounter = 4\n self.p = None\n while 1:\n try:\n self.p = Peripheral(self.mac)\n self.p.disconnect()\n self.logger.log(f\"BLE connected to {self.name} ({self.mac})!\", messageType = \"OK\")\n break\n except bluepy.btle.BTLEDisconnectError as e:\n self.logger.log(f\"BLE disconnect error: {e} on {self.name} ({self.mac})\", messageType = \"ERROR\")\n self.retryCounter += 1\n if self.retryCounter > self.maxRetryCounter:\n self.logger.log(f\"Couldn't connect {self.maxRetryCounter} times to {self.name} ({self.mac})\", messageType = \"ERROR\")\n break\n\n self.q = queue.Queue()\n \n\n def wait_for_notification(self):\n while True:\n if self.p.waitForNotifications(10.0):\n break\n\n def read_data(self):\n self.logger.log(f\"Start reading data from {self.name} ({self.mac})\", messageType = \"DEBUG\")\n \n if self.p == None:\n return None\n\n try:\n self.p.connect(self.mac)\n self.p.writeCharacteristic(0x0038, b'\\x01\\x00', True) #enable notifications of Temperature, Humidity and Battery voltage\n self.p.writeCharacteristic(0x0046, b'\\xf4\\x01\\x00', True)\n self.p.withDelegate(LYWSD03MMC_delegate(self.q))\n self.wait_for_notification()\n self.p.disconnect()\n self.logger.log(f\"Reading was successful on {self.name} ({self.mac})!\", messageType = \"OK\")\n except bluepy.btle.BTLEDisconnectError as e:\n self.logger.log(f\"BLE disconnect error: {e} on {self.name} ({self.mac})\", messageType = \"ERROR\")\n\n return self.process_queue()\n\n def process_queue(self):\n qsize = self.q.qsize()\n self.logger.log(f\"Queue size on {self.name} ({self.mac}): {qsize}\", messageType = \"DEBUG\")\n if qsize > 0:\n data = self.q.get()\n self.logger.log(f\"Data from {self.name} ({self.mac}): {data}\")\n return data\n else:\n return None\n\nclass LYWSD03MMC_delegate(DefaultDelegate):\n def __init__(self, q):\n DefaultDelegate.__init__(self)\n self.queue = q\n\n def handleNotification(self, cHandle, data):\n if data is None:\n self.logger.log(f\"Empty data from {self.name} ({self.mac})\", messageType = \"WARN\")\n return\n temperature = round(int.from_bytes(data[0:2],byteorder='little',signed=True)/100, 2)\n #print(f\"Temperature: {temperature}\")\n humidity = int.from_bytes(data[2:3],byteorder='little')\n #print(f\"Humidity: {humidity}\")\n voltage=int.from_bytes(data[3:5],byteorder='little') / 1000.\n #print(f\"Voltage: {voltage}\")\n batteryLevel = min(int(round((voltage - 2.1),2) * 100), 100) #3.1 or above --> 100% 2.1 --> 0 %\n #print(f\"Battery level: {batteryLevel}\")\n\n timeStamp = datetime.now().strftime(timestampFormat)\n self.queue.put({\"time\":timeStamp, \"temperature\":temperature, \"humidity\":humidity, \"voltage\":voltage, \"battery\":batteryLevel})\n\n\n\n","repo_name":"dudasdavid/XiaomiTemperatureSensor","sub_path":"LYWSD03MMC_handler.py","file_name":"LYWSD03MMC_handler.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15286597901","text":"from flask import Blueprint, request, g, redirect, url_for, session, render_template, flash\nimport time\n\nroutes = Blueprint('submit', __name__)\n\n\n@routes.route('/submit', methods=['GET', 'POST'])\ndef submit():\n \"\"\"\n Create a new link submission\n :return: View\n \"\"\"\n if session.get('logged_in'):\n if request.method == 'GET':\n return render_template('submit.html')\n else:\n if request.form['title'] is not None and request.form['url'] is not None:\n g.db.execute(\n \"INSERT INTO links (user_name, title, url, votes, submit_date) VALUES (? , ?, ?, ?, ?)\",\n [session['user_name'], request.form['title'], request.form['url'], 1,\n time.strftime(\"%d/%m/%Y %H:%M\")])\n g.db.commit()\n return redirect(url_for('home.home'))\n else:\n flash('You must be logged in to submit a new link.')\n return redirect(url_for('auth.login'))","repo_name":"plutokid/FlaskerNews","sub_path":"FlaskerNews/FlaskerNews/views/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27422438750","text":"\n\n\ndef main_menu():\n commands = ['Показать все контакты',\n 'Открыть файл', \n 'Сoхранить файл', \n 'Новый контакт', \n 'Изменить контакт',\n 'Удалить контакт', \n 'Найти контакт', \n 'Выйти из программы']\n\n print('\\nВыберите пункт меню:')\n\n for i in range(len(commands)):\n print(f'\\t{i+1}. {commands[i]}')\n user_input = int(input('\\nВведите пункт меню: '))\n if user_input < 1 or user_input > 8:\n print('Ошибка ввода. Введите число от 1 до 8')\n return user_input \n\ndef show_contacts(phone_book: list, flag: int):\n if len(phone_book) > 0:\n for item in phone_book:\n if flag == 2:\n confirm = input(f'Удалить контакт {item[0]} {item[1]} ({item[2]}) да/нет? ')\n return confirm\n elif flag == 3:\n confirm = input(f'Изменить контакт {item[0]} {item[1]} ({item[2]}) да/нет? ')\n return confirm\n else:\n print(f'{item[0]} {item[1]} ({item[2]})')\n else:\n if flag in range(1, 4):\n print ('Контакты не найдены')\n else: \n print ('Телефонная книга пустая или не загружена')\n\ndef load_success():\n print('Телефонная книга загружена успешно')\n\ndef new_contact():\n name = input('Введите имя и фамилию контакта: ')\n phone = input('Введите номер телефона: ')\n comment = input('Введите комментарий к контакту: ')\n return name, phone, comment\n\ndef save_success(flag: int, flag2: int):\n if flag == 2:\n print('Телефонная книга сохранена успешно')\n if flag2 == 2:\n exit()\n\ndef contact_success():\n print('Контакт сохранен успешно')\n\ndef find_contact():\n search = input('Введите искомое значение: ') \n return search\n\ndef delete_contact():\n delete = input('Введите Имя и Фамилию контакта, который нужно удалить: ') \n return delete\n\ndef delete_success(confirm: str):\n if confirm == 'да':\n print('Запись успешно удалена')\n elif confirm == 'нет':\n print('Удаление записи отменено')\n else:\n print('Ошибка ввода')\n\ndef change_contact():\n change = input('Введите Имя и Фамилию контакта, который нужно изменить: ') \n return change\n\ndef change_info(confirm: str):\n if confirm == 'да':\n list = []\n name = input('Измените имя и фамилию контакта: ')\n phone = input('Введите новый номер телефона: ')\n comment = input('Введите новый комментарий к контакту: ')\n list = [name, phone, comment]\n return list\n\ndef change_success(confirm: str):\n if confirm == 'да':\n print('Запись успешно изменена')\n elif confirm == 'нет':\n print('Изменение записи отменено')\n else:\n print('Ошибка ввода')\n\ndef change_in_book(result: int):\n if result == 2:\n confirm = input('В телефонную книгу были внесены изменения. Сохранить изменения (да/нет)? ')\n return confirm\n else:\n print('Выход из программы')\n exit()\n\ndef changing_book(confirm: str): \n if confirm == 'да':\n return 2\n elif confirm == 'нет':\n print('Выход из программы')\n exit()\n else:\n print('Ошибка ввода') \n ","repo_name":"ekaterina-chusova/py_hw7","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23326410636","text":"import pandas as pd\ndata=pd.read_csv(\"/home/smiyake/sho/Fullset/txt/two_results_merge.txt\",sep=\"\\t\")\nfamily_list=[]\nfor i in data.Family:\n if i not in family_list:\n family_list.append(i)\n\nfamily_lfc_list=[]\nfor i in family_list:\n sub_data=data[data[\"Family\"] ==i]\n Ttran_sum=sub_data.Log2FoldChange_TEtranscripts.sum()\n Ttools_sum=sub_data.Log2FoldChange_TEtools.sum()\n T_ave=(Ttran_sum+Ttools_sum)/2\n family_lfc_list.append([i,sub_data.iloc[0].Class,len(sub_data),T_ave/len(sub_data)])\n\nfamily_lfc_list=pd.DataFrame(family_lfc_list,columns=[\"Family\",\"Class\",\"Counts\",\"Log2FoldChange_ave\"])\n\nfamily_lfc_list.to_csv(\"/home/smiyake/sho/Fullset/txt/lfc_groupedby_family.txt\",sep=\"\\t\",index=False)\n","repo_name":"HNOthree/my_directory","sub_path":"Study_abroad/Fullset/step1/cal_lfc_ave_grouped_by_family.py","file_name":"cal_lfc_ave_grouped_by_family.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41462576335","text":"\n\n#\n# THIS IS A POC ONLY AND SHOULD NOT BE USED IN PRODUCTION\n#\n\n\n#\n# For paid support/development activities, please visit https://otnss.co.uk\n#\n# For open source support, please visit https://github.com/unixhead/pyFileTransferDiode\n#\n\n# txFileTransferDiode.py\n# Detects new files in a folder (inFolder) and transmits via UDP for sending over uni-directional links\n#\n# Original source: https://github.com/unixhead/pyFileTransferDiode\n\n# Beerware license\n\n\nimport os \nfrom socket import *\nimport struct\nimport time\nimport zfec\nimport math\n\n# how frequently to check folder for new files in seconds\npollTime=5\n\n# Will transmit files received in the inFolder and move them to sentFolder\n# To clear out sentFolder, another process is needed.\n#\n#use the full path for this to avoid potential security issues, the filenames do get basic filtering but there's a degree of assumed trust\ninFolder=\"in\"\nsentFolder=\"sent\"\n\ntempFolder=\"temp\"\n\n#where to send files\nsendToIP=\"127.0.0.1\"\nsendToPort=10337\nfileNameSize=12 #size of filename field, you can increase this but it's put in every packet header so avoid going too large as it'll reduce throughput\n\n\n#FEC ratio - must be between 0 and 1\n#This is the redundancy requirement for the forward error correction, it sets how many blocks must successfully arrive to reassemble the file\n# e.g. 0.5 = 50% of blocks must arrive\n# 0.8 = 80% of blocks must arrive\ndefaultFecRatio = 0.8\nif defaultFecRatio > 1 or defaultFecRatio < 0:\n print(\"defaultFecRatio must be between 0 and 1, recommend set to 0.8\")\n exit\n\n#size of data to be transferred - TODO - make this an MTU setting\nchunkSize=1400 # size of data in each packet\n\n# introduced a delay between packets to lower chances of drops\n# if network is a bit slower then may need to increase this, but obviously decreases throughput\npacketDelay = 0.001\n#delay between sending files to give remote end a chance to unzfec and tidy up\nfileDelay = 0.1\n\n#Internal variable - packet format\nformatStr=\"III\"+str(fileNameSize)+\"sI\"+str(chunkSize)+\"s\"\n\n\ndef debugLog(data):\n print(data)\n\n#return unix timestamp in milliseconds\ndef getTimeMS():\n return int(time.time()*1000)\n\n\n# Error signalling - send a fault message and quit afterwards\ndef sendError(error):\n error = error + \"The transmitting program will now exit and need manually restarting once the error is resolved.\"\n debugLog(\"Sending error packet: \" + error)\n s = socket(type=SOCK_DGRAM)\n errMsg = bytes(error,\"utf-8\")\n errCode = bytes(\"ERROR\", \"utf-8\")\n txData=struct.pack(formatStr, 99 , 1, 1,errCode, len(errMsg), errMsg)\n s.sendto(txData,(sendToIP,int(sendToPort)))\n s.close()\n exit(0)\n\n\n#called with a filepath/name and transmits it to the remote site.\ndef sendFile(fileName, type = 0, overrideFileName = False):\n debugLog(\"sendFile \" + str(fileName) + \" type: \" + str(type))\n s = socket(type=SOCK_DGRAM)\n fH = open(fileName, \"rb\")\n fileSize=os.stat(fileName).st_size\n debugLog(\"Sending file: \" + str(fileName) + \" - size: \" + str(fileSize))\n num = math.ceil( fileSize / chunkSize)\n i=0\n\n # build packet and transmit\n while i < num:\n # Packet format is:\n # messageType (int) - 0 = data transfer, 1 = FEC transfer, 2 = last FEC packet, 69 = checksum, 99 = error\n # currentSerial (int)\n # totalPackets (int)\n # filename (char[fileNameSize])\n # dataSize (int) - size of data in this packet\n # bytes[chunkSize] - actual data \n\n dataChunk = fH.read(chunkSize)\n \n dataSize=len(dataChunk)\n if type == 0:\n fileNameTrimmed = fileName.replace(inFolder + \"/\", '') # remove the path for filename value that goes into packet header\n elif type == 1 or type == 2:\n fileNameTrimmed = overrideFileName.replace(inFolder + \"/\", '') # for FECS then use the original file name rather than the fec itself\n\n #debugLog(\"trimmed name: \" +fileNameTrimmed)\n fileNameEncoded=bytes(fileNameTrimmed,\"utf-8\") # will get truncated to fileNameSize when packed, or null padded if shorter\n \n txData=struct.pack(formatStr, type , i, num,fileNameEncoded, dataSize, dataChunk)\n s.sendto(txData,(sendToIP,int(sendToPort)))\n i+=1\n #delay 1ms to try and avoid dropped packets\n time.sleep(packetDelay)\n\n # Finished sending, close handles for file and socket\n fH.close()\n s.close()\n\n\n# uses zfec to create a bunch of FEC files, sends those over network.\ndef processFile(fileName):\n debugLog(\"Processing file \" + fileName)\n # work out K & M values\n # K = how many blocks needed to recreate file\n # M = how many blocks to break file into, max 256\n \n #set using blocksPerMeg so I can play around tuning it\n blocksPerMeg = 0.1\n minBlocks = 20\n \n fileSize=os.stat(fileName).st_size\n numBlocks = int((fileSize/1024/1024) * blocksPerMeg)\n if numBlocks < minBlocks:\n numBlocks = minBlocks\n if numBlocks > 256:\n numBlocks = 256\n\n minGoodBlocks = int(defaultFecRatio * numBlocks)\n\n debugLog(\"sending with numBlocks: \" + str(numBlocks) + \" and min: \" + str(minGoodBlocks))\n\n fH = open(fileName, \"rb\")\n zfec.filefec.encode_to_files(fH, fileSize, tempFolder, \"tmp\", minGoodBlocks, numBlocks, \".fec\", True, True)\n fecList = os.listdir(tempFolder)\n n=1\n nFecs = len(fecList)\n for fec in fecList:\n # send the FEC, last one needs to be type 2\n if n==nFecs:\n packetType = 2\n else: \n packetType = 1\n debugLog(\"Sending FEC # \" + str(n))\n #send it\n sendFile(tempFolder + \"/\" + fec, packetType, fileName)\n\n # delete the file\n os.remove(tempFolder + \"/\" + fec)\n\n n+=1\n\n # wait between transmissions => attempt to reduce dropped packets\n time.sleep(fileDelay)\n\n\n# Main loop, watches for files appearing in the \"inFolder\" and then sends them. \n\nwhile True:\n #debugLog(\"Main loop\")\n\n #check folders exist\n if not os.path.exists(inFolder):\n sendError(\"The folder : \" + str(inFolder) + \" does not exist, please create it or update configuration to the correct path for new files.\")\n\n if not os.path.exists(sentFolder):\n sendError(\"The folder : \" + str(sentFolder) + \" does not exist, please create it or update configuration to the correct path for new files.\")\n\n if not os.path.exists(tempFolder):\n sendError(\"The folder : \" + str(tempFolder) + \" does not exist, please create it or update configuration to the correct path for new files.\")\n\n\n # look for any new folders in the inFolder\n fileList = os.listdir(inFolder)\n for file in fileList:\n if os.path.isfile(inFolder + \"/\" + file): # only send files in the directory, not links, other directories, etc\n debugLog(\"Got new file: \" + str(file))\n\n #process it\n processFile(inFolder + \"/\" + file)\n \n\n #TODO send checksum across\n\n\n #now move it to the archive folder\n outFile = sentFolder + \"/\" + file\n if os.path.exists(outFile): # if it does then name this file with a timestamp afterwards\n outFile = outFile + str(getTimeMS())\n\n try:\n os.rename(inFolder + \"/\" + file, outFile)\n except PermissionError: \n sendError(\"Permission error writing to the output folder.\")\n except OSError as error: \n sendError(\"Failed to move file to output folder, error was: \" + str(error) + \".\") \n\n\n\n #sleep for the configured interval time\n time.sleep(pollTime)\n","repo_name":"unixhead/pyFileTransferDiode","sub_path":"FEC/txFileTransferDiode.py","file_name":"txFileTransferDiode.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13611553733","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA module to process raw cytokine data.\nFeatures:\n- Flag and interpolate missing data\n- log-transform and normalize data\n- fit cubic splines\n- extract features (integrals, concentrations & derivatives)\n\n@author:tjrademaker\nMarch 2020\n\nbased off a module by\n@author:frbourassa\nJuly 2019\n\"\"\"\nimport os,sys\nfrom sys import platform as sys_pf\nif sys_pf == 'darwin':\n\timport matplotlib\n\tmatplotlib.use(\"TkAgg\")\nimport tkinter as tk\nfrom tkinter import ttk\nfrom scripts.process.adapt_dataframes import set_standard_order\nfrom scripts.gui.plotting.plottingGUI import createLabelDict, checkUncheckAllButton, selectLevelsPage\nfrom scripts.process.adapt_dataframes import set_standard_order\n\nimport numpy as np\nimport scipy\nfrom scipy import interpolate\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsplitPath = os.getcwd().split('/')\npath = '/'.join(splitPath[:splitPath.index('cytokine-pipeline-master')+1])+'/'\n\ndef moving_average(points, kernelsize):\n \"\"\" Moving average filtering on the array of experimental points, averages over a block of size kernelsize.\n kernelsize should be an odd number; otherwise, the odd number below is used.\n The ith smoothed value, S_i, is\n $$ S_i = \\frac{1}{kernelsize} \\sum_{j = i-kernelsize//2}^{i + kernelsize//2} x_j $$\n Values at the boundary are smoothed with smaller and smaller kernels (up to size 1 for boundary values)\n\n Args:\n points (1darray): the experimental data points\n kernelsize (int): odd integer giving the total number of points summed\n\n Returns:\n smoothed (ndarray): the smoothed data points.\n \"\"\"\n\n smoothed = np.zeros(points.shape)\n if kernelsize % 2 == 0: # if an even number was given\n kernelsize -= 1\n w = kernelsize // 2 # width\n end = smoothed.shape[0] # index of the last element\n\n # Smooth the middle points using slicing.\n smoothed[w:end - w] = points[w:end - w]\n for j in range(w): # Add points around the middle one\n smoothed[w:-w] += points[w - j - 1:end - w - j - 1] + points[w + j + 1:end - w + j + 1]\n\n # Use the loop to treat the two points at a distance j from boundaries\n if j < w:\n smoothed[j] = np.sum(points[0:2*j + 1], axis=0) / (2*j + 1)\n smoothed[-j - 1] = np.sum(points[-2*j - 1:], axis=0)/(2*j + 1)\n\n # Normalize the middle points\n smoothed[w:end - w] = smoothed[w:end - w] / kernelsize\n\n return smoothed\n\n\ndef log_management(df, take, rescale, lod={}):\n \"\"\" Function to either take the log or normalize the concentrations\n Args:\n df (pd.DataFrame): the time series before taking the log. \"Cytokine\"\n should be the outermost level of the column MultiIndex.\n take (bool): whether to take the log or not\n lod (dict): lower limit of detection, in nM, of each cytokine.\n Returns:\n df_log (pd.DataFrame): the log or normalized series\n \"\"\"\n df_log = pd.DataFrame(np.zeros(df.shape), index=df.index, columns=df.columns)\n # If log, make the smallest log 0 and the largest be\n # maxlog within each cytokine.\n # Else, we linearly rescale the concentrations between 0 and 10\n # Must proceed one cytokine at a time\n\n for cyt in df.columns.get_level_values(\"Cytokine\").unique():\n df_cyt = df.xs(cyt, level=\"Cytokine\", axis=1)\n min_conc = lod.get(cyt, df_cyt.values.min())\n\n if np.isnan(min_conc):\n min_conc = df_cyt.dropna().values.min()\n\n if take & rescale:\n max_conc = df_cyt.values.max()\n df_log.loc[:, cyt] = (np.log10(df_cyt.values) - np.log10(min_conc)) / (np.log10(max_conc) - np.log10(min_conc))\n elif take:\n df_log.loc[:, cyt] = np.log10(df_cyt.values) - np.log10(min_conc)\n else:\n df_log.loc[:, cyt] = df_cyt.values / max_conc * 10\n\n return df_log\n\n\ndef smoothing_data(df, kernelsize=3):\n \"\"\" Function to smooth all cytokine time series in the dataframe with a moving average filter.\n\n Args:\n data (pd.DataFrame): indexed with row levels (Peptide, Concentration) and column levels (Cytokine, Time)\n kernelsize (int): the number of points considered when averaging.\n Default: 3.\n\n Returns:\n smoothed (pd.DataFrame of UnivariateSpline): Spline objects, one per cytokine, ligand, and concentration triplet.\n \"\"\"\n smoothed = pd.DataFrame(np.zeros(df.shape), index=df.index, columns=df.columns)\n for cyto in df.columns.get_level_values(\"Cytokine\").unique():\n smt = moving_average(df[cyto].values.T, kernelsize=kernelsize)\n smoothed.loc[:, cyto] = smt.T\n return smoothed\n\n\ndef generate_splines(df, smoothed, rtol=1/2):#check_finite=True\n \"\"\" Function to prepare a DataFrame of splines objects, fitted on the\n inputted data. Same indexing as the raw data, but without time.\n\n Args:\n df (pd.DataFrame): the raw data, maybe after log management\n smoothed (pd.DataFrame): the smoothed data\n rtol (float): the fraction of the sum of squared residuals between\n raw and smoothed data used as a tolerance on spline fitting.\n\n Returns:\n spline_frame (pd.DataFrames): DataFrame of spline objects,\n one per cytokine per condition\n \"\"\"\n # The experimental time points do not include time t = 0, of course, but we want\n # to integrate starting from t = 0. So, extrapolate to 0 by saying that\n # all cytokines are at their minimum value, which is zero.\n exp_times=df.columns.levels[1].to_list()\n inter_t = np.concatenate(([0], exp_times))\n # Create an empty DataFrame\n spline_frame = pd.DataFrame(None, index=df.index,\n columns=df.columns.get_level_values(\"Cytokine\").unique(),\n dtype=object)\n for cyto in spline_frame.columns:\n for row in spline_frame.index:\n y = np.concatenate(([0],smoothed.loc[row, cyto]))\n r = np.concatenate(([0],df.loc[row, cyto]))\n tolerance = rtol * np.sum((y - r)**2)\n spl = scipy.interpolate.UnivariateSpline(inter_t, y, s=tolerance)\n spline_frame.loc[row, cyto] = spl\n return spline_frame\n\n\ndef lod_import(date):\n \"\"\" Function to import a LOD dictionary associated to the cytokine\n concentration file named cyto_file. Looks in lod_folder for a file\n containing the same experiment name than in cyto_file.\n\n LOD dictionary structure (Sooraj): Each pickle file is a dictionary that\n has 7 keys, one for each cytokine, each pointing to a list with 4 numbers.\n Number 1: Minimum Limit of Detection in GFI for cytokine\n Number 2: Maximum Limit of Detection in GFI for cytokine\n Number 3: Minimum Limit of Detection in Concentration for cytokine\n Number 4: Maximum Limit of Detection in Concentration for cytokine\n We are particularly interested in number 3. Numbers 1-2 can change if\n Sooraj performs dilutions.\n\n Args:\n cyto_file (str): the name of the cytokine data file.\n\n Returns:\n lower_bounds (dict): the dictionary containing the lower limit of\n detection for each cytokine (keys are cytokine names).\n \"\"\"\n # Look for all LOD with the right date\n lod_file = [file for file in os.listdir(path+\"data/LOD/\") if ((date in file) & file.endswith(\".pkl\"))]\n\n if lod_file==[]:\n print(\"Will rescale with the minimum value of cytokines in the data, because it could not find the LOD file\\n\")\n return {}\n\n else:\n lod_dict=pd.read_pickle(path+\"data/LOD/\"+lod_file[0])\n\n # Return only the lower bounds, in nM units\n lower_bounds = {cy:a[2] for cy, a in lod_dict.items()}\n return lower_bounds\n\ndef treat_missing_data(df):\n \"\"\" Function to remove randomly or structurally missing datapoints, search for suspicious entries (zeros in all cytokines after having been nonzero).\n If found, set to NaN, then interpolate linearly\n\n Args:\n df (pd.DataFrame): ordered dataframe\n\n Returns:\n df (pd.DataFrame): ordered dataframe with zeros set to NaN\n \"\"\"\n # Check for zeros (=minimum) per cytokine and time\n df_zero=(np.sum(df==df.min(),axis=1)==len(df.columns)).unstack(\"Time\")\n\n # Logic: after having been nonzero cannot be zero in all cytokines at the same time\n remove_idx_time={}\n for time in range(1,len(df_zero.columns)):\n save_idx=[]\n for idx in range(len(df_zero)):\n if (not df_zero.iloc[idx,0:time].all()) & (df_zero.iloc[idx,time]):\n save_idx.append(idx)\n remove_idx_time[time]=save_idx\n\n # as a one liner\n # remove_idx_time={time:[idx for idx in range(len(df_zero)) if (not df_zero.iloc[idx,0:time].all()) & (df_zero.iloc[idx,time])] for time in range(1,len(df_zero.columns))}\n\n # Set missing data to NaN\n df_=df.copy()\n for k in remove_idx_time.keys():\n vals=remove_idx_time.get(k)\n if len(vals) == 0:\n continue\n for v in vals:\n df_.loc[tuple(list(df_zero.iloc[v,:].name)+[df_zero.columns[k]])] = np.nan\n\n # Interpolate NaNs linearly and return dataframe to desired shape\n df_=df_.interpolate(method=\"linear\").unstack(\"Time\")\n # For nonlinear interpolation methods applied to MultiIndex, see\n # https://stackoverflow.com/questions/32496062/how-can-i-interpolate-based-on-index-values-when-using-a-pandas-multiindex\n\n return df_\n\n\ndef extract_features(df_spline,max_time=72):\n \"\"\" Function to extract integrals, concentrations and derivatives from splines\n\n Args:\n df_spline (pd.DataFrame): dataframe of splines\n max_time (int): maximum time at which to extract features. Default = 72\n\n Returns:\n df (pd.DataFrame): dataframe with features\n \"\"\"\n times=1+np.arange(max_time)\n df = pd.DataFrame(np.zeros((len(df_spline.index),len(times))), index=df_spline.index, columns=times)\n df.columns.name = \"Time\"\n df=pd.DataFrame(df.stack(level=\"Time\"))\n df.columns = pd.MultiIndex.from_arrays([['integral'], [\"IFNg\"]], names=['Feature','Cytokine'])\n\n for cyto in df_spline.columns:\n df['integral',cyto] = np.array([[df_spline[cyto].iat[i].integral(0,time) for time in times] for i in range(len(df_spline.index))]).flatten()\n df['concentration',cyto] = np.array([df_spline[cyto].iat[i](times) for i in range(len(df_spline.index))]).flatten()\n df['derivative',cyto] = np.array([df_spline[cyto].iat[i].derivative()(times) for i in range(len(df_spline.index))]).flatten()\n\n return df\n\n\ndef update_integral_features(df_int):\n \"\"\" Function to make integrals monotonuous. Decreasing integrals are an\n artefact from the spline fitting procedure. Knots are fixed at start and\n end, which may cause the concentration to dip below the unphysical zero\n (in minimum of log transformed space), and thus integrals to decrease.\n\n Args:\n df_int(pd.DataFrame): dataframe with integral features (potentially nonmonotonuous)\n\n Returns:\n df_int (pd.DataFrame): dataframe with integral features without artificiality\n \"\"\"\n df_int=df_int.unstack(\"Time\").stack(\"Cytokine\")\n for time in df_int.columns:\n df_int[time]-=np.nansum((df_int.diff(axis=1)[df_int.diff(axis=1)<0]).loc[:,np.arange(1,time+1)],axis=1)\n\n return df_int.stack(\"Time\").unstack(\"Cytokine\")\n\n\ndef process_file(folder,file, **kwargs):\n \"\"\" Function to process the raw cytokine concentrations time series:\n Find missing data points and linearly interpolate between them, take log, rescale and smooth with a moving average, interpolate with cubic splines, and extract features (integral, concentration & derivatives) at desired times\n Also tries to load limits of detection\n\n Args:\n data_file (str): path to the raw data file (a pickled pd.DataFrame)\n\n Keyword args:\n take_log (bool): True to take the log of the concentrations in the\n preprocessing, False if the networks have to deal with raw values.\n Default: True.\n rescale_max (bool): True: rescale concentrations by their maximum to\n account for experimental variability, False if we postpone\n normalization to a later stage.\n Default: False.\n smooth_size (int, default=3): number of points to consider when\n performing the moving average smoothing on the data points.\n In other words, width of the kernel.\n rtol_splines (float): tolerance for spline fitting: specify the\n fraction of the sum of squared residuals between the raw data\n and the data smoothed with a moving average that will be used\n as the total error tolerance in UnivariateSpline. Default: 1/2\n\n Returns:\n data (pd.DataFrame): the rearranged raw data, before processing.\n data_log (pd.DataFrame): the normalized log time series\n data_smooth (pd.DataFrame): log data after applying a moving average\n df (pd.DataFrame): processed data after extracting features from splines\n \"\"\"\n # Processing-related keyword arguments\n take_log = kwargs.get(\"take_log\", True)\n rescale_max = kwargs.get(\"rescale_max\", False)\n smooth_size = kwargs.get(\"smooth_size\", 3)\n rtol_splines = kwargs.get(\"rtol_splines\", 1/2)\n max_time = kwargs.get(\"max_time\", 72)\n\n # Import raw data\n data = pd.read_pickle(folder+file)\n\n # Put all timepoints for a given cytokine in continuous columns\n data = data.stack().unstack('Cytokine')\n\n # Check for randomly or structurally missing datapoints and interpolate between them\n data = treat_missing_data(data)\n\n # Import the limits of detection, if any\n cytokine_lower_lod = lod_import(file[32:40])\n\n # Take the log of the data if take_log, else normalize in linear scale\n data_log = log_management(data, take=take_log, rescale=rescale_max, lod=cytokine_lower_lod)\n\n # Smooth the data points before fitting splines for interpolation\n data_smooth = smoothing_data(data_log, kernelsize=smooth_size)\n\n # Fit cubic splines on the smoothed series\n spline_frame = generate_splines(data_log, data_smooth,rtol=rtol_splines)\n\n # Extract integral, concentration and derivative features from splines at set timepoints\n df = extract_features(spline_frame,max_time=max_time)\n\n # Update concentration and integral\n #TODO: speed of the code is limited by updating integrals. Optimizing this could make running time LIGHTNING FAST\n df[df.concentration<0]=0\n df[\"integral\"] = update_integral_features(df.integral)\n\n # Return data in various stages of processing\n return [data, data_log, data_smooth, df]\n\nclass SplineDatasetSelectionPage(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n # Action radio buttons at the top\n self.actionWindow = tk.Frame(self)\n self.actionWindow.pack(side=tk.TOP,padx=10,pady=20)\n l1 = tk.Label(self.actionWindow, text=\"Select action:\", font='Helvetica 18 bold').grid(row=0,column=0,sticky=tk.W)\n rblist = []\n actions = ['Create Splines','Plot Splines']\n actionVar = tk.StringVar(value=actions[0])\n for i,action in enumerate(actions):\n rb = tk.Radiobutton(self.actionWindow,text=action, variable=actionVar,value=action)\n rb.grid(row=i+1,column=0,sticky=tk.W)\n rblist.append(rb)\n # Buttons at the bottom to navigate between pages\n # Pack them before the data selection so it does not shrink.\n self.buttonWindow = tk.Frame(self)\n self.buttonWindow.pack(side=tk.BOTTOM, pady=10, expand=tk.YES)\n\n tk.Button(self.buttonWindow, text=\"OK\",\n command=lambda: collectInputs()\n ).pack(in_=self.buttonWindow,side=tk.LEFT)\n tk.Button(self.buttonWindow, text=\"Back\",\n command=lambda: master.switch_frame(master.homepage)\n ).pack(in_=self.buttonWindow,side=tk.LEFT)\n tk.Button(self.buttonWindow, text=\"Quit\",\n command=lambda: quit()\n ).pack(in_=self.buttonWindow,side=tk.LEFT)\n\n # Collect the list of available data sets\n folder = path+\"data/final/\"\n fileNameDict = {}\n for fileName in os.listdir(folder):\n if fileName.endswith(\".pkl\"):\n fileNameDict[fileName[41:-10]] = fileName\n sortedFileNameDict = set_standard_order(pd.DataFrame({'Data':list(fileNameDict.keys())}),returnSortedLevelValues=True)\n trueLabelDict = {'Select dataset':sortedFileNameDict['Data']}\n\n # Frame to contain the scrollable canvas and the scrollbar within the\n # main window.\n self.labelWindow1 = tk.Frame(self)\n self.labelWindow1.pack(side=tk.TOP,padx=10,fill=tk.X,expand=tk.NO)\n\n # Make canvas inside that frame\n self.w1 = tk.Canvas(self.labelWindow1, borderwidth=0,\n height=600)\n\n # Make scrollbar in side the self.labelWindow1 frame as well\n scr_v1 = tk.Scrollbar(self.labelWindow1, orient=tk.VERTICAL, command=self.w1.yview)\n scr_v1.pack(side=tk.RIGHT,fill=tk.Y)\n # Add and bind scrollbar to canvas\n self.w1.config(yscrollcommand=scr_v1.set)\n self.w1.pack(fill=tk.BOTH, expand=tk.NO)\n\n # Make a frame to contain the list of radio buttons inside the Canvas\n # This is to create all buttons at once so they can be scrolled\n self.labelWindow = tk.Frame(self.w1)\n self.labelWindow.pack(fill=tk.BOTH, expand=tk.NO)\n self.w1.create_window((0,0), window=self.labelWindow, anchor = tk.NW)\n\n # Bind the label frame's to the canvas' size\n # See https://stackoverflow.com/questions/3085696/adding-a-scrollbar-to-a-group-of-widgets-in-tkinter\n self.labelWindow1.bind(\"\", self.onFrameConfigure)\n\n # Adding radio buttons for the different datasets, all linked together\n # by a check/uncheck all\n levelValueCheckButtonList = []\n overallCheckButtonVariableList = []\n checkAllButtonList = []\n uncheckAllButtonList = []\n i=0\n maxNumLevelValues = 0\n self.labels_list = []\n for levelName in trueLabelDict:\n j=0\n levelCheckButtonList = []\n levelCheckButtonVariableList = []\n levelLabel = tk.Label(self.labelWindow, text=levelName+':', font='Helvetica 18 bold')\n levelLabel.grid(row=1,column = i*6,sticky=tk.N,columnspan=5)\n for levelValue in trueLabelDict[levelName]:\n includeLevelValueBool = tk.BooleanVar()\n cb = tk.Checkbutton(self.labelWindow, text=levelValue, variable=includeLevelValueBool)\n self.labels_list.append(levelValue)\n cb.grid(row=j+4,column=i*6+2,columnspan=2,sticky=tk.W)\n self.labelWindow.grid_columnconfigure(i*6+3,weight=1)\n cb.select()\n levelCheckButtonList.append(cb)\n levelCheckButtonVariableList.append(includeLevelValueBool)\n j+=1\n\n checkAllButton1 = checkUncheckAllButton(self.labelWindow,levelCheckButtonList, text='Check All')\n checkAllButton1.configure(command=checkAllButton1.checkAll)\n checkAllButton1.grid(row=2,column=i*6,sticky=tk.N,columnspan=3)\n checkAllButtonList.append(checkAllButton1)\n\n uncheckAllButton1 = checkUncheckAllButton(self.labelWindow,levelCheckButtonList, text='Uncheck All')\n uncheckAllButton1.configure(command=checkAllButton1.uncheckAll)\n uncheckAllButton1.grid(row=2,column=i*6+3,sticky=tk.N,columnspan=3)\n uncheckAllButtonList.append(checkAllButton1)\n\n levelValueCheckButtonList.append(levelCheckButtonList)\n overallCheckButtonVariableList.append(levelCheckButtonVariableList)\n if len(trueLabelDict[levelName]) > maxNumLevelValues:\n maxNumLevelValues = len(trueLabelDict[levelName])\n i+=1\n\n # Adjust canvas width based on the length of the longest label.\n font2 = tk.font.Font(family=\"Helvetica\")\n max_label_width = max(map(font2.measure, self.labels_list))\n # Add some padding around the longest label, for the radio button etc.\n self.w1.config(width=max(400, max_label_width+50))\n\n def collectInputs():\n includeLevelValueList = []\n #Decode boolean array of checkboxes to level names\n i = 0\n for levelName,checkButtonVariableList in zip(trueLabelDict,overallCheckButtonVariableList):\n tempLevelValueList = []\n for levelValue,checkButtonVariable in zip(trueLabelDict[levelName],checkButtonVariableList):\n if checkButtonVariable.get():\n tempLevelValueList.append(levelValue)\n #Add time level values in separately using time range entrybox\n if i == len(trueLabelDict.keys()) - 2:\n timeRange = e2.get().split('-')\n includeLevelValueList.append(list(range(int(timeRange[0]),int(timeRange[1]))))\n includeLevelValueList.append(tempLevelValueList)\n i+=1\n\n if actionVar.get() == 'Create Splines':\n for fileName in includeLevelValueList[0]:\n fullFileName = fileNameDict[fileName]\n [data, data_log, data_smooth, df]=process_file(folder,fullFileName)\n df.to_hdf(path+\"data/processed/\"+fileName+\".hdf\", key=\"Features\", mode=\"w\")\n print(\"All splines created and saved as hdf.\")\n #TODO: allow every stage (log/smooth/spline) to be plotted, not just spline\n else:\n dflist = []\n for fileName in includeLevelValueList[0]:\n fullFileName = fileNameDict[fileName]\n df = pd.read_hdf(path+\"data/processed/\"+fullFileName[41:-10]+\".hdf\",key='Features')\n dflist.append(df)\n try:\n fulldf = pd.concat(dflist,keys=includeLevelValueList[0],names=['Data'])\n except:\n tk.messagebox.showerror(\"Error\", \"Datasets have different types of levels. Please change your selections.\")\n else:\n stackedFullDf = fulldf.stack().stack().to_frame('value')\n print(stackedFullDf)\n stackedFullDf = stackedFullDf.swaplevel(i=-3,j=-1,axis=0)\n print(stackedFullDf)\n stackedFullDf = set_standard_order(stackedFullDf.reset_index())\n stackedFullDf = pd.DataFrame(stackedFullDf['value'].values,index=pd.MultiIndex.from_frame(stackedFullDf.iloc[:,:-1]))\n stackedFullDf.columns = ['value']\n master.switch_frame(selectLevelsPage,stackedFullDf,SplineDatasetSelectionPage)\n\n\n def onFrameConfigure(self, event):\n \"\"\" Reset the scroll region to encompass the entire inner frame,\n so no radio button labels are missing. \"\"\"\n self.w1.configure(scrollregion=self.w1.bbox(\"all\"))\n\n def resizeFrame(self, event):\n width = event.width\n self.labelWindow1.itemconfig(self)\n","repo_name":"tjrademaker/cytokine-pipeline","sub_path":"scripts/process/process_raw_data.py","file_name":"process_raw_data.py","file_ext":"py","file_size_in_byte":23223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"16764311747","text":"popcount=[0]*32\nfor i in range(1,32):\n popcount[i]=popcount[i//2]+(i%2)\n\ndef hypot(p,q):\n return ((p[0]-q[0])**2+(p[1]-q[1])**2)**0.5\n\nN,M=map(int,input().split())\npos=[tuple(map(int,input().split())) for _ in range(N+M)]\n\ndp=[[1e18]*(1<<(N+M)) for _ in range(N+M)]\nfor i in range(N+M): dp[i][1<>N]\n for i in range(N+M):\n if not (s>>i)&1: continue\n for j in range(N+M):\n if (s>>j)&1: continue\n new_dist=dp[i][s]+hypot(pos[i],pos[j])*coef\n if dp[j][s^(1<new_dist: dp[j][s^(1<>N])\n\nprint(f\"{ans:.10f}\")\n","repo_name":"harukaeru/CompetitiveProgramming","sub_path":"abc274/E/mmmm.py","file_name":"mmmm.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"40625932533","text":"import os\nfrom setuptools import setup, find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\ndesc = \"A project generator tools for easy create project skeleton.\"\n\ntry:\n\tlong_description = read('ansprogen/README.md')\nexcept:\n\tlong_description = desc\n\nsetup(\n name = \"ansprogen\",\n version = \"0.0.5\",\n author = \"Robin Syihab\",\n author_email = \"r[@]nosql.asia\",\n description = (desc),\n license = \"MIT\",\n keywords = \"ansprogen project generator\",\n url = \"http://www.mindtalk.com/u/anvie\",\n\tdownload_url = \"https://github.com/anvie/Ansprogen\",\n packages=find_packages(),\n\tpackage_data = {\n\t\t'': ['README.md', '*.py']\n\t},\n long_description=long_description,\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n\tentry_points='''\n\t[console_scripts]\n\tprogen = ansprogen.progen:main\n\t'''\n)\n","repo_name":"anvie/Ansprogen","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"75121705974","text":"import csv\n\ninput_filename = \"input/gun-violence-data.csv\"\noutput_guns_filename = \"output/guns.csv\"\noutput_vic_to_gun_filename = \"output/vic_to_gun.csv\"\noutput_gun_violence_filename = \"output/gun-violence-data-AFTER_GUNS.csv\"\n\nfields = []\nrows = []\n\nout_file_guns = open(output_guns_filename, 'w', encoding='utf-8', newline='\\n')\nout_file_vic_to_gun = open(output_vic_to_gun_filename, 'w', encoding='utf-8', newline='\\n')\nout_file_gun_violence = open(output_gun_violence_filename, 'w', encoding='utf-8', newline='\\n')\ncsvwriter_guns = csv.writer(out_file_guns)\ncsvwriter_vic_to_gun = csv.writer(out_file_vic_to_gun)\ncsvwriter_gvd = csv.writer(out_file_gun_violence)\n\n\ndef split_row_to_guns(row, fields):\n def split_field(field_index, row):\n splitted = [x for x in row[field_index].split(\"||\")]\n if len(splitted) < 2:\n return dict()\n dictionary = dict((x.split('::')[0], x.split('::')[1]) for x in splitted)\n return dictionary\n\n def create_gun_row(key, i_id, g_stolen, g_type):\n stolen = extract_by_key_if_present(key, g_stolen)\n type = extract_by_key_if_present(key, g_type)\n return [i_id, stolen, type]\n\n def extract_by_key_if_present(key, p_ages, return_val_if_absent=None):\n return p_ages[key] if key in p_ages.keys() else return_val_if_absent\n\n i_id_index = fields.index(\"incident_id\")\n g_stolen_index = fields.index(\"gun_stolen\")\n g_type_index = fields.index(\"gun_type\")\n\n i_id = row[i_id_index]\n g_stolen_list = split_field(g_stolen_index, row)\n g_type_list = split_field(g_type_index, row)\n\n guns = []\n for i, participant_type in g_type_list.items():\n gun = create_gun_row(i, i_id, g_stolen_list, g_type_list)\n guns.append(gun)\n return guns\n\n# return [i_id, stolen, type]\ncsvwriter_guns.writerow([\"gun_id\", \"incident_id\", \"gun_stolen\", \"gun_type\"])\n\nwith open(input_filename, 'r', encoding='utf-8') as csvfile:\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n fields = next(csvreader)\n\n g_id = 1\n\n for row in csvreader:\n guns = split_row_to_guns(row, fields)\n for g in guns:\n csvwriter_guns.writerow([str(g_id)] + g)\n g_id += 1\n\n out_file_guns.close()\n out_file_vic_to_gun.close()\n print(\"Total no. of rows: %d\" % (csvreader.line_num))\n\nprint('Field names are:' + ', '.join(field for field in fields))\n","repo_name":"shorti1996/huhurtownie","sub_path":"guns_denormalizer.py","file_name":"guns_denormalizer.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39612746643","text":"\"hdl_string_format unit tests\"\n\nfrom os.path import join, dirname\nfrom vunit import VUnit\n\ndef main():\n ui = VUnit.from_argv()\n ui.enable_location_preprocessing()\n\n src_path = join(dirname(__file__), \"src\")\n\n str_format = ui.add_library(\"str_format\")\n str_format.add_source_files(join(src_path, \"*.vhd\"))\n\n str_format_tb = ui.add_library(\"str_format_tb\")\n str_format_tb.add_source_files(join(src_path, \"test\", \"*.vhd\"))\n\n ui.set_compile_option('modelsim.vcom_flags', ['-novopt', '-explicit'])\n ui.set_sim_option('modelsim.vsim_flags', ['-novopt'])\n ui.main()\n\nimport sys\nsys.exit(main())\n","repo_name":"suoto/hdl_string_format","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"29291577186","text":"import networkx as nx\n\nnodes = [1,2,3,4,5,6,7,8,9]\n\ngraph = nx.Graph()\nfor node in nodes:\n graph.add_node(node)\n\ngraph.add_edge(1, 2)\ngraph.add_edge(1, 9)\n\nindependent_set = nx.maximal_independent_set(graph)\nprint(independent_set)","repo_name":"lefteran/EV-chargers","sub_path":"FLPv2/examples/independent_set.py","file_name":"independent_set.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72463830776","text":"import random\nimport functools32\nfrom copy import deepcopy\n\nclass AbstractRule(object):\n \"\"\"\n This class will represent every rule used in the project (Constructor and Constant)\n \"\"\"\n def _set_grammar(self, gram):\n self._grammar = gram\n \n def random(self, n):\n return self.unrank(n, random.randint(0, self.count(n) - 1))\n \nclass ConstructorRule(AbstractRule):\n \"\"\"\n This class descends from AbstractRule as indicated in the description,\n \"\"\"\n def __init__(self, fst, snd, size):\n \"\"\"\n Input:\n - fst and snd represent both a non-terminal rule of the grammar\n - size represents the function that can find the size of an object\n \"\"\"\n self._parameters = (fst, snd)\n self._valuation = float(\"inf\")\n self._size = size\n \n def valuation(self):\n return self._valuation\n \n def _update_valuation(self):\n self._valuation = self._calc_valuation()\n \nclass UnionRule(ConstructorRule):\n \n def __init__(self, fst, snd, invert_union = None, size = None):\n \"\"\"\n Input:\n - fst and snd represent both a non-terminal rule of the grammar\n - size represents the function that can find the size of an object\n - invert_union represents a function that receives the result of \n the union rule over two objects and is able to retreive both original values\n \"\"\"\n ConstructorRule.__init__(self, fst, snd, size)\n self._invert_union = invert_union\n \n def _calc_valuation(self):\n \"\"\"\n finds the size of the smallest derived object\n \"\"\"\n return min([self._grammar[parameter].valuation() for parameter in self._parameters])\n \n @functools32.lru_cache(maxsize=100)\n def count(self, n):\n \"\"\"\n This function returns the number of objects of type n\n Input:\n - n represents the size of the objects\n \"\"\"\n return sum([self._grammar[parameter].count(n) for parameter in self._parameters])\n \n @functools32.lru_cache(maxsize=100)\n def list(self, n):\n \"\"\"\n This functions returns a list with all elements of size n\n Input:\n - n represents the size of the objects\n \"\"\"\n answer = []\n for parameter in self._parameters:\n answer += self._grammar[parameter].list(n)\n return answer\n \n @functools32.lru_cache(maxsize=100)\n def unrank(self, n, i):\n \"\"\"\n This function finds the element of rank \"rank\" and returns it\n Input:\n - n is the size of the desired object\n - rank is its desired rank\n \"\"\"\n if i >= self.count(n):\n raise ValueError\n else:\n C_a = self._grammar[self._parameters[0]].count(n)\n if i < C_a:\n return self._grammar[self._parameters[0]].unrank(n, i)\n else:\n return self._grammar[self._parameters[1]].unrank(n, i - C_a)\n\n @functools32.lru_cache(maxsize=100)\n def rank(self, obj):\n \"\"\"\n This function finds the rank of the element obj\n Input:\n - obj is the object to be ranked\n \"\"\"\n if self._invert_union is None or self._size is None:\n raise ValueError(\"Inversion functions have to be provided for rank usage\")\n if self._invert_union(obj) == 0:\n return self._grammar[self._parameters[0]].rank(obj)\n else:\n return self._grammar[self._parameters[0]].count(self._size(obj)) + self._grammar[self._parameters[1]].rank(obj)\n \nclass ProductRule(ConstructorRule):\n \n def __init__(self, fst, snd, cons, invert_prod = None, size = None):\n \"\"\"\n Input:\n - fst and snd represent both a non-terminal rule of the grammar\n - size represents the function that can find the size of an object\n - cons represents the function that will be applied to both fst and snd\n - invert_prod represents a function that receives the result of \n the product rule over two objects and is able to retreive both original values\n \"\"\"\n ConstructorRule.__init__(self, fst, snd, size)\n self._constructor = cons\n self._invert_constructor = invert_prod\n \n def _calc_valuation(self):\n \"\"\"\n finds the size of the smallest derived object\n \"\"\"\n return sum([self._grammar[parameter].valuation() for parameter in self._parameters])\n \n @functools32.lru_cache(maxsize=100)\n def count(self, n):\n \"\"\"\n This function returns the number of objects of type n\n Input:\n - n represents the size of the objects\n \"\"\"\n total_sum = 0\n vals = [self._grammar[parameter].valuation() for parameter in self._parameters]\n for k in range(vals[0], n+1):\n l = n - k\n if l >= vals[1]:\n aux = [self._grammar[self._parameters[0]].count(k), self._grammar[self._parameters[1]].count(l)]\n total_sum += aux[0]*aux[1]\n return total_sum\n \n @functools32.lru_cache(maxsize=100)\n def list(self, n):\n \"\"\"\n This functions returns a list with all elements of size n\n Input:\n - n represents the size of the objects\n \"\"\"\n answer = []\n vals = [self._grammar[parameter].valuation() for parameter in self._parameters]\n for k in range(vals[0], n+1):\n l = n - k\n if l >= vals[1]:\n auxans = (self._grammar[self._parameters[0]].list(k), self._grammar[self._parameters[1]].list(l))\n for elem0 in auxans[0]:\n for elem1 in auxans[1]:\n answer.append(self._constructor(elem0, elem1))\n return answer\n\n @functools32.lru_cache(maxsize=100)\n def unrank(self, n, rank):\n \"\"\"\n This function finds the element of rank \"rank\" and returns it\n Input:\n - n is the size of the desired object\n - rank is its desired rank\n \"\"\"\n if rank >= self.count(n):\n raise ValueError\n else:\n first_nonterm = self._grammar[self._parameters[0]]\n second_nonterm = self._grammar[self._parameters[1]]\n count = 0\n last_count = 0\n index = 0\n while count <= rank and index <= n:\n last_count = count\n count += first_nonterm.count(index) * second_nonterm.count(n - index)\n index += 1\n if count <= rank:\n raise ValueError\n index -= 1\n difference = rank - last_count\n k = second_nonterm.count(n - index)\n quotient, rest = difference/k, difference % k\n return self._constructor(first_nonterm.unrank(index, quotient), second_nonterm.unrank(n-index, rest))\n \n @functools32.lru_cache(maxsize=100)\n def rank(self, obj):\n \"\"\"\n This function finds the rank of the element obj\n Input:\n - obj is the object to be ranked\n \"\"\"\n if self._invert_constructor is None or self._size is None:\n raise ValueError(\"Inversion functions have to be provided for rank usage\")\n first_nonterm = self._grammar[self._parameters[0]]\n second_nonterm = self._grammar[self._parameters[1]]\n obj_fst, obj_snd = self._invert_constructor(obj)\n n = self._size(obj)\n n_left = self._size(obj_fst)\n r_fst = first_nonterm.rank(obj_fst)\n r_snd = second_nonterm.rank(obj_snd)\n count = sum(first_nonterm.count(i) * second_nonterm.count(n - i) for i in range(n_left))\n return count + r_fst * second_nonterm.count(n - n_left) + r_snd\n \nclass ConstantRule(AbstractRule):\n \n def __init__(self, object):\n self._object = object\n \nclass SingletonRule(ConstantRule):\n \n def __init__(self, obj):\n ConstantRule.__init__(self, obj)\n \n def valuation(self):\n \"\"\"\n finds the size of the smallest derived object\n \"\"\"\n return 1\n \n def count(self, n):\n \"\"\"\n This function returns the number of objects of type n\n Input:\n - n represents the size of the objects\n \"\"\"\n if n == 1:\n return 1\n else:\n return 0\n \n def list(self, n):\n \"\"\"\n This functions returns a list with all elements of size n\n Input:\n - n represents the size of the objects\n \"\"\"\n if n == 1:\n return [self._object]\n else:\n return []\n \n def unrank(self, n, rank):\n \"\"\"\n This function finds the element of rank \"rank\" and returns it\n Input:\n - n is the size of the desired object\n - rank is its desired rank\n \"\"\"\n if rank >= self.count(n):\n raise ValueError\n else:\n return self._object\n\n def rank(self, obj):\n \"\"\"\n This function finds the rank of the element obj\n Input:\n - obj is the object to be ranked\n \"\"\"\n if obj == self._object:\n return 0\n else:\n raise \"Not a correct object\"\n \nclass EpsilonRule(ConstantRule):\n \n def __init__(self, obj):\n ConstantRule.__init__(self, obj)\n \n def valuation(self):\n \"\"\"\n finds the size of the smallest derived object\n \"\"\"\n return 0\n \n def count(self, n):\n \"\"\"\n This function returns the number of objects of type n\n Input:\n - n represents the size of the objects\n \"\"\"\n if n == 0:\n return 1\n else:\n return 0\n \n def list(self, n):\n \"\"\"\n This functions returns a list with all elements of size n\n Input:\n - n represents the size of the objects\n \"\"\"\n if n == 0:\n return [self._object]\n else:\n return []\n \n def unrank(self, n, rank):\n \"\"\"\n This function finds the element of rank \"rank\" and returns it\n Input:\n - n is the size of the desired object\n - rank is its desired rank\n \"\"\"\n if rank >= self.count(n):\n raise ValueError\n else:\n return self._object\n\n def rank(self, obj):\n \"\"\"\n This function finds the rank of the element obj\n Input:\n - obj is the object to be ranked\n \"\"\"\n if obj == self._object:\n return 0\n else:\n raise \"Not a correct object\"\n\nclass Union:\n def __init__(self, fst, snd):\n self._fst = fst\n self._snd = snd\n\n def convert(self, gram, new_key=None):\n key_fst = self._fst.convert(gram)\n key_snd = self._snd.convert(gram)\n new_key = new_key or \"U(\"+str(key_fst)+\", \"+str(key_snd)+\")\"\n gram[new_key] = UnionRule(key_fst, key_snd)\n return new_key\n\n\nclass Prod:\n def __init__(self, fst, snd, cons):\n self._fst = fst\n self._snd = snd\n self._constructor = cons\n\n def convert(self, gram, new_key=None):\n key_fst = self._fst.convert(gram)\n key_snd = self._snd.convert(gram)\n new_key = new_key or \"P(\"+str(key_fst)+\", \"+str(key_snd)+\")\"\n gram[new_key] = ProductRule(key_fst, key_snd, self._constructor)\n return new_key\n\n\nclass Singleton:\n def __init__(self, obj):\n self._obj = obj\n\n def convert(self, gram, new_key=None):\n new_key = new_key or \"S-\"+str(self._obj)\n gram[new_key] = SingletonRule(self._obj)\n return new_key\n\n\nclass Epsilon:\n def __init__(self, obj):\n self._obj = obj\n\n def convert(self, gram, new_key=None):\n new_key = new_key or \"E-\"+str(self._obj)\n gram[new_key] = EpsilonRule(self._obj)\n return new_key\n\nclass NonTerm:\n def __init__(self, string):\n self._str = string\n\n def convert(self, gram, new_key=None):\n if gram[self._str] is None:\n raise Exception(\"NonTerm is not in the grammar\")\n return self._str\n\nclass Bound:\n def __init__(self, C, the_min, the_max):\n self._class = C\n self._min = the_min\n self._max = the_max\n\n def list(self):\n if self._min > self._max:\n raise \"Max should be larger than Min\"\n else:\n return [C.count(i) for i in range(self._min, self._max+1)]\n \ndef init_grammar(grammar):\n \"\"\"\n This function initiates the grammar \"grammar\"\n Input:\n - \"grammar\" is a grammar\n - is_condensed is boolean that is true iff the grammar is condensed\n \"\"\"\n for rule_key in grammar:\n if hasattr(grammar[rule_key], \"_set_grammar\"):\n grammar[rule_key]._set_grammar(grammar)\n valuations = [float(\"inf\")]*len(grammar)\n newvaluations = [grammar[key].valuation() for key in grammar]\n while newvaluations != valuations:\n valuations = newvaluations\n for rule_key in grammar:\n if isinstance(grammar[rule_key], ConstructorRule):\n grammar[rule_key]._update_valuation()\n newvaluations = [grammar[key].valuation() for key in grammar]\n return\n\nclass Bound:\n def __init__(self, C, the_min, the_max):\n \"\"\"\n This will give a constructor that iterates over the lists of objects\n with sizes between the_min and the_max\n Input:\n - C represents a class of objects\n - the_min and the_max are integers representing the minimum and maximum sizes of the wanted objects\n \"\"\"\n self.C = C\n self.min = the_min\n self.max = the_max\n\n def __iter__(self):\n \"\"\"\n Iterates over the lists of objects C\n \"\"\"\n for list_i in range(self.min, self.max+1):\n for value_i in range(self.C.count(list_i)):\n yield self.C.list(list_i)[value_i]\n\ndef convert_condensed_gram(cond_grammar):\n \"\"\"\n This functions converts a condensed grammar into a normal gramma\n Input:\n - cond_grammar is a condensed_grammar\n \"\"\"\n new_grammmar = deepcopy(cond_grammar)\n cond_grammar[cond_grammar.keys()[0]].convert(new_grammmar, cond_grammar.keys()[0])\n return new_grammmar","repo_name":"gustavo-castro/grammargen","sub_path":"Rules.py","file_name":"Rules.py","file_ext":"py","file_size_in_byte":14242,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"2821205244","text":"from random import *\n\nloopval = 0\nx = str(int(uniform(1,4)))\n\nwhile loopval == 0:\n\n value1 = input(\"Guess any number between 1 and 4:\")\n\n if value1 == x:\n print(\"You guessed correctly!\")\n loopval = 1\n \n else:\n loopval = 0\n print(\"Wrong guess! Try again.\")\n \n\n\n","repo_name":"wkerby/PCAP_prep","sub_path":"guessthenumber.py","file_name":"guessthenumber.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11515389975","text":"import colorful as cf\n\nlogo = '''{c.bold}{c.pink}\\\n __ __ _ \n ____ _____/ /_____ / /__________ _(_)___ \n / __ \\/ ___/ __/ __ \\______/ __/ ___/ __ `/ / __ \\\\\n/ /_/ / /__/ /_/ /_/ /_____/ /_/ / / /_/ / / / / /\n\\____/\\___/\\__/\\____/ \\__/_/ \\__,_/_/_/ /_/ {c.reset}'''.format(c=cf)\n\nwelcome_message = f'''\\\n Welcome to\n{logo}\nAn individual programming trainer for everyone.'''\n\n\ndef multisolve(solved, goal):\n diff = solved - goal\n if diff < 0:\n return ''\n phrases = [\n '',\n 'Dominating',\n 'Rampage',\n 'Mega Solve',\n 'Unstoppable',\n 'Wicked Sick',\n 'Monster Solve',\n 'Godlike',\n 'Beyond Godlike',\n ]\n phrase = ' {c.yellow}(Goal completed){c.bold}{c.red} '.format(c=cf)\n phrase += phrases[min(diff, len(phrases) - 1)].upper() + str(cf.reset)\n return phrase\n","repo_name":"dankondr/octo-train","sub_path":"octo_train/colored.py","file_name":"colored.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"72042494777","text":"\n# coding: utf-8\n\n# # 1 b\n\n# inputs get very big or small softmax might have a hard time computing exp\n\n# # 2\n\n# In[1]:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().magic('matplotlib inline')\n\n\n# In[2]:\n\ndef load_data(filename):\n \n with open(filename) as f:\n train_data = f.readlines()\n train = []\n for line in train_data:\n train.append((line.strip().split()))\n #print(train)\n #print(np.array(train, 'float')[:,0])\n return np.array(train, 'float')[:,1:], np.array(train, 'float')[:,0]\n\n\n# In[919]:\n\n#load the data in train and test arrays\n\nX_train, y_train = load_data('iris-train.txt')\nX_test, y_test = load_data('iris-test.txt')\n\n#normalize Training data\nX_train = 2*(X_train-0.5)\nprint(X_train.mean(axis=0))\nprint(X_train.max())\nX_test = 2*(X_test-0.5)\n\n\n# In[4]:\n\nW = np.random.rand(X_train.shape[1],np.unique(y_train).size) # 2 x 3 matrix\n#np.dot(np.array([[2,4],[2,2]]),np.array([2,1]))\nW.shape\n\n\n# In[980]:\n\ndef softmax(W, X):\n e_x = np.exp(np.clip(np.dot(W.transpose(),X.transpose()),-500,500)) #we need the transpose for dimensions to match(W columns corresponds to)\n return np.divide(e_x,e_x.sum(0)) #ex.sum(0) sums elements by column and the devide function divides by row\n\n\n# In[1055]:\n\ndef trainSoftmax(X_train, X_test, y_train, y_test):\n \n #creation of target vectors (target array)\n \n t_train = np.empty([y_train.size, np.unique(y_train).size])\n t_test = np.empty([y_test.size, np.unique(y_train).size]) # labels in y_train and y_test should be the same\n for k in range(t_train.shape[1]):\n t_train[:,k] = np.where(y_train==k+1*(np.unique(y_train).size < 4),1,0) # + 1 depends on the labels indexing: +1 for IRIS, 0 for CIPHAR\n t_test[:,k] = np.where(y_test==k+1*(np.unique(y_train).size < 4),1,0)\n \n total_loss_train = []\n total_loss_test = []\n mean_train_accuracy = []\n mean_test_accuracy = []\n W = np.random.rand(X_train.shape[1],np.unique(y_train).size)*0.1 # weight initialization, 2 x 3 matrix\n DW = np.zeros([X_train.shape[1],np.unique(y_train).size]) # momentum\n batch_size = 100\n l_r = 0.0001 # learning rate ciphar:0.0001\n a = 0.001 # decay parameter ciphar: 0.001\n m_r = 0.01 # momentum rate ciphar: 0.01\n \n for epoch in range(100):\n \n # minibatch creation\n randomizer = np.arange(y_train.size)\n np.random.shuffle(randomizer)\n #initialize loss and class accuracy\n Loss_train = 0\n train_class_accuracy = []\n# print('start')\n \n #iterate over batches\n for batch_no in range(y_train.size//batch_size):\n batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection\n P_train_b = softmax(W, X_train[batch,:]) # 3 x batch_size matrix\n\n Loss_train = Loss_train - np.multiply(t_train[batch,:].transpose(), np.log(P_train_b)).sum()\n\n y_train_pred = np.argmax(P_train_b, axis = 0) + 1*(np.unique(y_train).size < 4) # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing for IRIS data)\n\n train_class_accuracy.append(sum(list(map(lambda x: (y_train_pred[y_train[batch]==x]==x).sum()/(y_train[batch]==x).sum(), [k for k in range(1*(np.unique(y_train).size < 4),np.unique(y_train[batch]).size+1*(np.unique(y_train).size < 4))])))/np.unique(y_train[batch]).size)\n \n #gradient calculation WITH regularization (check end of next line)\n dLoss = a*W.transpose() + np.dot((P_train_b - t_train[batch,:].transpose()), X_train[batch,:]) # leads to a 3 x 2 matrix, each row being the loss gradient for this class WITH regularization\n\n #update momentum rule\n DW = m_r*DW + l_r*dLoss.transpose()\n\n W = W - DW\n\n \n P_test = softmax(W, X_test) # 3 x 51 matrix\n\n Loss_test = -np.multiply(t_test.transpose(), np.log(P_test)).sum()\n\n y_test_pred = np.argmax(P_test, axis = 0) + 1*(np.unique(y_test).size < 4) # +1 for IRIS, 0 for CIPHAR-10\n\n test_class_accuracy = sum(list(map(lambda x: (y_test_pred[y_test==x]==x).sum()/(y_test==x).sum(), [k for k in range(1*(np.unique(y_test).size < 4),np.unique(y_test).size+1*(np.unique(y_test).size < 4))])))/np.unique(y_test).size\n\n total_loss_train.append(Loss_train)\n\n total_loss_test.append(Loss_test)\n\n mean_train_accuracy.append(np.mean(train_class_accuracy))\n\n mean_test_accuracy.append(test_class_accuracy)\n\n\n fig,ax = plt.subplots(1,2,figsize = (12,6))\n ax[0].plot(np.arange(epoch+1), total_loss_train, 'r-', np.arange(epoch+1), total_loss_test, 'b-') \n ax[0].set(title = 'Cross-Entropy Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\n ax[1].plot(np.arange(epoch+1), mean_train_accuracy, 'r-', np.arange(epoch+1), mean_test_accuracy, 'b-')\n ax[1].tick_params(reset = True)\n ax[1].set(title = 'mean per-class accuracy', xlabel = 'Epochs')\n\n return W\n\n\n# terrible results for l_r = 0.1, loss bounces back and forth\n\n# In[1043]:\n\n#randomizer = np.arange(90)\n#np.random.shuffle(randomizer)\n#print(X_train[randomizer,:].shape, y_train[randomizer].shape)\nW_iris = trainSoftmax(X_train, X_test, y_train, y_test)\n#print(W_iris)\n\nP = softmax(W_iris, X_test)\ny_pred = np.argmax(P, axis = 0) + 1 # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing)\nprint(y_pred, y_test, sep='\\n')\naccuracy = (y_test==y_pred).sum()/y_test.size\naccuracy\n\n\n# In[1044]:\n\nplt.savefig('iris softmax.png')\n\n\n# ## 2 b\n\n# In[1045]:\n\n# DISPLAYING THE DECISION BOUNDARIES\n\nh = .02 # step size in the mesh\n# create a mesh to plot in\nx_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1\ny_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n\n# # Plot the decision boundary. For that, we will assign a color to each\nP_train = softmax(W_iris, np.c_[xx.ravel(), yy.ravel()])\nZ = np.argmax(P_train, axis = 0) + 1 \n\n# # Put the result into a color plot\nZ = Z.reshape(xx.shape)\nplt.figure(figsize=(7,6))\nplt.contourf(xx, yy, Z, cmap='cool', alpha = 0.8)\nplt.scatter(X_train[:,0],X_train[:,1],s=20, c = y_train, edgecolors='b', linewidths=0.5)\nplt.savefig('decision_bound.png')\n\n\n# In[229]:\n\n#[ 1., 1., 2., 2., 2., 3., 1., 2., 1., 3.]\ny_pred = np.array([3, 3, 1, 1, 1, 1, 3, 1, 3, 1])\ny_train[[25, 22, 45, 54, 51, 72, 28, 47, 27, 80]]\n#(y_pred[y_train[[ 4, 2, 53, 8, 7, 52, 88, 47, 17, 19]] == 1]==1).sum()/(y_train[[ 4, 2, 53, 8, 7, 52, 88, 47, 17, 19]] == 1).sum()#== np.array( [3, 3, 2, 2, 3, 2, 2, 3, 3, 2])\n\n\n# # 3\n\n# In[1051]:\n\ndef loadCIFAR10():\n import pickle\n train_dict = {}\n for file_no in range(1,6):\n with open('cifar-10-batches-py/data_batch_{}'.format(file_no), 'rb') as fo:\n train_dict[file_no] = pickle.load(fo, encoding='bytes')\n with open('cifar-10-batches-py/test_batch', 'rb') as fo:\n test_dict = pickle.load(fo, encoding='bytes')\n \n #aggregating the train batches\n data = train_dict[1][b'data'] # features are already in numpy arrays\n labels = train_dict[1][b'labels'] # labels are in lists\n for batch in range(2,6):\n data = np.concatenate((data, train_dict[batch][b'data']))\n labels.extend(train_dict[batch][b'labels']) #labels are in lists\n \n return (data, np.array(labels), test_dict[b'data'], np.array(test_dict[b'labels']))\n\n\n# In[1052]:\n\ntrain_feat, train_labels, test_feat, test_labels = loadCIFAR10()\n\n#display first images\nrows = 3\ncols = 10\nfig, axes = plt.subplots(rows, cols, figsize=(12,6))\n\nfor i in range(rows*cols):\n row_index = i//cols\n col_index = i%cols\n ax = axes[row_index, col_index]\n ax.imshow(train_feat[train_labels==col_index,:][row_index,:].reshape(3,32,32).transpose(1,2,0))\n \nplt.tight_layout()\nplt.savefig('ciphar_images.png')\n\n\n# # 4\n\n# In[1056]:\n\ntrain_feat = np.divide(train_feat - train_feat.min(axis=0),train_feat.max(axis=0) - train_feat.min(axis=0))\ntrain_feat = train_feat - train_feat.mean(axis=0)\ntest_feat = np.divide(test_feat - test_feat.min(axis=0),test_feat.max(axis=0) - test_feat.min(axis=0))\ntest_feat = test_feat - test_feat.mean(axis=0)\n#print(train_feat[0,1:10], test_feat[0,1:10])\nW_ciphar = trainSoftmax(train_feat, test_feat, train_labels, test_labels)\n\n\nP = softmax(W_ciphar, test_feat)\ny_pred = np.argmax(P, axis = 0) # pick the class that maximizes the likelihood for every datapoint (no +1, classes start from 0)\nprint(y_pred, test_labels, sep='\\n')\naccuracy = (test_labels==y_pred).sum()/test_labels.size\naccuracy\n\n\n# In[1060]:\n\n# create the confusion matrix\nCM = []\nfor label in range(np.unique(test_labels).size):\n CM.append([(test_labels[y_pred == label]==k).sum() for k in range(np.unique(test_labels).size)]) # no + 1 for CIPHAR-10\nCM = np.array(CM)\n# normalize it based on actual class, i.e.\n# x% of class 1 objects are classified as class 2\nCM = CM/CM.sum(axis=0)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\ncax = ax.matshow(CM, cmap = 'Greys')\nfig.colorbar(cax)\n# plt.figure(figsize=(3,3))\n# cax = ax.matshow(CM, cmap = 'Greys')\n# plt.colorbar(cax)\nax.set_title(' Normalized per actual class Confusion Matrix', y = 1.1)\nax.set_xlabel('Actual class'); ax.set_ylabel('predicted class')\nplt.show()\nplt.savefig('confusion_heatmap.png')\n\n\n# # 5\n\n# ## Part 1\n\n# In[440]:\n\ndef loadMusicData(fname, addBias):\n data = []\n with open(fname) as f:\n for line in f.readlines():\n data.append((line.strip().split(',')))\n if addBias == True:\n data[-1].extend('1')\n #print(np.array(train, 'float')[:,0])\n data = np.array(data, 'float')\n \n return data[:463714,0].astype(int), data[:463714,1:], data[463714:,0].astype(int), data[463714:,1:]\n\n\n# In[441]:\n\ndef musicMSE(pred, gt):\n s_e = (gt - np.around(pred))**2\n \n return np.mean(s_e)\n\n\n# In[723]:\n\ntrainYears, trainFeat, testYears, testFeat = loadMusicData('YearPredictionMSD/YearPredictionMSD.txt', addBias = True)\n\n\n# In[458]:\n\ntestFeat.max(axis = 0)-testFeat.min(axis = 0)\n\n\n# range (max - min) of values is small for the first 12 features but it gets significantly larger for the following 78 features. This is expected of course since the former represent the average timbre whereas the latter represent timber covariance, i.e. the order is closer to that of timbre squared.\n\n# In[462]:\n\nprint('Training years range from {} to {}'.format(trainYears.min(), trainYears.max()))\nprint('Testing years range from {} to {}'.format(testYears.min(), testYears.max()))\n\n\n# In[489]:\n\nfig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)\nfig.suptitle('Histograms of years for training and testing sets')\nx,bins,p= ax[0].hist(testYears, range = (1921,2011 )) # with normed = True normalizes so that the area under the hist is 1\nprint(bins)\nax[0].set(title = 'Training Set', xlabel = 'Year', ylabel = 'Frequency')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\nax[1].hist(testYears, range = (1921, 2011))\nax[1].tick_params(reset = True)\nax[1].set(title = 'Test Set', xlabel = 'Year')\n#savefig(\"figureC.png\", bbox_inches='tight')\n#plt.hist(trainYears)\n\n\n# We see that train and test labels follow a similar distribution which is good for our prediction purposes. However, both sets are dominated by songs from the last 2 decades that account for more than 90% of the sets' observations. Especially as we go back in time we get fewer and fewer instances, which results to years before the 70's being seriously underepresented.\n\n# In[508]:\n\nyears, counts = np.unique(trainYears, return_counts=True)\nyear = years[np.argmax(counts)] # year = 2007\nmse_2007 = musicMSE(year, testYears)\nprint(mse_2007)\nmse_1998 = musicMSE(1998, testYears)\nprint(mse_1998)\n\n\n# ## Part 2\n\n# In[651]:\n\ndef RidgeRegression(X_train, X_test, y_train, y_test, regularization):\n \n L2 = False\n if regularization == 'L2':\n L2 = True\n \n W = np.random.rand(X_train.shape[1]) # weight initialization, 91-vector\n \n batch_size = 100\n l_r = 0.00001 # learning rate\n a2 = 0.01 # L2 decay parameter\n a1 = 0.01 # L1 decay parameter\n \n #initial values of losses and MSEs\n y_test_pred = np.dot(X_test,W)\n y_train_pred = np.dot(X_train,W)\n total_loss_train = [((y_train_pred-y_train)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1)]\n total_loss_test = [((y_test_pred-y_test)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1)]\n train_MSE = [musicMSE(y_train_pred, y_train)]\n test_MSE = [musicMSE(y_test_pred, y_test)]\n \n for epoch in range(20):\n \n # minibatch creation\n randomizer = np.arange(y_train.size)\n np.random.shuffle(randomizer)\n #initialize loss and class accuracy\n Loss_train = 0\n batch_MSE = []\n print('start', Loss_train)\n \n #iterate over batches\n for batch_no in range(y_train.size//batch_size):\n batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection\n y_train_pred = np.dot(X_train[batch,:],W) # y_pred = W.t*X, batch size vector, will be used in all computations below\n # train square loss\n train_s_l = ((y_train_pred-y_train[batch])**2).sum() # sum of (y_pred-y)^2\n \n # regularized train loss\n Loss_train = Loss_train + train_s_l + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1) # L2 regularization term in the end\n\n #gradient calculation WITH regularization (check end of next line)\n dLoss = L2*2*a2*W + (1-L2)*a1*np.sign(W) + np.dot(X_train[batch,:].transpose(),(y_train_pred-y_train[batch])) # 91-vector gradient of loss\n \n batch_MSE.append(train_s_l) #just the sum of squared errors here, average will be per epoch\n\n # update rule for weights\n W = W - l_r*dLoss\n\n y_test_pred = np.dot(X_test,W)\n Loss_test = ((y_test_pred-y_test)**2).sum() + L2*a2*((W**2).sum()) + (1-L2)*a1*np.linalg.norm(W, ord=1) \n total_loss_train.append(Loss_train)\n total_loss_test.append(Loss_test)\n train_MSE.append(np.sum(batch_MSE)/y_train.size)\n test_MSE.append(musicMSE(y_test_pred, y_test)) # used the mse function from earlier for a change\n\n fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)\n fig.suptitle('Train and Test Mean Square Errors')\n ax[0].plot(np.arange(epoch+2), train_MSE, 'r-') # with normed = True normalizes so that the area under the hist is 1\n ax[0].set(title = 'Training MSE', xlabel = 'Epochs', ylabel = 'MSE')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\n ax[1].plot(np.arange(epoch+2), test_MSE, 'b-')\n ax[1].tick_params(reset = True)\n ax[1].set(title = 'Test MSE', xlabel = 'Epochs')\n \n fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)\n fig.suptitle('Train and Test Loss')\n ax[0].plot(np.arange(epoch+2), total_loss_train, 'r-') # with normed = True normalizes so that the area under the hist is 1\n ax[0].set(title = 'Training Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\n ax[1].plot(np.arange(epoch+2), total_loss_test, 'b-')\n ax[1].tick_params(reset = True)\n ax[1].set(title = 'Test Loss', xlabel = 'Epochs')\n \n plt.show()\n\n return W, test_MSE[-1]\n\n\n# In[667]:\n\ntrainFeat_normed = trainFeat\ntrainFeat_normed[:,:-1] = np.divide((trainFeat[:,:-1] - trainFeat[:,:-1].mean(axis=0)),trainFeat[:,:-1].std(axis=0, ddof=1))\ntestFeat_normed = testFeat\ntestFeat_normed[:,:-1] = (testFeat[:,:-1] - trainFeat[:,:-1].mean(axis=0))/trainFeat[:,:-1].std(axis=0, ddof=1)\n\nW_Ridge, MSE_Ridge = RidgeRegression(trainFeat_normed, testFeat_normed, trainYears, testYears, regularization = 'L2')\nprint(MSE_Ridge)\n\n\n# we start with MSE and Loss before the first epoch to show how fast it drops in the first epoch. this happens because our dataset is big and many batch iterations occur during an epoch.\n# OR\n# We start with MSE and Loss after the first epoch has run because the value of lost and MSE before any training dominates the graph as it is too high and the curve is not clear after.\n\n# In[612]:\n\n# RIDGE solution with pseudoinverse --> W = (XtX+λI)^-1*Xt y\nargument = np.dot(trainFeat_normed.transpose(),trainFeat_normed) + a*np.identity(trainFeat_normed.shape[1])\nW_pseudo = np.dot(np.linalg.inv(argument), np.dot(trainFeat_normed.transpose(), trainYears))\npseudo_MSE = musicMSE(np.dot(testFeat_normed,W_pseudo), testYears)\nprint(pseudo_MSE)\n\n\n# # Part 3\n\n# In[668]:\n\nW_Lasso, MSE_Lasso = RidgeRegression(trainFeat_normed, testFeat_normed, trainYears, testYears, regularization = 'L1')\nprint(MSE_Lasso)\n\n\n# In[675]:\n\n#print(np.sort(np.unique(W_Ridge[:-1])), np.sort(np.unique(W_Lasso)))\nplt.hist((W_Ridge[:-1], W_Lasso[:-1]), bins = 20)\nplt.title('Histogram of Ridge vs Lasso Weights')\nplt.ylabel('absolute frequency')\nplt.xlabel('feature weights')\nplt.legend(('Ridge','Lasso'))\n\n\n# # Part 4\n\n# In[890]:\n\ndef exp_decay(l_r, epoch):\n drop = 0.5 # decaying parameter\n lr_new = l_r*drop**(epoch//2)\n return lr_new\n\n\n# In[909]:\n\ndef PoissonRegression(X_train, X_test, y_train, y_test):\n \n # standardize train and test years based on train set to use them in our algorithm, otherwise they take large values and blow up computations\n train_st_dev = np.std(y_train, ddof=1)\n train_mean = np.mean(y_train)\n print(train_st_dev, train_mean)\n y_train = (y_train - train_mean)/train_st_dev\n y_test = (y_test - train_mean)/train_st_dev\n \n # initialize to random small weights\n W = np.random.rand(X_train.shape[1])*0.001 # weight initialization, 91-vector\n \n batch_size = 100\n l_r = 0.000001 # learning rate\n \n #initial values of losses and MSEs\n y_train_pred = np.exp(np.dot(X_train,W))\n y_test_pred = np.exp(np.dot(X_test,W))\n total_loss_train = [(y_train_pred - np.multiply(np.dot(X_train,W),y_train)).sum()]\n total_loss_test = [(y_test_pred - np.multiply(np.dot(X_test,W),y_test)).sum()]\n train_MSE = [musicMSE(y_train_pred, y_train)*train_st_dev**2]\n test_MSE = [musicMSE(y_test_pred, y_test)*train_st_dev**2]\n \n for epoch in range(10):\n \n # minibatch creation\n randomizer = np.arange(y_train.size)\n np.random.shuffle(randomizer)\n #initialize loss and class accuracy\n Loss_train = 0\n batch_MSE = []\n l_r = exp_decay(l_r, epoch)\n print('start')\n \n #iterate over batches\n for batch_no in range(y_train.size//batch_size):\n batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection\n WtX = np.dot(X_train[batch,:],W) # W.t*X, batch size vector, will be used in all computations below\n\n y_train_pred = np.exp(np.clip(WtX,-100,100)) # y_pred = exp(W.t*X), for Poisson y_pred is the mean value of P(Y|X)\n \n # train loss\n Loss_train = Loss_train + (y_train_pred - np.multiply(WtX,y_train[batch])).sum()\n\n #gradient calculation\n\n dLoss = np.dot(X_train[batch,:].transpose(),(y_train_pred - y_train[batch])) # 91-vector gradient of loss = sum((exp(WtX)-y)X)\n \n batch_MSE.append(((y_train_pred-y_train[batch])**2).sum()) #just the sum of squared errors here, average will be per epoch\n\n # update rule for weights\n W = W - l_r*dLoss\n \n test_WtX = np.dot(X_test,W) #will be used couple of times below\n y_test_pred = np.exp(np.clip(test_WtX,-100,100))\n Loss_test = (y_test_pred - np.multiply(test_WtX,y_test)).sum()\n total_loss_train.append(Loss_train)\n total_loss_test.append(Loss_test)\n train_MSE.append((np.sum(batch_MSE)/y_train.size)*train_st_dev**2) #de-standardizing final MSE output\n test_MSE.append(musicMSE(y_test_pred, y_test)*train_st_dev**2) # used the mse function from earlier for a change\n \n\n fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)\n fig.suptitle('Train and Test Mean Square Errors')\n ax[0].plot(np.arange(epoch+2), train_MSE, 'r-') # with normed = True normalizes so that the area under the hist is 1\n ax[0].set(title = 'Training MSE', xlabel = 'Epochs', ylabel = 'MSE')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\n ax[1].plot(np.arange(epoch+2), test_MSE, 'b-')\n ax[1].tick_params(reset = True)\n ax[1].set(title = 'Test MSE', xlabel = 'Epochs')\n \n fig,ax = plt.subplots(1,2,figsize = (12,6))#, sharey = True, sharex = True)\n fig.suptitle('Train and Test Loss')\n ax[0].plot(np.arange(epoch+2), total_loss_train, 'r-') # with normed = True normalizes so that the area under the hist is 1\n ax[0].set(title = 'Training Loss', xlabel = 'Epochs', ylabel = 'Loss')#,xlim = ax[0].get_xlim(), ylim = ax[0].get_xlim())\n ax[1].plot(np.arange(epoch+2), total_loss_test, 'b-')\n ax[1].tick_params(reset = True)\n ax[1].set(title = 'Test Loss', xlabel = 'Epochs')\n \n plt.show()\n\n return W, test_MSE \n\n\n# In[910]:\n\n# print(trainYears_centered.min())\n# trainYears_normed = (trainYears - np.mean(trainYears))/np.std(trainYears, ddof=1)\n# testYears_normed = (testYears - np.mean(testYears))/np.std(testYears, ddof = 1)\nW_Poisson, MSE_Poisson = PoissonRegression(trainFeat_normed, testFeat_normed, trainYears, testYears)\nprint(MSE_Poisson)\n\n\n# In[ ]:\n\n\n\n\n# In[1063]:\n\ndef SongSoftmax(X_train, X_test, y_train, y_test):\n \n #creation of target vectors (target array)\n \n t_train = np.empty([y_train.size, np.unique(y_train).size])\n t_test = np.empty([y_test.size, np.unique(y_train).size]) # labels in y_train and y_test should be the same\n for k in range(t_train.shape[1]): #years from 1922 to 2011, only 1923 is missing from train set\n t_train[:,k] = np.where(y_train==k+1922,1,0) # + 1 depends on the labels indexing: +1 for IRIS, 0 for CIPHAR\n t_test[:,k] = np.where(y_test==k+1922,1,0)\n \n \n# print(X_train[randomizer,:].shape, y_train[randomizer].shape)\n \n \n total_loss_train = []\n total_loss_test = []\n mean_train_accuracy = []\n mean_test_accuracy = []\n W = np.random.rand(X_train.shape[1],np.unique(y_train).size) # weight initialization, 2 x 3 matrix\n DW = np.zeros([X_train.shape[1],np.unique(y_train).size]) # momentum\n batch_size = 100\n l_r = 0.00001 # learning rate\n a = 0.001 # decay parameter\n m_r = 0.01 # momentum rate\n \n for epoch in range(50):\n \n # minibatch creation\n randomizer = np.arange(y_train.size)\n np.random.shuffle(randomizer)\n #initialize loss and class accuracy\n Loss_train = 0\n train_class_accuracy = []\n print('start')\n \n #iterate over batches\n for batch_no in range(y_train.size//batch_size):\n batch = randomizer[(batch_no*batch_size):(batch_no+1)*batch_size] # batch selection\n# print('batch =', batch)\n P_train_b = softmax(W, X_train[batch,:]) # 3 x batch_size matrix\n# print('softmax = ', P_train_b)\n Loss_train = Loss_train - np.multiply(t_train[batch,:].transpose(), np.log(P_train_b)).sum()\n# print('Loss_train = ', Loss_train)\n y_train_pred = np.argmax(P_train_b, axis = 0) + 1922 # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing for IRIS data)\n# print('Y predictions: ', y_train_pred)\n# print('accuracy = ',sum(list(map(lambda x: (y_train_pred[y_train[batch]==x]==x).sum()/(y_train[batch]==x).sum(), [k for k in range(1,np.unique(y_train[batch]).size+1)])))/np.unique(y_train[batch]).size)\n train_class_accuracy.append(sum(list(map(lambda x: (y_train_pred[y_train[batch]==x]==x).sum()/(y_train[batch]==x).sum(), [k for k in range(1922,np.unique(y_train[batch]).size+1922)])))/np.unique(y_train[batch]).size)\n \n #gradient calculation WITH regularization (check end of next line)\n dLoss = a*W.transpose() + np.dot((P_train_b - t_train[batch,:].transpose()), X_train[batch,:]) # leads to a 3 x 2 matrix, each row being the loss gradient for this class WITH regularization\n# print('dLoss is ', dLoss)\n #update momentum rule\n DW = m_r*DW + l_r*dLoss.transpose()\n# print('DW is ', DW)\n W = W - DW\n# print('batch over')\n \n P_test = softmax(W, X_test) # 3 x 51 matrix\n# print('test softmax:', P_test)\n Loss_test = -np.multiply(t_test.transpose(), np.log(P_test)).sum()\n# print('test Loss = ',Loss_test)\n y_test_pred = np.argmax(P_test, axis = 0) + 1922 # +1 for IRIS, 0 for CIPHAR-10\n# print('test predictions =', y_test_pred)\n test_class_accuracy = sum(list(map(lambda x: (y_test_pred[y_test==x]==x).sum()/(y_test==x).sum(), [k for k in range(1922,np.unique(y_test).size+1*922)])))/np.unique(y_test).size\n #test_class_accuracy2 = (1/np.unique(y_test).size)*((y_test_pred[y_test==1]==1).sum()/(y_test==1).sum() + (y_test_pred[y_test==2]==2).sum()/(y_test==2).sum() + (y_test_pred[y_test==3]==3).sum()/(y_test==3).sum())\n# print('test class accuracy = ', list(map(lambda x: (y_test_pred[y_test==x]==x).sum()/(y_test==x).sum(), [k for k in range(1*(np.unique(y_test).size < 4),np.unique(y_test).size+1*(np.unique(y_test).size < 4))])))\n# print('test class accuracy = ', test_class_accuracy)\n total_loss_train.append(Loss_train)\n# print('total loss train ', total_loss_train)\n total_loss_test.append(Loss_test)\n# print('total loss test ', total_loss_test)\n mean_train_accuracy.append(np.mean(train_class_accuracy))\n# print('mean_train_accuracy ', mean_train_accuracy)\n mean_test_accuracy.append(test_class_accuracy)\n# print('mean_test_accuracy ', mean_test_accuracy)\n \n# print(total_loss_train) \n plt.plot(np.arange(epoch+1), total_loss_train, 'r-', np.arange(epoch+1), total_loss_test, 'b-')\n plt.figure()\n plt.plot(np.arange(epoch+1), mean_train_accuracy, 'r-', np.arange(epoch+1), mean_test_accuracy, 'b-')\n plt.show()\n \n\n return W\n\n\n# In[1064]:\n\nW_songs = SongSoftmax(trainFeat_normed, testFeat_normed, trainYears, testYears)\nprint(W_songs)\n\nP = softmax(W_songs, testFeat_normed)\ny_pred = np.argmax(P, axis = 0) + 1922 # pick the class that maximizes the likelihood for every datapoint (+1 because of python indexing)\nprint(y_pred, testYears, sep='\\n')\naccuracy = (testYears==y_pred).sum()/testYears.size\naccuracy\n\n\n# In[ ]:\n\n\n\n","repo_name":"anastasisbele/code_samples","sub_path":"dl_1.py","file_name":"dl_1.py","file_ext":"py","file_size_in_byte":26760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14864666108","text":"# -*- coding:utf-8 -*-\n# 前向传播层\n\nimport tensorflow as tf\n\nINPUT_NODE = 784 # 输入节点为 784 ,即图片像素大小\nOUTPUT_NODE = 10 # 输出节点为 784\nLAYER1_NODE = 500 # 第一层节点为 500\n\n# shape 为传入的形状,regularizer 为正则化权重\ndef get_weight(shape, regularizer):\n # 定义权重 w 为正态分布,标准差为 0.1 \n w = tf.Variable(tf.truncated_normal(shape, stddev = 0.1))\n # 正则化缓解过拟合\n if regularizer != None:\n # 这里使用了 l2 正则化\n tf.add_to_collection(\n 'losses',\n tf.contrib.layers.l2_regularizer(\n regularizer\n )(w)\n )\n # 返回权重值\n return w\n\n# shape 为传入的形状\ndef get_bias(shape):\n # 偏置常数为全 0 数组\n b = tf.Variable(tf.zeros(shape))\n # 返回偏置常数值\n return b\n\n# 前向传播函数\ndef forward(x, regularizer):\n # 第一层神经网络\n w1 = get_weight([INPUT_NODE, LAYER1_NODE], regularizer)\n b1 = get_bias([LAYER1_NODE])\n # 此函数为实现矩阵乘法加上偏置b1过非线性函数relu()的输出\n # 非线性函数 relu: f(x) = MAX(x, 0)\n y1 = tf.nn.relu(tf.matmul(x, w1) + b1)\n # 第二层神经网络\n w2 = get_weight([LAYER1_NODE, OUTPUT_NODE], regularizer)\n b2 = get_bias([OUTPUT_NODE])\n # 矩阵乘法加偏置\n y = tf.matmul(y1, w2) + b2\n return y\n","repo_name":"qrzbing/AI-learning","sub_path":"FC/fc_forward.py","file_name":"fc_forward.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30212054835","text":"from starter import get_puzzle_input\n\nboard = [ [ int(char) for char in line.strip() ] for line in get_puzzle_input(11) ]\n\ndef increment(board):\n for y in range(len(board)):\n for x in range(len(board[0])):\n board[y][x] += 1\n\ndef step(board, flashed):\n flashes = 0\n for y in range(len(board)):\n for x in range(len(board[0])):\n if board[y][x] > 9 and (y,x) not in flashed:\n flashes += 1\n board[y][x] = 0\n flashed.append((y,x))\n if y != 0 and x != 0 and board[y-1][x-1] != 0:\n board[y-1][x-1] += 1\n if y != len(board) - 1 and x != 0 and board[y+1][x-1] != 0:\n board[y+1][x-1] += 1\n if y != 0 and x != len(board[0]) - 1 and board[y-1][x+1] != 0:\n board[y-1][x+1] += 1\n if x != len(board[0]) - 1 and y != len(board) - 1 and board[y+1][x+1] != 0:\n board[y+1][x+1] += 1\n if y != 0 and board[y-1][x] != 0:\n board[y-1][x] += 1\n if y != len(board) - 1 and board[y+1][x] != 0:\n board[y+1][x] += 1\n if x != 0 and board[y][x-1] != 0:\n board[y][x-1] += 1\n if x != len(board[0]) - 1 and board[y][x+1] != 0:\n board[y][x+1] += 1\n return flashes\n\ntotal_flashes = 0\n\ndef run(board):\n increment(board)\n flashes = -1\n total_flashes = 0\n flashed = []\n while flashes != 0:\n new_flashes = step(board, flashed)\n total_flashes += new_flashes\n flashes = new_flashes\n for y in board:\n print(\"\".join(str(i) for i in y))\n print()\n return len(flashed)\ni=1\nwhile True:\n if run(board) == len(board[0])*len(board):\n break\n i += 1\n\nprint(i)\n","repo_name":"nassir90/adventofcode","sub_path":"11/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"71060594296","text":"import numpy as np\nfrom tensorflow.python import keras as K\n\n\ndef main():\n model = K.Sequential([\n # 活性化関数としてsigmoidを使っている\n K.layers.Dense(units=4, input_shape=(2,), activation=\"sigmoid\"),\n K.layers.Dense(units=4)\n ])\n batch = np.random.rand(3,2)\n y = model.predict(batch)\n print(y.shape)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kazuogawa/practice_RL","sub_path":"DeepQ/deep_q_second_layer.py","file_name":"deep_q_second_layer.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21490899981","text":"class Object:\n def __init__(self, w, v, index):\n self.weight = float(w)\n self.value = float(v)\n self.index = int(index)\n def __repr__(self):\n return repr((self.index, self.weight, self.value))\narr = []\nfilepath = 'knapsack.in'\nwith open(filepath) as fp:\n lines = fp.readlines()\n content = [x.strip() for x in lines]\n capacity = float(content[0])\n content.pop(0)\n i = 0\n for item in content:\n ob = item.split(\" \")\n arr.append(Object(ob[0], ob[1], i))\n i+=1\narr = sorted(arr, key = lambda x: x.weight/x.value)\n\ni = 0\nwhile capacity > 0:\n if capacity > arr[i].weight:\n capacity -= arr[i].weight\n i+=1\n else:\n capacity = -capacity\n\nfor j in range(0, i):\n print(\"Object:%d. weight=%f value=%f completed\"%(arr[j].index, arr[j].weight, arr[j].value))\nif capacity < 0:\n print(\"Object:%d. weight=%f value=%f %f fractional\"%(arr[i].index, arr[i].weight, arr[i].value, capacity))\n","repo_name":"thinkphp/computer-science-in-python","sub_path":"foundations/greedy/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"4635303490","text":"import os\nimport json\nimport math\nimport torch\nfrom torch import optim\nfrom torch.autograd import Variable\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport objectives\nimport models\nfrom hvae.models.genomic_vae import GenomicVAE\n\nseed = 0\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n\n#input_size = X.shape[1]\ninput_size=57\nmodel = GenomicVAE(input_size)\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\nloss_function = objectives.vae_objective\n\ndef train(epoch, batches_per_epoch=64, ):\n model.train() # from nn.Module\n b_loss, b_recon, b_kl = 0.0, 0.0, 0.0\n\n ind = np.arange(x.shape[0])\n for i in range(batches_per_epoch):\n data = torch.from_numpy(x[np.random.choice(ind, size=batch_size)])\n data = Variable(data, requires_grad=False)\n optimizer.zero_grad()\n\n qz_x, px_z, lik, kl, loss = loss_function(model, data, 1, 1.0, components=True)\n\n loss.backward()\n optimizer.step()\n\n b_loss += loss.item()\n b_recon += -lik.mean(0).sum().item()\n b_kl += kl.sum(-1).mean(0).sum().item()\n\n if(epoch % 5 == 0):\n print('Train Epoch: {} \\tLoss: {:.6f}'.format(\n epoch, b_loss / len(data)))\n print('====> Epoch: {} done!'.format(epoch))\n\nif __name__ == '__main__':\n print('ok')\n print(type(model))\n #print(model.wtf_with_pa_params)\n print(model.pz_params)\n print('tra', model.training)\n\n for epoch in range(1, 31):\n train(epoch)\n\n print((model.parameters))\n print('p(z) params:')\n print(model.pz_params)\n","repo_name":"igor-bogdanov/ProjectVAE_Bogdanov","sub_path":"my_hyperbolic_vae_temp/hvae/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11178983956","text":"from collections import deque\nimport warnings\nfrom PIL import Image, ImageDraw\n\n\ndef deprecated(func):\n\n def wrapper(*args, **kwargs):\n warnings.warn(f\"Function {func.__name__} is deprecated.\", category=DeprecationWarning)\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef draw_rect_with_bounds(file, bounds):\n # 打开图像文件\n image = Image.open(file)\n\n # 创建一个可绘制对象\n draw = ImageDraw.Draw(image)\n\n # 定义框的坐标\n x1, y1 = bounds[0]\n x2, y2 = bounds[1]\n\n # 绘制红色框\n draw.line([(x1, y1), (x2, y1)], fill=\"red\", width=2) # 上边\n draw.line([(x2, y1), (x2, y2)], fill=\"red\", width=2) # 右边\n draw.line([(x2, y2), (x1, y2)], fill=\"red\", width=2) # 下边\n draw.line([(x1, y2), (x1, y1)], fill=\"red\", width=2) # 左边\n\n # 保存修改后的图像\n image.save(file)\n\n\ndef png_resize(file, resol_x, resol_y):\n from PIL import Image\n\n try:\n # 打开图片\n image = Image.open(file)\n\n # 设置新的分辨率\n new_resolution = (resol_x, resol_y)\n\n # 改变分辨率\n resized_image = image.resize(new_resolution)\n\n # 保存图片\n resized_image.save(f\"{file}.resize.png\")\n\n return f\"{file}.resize\"\n except Exception as e:\n print(\"[ERR]: Failed to resize the image \" + file, e)\n return -1\n\n\nclass Node:\n\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return self.name\n\n\nclass Edge:\n\n def __init__(self, start_node, end_node, event_strs: list):\n self.start_node = start_node\n self.end_node = end_node\n self.event_strs = event_strs\n\n\nclass DirectedGraph:\n\n def __init__(self):\n # utg.js中原始的nodes以及edges\n self.utg_nodes = []\n self.utg_edges = []\n\n # DirectedGraph中的nodes以及edges\n self.nodes = []\n # {\"state_str\": Node}\n self.nodes_dict = {}\n\n self.edges = []\n # {\"src_node\": {\"dst_node\": [\"e1\", \"e2\"]}}\n self.edges_dict = {}\n self.start_node = None\n\n def add_node(self, node: Node):\n self.nodes.append(node)\n self.nodes_dict[node.name] = node\n\n def add_edge(self, edge: Edge):\n self.edges.append(edge)\n if (edge.start_node.name not in self.edges_dict):\n self.edges_dict[edge.start_node.name] = {}\n\n if (edge.end_node.name not in self.edges_dict[edge.start_node.name]):\n self.edges_dict[edge.start_node.name][edge.end_node.name] = []\n\n for event in edge.event_strs:\n self.edges_dict[edge.start_node.name][edge.end_node.name].append(event)\n\n def find_shortest_path(self, node_1: str, node_2: str):\n\n if node_1 not in self.nodes_dict or node_2 not in self.nodes_dict:\n print(\"[ERR]: Cannot find node\")\n return None\n\n node_1 = self.nodes_dict[node_1]\n node_2 = self.nodes_dict[node_2]\n # 使用广度优先搜索算法寻找最短路径\n visited = set()\n queue = deque([(node_1, [])])\n\n while queue:\n current_node, path = queue.popleft()\n if current_node == node_2:\n return path + [current_node]\n\n if current_node not in visited:\n visited.add(current_node)\n neighbors = self.get_neighbors(current_node)\n for neighbor in neighbors:\n queue.append((neighbor, path + [current_node]))\n\n return None\n\n def get_neighbors(self, node):\n neighbors = []\n for edge in self.edges:\n if edge.start_node == node:\n neighbors.append(edge.end_node)\n return neighbors\n\n\nif __name__ == \"__main__\":\n # 创建有向图\n graph = DirectedGraph()\n\n # 创建节点\n node_1 = Node(\"Node 1\")\n node_2 = Node(\"Node 2\")\n node_3 = Node(\"Node 3\")\n node_4 = Node(\"Node 4\")\n node_5 = Node(\"Node 5\")\n\n # 添加节点到图中\n graph.add_node(node_1)\n graph.add_node(node_2)\n graph.add_node(node_3)\n graph.add_node(node_4)\n graph.add_node(node_5)\n\n # 创建边\n edge_1 = Edge(node_1, node_2)\n edge_2 = Edge(node_1, node_3)\n edge_3 = Edge(node_2, node_3)\n edge_4 = Edge(node_3, node_4)\n edge_5 = Edge(node_4, node_5)\n edge_6 = Edge(node_1, node_5)\n\n # 添加边到图中\n graph.add_edge(edge_1)\n graph.add_edge(edge_2)\n graph.add_edge(edge_3)\n graph.add_edge(edge_4)\n graph.add_edge(edge_5)\n graph.add_edge(edge_6)\n\n # 寻找从node_1到node_5的最短路径\n shortest_path = graph.find_shortest_path(\"Node 1\", \"Node 5\")\n for node in shortest_path:\n print(node)\n","repo_name":"Just1ceP4rtn3r/droidbot-confiot","sub_path":"Confiot_main/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14736702727","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nfrom distanciaDosPuntos import distancia\r\nfrom calculadora import contar_votos_scipy\r\nimport scipy.stats as ss\r\n\r\n#no se implementa formalmente\r\ndef encontrar_distancias(origen, vecinos):\r\n\t\"\"\"Retorna las distancias\"\"\"\r\n\tdistancias = np.zeros(puntos.shape[0])\r\n\tfor i in range(len(distancias)):\r\n\t\tdistancias[i]=distancia(origen, vecinos[i])\r\n\t\tprint(\"puntos: \",vecinos[i] , \"en \", i, \"distancias: \", distancias[i])\r\n\treturn distancias\r\n\r\ndef encontrar_vecino(origen, vecinos, k=3):\r\n\t\"\"\"Encuentra los primeros k vecinos mas cercanos usando la funcion sorted\"\"\"\r\n\tdistancias = np.zeros(vecinos.shape[0])\r\n\tfor i in range(len(distancias)):\r\n\t\tdistancias[i]=distancia(origen, vecinos[i])\r\n\t\tprint(\"puntos: \",vecinos[i] , \"en \", i, \"distancias: \", distancias[i])\r\n\tind= np.argsort(distancias)\r\n\treturn ind[:k]\r\n\r\n\t#origen:punto de origen, vecinos: puntos vecinos, salida: clase_binaria\r\ndef calcular_knn(origen, vecinos, salida, k=5):\r\n\t\"\"\"Calcula segun la distancia si pertenece a una clase u otra, de forma binaria\"\"\"\r\n\tindices_cercanos=encontrar_vecino(origen, vecinos, k)\r\n\tresultado=contar_votos_scipy(salida[indices_cercanos])\r\n\tprint(resultado)\r\n\treturn contar_votos_scipy(salida[indices_cercanos])\r\n\r\ndef generar_datos(m=30):\r\n\t\"\"\"Crea dos conjuntos de datos usando distribucion bivariate normal \"\"\"\r\n\tpuntos_sinteticos_clase_cero = ss.norm(0,1).rvs((m,2))\r\n\tpuntos_sinteticos_clase_uno = ss.norm(1,1).rvs((m,2))\r\n\tcoordenadas_puntos=np.concatenate( (puntos_sinteticos_clase_cero,puntos_sinteticos_clase_uno), axis=0 )\r\n\tclase_binaria_m = np.concatenate( (np.repeat(0,m),np.repeat(1,m) ) ) \r\n\treturn (coordenadas_puntos, clase_binaria_m) #puntos, salida\r\nif __name__==\"__main__\":\r\n\t#vector columna: array([ [a0,b0], [a1,b1], [a2,b2] , ..., [an,bn] ])\r\n\t#si fuera de mas dimesiones en filas:\r\n\t#array([ [a0,b0, c0, ..., z0], [a1,b1,c1,...,z1] , ... , [an,bn,cn,...,zn] ]) \r\n\tpuntos=np.array([ [1,1],[1,2],[1,3],[2,1],[2,2],[2,3],[3,1],[3,2],[3,3] ])\r\n\tpunto_arbitrario= np.array([2.5,2])\r\n\tprint(puntos.shape[0], len(puntos)) #shape[dimension], 0: columna, 1 para fila, equivalente a len(). \r\n\t#Shape lo hace tambien en filas, len no.\r\n\t\r\n\tdistancias = np.zeros(puntos.shape[0])\r\n\t\r\n\tdistancias=encontrar_distancias(punto_arbitrario, puntos)\r\n\tindx_k_vecinos_cercanos = encontrar_vecino(punto_arbitrario, puntos)\r\n\t#argsort, regresa el index de las distancias de menor a mayor en valor.\r\n\tsorteadas=np.argsort(distancias)\r\n\tminima_distancia=min(distancias)\r\n\tprint(\"minima distancia: \", minima_distancia)\r\n\tprint(\"distancias sorted: \", sorteadas)\r\n\tprint(puntos)\r\n\tprint(\"longitud array: \",len(puntos))\r\n\tprint(\"Primera posicion: \", puntos[0], \"segunda posicion: \", puntos[1])\r\n\tprint( \"En la segunda posicion, adentro el segundo valor: \",puntos[1][1])\r\n\tprint(puntos[:3]) #Hasta la n posicion: array[:n] ->1,2,3,...,n\r\n\tprint(puntos[:,1]) #vector columna posicion k: array[:,k]\r\n\t#para entenderlo seria: : -> logitud del vector; :,1->la columa, entonces\r\n\t#array[(:)<,> (1,k=1)]: array[: , k=1]\r\n\t\r\n\tprint(\"indices \", indx_k_vecinos_cercanos, \"puntos : \",puntos[indx_k_vecinos_cercanos])\r\n\t\r\n\t\r\n\t#clase binaria sirve para identificar dos clases\r\n\tclase_binaria=np.array([0,0,0,0,0,-1,-1,-1,-1])\r\n\t\r\n\t#como se clasifica segun clase binaria, el resultado es 0 o 1.\r\n\tprint(\"Clase binaria test\")\r\n\tpunto_arbitrario_de_prueba=np.array([1.0, 2.7])\r\n\tresultado_cercanos, veces = calcular_knn(punto_arbitrario_de_prueba, puntos, clase_binaria)\r\n\tprint(\"gano la clase: \", resultado_cercanos, \"con \", veces, \"votos\")\r\n\t\r\n\t#Generar Synthetic Data en reemplazo de Clase_binaria:\r\n\t#norm es una variable aleatoria normal y continua, definida como:\r\n\t#>> modulo.norm(params).rvs( (filas, columas) )\r\n\tpuntos_sinteticos_clase_cero = ss.norm(0,1).rvs((5,2))\r\n\tpuntos_sinteticos_clase_uno = ss.norm(1,1).rvs((5,2))\r\n\t\r\n\t#unimos las dos clases en un solo vector\r\n\t#axis los apila en 0 en la misma columna ( : ) y 1 los separa en diferentes columnas ( . .)\r\n\t# (:) y (..) indican la forma de apilar, siendo los puntos los vectores\r\n\tdatos_sinteticos=np.concatenate( (puntos_sinteticos_clase_cero,puntos_sinteticos_clase_uno), axis=0 )\r\n\tprint(\"1: \",puntos_sinteticos_clase_cero )\r\n\tprint(\"2 \", puntos_sinteticos_clase_uno)\r\n\tprint(\"3\",datos_sinteticos)\r\n\t#la funcion genera tantos puntos a plotear como salidas. \r\n\t\r\n\t#numpy.repeat( (numero a repetir, veces a repetir) )\r\n\tl=3\r\n\tclase_binaria_k = np.concatenate( (np.repeat(0,l),np.repeat(1,l) ) ) \r\n\tprint(clase_binaria_k)\r\n\t\r\n\t#Testeo\r\n\tprint(\"prueba de funcion generar_datos o mas bien, generar conjunto de puntos\")\r\n\tm=10\r\n\t(puntos_aleatorios, salida)= generar_datos(m)\r\n\t\r\n\t\r\n\t\r\n\t\r\n\tprint(\"Fin\")\r\n\t#------------------GRAFICAS\r\n\t\r\n\t#histograma y comulativa\r\n\tplt.figure()\r\n\tplt.grid(True)\r\n\tplt.hist(distancias)\r\n\tplt.hist(distancias,density=True, cumulative=True, histtype=\"step\")\r\n\tplt.figure()\r\n\tplt.grid(True)\r\n\tplt.plot(distancias, \"r-\")\r\n\t\r\n\t#grafia de puntos aleatorios:\r\n\tplt.figure()\r\n\tplt.grid(True)\r\n\t#>>puntos[0:n, 0] y puntos[0:n,1], indica que se recorre el vector hasta n en la columna 0 y luego la 1\r\n\t#Se recorre hasta n por que hasta ahi va el primer set de puntos, luego, puntos[:n, 0] y puntos[:n,1]\r\n\t#indica que se recorre hasta el final en la columna 0 y 1, dado que el vector tiene logitud 2n.\r\n\tplt.plot(puntos_aleatorios[:m,0],puntos_aleatorios[:m,1], \"ro\")\r\n\tplt.plot(puntos_aleatorios[m:,0],puntos_aleatorios[m:,1], \"bo\")\r\n\t\t\r\n\t#circulo de tipo patch, clase por defecto de matplotlib para hacer poligonos\r\n\t#circulo es el radio de alcance de el vecino mas cercano\r\n\t#circulo2 es un circulo de prueba sin relleno.\r\n\tfig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})\r\n\tcircle = patches.Circle((punto_arbitrario[0],punto_arbitrario[1]), radius=minima_distancia)\r\n\tcircle2 = patches.Circle((2,2), radius=minima_distancia, facecolor=\"none\", edgecolor=\"g\")\r\n\tax.add_patch(circle)\r\n\tax.add_patch(circle2)\r\n\t\r\n\t#alternativa para el circulo, de forma manual:\r\n\t#En este caso, radio=0.5; 2.5 y 2 son lo que hay que sumar para ubicar el circulo al centro del punto_arbitrario.\r\n\t#una formula general seria:\r\n\t#>>plot(radio*seno(theta) +desplazamiento_en_x, radio*coseno(theta) +desplazamiento_en_y, \"color\")\r\n\ttheta = np.linspace(-np.pi, np.pi, 200)\r\n\tplt.plot(0.5*(np.sin(theta))+2.5, 0.5*(np.cos(theta))+2, \"k-\")\r\n\t\r\n\t\r\n\t#LATER ON, Search for the colorbar, how to implement it.\r\n\t#cbar =fig.colorbar(ax)\r\n\t#cbar.solids.set_edgecolor(\"face\")\r\n\t#draw()\r\n\t\r\n\tplt.plot(puntos[:,0], puntos[:,1], \"ro\")\r\n\tplt.plot(punto_arbitrario[0],punto_arbitrario[1], \"bo\")\r\n\tplt.grid(True)\r\n\tplt.show()\r\n\t\r\n\t#hexagono\r\n\t#1, 6, 12, 18, 24, ++6\r\n\r\nelse:\r\n\tprint(\"modulo importado\",\"Caso 3: KNN\")\r\n\r\n","repo_name":"phikubo/Recursos-Python","sub_path":"caso3/vecinos_cercanos.py","file_name":"vecinos_cercanos.py","file_ext":"py","file_size_in_byte":6775,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"29772974208","text":"import sys\nclass Solution:\n # Again dynamic programming, but more optimized\n def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:\n n = len(nums)\n m = len(multipliers)\n dp = [[0 for _ in range(m+1)] for _ in range(m+1)]\n score = -sys.maxsize\n for k in range(1, m+1):\n for l in range(k+1):\n if l == 0:\n res_left = -sys.maxsize\n else:\n res_left = dp[l-1][k-l] + multipliers[k-1] * nums[l-1]\n if l == k:\n res_right = -sys.maxsize\n else:\n res_right = dp[l][k-l-1] + multipliers[k-1] * nums[n-k+l]\n dp[l][k-l] = max(res_left, res_right)\n\n if k == m:\n score = max(score, dp[l][k-l])\n return score\n \n # Time limit exceeded\n \"\"\"\n import sys\n def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:\n n = len(nums)\n m = len(multipliers)\n dp = [[[None for _ in range(n)] for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n dp[i][j][j] = multipliers[i] * nums[j]\n def recCalc(k: int, i: int, j: int) -> int:\n nonlocal dp, n\n if i > j:\n return -sys.maxsize\n if k == m:\n return 0\n if not(dp[k][i][j] is None):\n return dp[k][i][j]\n res_left = recCalc(k+1, i+1, j)\n res_right = recCalc(k+1, i, j-1)\n res = max(multipliers[k] * nums[i] + res_left,\n multipliers[k] * nums[j] + res_right)\n dp[k][i][j] = res\n return res\n return recCalc(0, 0, n-1)\n \"\"\"\n","repo_name":"AdrienC21/LeetCode","sub_path":"1770-maximum-score-from-performing-multiplication-operations/1770-maximum-score-from-performing-multiplication-operations.py","file_name":"1770-maximum-score-from-performing-multiplication-operations.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18905578641","text":"import boto3\n\nclient = boto3.client('elbv2')\nec2 = boto3.resource('ec2')\ncodedeploy = boto3.client('codedeploy')\n\n\ndef get_alb_dns_name():\n alb_dns_name = \"http://127.0.0.1:5000\"\n response = client.describe_load_balancers()\n if len(response['LoadBalancers']) > 0:\n alb_dns_name = \"http://\" + response['LoadBalancers'][0]['DNSName'] + \"/api\"\n return alb_dns_name\n\n\ndef get_ec2_on_alb_instance_id(last_deployment_id):\n ec2_instance_ids = []\n response = client.describe_load_balancers()\n loadBalancerArn = (response['LoadBalancers'][0]['LoadBalancerArn'])\n response = client.describe_target_groups(LoadBalancerArn=loadBalancerArn)\n for targetGroup in response['TargetGroups']:\n targetGroupArn = targetGroup['TargetGroupArn']\n response = client.describe_target_health(TargetGroupArn=targetGroupArn)\n targetHealthDescriptions = response['TargetHealthDescriptions']\n for targetHealthDescription in targetHealthDescriptions:\n instance_id = targetHealthDescription['Target']['Id']\n state = targetHealthDescription['TargetHealth']['State']\n last_deployment_event = get_last_deployment_event(last_deployment_id, instance_id)\n current_instance = list(ec2.instances.filter(InstanceIds=[instance_id]))\n private_ip_address = current_instance[0].private_ip_address\n ec2_instance_ids.append({\n 'id': instance_id,\n 'state': state,\n 'event_name': last_deployment_event['lifecycleEventName'],\n 'event_status': last_deployment_event['status'],\n 'last_deployment_id': last_deployment_id,\n 'private_ip_address': private_ip_address\n })\n return ec2_instance_ids\n\n","repo_name":"erwanjouan/flask-check-codedeploy","sub_path":"alb.py","file_name":"alb.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9641950411","text":"import random\n\n###### Game Start! ########\nprint(\"Let's play Rock, Paper, Scissors!(Please type rock, paper, or scissors)\")\n\n######## Gets user's pick or asks again for a valid pick #########\nwhile True:\n user_pick = input(\">\").lower()\n if user_pick == \"rock\":\n break\n elif user_pick == \"paper\":\n break\n elif user_pick == \"scissors\":\n break\n else:\n print(\"Yea that's not a valid input. Do it right this time. or else..\")\n\n####### Sets the random number to a computer pick ##########\nrandom_num = random.randint(0, 2) #generates random whole number between 0 and 2\ncomp_pick = \"\"\nif random_num == 0:\n comp_pick = \"rock\"\nelif random_num == 1:\n comp_pick = \"paper\"\nelif random_num == 2:\n comp_pick = \"scissors\"\n\n####### Compares user pick and computer pick #########\nif comp_pick == \"rock\":\n if user_pick == \"rock\":\n print(\"It's a draw\")\n elif user_pick == \"paper\":\n print(\"You win!\")\n elif user_pick == \"scissors\":\n print(\"You lose..\")\nelif comp_pick == \"paper\":\n if user_pick == \"rock\":\n print(\"You lose..\")\n elif user_pick == \"paper\":\n print(\"It's a draw\")\n elif user_pick == \"scissors\":\n print(\"You win!\")\nelif comp_pick == \"scissors\":\n if user_pick == \"rock\":\n print(\"You win!\")\n elif user_pick == \"paper\":\n print(\"You lose..\")\n elif user_pick == \"scissors\":\n print(\"It's a draw\")","repo_name":"Renaisani/rock-paper-scissors","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73507298296","text":"def sml(ar):\r\n x=len(ar)\r\n tmp=ar[0]\r\n for i in range(0,x):\r\n if ar[i]tmp:\r\n tmp=ar[i]\r\n return tmp\r\n\r\n \r\nno=int(input(\"Enter number of element in array : \"))\r\nar1=[]\r\n\r\nfor i in range(0,no):\r\n tmp=int(input())\r\n ar1.append(tmp)\r\n\r\nprint(\"Smallest number in array is : \", sml(ar1))\r\n\r\nprint(\"Largest number in array is : \", lrg(ar1))","repo_name":"nikunjvisavadia/100daycodingchallenge","sub_path":"Day45.py","file_name":"Day45.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15287488296","text":"\"\"\"There is a well tested rlax, but as a matter of practice...\"\"\"\nfrom typing import Tuple\n\nimport jax\nimport chex\nimport jax.numpy as jnp\nfrom jax.scipy.special import logsumexp\nimport tensorflow_probability.substrates.jax.distributions as tfd\n\nArray = jnp.ndarray\n\n\ndef scaled_and_dual_loss(loss: Array,\n duals: Array,\n epsilon: float,\n per_dimension: bool\n ) -> Tuple[Array, Array]:\n \"\"\"Lagrange multiplier loss.\"\"\"\n chex.assert_rank(epsilon, 0)\n chex.assert_type([loss, duals], float)\n chex.assert_is_broadcastable(loss.shape, duals.shape)\n\n sg = jax.lax.stop_gradient\n scaled_loss = sg(duals) * loss\n dual_loss = duals * sg(epsilon - loss)\n\n if per_dimension:\n scaled_loss = jnp.sum(scaled_loss, axis=-1)\n dual_loss = jnp.sum(dual_loss, axis=-1)\n\n return scaled_loss, dual_loss\n\n\ndef quantile_regression_loss(predictions: Array,\n pred_quantiles: Array,\n targets: Array,\n hubber_delta: float\n ) -> Array:\n \"\"\"Distributional critic loss.\"\"\"\n chex.assert_type([predictions, pred_quantiles, targets], float)\n chex.assert_rank([predictions, pred_quantiles, targets], 1)\n chex.assert_equal_shape([predictions, pred_quantiles])\n sg = jax.lax.stop_gradient\n\n targets = sg(targets)\n resids = targets[jnp.newaxis, :] - predictions[:, jnp.newaxis]\n ind = (resids < 0).astype(pred_quantiles.dtype)\n weight = jnp.abs(pred_quantiles[:, jnp.newaxis] - ind)\n abs_errors = jnp.abs(resids)\n if hubber_delta > 0:\n quadratic = jnp.minimum(abs_errors, hubber_delta)\n linear = abs_errors - quadratic\n loss = 0.5 * quadratic ** 2 / hubber_delta + linear\n else:\n loss = abs_errors\n loss *= sg(weight)\n\n return jnp.sum(jnp.mean(loss, axis=-1))\n\n\ndef cross_entropy_loss(dist: tfd.Distribution,\n actions: Array,\n normalized_weights: Array\n ) -> Array:\n chex.assert_type([actions, normalized_weights], float)\n\n log_probs = dist.log_prob(actions)\n\n chex.assert_rank([log_probs, normalized_weights], 1)\n chex.assert_equal_shape([log_probs, normalized_weights])\n\n return - jnp.sum(normalized_weights * log_probs)\n\n\ndef temperature_loss_and_normalized_weights(\n temperature: Array,\n q_values: Array,\n epsilon: float,\n tv_constraint: float\n) -> Tuple[Array, Array]:\n \"\"\"Direct dual constraint as a part of (C)MPO loss.\n\n If tv_constraint is finite, CMPO will be used instead.\n \"\"\"\n chex.assert_type([temperature, q_values, epsilon, tv_constraint], float)\n chex.assert_rank(\n [temperature, q_values, epsilon, tv_constraint], [0, 1, 0, 0]\n )\n sg = jax.lax.stop_gradient\n adv = sg(q_values - jnp.mean(q_values))\n\n if tv_constraint < float(\"inf\"):\n tempered_q_values = adv / temperature\n clipped = jnp.clip(\n tempered_q_values,\n a_min=-tv_constraint,\n a_max=tv_constraint\n )\n straight_through = tempered_q_values - sg(tempered_q_values)\n tempered_q_values = sg(clipped) + straight_through\n else:\n tempered_q_values = adv / temperature\n tempered_q_values = tempered_q_values.astype(jnp.float32)\n\n normalized_weights = jax.nn.softmax(tempered_q_values)\n normalized_weights = sg(normalized_weights)\n\n log_num_actions = jnp.log(q_values.size)\n q_logsumexp = logsumexp(tempered_q_values)\n temperature_loss = epsilon + q_logsumexp - log_num_actions\n temperature_loss *= temperature\n\n return temperature_loss.astype(q_values.dtype), \\\n normalized_weights.astype(q_values.dtype)\n\n\ndef softplus(param):\n param = jnp.maximum(param, -18.)\n return jax.nn.softplus(param) + 1e-8\n\n\ndef ordinal_logits(logits: Array):\n \"\"\"1901.10500\"\"\"\n chex.assert_type(logits, float)\n chex.assert_rank(logits, 1)\n\n reg = 1e-6\n logits = jax.nn.sigmoid(logits)\n logits = jnp.clip(logits, a_min=reg, a_max=1. - reg)\n\n lt = jnp.log(logits)\n gt = jnp.log(1 - jnp.flip(logits))\n lt = jnp.cumsum(lt)\n gt = jnp.cumsum(gt[:-1])\n gt = jnp.concatenate([jnp.zeros_like(gt[:1]), gt])\n return lt + jnp.flip(gt)\n","repo_name":"lkhromykh/jax-mpo","sub_path":"src/utils/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16174838598","text":"#1.用户需要选择是”1-删除域名”或是“2-删除IP地址”\r\n#2.用户分别输入域名和IP地址\r\n#3.如果输入的是IP地址,判断IP地址是否合法,调用check_ip()函数验证\r\n#4.如果合法,调用dns_\tdelete()函数,打开文件,删除域名或IP地址\r\n#5.如果内容存在,删除后,打印删除成功,并显示被删除的完整条目\r\n#6.如果删除内容不存在,返回没有找到。\r\n#7.最后将删好的内容写入文件,并打印文件全部内容\r\n\r\n#选择删除的对象1-域名,2-ip地址\r\n#删除整个,包括域名和IP地址\r\n#打印出删除后的有的值\r\n\r\nfrom check_ip import *\r\n\r\ndef dns_delete():\r\n begin = False\r\n while begin == False:\r\n print('请选择删除类别:1-删除域名,2-删除IP地址')\r\n answer = input('请输入数字代码[1/2]')\r\n # if选择1-删除域名,or,2-删除IP地址\r\n if answer == '1':\r\n answer_name = input('请输入域名:')\r\n #打开文件,以读模式\r\n file = open('hosts.txt', encoding='utf8')\r\n #创建一个总列表\r\n file_all_list = file.readlines()\r\n file.close()\r\n #用以记录for循环到了哪一个元素\r\n number = -1\r\n #for循环进行与输入值的判断,True就调用del()函数,结束循环\r\n for i in file_all_list:\r\n number += 1\r\n # 将子列表以空格分割成两个列表\r\n address = i.split(' ')\r\n #判断是否存在域名与输入值相等,若相等,就从file_all_list中删除\r\n if address[0] == answer_name:\r\n #删除\r\n del file_all_list[number]\r\n #打开host.txt以写入模式\r\n file_w = open('hosts.txt','w',encoding='utf8')\r\n for z in file_all_list:\r\n #写入剩下的内容\r\n file_w.write(f'{z}')\r\n file_w.close()\r\n print(f'删除成功,DNS记录为 {i}'.strip('\\n'))\r\n print(f'现有记录如下:')\r\n for a in file_all_list:\r\n print(a.strip('\\n'))\r\n begin = True\r\n break\r\n else:\r\n # 若不存在相等,则返回没有找到\r\n print('没有找到')\r\n elif answer == '2':\r\n answer_ip = input('请输入IP地址:')\r\n #调用check_ip函数\r\n #这是你绝对无法逃脱的验证\r\n start = False\r\n while start == False:\r\n start = check_ip(answer_ip)\r\n if start == False:\r\n answer_ip = input('请输入IP地址:')\r\n # 打开文件,以读模式\r\n file0 = open('hosts.txt', encoding='utf8')\r\n # 创建一个总列表\r\n file_all_list0 = file0.readlines()\r\n file0.close()\r\n # 用以记录for循环到了哪一个元素\r\n number0 = -1\r\n # for循环进行与输入值的判断,True就调用del()函数,结束循环\r\n for i in file_all_list0:\r\n number0 += 1\r\n # 将子列表以空格分割成两个列表\r\n ip = i.split(' ')\r\n # 判断是否存在域名与输入值相等,若相等,就从file_all_list中删除\r\n if ip[1].strip('\\n') == {answer_ip}.strip('\\n'):\r\n # 删除\r\n del file_all_list0[number0]\r\n data = file_all_list0\r\n # 打开host.txt以写入模式\r\n file_w = open('hosts.txt', 'w', encoding='utf8')\r\n for z in file_all_list0:\r\n # 写入剩下的内容\r\n file_w.write(f'{z}')\r\n file_w.close()\r\n print(f'删除成功,DNS记录为 {i}'.strip('\\n'))\r\n print(f'现有记录如下:')\r\n for a in file_all_list0:\r\n print(a.strip('\\n'))\r\n begin = True\r\n break\r\n else:\r\n # 若不存在相等,则返回没有找到\r\n print('没有找到')\r\n\r\n else:\r\n begin = False\r\n print('输入数字错误,请重新输入!')\r\n#else就显示没有找到\r\n\r\nif __name__ == \"__main__\":\r\n dns_delete()","repo_name":"zouyunzhen/python_learen_project","sub_path":"DNS-增删查改/dns_delete.py","file_name":"dns_delete.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4023532849","text":"import http.client\nimport urllib.parse\n\nimport lxml.etree\nimport lxml.html\nimport requests.adapters\nimport selenium.common.exceptions\nimport selenium.webdriver\nimport selenium.webdriver.chrome.options\nimport selenium.webdriver.common.proxy\nimport selenium.webdriver.firefox.options\nimport urllib3.util.retry\nfrom utils import *\n\n\n@contextlib.asynccontextmanager\nasync def executor(func, pool: Executor = Executor.NONE):\n l = log()\n loop = asyncio.get_event_loop()\n\n if not func:\n raise ValueError\n\n if pool == Executor.THREAD:\n with concurrent.futures.ThreadPoolExecutor() as thread_pool:\n l.t('Custom executor: thread_pool == {}, func == {}'.format(thread_pool, func))\n yield await loop.run_in_executor(thread_pool, func)\n elif pool == Executor.PROCESS:\n with concurrent.futures.ProcessPoolExecutor() as process_pool:\n l.t('Custom executor: process_pool == {}, func == {}'.format(process_pool, func))\n yield await loop.run_in_executor(process_pool, func)\n else:\n # 3. Run in the default loop's executor (blocking_io):\n l.t('Default executor: loop == {}, func == {}'.format(loop, func))\n yield await loop.run_in_executor(None, func)\n\nclass Net(Sem):\n t_patterns = ['http://t.me/', 'https://t.me/', 'http://www.t.me/', 'https://www.t.me/', 'http://telegram.me/',\n 'https://telegram.me/', 'http://www.telegram.me/', 'https://www.telegram.me/']\n header_content_type = {\"Content-Type\": \"text/html; charset=utf-8\"}\n header_accept_language = {\"Accept-Language\": \"en\"}\n header_content_language = {\"Content-Language\": \"en-US\"}\n header_dnt = {\"DNT\": \"0\"}\n header_accept = {'Accept': 'text/html'}\n user_agent = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) \" \\\n \"Version/5.1.3 Safari/534.53.10\"\n\n class Client(Enum):\n REQUESTS = enum.auto()\n FIREFOX = enum.auto()\n AIOHTTP = enum.auto()\n CURL = enum.auto()\n WGET = enum.auto()\n FILE = enum.auto()\n DIR = enum.auto()\n NONE = enum.auto()\n\n class Parser(Enum):\n SELF = enum.auto()\n REGEX = enum.auto()\n LXML = enum.auto()\n SOUP = enum.auto()\n MIO = enum.auto()\n\n SOUP_TEXT = enum.auto()\n GREP = \"grep -Po '(?<=link=\\\")[^\\\"]*(?=\\\")|(?<=href=\\\")[^\\\"]*(?=\\\")'\"\n\n @staticmethod\n def referer(url):\n return 'https://www.google.com/search?q={}&ie=utf-8&oe=utf-8&client=firefox-b-ab'.format(url)\n\n @staticmethod\n def get_headers(url):\n return {**Net.header_content_type, **Net.header_accept_language, **Net.header_content_language,\n **Net.header_dnt, **Net.header_accept, \"User-Agent\": Net.user_agent,\n \"Referer\": Net.referer(url)}\n\n @staticmethod\n async def ip_ok(ip):\n l = log()\n try:\n socket.inet_aton(ip)\n l.t('IP == {}'.format(ip))\n return ip\n except socket.error or OSError:\n l.d('IP == Error from {}'.format(ini['default']['ip_url']))\n\n @staticmethod\n async def parse(url, source, parser: Parser = Parser.MIO):\n l = log()\n try:\n if parser is Net.Parser.GREP:\n if os.path.isdir(source) or os.path.isfile(source):\n stdin = None\n Net.Parser.GREP.value = Net.Parser.GREP.value + \" -R \" + source\n else:\n stdin = source\n returncode, stdout, stderr = await funcs.aiocmd(Net.Parser.GREP.value, stdin=stdin)\n\n msg = '{}: returncode == {}, {}'.format(url, returncode, '{}'.format(\n '') if returncode is 0 else 'stderr == {}'.format(stderr.rstrip('\\n')), Net.Parser.GREP.value)\n log().i(msg) if int(returncode) is 0 else log().e(msg)\n\n if int(returncode) is 0 and stdout:\n return stdout.rstrip(\"';\").rstrip(\"/\")\n elif Net.Parser.MIO:\n return re.findall(r'(?<=href=\")[^\"]*(?=\")', source) + \\\n re.findall(r'(?<=link=\")[^\"]*(?=\")', source)\n elif parser is Net.Parser.REGEX:\n return re.findall(r'(?i)href=[\"\\']?([^\\s\"\\'<>]+)', source) + \\\n re.findall(r'(?i)link=[\"\\']?([^\\s\"\\'<>]+)', source)\n elif parser is Net.Parser.SOUP or parser is Net.Parser.SOUP_TEXT:\n soup = bs4.BeautifulSoup(source, 'html.parser')\n soup.decode('utf-8')\n if parser is Net.Parser.SOUP_TEXT:\n return soup.get_text()\n return [link.get('href') for link in soup.find_all('a')] + \\\n [link.get('link') for link in soup.find_all('tr')] + \\\n [link.get('href') for link in soup.find_all('link')] + \\\n [link.get('data-href') for link in soup.find_all('div')]\n elif parser is Net.Parser.LXML:\n xml = u''\n xml = bytes(bytearray(xml, encoding='utf-8'))\n lxml.etree.XML(xml)\n tree = lxml.html.fromstring(source)\n return [url_tag.attrib.get('href', '') for url_tag in tree.findall('.//a')] + \\\n [url_tag.attrib.get('link', '') for url_tag in tree.findall('.//tr')] + \\\n [url_tag.attrib.get('href', '') for url_tag in tree.findall('.//link')] + \\\n [url_tag.attrib.get('data-href', '') for url_tag in tree.findall('.//div')]\n else:\n l.c()\n raise AttributeError\n except ValueError as exc:\n if 'Unicode strings with encoding declaration are not supported' in repr(exc):\n l.c('{}: {}, source[{}]'.format(url, repr(exc), source))\n except TypeError as exc:\n if 'expected string or bytes-like object' in repr(exc):\n l.c('{}: {}, source[{}]'.format(url, repr(exc), source))\n except lxml.etree.ParserError as exc:\n if 'Document is empty' in repr(exc):\n l.c('{}: {}, tree_error_log[{}], source[{}]'.format(url, exc.error_log.filter_from_level(\n lxml.etree.ErrorLevels.WARNING), repr(exc), source))\n except BaseException as exc:\n l.x('{}: {}, source[{}]'.format(url, repr(exc), source))\n\n @staticmethod\n def config_wget(url=None):\n cmd = 'wget'\n cmd_dir = \"{}{}\".format(ini[cmd]['dir_prefix'], cmd)\n cmd_log = \"{0}/{1}.log\".format(\"/var/log\", cmd)\n if url:\n _, url_dom, url_rel, _, _, _ = urllib.parse.urlparse(url)\n url_dir = \"/\".join(i for i in [cmd_dir, url_dom.strip(\"/\"), url_rel.strip(\"/\")] if i.strip(\"/\"))\n os.system(\"mkdir -p {}\".format(url_dir))\n return url_dir\n else:\n os.system(\"mkdir -p {}\".format(cmd_dir))\n os.system('rm -f {}'.format(cmd_log))\n return cmd, cmd_dir, cmd_log\n\n @staticmethod\n @contextlib.asynccontextmanager\n async def wget(url, mirror=True, wait=20, tries=20, backups=4, background=True, debug=False, quiet=False,\n verbose=False, no_clobber=False, cont=True, random_wait=True, no_cache=True,\n secure_protocol=True, https_only=True, no_check_certificate=True, robots=True,\n ignore_length=True, timestamping=True, retry_connrefused=True):\n kwargs = locals().items()\n cmd = wget_cmd\n for k, v in kwargs:\n if k is 'robots' and v:\n cmd = '{} --execute robots=off'.format(cmd, k, v)\n elif k is 'cont' and v:\n cmd = '{} --continue'.format(cmd, k, v)\n elif k is 'secure_protocol' and v:\n cmd = '{} --secure-protocol=auto'.format(cmd)\n elif type(v) is int:\n cmd = '{} --{}={}'.format(cmd, k, v)\n elif v is True:\n cmd = '{} --{}'.format(cmd, k.replace('_', '-'))\n\n _, url_dom, _, _, _, _ = urllib.parse.urlparse(url)\n cmd = \"{} {} {} {} {} {} {} {} {} {} {} {}\"\\\n .format(cmd, \"--append-output='{}'\".format(wget_log),\n \"--directory-prefix='{}'\".format(wget_dir),\n \"--header='{}: {}'\".format(*Net.header_content_type.keys(), *Net.header_content_type.values()),\n \"--header='{}: {}'\".format(*Net.header_accept_language.keys(),\n *Net.header_accept_language.values()),\n \"--header='{}: {}'\".format(*Net.header_content_language.keys(),\n *Net.header_content_language.values()),\n \"--header='{}: {}'\".format(*Net.header_dnt.keys(), *Net.header_dnt.values()),\n \"--header='{}: {}'\".format(*Net.header_accept.keys(), *Net.header_accept.values()),\n \"--user-agent='{}'\".format(Net.user_agent),\n \"--referer='{}'\".format(Net.referer(url_dom)),\n \"--local-encoding='utf-8'\"\n \"--accept='*.html,*.php,*.htm' --reject='*.aac,*.abw,*.arc,*.avi,*.azw,*.bin,*.bmp,*.bz,*.bz2,\"\n \"*.csh,*.css,*.csv,*.doc,*.docx,*.eot,*.epub,*.es,*.gif,*.ico,*.ics,*.jar,*.jpeg,*.jpg,*.js,\"\n \"*.json,*.mid,*.midi,*.mpeg,*.mpkg,*.odt,*.oga,*.ogv,*.ogx,*.otf,*.png,*.pdf,*.ppt,*.pptx,*.rar,\"\n \"*.rtf,*.sh,*.svg,*.swf,*.tar,*.tif,*.tiff,*.ts,*.ttf,*.txt,*.vsd,*.wav,*.weba,*.webm,*.webp,\"\n \"*.woff,*.woff2,*.xhtml,*.xls,*.xlsx,*.xml,*.xul,*.zip,*.3gp,*.3gp2,*.7z' \"\n \"--ignore-tags='img,script' --inet4-only\", format(url))\n returncode, stdout, stderr = await funcs.aiocmd(cmd)\n msg = '{}: returncode == {}, {}, {}'.format(url, returncode,\n 'stdout == {}'.format(stdout.rstrip('\\n')) if returncode is 0\n else 'stderr == {}'.format(stderr.rstrip('\\n')), cmd)\n log().i(msg) if int(returncode) is 0 else log().e(msg)\n\n if background and int(returncode) is 0 and 'Continuing in background, pid ' in stdout:\n while psutil.pid_exists(int(stdout.strip('Continuing in background, pid ').rstrip('.\\n'))):\n log().n('Waiting for {}: pid == {}'.format(url, int(stdout.strip('Continuing in background, pid ').\n rstrip('.\\n'))))\n await asyncio.sleep(random.randint(40, 120))\n log().s('{}: pid == {}'.format(url, int(stdout.strip('Continuing in background, pid ').rstrip('.\\n'))))\n\n yield await Net(Net.config_wget(url)).get\n\n @staticmethod\n @contextlib.asynccontextmanager\n async def file(name, html=True):\n with open(name) as response:\n if html:\n yield response.read()\n else:\n yield [line.rstrip('\\n') for line in response.readlines()]\n\n @staticmethod\n @contextlib.asynccontextmanager\n async def dir(name):\n await Sem().gather(*[Net(os.path.join(root, file)).get for root, _, files in os.walk(name) for file in files])\n yield\n \"\"\"\n https://topicolist.com/ parse ficheros 3- Si url fichero y no HTML -> telegram\n :param client:\n :param links: Si links True return links. False, return eurl\n :param firefox:\n :param firefox_arg:\n :param firefox_times:\n :param firefox_real:\n :param: t_eurl: return links y actualiza extracted y megagroups on los t_eurls extraidos\n :param headless:\n :param parser:\n :param background:\n :param site:\n \"\"\"\n super().__init__()\n self.root_url = root_url\n self.url = url\n self.name = client\n\n if self.url:\n self.ip = None\n if os.path.isdir(self.url):\n self.name = Net.Client.DIR\n self.GET = self.dir(self.url)\n elif os.path.isfile(self.url):\n if \"htm\" in self.url:\n self.name = Net.Client.FILE\n self.GET = self.file(self.url)\n else:\n self.name = Net.Client.NONE\n self.GET = self.file(self.url, False)\n elif not self.url:\n self.url = ip_url\n elif not self.root_url:\n schema, netloc, rel_url, _, _, _ = urllib.parse.urlparse(self.url)\n count = 0\n for crawl in crawl_rows:\n if netloc in crawl.url:\n self.root_url = crawl.url\n count += 1\n if count > 1:\n u = urllib.parse.urlunparse((schema, netloc, rel_url, _, _, _))\n for crawl in crawl_rows:\n if u in crawl.url:\n self.root_url = crawl.url\n break\n else:\n self.url = ip_url\n self.ip = True\n\n self.text = None\n self.links = links\n self.eurl = None if links else True\n self.parser = parser\n self.firefox = firefox\n self.t_urls = None\n self.t_eurls = t_eurl\n self.headers = self.get_headers(self.url) if self.root_url else 'telegram'\n\n if self.name is Net.Client.FIREFOX:\n # profile_path = \"/home/fp/.mozilla/firefox\"\n # profile = selenium.webdriver.FirefoxProfile(profile_path)\n # profile.set_preference(\"network.proxy.type\", 1)\n # profile.set_preference(\"network.proxy.socks\", local_ip)\n # profile.set_preference(\"network.proxy.socks_port\", 9981)\n # profile.set_preference(\"network.proxy.socks_version\", 5)\n # profile.update_preferences()\n # self.client = selenium.webdriver.Firefox(profile=profile, proxy=proxy,\n # service_log_path='/var/log/geckodriver.log', options=options,\n # desired_capabilities=selenium.webdriver.DesiredCapabilities.FIREFOX)\n proxy = selenium.webdriver.common.proxy.Proxy(selenium.webdriver.common.proxy.ProxyType.SYSTEM)\n options = selenium.webdriver.FirefoxOptions()\n options.headless = headless\n self.client = selenium.webdriver.Firefox(service_log_path='/var/log/geckodriver.log', options=options,\n desired_capabilities=selenium.webdriver.DesiredCapabilities.\n FIREFOX, proxy=proxy)\n self.client.implicitly_wait(ini.getint('default', 'browser_implicit_wait'))\n self.firefox = firefox\n if self.firefox is not Firefox.NONE:\n self.firefox_call = getattr(self.client, firefox.value)\n self.firefox_arg = firefox_arg\n self.firefox_times = firefox_times\n self.firefox_real = firefox_real\n self.click = True if 'CLICK' in self.firefox.name else None\n self.GET = executor(functools.partial(self.client.get, self.url))\n elif self.name is Net.Client.WGET:\n self.background = background\n self.site = site\n self.GET = self.wget(self.url, mirror=self.site, background=self.background)\n elif self.name is Net.Client.REQUESTS:\n http.client.HTTPConnection.debuglevel = ini.getint('default', 'HTTP_debug')\n self.client = requests.Session()\n self.client.headers = self.headers\n self.client.adapter = requests.adapters.HTTPAdapter(max_retries=urllib3.util.retry.Retry(\n total=ini.getint('default', 'retry_total'), status_forcelist=[400, 403, 404, 408, 500, 502, 503, 504],\n backoff_factor=ini.getfloat('default', 'retry_backoff_factor')))\n self.client.mount('http://', self.client.adapter)\n self.client.mount('https://', self.client.adapter)\n self.client.verify = True\n self.client.trust_env = True\n self.GET = executor(functools.partial(self.client.get, self.url))\n elif self.name is Net.Client.AIOHTTP:\n # Nota: Probando\n self.connector = aiohttp.TCPConnector(family=socket.AF_INET, ssl=False)\n self.client = aiohttp.ClientSession(headers=self.headers, connector=self.connector, trust_env=True)\n self.GET = self.client.get(self.url)\n elif self.name is Net.Client.CURL:\n self.response = io.BytesIO()\n self.client = pycurl.Curl()\n self.client.setopt(self.client.URL, self.url)\n self.client.setopt(self.client.FOLLOWLOCATION, 1)\n self.client.setopt(self.client.SSL_VERIFYPEER, 0)\n self.client.setopt(self.client.SSL_VERIFYHOST, 0)\n self.client.setopt(self.client.HTTPHEADER, ['{}: {}'.format(key, value)\n for key, value in self.headers.items()])\n self.client.setopt(self.client.WRITEFUNCTION, self.response.write)\n self.client.setopt(self.client.CAINFO, certifi.where())\n self.GET = executor(self.client.perform)\n\n @property\n async def get(self):\n l = log()\n prefix = '{}({}):'.format(self.name.name, self.url)\n suffix = ''\n try:\n async with self.GET as response:\n if self.name is Net.Client.FIREFOX:\n await asyncio.sleep(ini.getint('default', 'browser_load_sleep'))\n tree = lxml.html.fromstring(self.client.page_source)\n title = tree.xpath('/html/head/title/text()')\n try:\n if self.url is not ip_url:\n if title[0] == 'Server Not Found':\n l.e('{}({}), {} == {}'.format(self.name.name, self.url, title[0],\n tree.xpath('// *[ @ id = \"errorShortDescText\"]/text()')[\n 0]))\n except IndexError as exc:\n l.e('{}({}), exc == {}, page_source == {}'.format(self.name.name, self.url, repr(exc),\n self.client.page_source))\n else:\n if self.firefox is not Firefox.NONE:\n for number in range(1, self.firefox_times + 1):\n try:\n if self.click:\n self.firefox_call(self.firefox_arg).click()\n else:\n self.firefox_call(self.firefox_arg)\n l.i('{}({}), {} out of {}'.format(self.name.name, self.url, number,\n self.firefox_times))\n await asyncio.sleep(ini.getint('default', 'browser_load_sleep'))\n except selenium.common.exceptions.NoSuchElementException or \\\n selenium.common.exceptions.NoSuchWindowException as exc:\n if number < self.firefox_real:\n l.w('{}({}), {} of {}, exc == {}'.format(self.name.name, self.url, number,\n self.firefox_times, repr(exc)))\n break\n except selenium.common.exceptions.WebDriverException as exc:\n l.e('{}({}), {} of {}, exc == {}'.format(self.name.name, self.url, number,\n self.firefox_times, repr(exc)))\n break\n if self.ip:\n self.text = await Net.parse(self.url, self.client.page_source, Net.Parser.SOUP_TEXT)\n else:\n \"\"\"\n Alt 1. \n # returns the inner HTML as a string\n self.text = self.client.execute_script(\"return document.body.innerHTML\")\n Alt 2. \n # igual pero sacando el outerHTML\n http://stanford.edu/~mgorkove/cgi-bin/rpython_tutorials/Scraping_a_Webpage_Rendered_by_Javascript_Using_Python.php\n https://code.tutsplus.com/tutorials/modern-web-scraping-with-beautifulsoup-and-selenium--cms-30486\n\n self.text = elem.get_attribute(\"outerHTML\").replace('%25', '%').replace('%2F', '/').\\\n replace('%26', '&').replace('%3F', '?').replace('%3D', '=').replace('%23', '#').\\\n replace('%25', '%').replace('%3A', ':')\n\n lo cambio por:\n\n urllib.parse.unquote()\n \"\"\"\n self.text = urllib.parse.unquote(\n self.client.find_element_by_xpath(\"//*\").get_attribute(\"outerHTML\"))\n self.eurl = self.client.current_url\n elif self.name is Net.Client.REQUESTS and response.status_code is 200 \\\n and 'text/html' in response.headers.get('content-type'):\n self.text = response.text\n self.eurl = response.url\n elif self.name is Net.Client.AIOHTTP and response.status is 200 \\\n and 'text/html' in response.headers.get('content-type'):\n self.text = await response.text()\n self.eurl = response.real_url\n elif self.name is Net.Client.CURL and self.client.getinfo(self.client.RESPONSE_CODE) is 200 \\\n and 'text/html' in self.client.getinfo(self.client.CONTENT_TYPE):\n self.text = self.response.getvalue().decode()\n self.eurl = self.client.getinfo(self.client.EFFECTIVE_URL)\n elif self.name is not Net.Client.WGET and not Net.Client.DIR and not Net.Client.FILE \\\n and not Net.Client.NONE:\n raise ConnectionError\n except selenium.common.exceptions.TimeoutException or selenium.common.exceptions.WebDriverException \\\n or asyncio.TimeoutError or pycurl.error or ConnectionError as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except UnicodeDecodeError as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except urllib3.exceptions.MaxRetryError as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except requests.exceptions.InvalidSchema as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except requests.exceptions.RetryError as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except requests.exceptions.MissingSchema as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except TypeError as exc:\n if 'expected string or bytes-like object' in repr(exc):\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n except ConnectionRefusedError as exc:\n if 'expected string or bytes-like object' in repr(exc):\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n else:\n try:\n if self.ip:\n self.ip = await Net.ip_ok(self.text)\n suffix = 'ip == {}'.format(self.ip)\n return self.ip\n elif self.links and self.name is not Net.Client.DIR and self.name is not Net.Client.WGET:\n if self.name is Net.Client.FILE or Net.Client.NONE:\n self.text = response\n self.links = await Net.parse(self.url, self.text)\n self.t_urls = {await self.t(link.lower().rstrip('/')) for link in {*self.links}\n for t_pattern in Net.t_patterns if t_pattern in link and '.me/share/' not in link}\n if self.t_eurls and self.t_urls:\n self.t_eurls = {*await self.gather(*[Net(t_eurl, links=False).get for t_eurl in self.t_urls])}\n extracted_added, megagroup_added = await self.update(self.root_url, self.t_eurls)\n suffix = 'links == {}, t_urls == {}, t_eurls == {} - MySQL added({}): extracted == {}, ' \\\n 'megagroup == {}'.format(len(self.links), len(self.t_urls), len(self.t_eurls),\n self.root_url, extracted_added, megagroup_added)\n return self.links\n suffix = 'links == {}, t_urls == {}'.format(len(self.links), len(self.t_urls))\n return self.links\n elif self.eurl:\n suffix = 'eurl == {}'.format(self.eurl)\n return self.eurl\n except TypeError as exc:\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n finally:\n try:\n l.c('{} {}'.format(prefix, suffix)) if 'exc == ' in suffix else l.v('{} {}'.format(prefix, suffix))\n\n if self.name is Net.Client.AIOHTTP:\n await self.client.close()\n self.connector.close()\n elif hasattr(self, 'client'):\n self.client.close()\n if self.name is Net.Client.FIREFOX:\n self.client.quit()\n self.client.stop_client()\n except TypeError as exc:\n if 'object NoneType can' in repr(exc):\n suffix = '{}({}): exc == {}'.format(self.name, self.url, repr(exc))\n l.c(suffix)\n\n\nwget_cmd, wget_dir, wget_log = Net.config_wget()\n","repo_name":"mnopi/examples","sub_path":"Python/crawlers-y-sitemaps/crawler/crawl_aiohttp_y_curl_y_parsers.py","file_name":"crawl_aiohttp_y_curl_y_parsers.py","file_ext":"py","file_size_in_byte":31191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40662228621","text":"#!/usr/bin/python\n\nfrom pathlib import Path\nimport datetime\n\nnow = datetime.datetime.now()\nyear = now.year\nmonth = now.month\n\nname = input('Enter article name:')\n\npath1 = Path('articles') / str(year) / str(month)\npath1.mkdir(parents=True, exist_ok=True)\n\npath2 = path1 / f'{name}.txt'\n\npath2.touch()\n\nprint(f'Article created at: {path2}')\n","repo_name":"janbodnar/Python-Course","sub_path":"stdlib/pathlib/new_article.py","file_name":"new_article.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"22"} +{"seq_id":"20450319124","text":"from typing import List, Type, Tuple\r\nimport pandas as pd\r\nimport torch.nn\r\nfrom graph_supervised_learning.rho import compute_rho\r\n\r\nclass RhoEstimator:\r\n \"\"\"\r\n Rho estimator class\r\n \"\"\"\r\n def __init__(self, path: str, model: torch.nn.Module, layers_type: List[Type[torch.nn.Module]], rho_reg: float, lambda_: float, task_type: str):\r\n self._all_modules = self._get_all_modules(model, layers_type=tuple(layers_type))\r\n self._path = path # path to .csv file\r\n if self._path is not None:\r\n self._columns = ['iter'] + ['layer_' + str(i) + '_rho' for i in range(len(self._all_modules))]\r\n pd.DataFrame(columns=self._columns).to_csv(self._path, index=False)\r\n\r\n self._rho_list = []\r\n\r\n self._last_layer = self._all_modules[-1]\r\n self._iter = 0\r\n self._y = None\r\n self._mask = None # we should select only training node representations\r\n self._rho = None\r\n self._rho_reg = rho_reg\r\n self._lambda = lambda_\r\n self._task_type=task_type\r\n\r\n for curr_module in self._all_modules:\r\n def forward_hook(module, input, output):\r\n if module.training == True:\r\n self._rho_list.append(compute_rho(output[self._mask], self._y[self._mask], lambda_=self._lambda))\r\n\r\n if module == self._last_layer:\r\n self._rho = torch.stack(self._rho_list)[:-1].sum() # discard last layer\r\n rho_list = [rho.item() for rho in self._rho_list]\r\n data = [self._iter] + rho_list\r\n if self._path is not None:\r\n pd.DataFrame(data=[data], columns=self._columns).to_csv(\r\n self._path, mode='a', index=False, header=False\r\n )\r\n\r\n self._iter += 1\r\n self._rho_list = []\r\n\r\n curr_module.register_forward_hook(hook=forward_hook)\r\n\r\n @staticmethod\r\n def _get_all_modules(module: torch.nn.Module, layers_type: Tuple[Type[torch.nn.Module]]):\r\n ans = []\r\n if isinstance(module, layers_type):\r\n ans.append(module)\r\n\r\n m_childrens = list(module.children())\r\n if len(m_childrens) == 0:\r\n return ans\r\n else:\r\n for curr_children in m_childrens:\r\n ans += RhoEstimator._get_all_modules(curr_children, layers_type)\r\n return ans\r\n\r\n def set_y(self, y:torch.Tensor):\r\n if self._task_type == 's':\r\n self._y = torch.nn.functional.one_hot(y.flatten()).type(torch.float32)\r\n self._y = self._y[:, self._y.sum(0) != 0] # prevents nans in back propagation\r\n else: # (m)\r\n self._y = y.type(torch.float32)\r\n\r\n def set_mask(self, mask:torch.Tensor):\r\n self._mask = mask\r\n\r\n @property\r\n def rho_reg(self):\r\n return self._rho_reg\r\n\r\n @property\r\n def rho(self):\r\n return self._rho\r\n","repo_name":"Anonymous1252022/KR_for_GNNs","sub_path":"graph_supervised_learning/rho_estimator.py","file_name":"rho_estimator.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"74553384694","text":"# 1. 딕셔너리 만들기\nlunch = {\n '중국집':'02-1123-4544',\n '양식집':'053-216-4545',\n '한식집':'054-451-5452',\n}\n\ndinner = dict(중국집='02-1233-4544') #딕셔너리로 변환시켜주는 내장함수 # key값은 문자열이 아닌 양쪽 따옴표 없이 기입\n# int()\n# list()\n\n\n# 2. 딕셔너리 내용 추가하기\nlunch['분식집'] = '053-123-4567'\n\n\n# 3. 딕셔너리 내용 가져오기\nprint(lunch['중국집']) #=> 02-1123-4544\nidol = {\n 'BTS': {\n '지민':24,\n 'RM': 25,\n }\n}\n\nidol['BTS'] #=> dit -> { '지���':24, 'RM':25 }\nidol['BTS']['RM'] #=> 25\n\n\n# 4. 딕셔너리 반복문 활용하기\n# 기본 활용\nfor key in lunch:\n print(key) #=> key\n print(lunch[key]) #=> value\n\n# key만 가져오기 : .keys()\nfor key in lunch.keys():\n print(key)\n\n# value만 가져오기 : .values()\nfor value in lunch.values():\n print(value)\n\n# item (key, value) 가져오기 : .items()\n# lunch.items() #=> [('중식','02'), ... ]\nfor item in lunch.items():\n print(item[0], item[1])\n# 말고 item을 쪼개서 넣을 수 있다.\n# for key, value in lunch.items():\n# print(key, value)\n\n# n개 = 자료형 길이 n (같을경우 가능)\na, b, c = (1,2,3)\nprint(a)\nprint(b)\n\n\n# 1. 이 학생의 평균을 구하시오.\ntotal_score = 0\nfor subject_score in score.values():\n total_score = total_score + subject_score\n \nave_score = total_score / len(score)\nprint(ave_score) \n\n# sum은 바로 사용할 수 없다. 내장함수로 다른기능으로 등록되어있기 때문이다.\n\n# 1.2 두번째 풀이\ntotal_score = sum(score.values()) # sum([80, 90, 100]) => 270\nave_score = total_score / len(score)\nprint(ave_score) \n","repo_name":"hyunwoo-song/TOT","sub_path":"startcamp/day04/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34892845826","text":"# -*- coding = utf-8 -*-\r\n# @Time : 2020/8/8 17:14\r\n# @Author : AprilYyt\r\n# @File : crawler2.py\r\n# @Software: PyCharm\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport bs4\r\nfrom lxml import etree\r\nimport urllib.request,urllib.error\r\nimport xlwt\r\nimport re\r\nimport pandas as pd\r\n\r\n# r = requests.get(\"https://www.qcc.com/elib_financing_p1.html\")\r\n\r\ndef main():\r\n\r\n baseurl = 'https://www.qcc.com/elib_financing'\r\n #爬取网页 & 解析数据\r\n datalist = getData(baseurl)\r\n #二次爬取产品简介\r\n # datalist = getDescription(datalist)\r\n # print(datalist)\r\n #保存数据\r\n savepath=\"企查查融资情况test.csv\"\r\n saveData(datalist,savepath)\r\n #askURL(\"https://www.qcc.com/elib_financing\")\r\n\r\n# 爬取网页\r\ndef getData(baseurl):\r\n\r\n datalist = []\r\n for i in range(1,11): #练习只爬取第一页 后面在这里更改要爬取的页面数\r\n url = baseurl+ '_p_' + str(i) + '.html'\r\n html = askURL(url) # 保存获取到的网页源码\r\n parseHTML = bs4.BeautifulSoup(html, 'html.parser')\r\n table = parseHTML.find_all(name='tr')\r\n\r\n li = []\r\n\r\n for tr in table:\r\n tr_list = []\r\n dic = {}\r\n #获取所有text\r\n for td in tr.find_all(name='td'):\r\n text = td.get_text()\r\n #去除所有空格\r\n text = text.replace('\\n', '')\r\n text = text.replace('\\t', '')\r\n text = text.replace('\\xa0', '')\r\n text = text.replace(' ', '')\r\n\r\n # print(text, type(text))\r\n tr_list.append(text)\r\n\r\n #获取所有详情链接\r\n for td_link in tr.find_all(name='a', href=True):\r\n tr_list.append(td_link['href'])\r\n\r\n #tr_list内容写入li\r\n if len(tr_list) != 0:\r\n dic['序号'] = tr_list[0]\r\n dic['产品名称'] = tr_list[2]\r\n dic['产品链接'] = 'https://www.qcc.com'+ tr_list[-1]\r\n dic['所属公司'] = tr_list[3]\r\n dic['投资机构'] = tr_list[4]\r\n dic['融资阶段'] = tr_list[5]\r\n dic['融资金额'] = tr_list[6]\r\n dic['融资时间'] = tr_list[7]\r\n\r\n li.append(dic)\r\n\r\n for dic in li:\r\n if len(dic)!=0:\r\n datalist.append(dic)\r\n\r\n return datalist\r\n\r\n#二次爬取,从每个产品的详情链接里爬出产品简介\r\ndef getDescription(datalist):\r\n for i in range(0,len(datalist)):\r\n url = str(datalist[i]['产品链接'])#获取单个产品链接\r\n html = askURL(url) # 获取详情网页源码\r\n #用bs4的方法\r\n parseHTML = bs4.BeautifulSoup(html, 'html.parser')\r\n table = parseHTML.find_all('section',id='productIntro')\r\n\r\n for tr in table:\r\n tr_list = []\r\n dic = {}\r\n # 获取所有text\r\n for td in tr.find_all(name='td'):\r\n text = td.get_text()\r\n # 去除空格\r\n text = text.replace('\\n', '')\r\n text = text.replace('\\t', '')\r\n text = text.replace('\\xa0', '')\r\n text = text.replace(' ', '')\r\n tr_list.append(text)\r\n # 更新datalist\r\n if len(tr_list) != 0:\r\n dic['产品简介'] = tr_list[0]\r\n datalist[i].update(dic)\r\n\r\n return datalist\r\n\r\n#获得指定URL的网页内容\r\ndef askURL(url):\r\n head = {\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36\"\r\n }\r\n request = urllib.request.Request(url, headers=head)\r\n html = \"\"\r\n\r\n try:\r\n response = urllib.request.urlopen(request)\r\n html = response.read().decode(\"utf-8\")\r\n #print(html)\r\n\r\n except urllib.error.URLError as e:\r\n print(e)\r\n # if hasattr(e, \"code\"):\r\n # # print(e.code)\r\n # print(e.text)\r\n\r\n # if hasattr(e, \"reason\"):\r\n # # print(e, reason)\r\n # print(e.text)\r\n\r\n return html\r\n\r\n# 保存数据\r\ndef saveData(datalist, savepath):\r\n df = pd.DataFrame(datalist)\r\n print(df.head())\r\n df.to_csv(savepath,encoding=\"utf_8_sig\")\r\n return None\r\n #声明表头字段\r\n ordered_list = ['序号','产品名称','产品链接','所属公司','投资机构','融资阶段','融资金额','融资时间','产品简介']\r\n wb = xlwt.Workbook(\"New File.xlsx\")\r\n ws = wb.add_sheet(\"融资信息\")\r\n\r\n first_row = 0\r\n for header in ordered_list:\r\n col = ordered_list.index(header) # 保持表头顺序\r\n ws.write(first_row, col, header) # 写入表头\r\n\r\n row = 1\r\n for data in datalist:\r\n for _key, _value in data.items(): ###for key,value in dictionary.items():xxxx\r\n col = ordered_list.index(_key)\r\n ws.write(row, col, _value)\r\n row += 1 # enter the next row\r\n\r\n wb.save(savepath)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n main()","repo_name":"118020071/Web_Crawler_Workshop","sub_path":"Codes/Requests/qcc.py","file_name":"qcc.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"13761970378","text":"import yaml\nimport torch\nimport os\n\nfrom inference.cam_generation.cam_grub_cut import ActivationExtractor\nfrom model_training.common.datasets import PascalClassificationDataset\nfrom model_training.common.augmentations import get_transforms\n\nwith open(os.path.join(os.path.dirname(__file__), \"config\", \"cam.yaml\")) as config_file:\n config = yaml.full_load(config_file)\n\ntrain_transform = get_transforms(config[\"train\"][\"transform\"])\nval_transform = get_transforms(config[\"val\"][\"transform\"])\n\ntrain_ds = PascalClassificationDataset(\n config[\"train\"][\"input_path\"],\n transform=train_transform,\n image_set=\"train\",\n return_name=True,\n return_size=True,\n)\nval_ds = PascalClassificationDataset(\n config[\"val\"][\"input_path\"],\n transform=val_transform,\n image_set=\"validation\",\n return_name=True,\n return_size=True,\n)\n\ntrain_dl = torch.utils.data.DataLoader(\n train_ds, batch_size=config[\"batch_size\"], shuffle=True, num_workers=12\n)\nval_dl = torch.utils.data.DataLoader(\n val_ds, batch_size=config[\"batch_size\"], shuffle=True, num_workers=12\n)\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device {device}\")\n\nextractor = ActivationExtractor(config, train_dl, val_dl, device)\nextractor.extract()\n","repo_name":"ucuapps/WSMIS","sub_path":"inference/cam_generation/generate_pascal.py","file_name":"generate_pascal.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"22"} +{"seq_id":"5932230590","text":"\"\"\"add road1, road2 and km to markers table\n\nRevision ID: 283bc6a2bcab\nRevises: 2f83f4f1b63d\nCreate Date: 2018-01-19 17:16:44.904000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '283bc6a2bcab'\ndown_revision = '2f83f4f1b63d'\nbranch_labels = None\ndepends_on = None\n\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('markers', sa.Column('km', sa.Float(), nullable=True))\n op.add_column('markers', sa.Column('road1', sa.Integer(), nullable=True))\n op.add_column('markers', sa.Column('road2', sa.Integer(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('markers', 'road2')\n op.drop_column('markers', 'road1')\n op.drop_column('markers', 'km')\n ### end Alembic commands ###\n","repo_name":"data-for-change/anyway","sub_path":"alembic/versions/283bc6a2bcab_add_road1_road2_and_km_to_markers_table.py","file_name":"283bc6a2bcab_add_road1_road2_and_km_to_markers_table.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"22"} +{"seq_id":"21040360043","text":"import os\nimport pathlib\nimport re\n\n\ndef strip_html_tags(subject):\n if subject == '':\n return ''\n else:\n return re.sub(\"<[^>]*>\", \"\", subject)\n\ndef main():\n print('String Menu Builder Run...')\n pwd_win = input(\"Enter path containing the source files: \")\n dir_out = input(\"Enter path to write files to: \")\n pwd = str(pathlib.Path(pwd_win))\n conceptList = {}\n\n for filename in os.listdir(pwd.replace('\\\\', '/')):\n if filename.endswith('.' + 'html'):\n # dest_file_name = dir_out + \"/\" + filename\n print(\"Reading... \" + filename)\n src_file = open(filename, \"r\")\n # print(\"Writing... \" + dest_file_name)\n # dest_file = open(dest_file_name, 'w')\n subject = src_file.read()\n # Do first replacement\n reobj = re.compile('
    \\r*?(.*?)\\r*?
    ', re.DOTALL | re.MULTILINE)\n match = reobj.search(subject)\n if match:\n result = match.group(1)\n conceptList[result] = {}\n print(\"***\"+match)\n # Do 2nd replacement - subconcept\n\n print(conceptList)\n\n# Main\nif __name__ == '__main__':\n # Calling main() function\n main()\n","repo_name":"code-school-teacher-repos/javascript-questionbank","sub_path":"util/theripper_menu_builder.py","file_name":"theripper_menu_builder.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39031757745","text":"import random\nnumero = random.randint(0,100)\ncontador = 1\n\nprint(\"BIENVENIDO AL JUEGO DE ADIVINAR\")\nintento = int (input (\"Dime un numero del 1 al 99: \"))\nif intento < 0 or intento > 99:\n print(\"Limitate a seguir las normas\")\n intento = int (input (\"Dime un numero del 1 al 99: \"))\nwhile numero != intento:\n if numero > intento:\n print(\"muy pequeño\")\n intento = int (input (\"Dime un numero del 1 al 99: \"))\n elif numero < intento:\n print(\"demasiado grande\")\n intento = int (input (\"Dime un numero del 1 al 99: \"))\n\n if intento == numero:\n print(\"FELICIDADES CAMPEÓN\")\n \n contador += 1\n\nif contador < 5:\n print (\"Has usado \",contador, \" intentos\" ) \nelse:\n print (\"Has usado \", contador, \"intentos, necesitas mejorar\")\n","repo_name":"Germiprogramer/EL-JUEGO-DE-ADIVINAR","sub_path":"juegodeadivinar.py","file_name":"juegodeadivinar.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74174552375","text":"import unittest\n\nfrom core.portfolio.tickers import Tickers\nfrom scripts.constants.paths import TICKER_DATA_DIR, TICKER_DETAILS_PATH\n\n\nclass TestTickers(unittest.TestCase):\n tickers = Tickers(TICKER_DETAILS_PATH, TICKER_DATA_DIR)\n\n def test_tickers_init(self):\n tickers = Tickers(TICKER_DETAILS_PATH, TICKER_DATA_DIR)\n\n self.assertIsNotNone(tickers)\n\n def test_tickers_update_ticker(self):\n tickers = Tickers(TICKER_DETAILS_PATH, TICKER_DATA_DIR)\n tickers.update_tickers_data()\n\n def test_add_ticker(self):\n tickers = Tickers(TICKER_DETAILS_PATH, TICKER_DATA_DIR)\n\n ticker_id = 'FET-EUR'\n ticker_name = 'Fetch-AI'\n\n tickers.add_ticker(ticker_id=ticker_id,\n name=ticker_name,\n isin=None,\n instrument='Crypto',\n risk=7,\n fee=0.0)\n\n def test_get_tickers_df(self):\n features = None\n start_date = None\n\n ticker_dict = self.tickers.get_tickers_dict(features, start_date)\n\n self.assertIsNotNone(ticker_dict)\n\n def test_get_tickers_return_df(self):\n features = None\n start_date = None\n freq = 'M'\n\n returns_dict = self.tickers.get_tickers_return_dict(features, start_date, freq)\n\n self.assertIsNotNone(returns_dict)\n\n def test_get_sharpe_ratio(self):\n # start_date = '2021'\n features = None\n start_date = None\n freq = 'M'\n periods_per_year = 12\n\n sharpe_ratios = self.tickers.get_sharpe_ratios(start_date=start_date,\n freq=freq,\n periods_per_year=periods_per_year,\n features=features,\n )\n\n self.assertIsNotNone(sharpe_ratios)\n\n def test_get_tickers_volatility(self):\n features = None\n start_date = None\n freq = 'M'\n periods_per_year = 12\n\n volatility = self.tickers.get_tickers_volatility(start_date=start_date,\n freq=freq,\n features=features,\n )\n\n self.assertIsNotNone(volatility)\n\n def test_get_tickers_return(self):\n features = None\n start_date = None\n freq = 'M'\n periods_per_year = 12\n\n port_return = self.tickers.get_tickers_return(start_date=start_date,\n freq=freq,\n periods_per_year=periods_per_year,\n features=features,\n )\n\n self.assertIsNotNone(port_return)\n\n def test_plot_efficient_frontier(self):\n start_date = None\n points = 30\n features = ['AIAI.MI', 'BATT.MI', 'USPY.DE', 'SWDA.MI',\n 'ESPO.MI']\n # freq = 'D'\n # periods_per_years = 252\n freq = 'W'\n periods_per_years = 52\n freq = 'M'\n periods_per_years = 12\n features = None\n\n ef, er, cov = self.tickers.plot_efficient_frontier(points,\n features=features,\n freq=freq,\n periods_per_year=periods_per_years,\n start_date=start_date)\n\n self.assertIsNotNone(ef)\n\n def test_get_max_sharpe_ratio(self):\n start_date = '2021'\n\n msr, vol, ret = self.tickers.get_max_sharpe_ratio(start_date=start_date)\n self.tickers.plot_efficient_frontier(20)\n self.assertIsNotNone(msr)\n\n def test_get_portfolio_min_vol(self):\n start_date = None\n\n result = self.tickers.get_portfolio_min_vol(start_date=start_date)\n self.tickers.plot_efficient_frontier(20)\n self.assertIsNotNone(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"daniele21/portfolio_analysis","sub_path":"tests/tickers.py","file_name":"tickers.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28559934935","text":"\n# coding: utf-8\n\n# # Practice Assignment: Understanding Distributions Through Sampling\n# \n# ** *This assignment is optional, and I encourage you to share your solutions with me and your peers in the discussion forums!* **\n# \n# \n# To complete this assignment, create a code cell that:\n# * Creates a number of subplots using the `pyplot subplots` or `matplotlib gridspec` functionality.\n# * Creates an animation, pulling between 100 and 1000 samples from each of the random variables (`x1`, `x2`, `x3`, `x4`) for each plot and plotting this as we did in the lecture on animation.\n# * **Bonus:** Go above and beyond and \"wow\" your classmates (and me!) by looking into matplotlib widgets and adding a widget which allows for parameterization of the distributions behind the sampling animations.\n# \n# \n# Tips:\n# * Before you start, think about the different ways you can create this visualization to be as interesting and effective as possible.\n# * Take a look at the histograms below to get an idea of what the random variables look like, as well as their positioning with respect to one another. This is just a guide, so be creative in how you lay things out!\n# * Try to keep the length of your animation reasonable (roughly between 10 and 30 seconds).\n\n# In[3]:\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nget_ipython().magic('matplotlib notebook')\n\n# generate 4 random variables from the random, gamma, exponential, and uniform distributions\nx1 = np.random.normal(-2.5, 1, 10000)\nx2 = np.random.gamma(2, 1.5, 10000)\nx3 = np.random.exponential(2, 10000)+7\nx4 = np.random.uniform(14,20, 10000)\n\n# plot the histograms\nplt.figure(figsize=(9,3))\nplt.hist(x1, normed=True, bins=20, alpha=0.5)\nplt.hist(x2, normed=True, bins=20, alpha=0.5)\nplt.hist(x3, normed=True, bins=20, alpha=0.5)\nplt.hist(x4, normed=True, bins=20, alpha=0.5);\nplt.axis([-7,21,0,0.6])\n\nplt.text(x1.mean()-1.5, 0.5, 'x1\\nNormal')\nplt.text(x2.mean()-1.5, 0.5, 'x2\\nGamma')\nplt.text(x3.mean()-1.5, 0.5, 'x3\\nExponential')\nplt.text(x4.mean()-1.5, 0.5, 'x4\\nUniform')\n\n\n# In[4]:\n\nimport matplotlib.animation as animation\n\n\n# In[7]:\n\nx = [x1, x2, x3, x4]\n\n\n# In[8]:\n\naxis1 = [-7.5, 2.5, 0, 0.6]\naxis2 = [0, 10, 0, 0.6]\naxis3 = [7, 17, 0, 0.6]\naxis4 = [12, 22, 0, 0.6]\naxis = [axis1, axis2, axis3, axis4]\n\n\n# In[9]:\n\ntitles = ['x1 Normal', 'x2 Gamma', 'x3 Exponential', 'Normed Frequency']\n\n\n# In[10]:\n\nbins1 = np.arange(-7.5, 2.5, 0.2)\nbins2 = np.arange(0, 10, 0.2)\nbins3 = np.arange(7, 17, 0.2)\nbins4 = np.arange(12, 22, 0.2)\nbins = [bins1, bins2, bins3, bins4]\n\n\n# In[11]:\n\nanno_x = [-1, 6.5, 13.5, 18.5]\n\n\n# In[12]:\n\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharey = True)\nax = [ax1, ax2, ax3, ax4]\n\n\n# In[13]:\n\ndef update(curr):\n if curr == 100:\n a.event_source.stop()\n for i in range(len(ax)):\n ax[i].cla()\n ax[i].hist(x[i][:100*curr], normed = True, bins = bins[i])\n ax[i].axis(axis[i])\n ax[i].set_title(titles[i])\n ax[i].set_ylabel('Normed Frequency')\n ax[i].set_xlabel('Value')\n ax[i].annotate('n = {}'.format(100*curr), [anno_x[i], 0.5])\n plt.tight_layout()\na = animation.FuncAnimation(fig, update, interval = 100)\n\n","repo_name":"milog17/michigan17","sub_path":"Applied-Plotting-Charting-And-Data-Representation-in-Python/week3/UnderstandingDistributionsThroughSampling.py","file_name":"UnderstandingDistributionsThroughSampling.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"39778679458","text":"import jinja2\nimport datetime\nimport re\nfrom hurry.filesize import size, alternative\n\n__all__ = ['env', 'sanitize_name']\n\n\ndef filesize_format(size_bytes):\n if not str(size_bytes).isdigit():\n return 0\n\n return size(int(size_bytes), system=alternative)\n\n\ndef person_letters(display_name: str) -> str:\n if not display_name:\n return \"Person Not Found\"\n\n output = \"\"\n for name in display_name.upper().split():\n output += name[0]\n\n if len(output) > 2:\n return f\"{output[0]}{output[-1]}\"\n\n return output\n\n\ndef datetime_format(date: datetime, format: str) -> str:\n if not date:\n return str(date)\n\n return date.strftime(format)\n\n\ndef sanitize_name(text: str) -> str:\n text = str(text).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.]', '', text)\n\n\ndef format_msg(text: str, thread: bool) -> str:\n spaces = \" \" * 2\n if thread:\n spaces *= 3\n\n if isinstance(text, str) and \"\\n\" in text:\n text = re.sub(\"\\\\n\", f\"\\\\n{spaces}\", text)\n text = f\"{spaces}{text}\"\n return f\"\\n{text}\"\n\n return text\n\n\nenv = jinja2.Environment(\n autoescape=False,\n trim_blocks=True,\n lstrip_blocks=True,\n keep_trailing_newline=True,\n loader=jinja2.PackageLoader('webexteamsarchiver', 'templates')\n)\n\nenv.filters['filesize_format'] = filesize_format\nenv.filters['person_letters'] = person_letters\nenv.filters['datetime_format'] = datetime_format\nenv.filters['sanitize_name'] = sanitize_name\nenv.filters['format_msg'] = format_msg\n","repo_name":"CiscoDevNet/webex-teams-archiver","sub_path":"webexteamsarchiver/jinja_env.py","file_name":"jinja_env.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"22"} +{"seq_id":"28849276621","text":"import numpy as np\nimport pandas as pd\n# we can concat for each axis -> columns and rows\n# rows for identical columns and columns for identical rows\n\ndf_one = {\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n}\n\ndf_two = {\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n}\n\none = pd.DataFrame(df_one)\ntwo = pd.DataFrame(df_two)\n\nprint(one, two)\n\nprint(pd.concat([one, two], axis=1)) # like left join, rows concat\n\nprint(pd.concat([one, two], axis=0)) # columns concat\n\n# can rename columns\ntwo.columns = one.columns\nnew_df = pd.concat([one, two], axis=0)\nprint(new_df) # columns concat\n\nnew_df.index = range(len(new_df))\nprint(new_df)\n","repo_name":"Emilianissimo/ml-learning-course","sub_path":"pandas_part/concat_df.py","file_name":"concat_df.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35234793155","text":"from copy import deepcopy\nimport time\n\nstart_time = time.time()\n\nwith open('input.txt','rt') as file:\n instructions = file.read().split('\\n')\n\n\ndef convertp2(num,mask):\n num_bin = list(bin(num).lstrip('0b').zfill(36))\n l = list()\n for idx,char in enumerate(mask):\n if char == '0':\n continue\n elif char == '1':\n num_bin[idx] = '1'\n elif char == 'X':\n num_bin[idx] = 'X'\n l.append(num_bin)\n for idx,char in enumerate(num_bin):\n if char == 'X':\n l_copy = deepcopy(l)\n for i in l_copy:\n i[idx] = '1'\n for i in l:\n i[idx] = '0'\n l.extend(l_copy)\n for x,y in enumerate(l):\n l[x] = int(\"\".join(y),2)\n return l\n\n\ndef main():\n data = {}\n mask = ''\n for i in instructions:\n if 'mask' in i:\n mask = i.lstrip('mask = ')\n elif 'mem' in i:\n start = i.find('[') + 1\n end = i.find(']')\n temp_mask = \"\"\n for x in range(start, end):\n temp_mask += i[x]\n c_num = convertp2(int(temp_mask),mask)\n for x in c_num:\n data['mem['+str(x)+']'] = int(i.partition('=')[2])\n return data\n\nprint('The answer to day 14, part 2 is: {}'.format(sum(main().values())))\n\nstop_time = time.time()\nprint('Code took {} seconds to complete.'.format(stop_time - start_time))\n\n#print('The solution to day 14, part 1 is: {}'.format(sum(main().values())))","repo_name":"kenkitts/advent_of_code","sub_path":"day14/day14p2.py","file_name":"day14p2.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40686110614","text":"'''from orm import db'''\nimport datetime\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.util import join\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(80), unique=True, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n\n def __repr__(self):\n return '' % self.username\n\n\nclass Post(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n body = db.Column(db.Text, nullable=False)\n pub_date = db.Column(db.DateTime, nullable=False,\n default=datetime.datetime.utcnow)\n\n category_id = db.Column(db.Integer, db.ForeignKey('category.id'),\n nullable=False)\n category = db.relationship('Category',\n backref=db.backref('posts', lazy=True))\n\n def __repr__(self):\n return '' % self.title\n\n\nclass Category(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), nullable=False)\n\n def __repr__(self):\n return '' % self.name\ndef p_wrap(line):\n return f\"

    {line}

    \"\n\n\ndef li_wrap(item):\n return f\"
  • {item}
  • \"\n\n\ndef db_init():\n # if the database doesn't exist, create it an all associated entities\n db.drop_all()\n db.create_all()\n \n # Create Users\n admin = User(username='admin', email='admin@awesome.xyz')\n guest = User(username='guest', email='guest@awesome.xyz')\n\n db.session.add(admin)\n db.session.add(guest)\n\n # Create Posts and Categories\n cat_py = Category(name=\"Python\")\n\n # this is a very interesting aspect of sqlalchemy: the relationship between category and post\n # means that objects associated with a category through a relationships are also added\n # when the category is added/committe\n Post(title=\"Hello Python!\", body=\"Python is greate\", category=cat_py)\n\n # here is a direct addition to the category\n my_post = Post(title=\"My life with snakes\", body=\"Good times\")\n\n # the posts list is a back reference/query to any posts related to this category\n cat_py.posts.append(my_post)\n\n # add all to the database by adding the category\n db.session.add(cat_py)\n\n # Save it all\n db.session.commit() \n\n\nif __name__ == '__main__':\n db_init()","repo_name":"winzadot/barkyservice","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23781238618","text":"from typing import Any, Dict, Union # noqa: F401\n\nimport os\nfrom pathlib import Path\n\nimport schematics.types as sty\nfrom schematics.models import Model\nfrom schematics.exceptions import CompoundError, DataError, ValidationError\n\n\n# VALIDATORS--------------------------------------------------------------------\nCOLOR_SCHEME = dict(\n dark1='#040404',\n dark2='#141414',\n bg='#181818',\n grey1='#242424',\n grey2='#444444',\n light1='#A4A4A4',\n light2='#F4F4F4',\n dialog1='#444459',\n dialog2='#5D5D7A',\n red1='#F77E70',\n red2='#DE958E',\n orange1='#EB9E58',\n orange2='#EBB483',\n yellow1='#E8EA7E',\n yellow2='#E9EABE',\n green1='#8BD155',\n green2='#A0D17B',\n cyan1='#7EC4CF',\n cyan2='#B6ECF3',\n blue1='#5F95DE',\n blue2='#93B6E6',\n purple1='#C98FDE',\n purple2='#AC92DE',\n) # type: Dict[str, str]\n\n\ndef is_color_scheme(item):\n # type: (dict) -> None\n '''\n Determines if given dict is a valid color scheme.\n\n Args:\n item (dict): Color scheme dictionary.\n\n Raises:\n ValidationError: If item contains invalid keys.\n '''\n keys = list(COLOR_SCHEME.keys())\n ikeys = list(item.keys())\n\n diff = set(ikeys).difference(keys) # type: Any\n diff = sorted(list(diff))\n if len(diff) > 0:\n msg = f'Invalid color scheme keys: {diff}.'\n raise ValidationError(msg)\n\n\ndef is_csv(filepath):\n # type: (Union[str, Path]) -> None\n '''\n Determines if given filepath is a CSV.\n\n Args:\n filepath (str or Path): Filepath.\n\n Raises:\n ValidationError: If filepath is not a CSV.\n '''\n filepath = Path(filepath).as_posix()\n ext = os.path.splitext(filepath)[-1][1:].lower()\n if not os.path.isfile(filepath) or ext != 'csv':\n msg = f'{filepath} is not a valid CSV file.'\n raise ValidationError(msg)\n\n\ndef is_comparator(item):\n # type: (str) -> None\n '''\n Ensures that given string is a legal comparator.\n\n Legal comparators:\n\n * ==\n * !=\n * >\n * >=\n * <\n * <=\n * ~\n * !~\n\n Args:\n item (str): String to be tested.\n\n Raises:\n ValidationError: If item is not a legal comparator.\n '''\n comps = ['==', '!=', '>', '>=', '<', '<=', '~', '!~']\n if item not in comps:\n msg = f'{item} is not a legal comparator. Legal comparators: {comps}.'\n raise ValidationError(msg)\n\n\ndef is_metric(item):\n # type: (str) -> None\n '''\n Ensures that given string is a legal metric.\n\n Legal metrics:\n\n * max\n * mean\n * min\n * std\n * sum\n * var\n * count\n\n Args:\n item (str): String to be tested.\n\n Raises:\n ValidationError: If item is not a legal metric.\n '''\n metrics = ['max', 'mean', 'min', 'std', 'sum', 'var', 'count']\n if item not in metrics:\n msg = f'{item} is not a legal metric. Legal metrics: {metrics}.'\n raise ValidationError(msg)\n\n\ndef is_plot_kind(item):\n '''\n Ensures item is a kind of plotly plot.\n\n Args:\n item (str): Kind of plot.\n\n Raises:\n ValidationError: If item is a plot kind.\n '''\n kinds = [\n 'area', 'bar', 'barh', 'line', 'lines', 'ratio', 'scatter', 'spread'\n ]\n if item not in kinds:\n msg = f'{item} is not a legal plot kind. Legal kinds: {kinds}.'\n raise ValidationError(msg)\n\n\ndef is_bar_mode(item):\n '''\n Ensures mode is a legal bar mode.\n\n Args:\n item (str): Mode.\n\n Raises:\n ValidationError: If mode is not a legal bar mode.\n '''\n modes = ['stack', 'group', 'overlay']\n if item not in modes:\n msg = f'{item} is not a legal bar mode. Legal bar modes: {modes}.'\n raise ValidationError(msg)\n\n\ndef is_percentage(number):\n '''\n Ensures number is between 0 and 100.\n\n Args:\n number (float): Number to be tested.\n\n Raises:\n ValidationError: If number is not between 0 and 100.\n '''\n if number < 0 or number > 100:\n msg = f'{number} is not a legal percentage. '\n msg += f'{number} is not between 0 and 100.'\n raise ValidationError(msg)\n\n\n# SCHEMATICS--------------------------------------------------------------------\nclass FilterAction(Model):\n '''\n Schematic for filter actions.\n\n Attributes:\n column (str): Column name.\n comparator (str): String representation of comparator.\n value (object): Value to be compared.\n '''\n column = sty.StringType(required=True)\n comparator = sty.StringType(required=True, validators=[is_comparator])\n value = sty.BaseType(required=True)\n\n\nclass GroupAction(Model):\n '''\n Schematic for group actions.\n\n Attributes:\n columns (str or list[str]): Columns to group data by.\n metric (str): Aggregation metric.\n datetime_column (str, optinal): Datetime column for time grouping.\n Default: date.\n '''\n columns = sty.ListType(sty.StringType(), required=True)\n metric = sty.StringType(required=True, validators=[is_metric])\n datetime_column = sty.StringType(required=True, default='date')\n\n\nclass PivotAction(Model):\n '''\n Schematic for group actions.\n\n Attributes:\n columns (list[str]): Columns whose unique values become separate traces\n within a plot.\n values (list[str], optional): Columns whose values become the values\n within each trace of a plot. Default: [].\n index (str, optional): Column whose values become the y axis values of a\n plot. Default: None.\n '''\n columns = sty.ListType(sty.StringType(), required=True)\n values = sty.ListType(sty.StringType(), required=True, default=[])\n index = sty.StringType(required=True, default=None)\n\n\nclass ConformAction(Model):\n '''\n Schematic for conform actions.\n\n Attributes:\n action (str): Must be 'overwrite' or 'substitute'.\n source_column (str): Source column to be matched.\n target_column (str): Target column to be overwritten.\n mapping (dict): Mapping of matched key in source column with replacement\n value in target column.\n '''\n action = sty.StringType(required=True, choices=['overwrite', 'substitute'])\n source_column = sty.StringType(required=True)\n target_column = sty.StringType(required=True)\n mapping = sty.DictType(\n sty.UnionType(\n types=[sty.FloatType, sty.IntType, sty.BooleanType, sty.StringType]\n ),\n required=True,\n )\n\n def validate(self):\n '''\n Validates the state of the model. If the data is invalid, raises a\n DataError with error messages. Also, performs a stricter validation on\n mapping if action is substitute.\n\n Args:\n partial (bool, optional): Allow partial data to validate.\n Essentially drops the required=True settings from field\n definitions. Default: False.\n convert (bool, optional): Controls whether to perform import\n conversion before validating. Can be turned off to skip an\n unnecessary conversion step if all values are known to have the\n right datatypes (e.g., when validating immediately after the\n initial import). Default: True.\n\n Raises:\n DataError: If data is invalid.\n '''\n super().validate()\n if self.action == 'substitute':\n try:\n sty.DictType(sty.StringType(), required=True)\\\n .validate(self.mapping)\n except CompoundError as e:\n raise DataError(e.to_primitive())\n\n\nclass FigureItem(Model):\n '''\n Schematic for a plot figure.\n\n Attributes:\n kind (str): Type of plot. Default: bar.\n color_scheme (dict[str, str]): Color scheme for plot.\n Default: {'grey1': '#181818', 'bg': '#242424'}.\n x_axis (str): Column to use as x axis: Default: None.\n y_axis (str): Column to use as y axis: Default: None.\n title (str): Title of plot. Default: None.\n x_title (str): Title of plot x axis. Default: None.\n y_title (str): Title of plot y axis. Default: None.\n bins (int): Number of bins if histogram. Default: 50.\n bar_mode (str): How bars in bar graph are presented. Default: stack.\n '''\n kind = sty.StringType(default='bar', validators=[is_plot_kind])\n color_scheme = sty.DictType(\n sty.StringType(), default=dict(grey1='#181818', bg='#242424')\n )\n x_axis = sty.StringType(default=None)\n y_axis = sty.StringType(default=None)\n title = sty.StringType(default=None)\n x_title = sty.StringType(default=None)\n y_title = sty.StringType(default=None)\n bins = sty.IntType(default=50)\n bar_mode = sty.StringType(validators=[is_bar_mode], default='stack')\n\n\nclass PlotItem(Model):\n '''\n Schematic for a plot.\n\n Attributes:\n filters (list[dict]): How data is filtered. Default: [].\n group (dict): How data is grouped. Default: {}.\n pivot (dict): How data is pivoted. Default: {}.\n figure (dict): Plot figure details. Default: {}.\n min_width (float): Minimum width of plot. Default: 0.25.\n '''\n filters = sty.ListType(sty.ModelType(FilterAction), default=[])\n group = sty.ModelType(GroupAction)\n pivot = sty.ModelType(PivotAction)\n figure = sty.ModelType(FigureItem, default={})\n min_width = sty.FloatType(default=25, validators=[is_percentage])\n\n\nclass Config(Model):\n '''\n Configuration of database.\n\n Attributes:\n data_path (str): Path to CSV file.\n columns (list[str]): Columns to be displayed in data.\n default_query (str): Placeholder SQL query string.\n font_family (str): Font family.\n color_scheme (dict): Color scheme.\n conform (lit[dict]): List of conform actions.\n plots (list[dict]): List of plots.\n '''\n data_path = sty.StringType(required=True, validators=[is_csv])\n columns = sty.ListType(sty.StringType, default=[])\n default_query = sty.StringType(default='select * from data')\n font_family = sty.StringType(default='sans-serif, \"sans serif\"')\n color_scheme = sty.DictType(\n sty.StringType(), validators=[is_color_scheme], default=COLOR_SCHEME\n )\n conform = sty.ListType(sty.ModelType(ConformAction), default=[])\n plots = sty.ListType(sty.ModelType(PlotItem), default=[])\n","repo_name":"theNewFlesh/shekels","sub_path":"python/shekels/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"75572325175","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport numpy as np\nimport datetime as dt\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom random import shuffle\nimport data\nimport pandas as pd\n\nclass NotEnoughData(Exception):\n \"\"\"\n Custom exception to report missing data from the df\n \"\"\"\n pass\n\n\nNhistory = 30\nNnext = 2\ncolumns = ['eur2usd','usd2eur']\nfmodel = 'eur2usd.h5'\nfdata = 'data/ecb.dat'\n\n## Read data\ndf = data.read_data(fdata)\ndates = df.index\n# Fix missing days\nidx = pd.date_range(dates[0], dates[-1], freq = \"D\")\ndf = df.reindex(idx,method='nearest') #XXX this should be interpolation\ndf.index = pd.DatetimeIndex(df.index)\ndf = df[columns]\nprint(df.describe())\n\n# Normalize data\nfrom sklearn.preprocessing import MinMaxScaler\nfrom copy import deepcopy\ndf_scaled = deepcopy(df)\nscaler = MinMaxScaler()\nscaled_values = scaler.fit_transform(df) \ndf_scaled.loc[:,:] = scaled_values\n\n\ndef get_inp_out(df,date,Ninp,Nout,columns):\n \"\"\"\n df: dataframe containing all the information\n date: date to build the input-output pair\n Ninp: Number of points before date for the input\n Nout: Number of points to use as output\n \"\"\"\n day = dt.timedelta(days=1)\n inp = df.loc[date-(Ninp-1)*day:date][columns]\n out = df.loc[date+day:date+Nout*day][columns]\n if len(inp) != Ninp or len(out) != Nout:\n raise NotEnoughData\n else: return inp.values, out.values\n\n\ndata_dates,X,Y = [],[],[]\nfor i,data in df_scaled.iterrows():\n try: x,y = get_inp_out(df_scaled,i,Nhistory,Nnext,columns)\n except: continue\n X.append(x)\n Y.append(y)\n data_dates.append(i)\n\n\n## Save last element for testing\nfinal_date = data_dates[-1]\nfinal_X = X[-1]\nfinal_Y = Y[-1]\n\ndata_dates = data_dates[:-1]\nX = X[:-1]\nY = Y[:-1]\n\nprint(data_dates[0],'<-->',data_dates[-1])\n\nx_data = np.array(X)\ny_data = np.array(Y)\n\ninds = list(range(len(x_data)))\nshuffle(inds)\nvalidation_split = 0.1\ntest = np.array(inds[:int(validation_split*len(inds))])\ntrain = np.array(inds[int(validation_split*len(inds)):])\n\n# x_train = np.expand_dims(x_data[train], axis=2)\nx_train = x_data[train]\ny_train = y_data[train] #np.expand_dims(y_data[train], axis=1)\n# x_test = np.expand_dims(x_data[test], axis=2)\nx_test = x_data[test]\ny_test = y_data[test] #np.expand_dims(y_data[test], axis=1)\n\n\nprint('*************')\nprint('All data:',x_data.shape, y_data.shape)\nprint('Training:',x_train.shape, y_train.shape)\nprint('Testing :',x_test.shape, y_test.shape)\nprint('*************')\n\n#####\n# Tensorflow\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import LSTM, Dense, Reshape\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.callbacks import Callback, EarlyStopping\n\nclass ExternalStop(Callback):\n def on_epoch_end(self, epoch, logs={}):\n if os.path.isfile('STOP'):\n print('\\n\\nExternal stop')\n print(epoch)\n self.model.stop_training = True\n\n\n\ntry:\n model = load_model(fmodel)\n print(f'Loaded model: {fmodel}')\n model.summary()\nexcept OSError:\n print('New model')\n model = Sequential()\n model.add( LSTM(Nhistory, input_shape=x_train.shape[1:],\n activation='tanh',\n recurrent_activation='tanh') )\n model.add( Dense(40, activation='tanh') )\n model.add( Dense(np.prod(y_train.shape[1:]), activation='tanh') )\n model.add( Reshape(y_train.shape[1:]) )\n model.compile(optimizer='Adam', loss='mae', metrics=['mse'])\n\n model.summary()\n\n # Callbacks\n Stopper = ExternalStop()\n Early = EarlyStopping(min_delta=1e-4, patience=90,verbose=2,\n restore_best_weights=True)\n hist = model.fit(x_train,y_train, epochs=900,\n # steps_per_epoch=797,\n validation_data=(x_test,y_test),\n verbose=1,\n callbacks=[Stopper,Early] )\n\n if True:\n metrics,values = [],[]\n for k,v in hist.history.items():\n metrics.append(k)\n values.append(v)\n\n fig = plt.figure() #figsize=(20,10))\n gs = gridspec.GridSpec(len(metrics), 1)\n fig.subplots_adjust(wspace=0.,hspace=0.15)\n axs = []\n for i in range(len(metrics)):\n if i == 0: axs.append(plt.subplot(gs[i])) # Original plot\n else: axs.append( plt.subplot(gs[i], sharex=axs[0]) ) # dists\n\n for i in range(len(metrics)):\n ax = axs[i]\n label = metrics[i]\n val = values[i]\n ax.plot(val, label=label)\n ax.legend()\n ax.set_ylim(ymin=0)\n print('\\n\\nTraining done.')\n\nprint('Training data range:',data_dates[0],'<-->',data_dates[-1])\nY_pred = model.predict( np.expand_dims(final_X,axis=0) )[0]\nY_pred = scaler.inverse_transform(Y_pred)\nfinal_Y = scaler.inverse_transform(final_Y)\n\nprint(final_date.date(),'',' '.join([*columns]))\nprint(' pred (real) pred (real)')\nXx,Yy1,Yy2 = [],[],[]\nfor i in range(final_Y.shape[0]):\n txt = str((final_date+(i+1)*dt.timedelta(days=1)).date())\n for y1,y2 in zip(Y_pred[i],final_Y[i]):\n txt += f' {y1:.3f}({y2:.3f})'\n Xx.append((final_date+(i+1)*dt.timedelta(days=1)).date())\n Yy1.append(y1) # real\n Yy2.append(y2) # prediction\n # txt += '\\n'\n print(txt)\n\nfig, ax = plt.subplots()\n# Column 1\nax.plot(df.index,df['eur2usd'])\nax.scatter(Xx,final_Y[:,0])\nax.scatter(Xx,Y_pred[:,0])\n# Column 2\nax.plot(df.index,df['usd2eur'])\nax.scatter(Xx,final_Y[:,1])\nax.scatter(Xx,Y_pred[:,1])\nax.axvline(final_date,color='k',ls='--')\nplt.show()\nprint('---------')\nmodel.save(fmodel)\n","repo_name":"B4dWo1f/TF_alicante_talk","sub_path":"money/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35010337042","text":"from datetime import datetime\nfrom apread.apreader import APReader\nimport os\nimport multiprocessing as mp\n\nif __name__ == '__main__':\n pool = None # default value, do not change\n\n # find current directory\n dirname = os.path.dirname(__file__)\n file = os.path.join(dirname, 'Example_Catman_Data.bin')\n\n outdir = os.path.join(dirname, 'output')\n\n # specify wether to use parallel loading of channel data\n loadInParallel = False\n speedTest = False\n \n if loadInParallel:\n pool = mp.Pool() \n \n # t0 = datetime.now()\n # # create a reader\n # for i in range(1,1000):\n # reader = APReader(file, parallelPool=pool)\n\n # t1 = datetime.now()\n \n # print(t1-t0)\n\n t0 = datetime.now()\n # create a reader\n for i in range(1,1000 if speedTest else 1):\n reader = APReader(file, parallelPool=pool)\n\n t1 = datetime.now()\n \n print(t1-t0)\n\n if loadInParallel:\n pool.close()\n pool.join()\n ## print all single channels\n #for channel in reader.Channels: \n # print (f\"{channel.Name}: {len(channel.data)} Entries\")\n\n for group in reader.Groups:\n print(\"--------------\")\n print (f\"Group ({group.ChannelX.Name})\")\n\n for channel in group.Channels:\n print (f\"\\t{channel.Name:20}: {len(channel.data)} Entries\") \n\n print(\"--------------\")\n \n \n reader.plot()\n\ndef channel_dates():\n reader = APReader(\"file.bin\")\n \n for channel in reader.Channels:\n print(channel.date) # 12.1.2023: 18:08 ...\n\ndef test2():\n reader = APReader(\"file.bin\")\n\n for group in reader.Groups:\n # alle channels in der gruppe\n channels = group.Channels\n # X-Channel der Gruppe (Zeit)\n channelX = group.ChannelX\n # alle Y channels, also Messreihen der Gruppe\n channelsY = group.ChannelsY\n \n # data in z.B. der ersten Messreihe\n channel1 = channelsY[0]\n # das ist das numpy-array mit den tatsächlichen Werten drin\n data = channel1.data\n print(data)\n \n # ALTERNATIV (alle channels in der bin Datei):\n for channel in reader.Channels:\n print(channel.data)\n ","repo_name":"leonbohmann/APReader","sub_path":"test/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"23950509471","text":"import re\nimport os\nimport argparse\n\n\ndef solve(file, year, is_newer=True, is_older=False):\n file_path = os.path.join(os.getcwd(), file)\n pattern = r\"^(?:3[0-1]|[1-2][0-9]|0[1-9])\\/\" \\\n r\"(?:1[0-2]|0[1-9])\\/\" \\\n r\"(?P\\d{4}) \" \\\n r\"(?:2[0-4]|[0-1][0-9]):[0-5][0-9]:[0-5][0-9]$\"\n dates = list()\n with open(file_path, encoding=\"utf8\") as f:\n data = f.read().replace(\"; \", \"\\n\")\n refs = re.finditer(pattern, data, re.MULTILINE)\n for i in refs:\n if is_newer and is_older: # after and before the year\n if int(i.group(\"year\")) > int(year) or int(i.group(\"year\")) < int(year):\n dates.append(i[0])\n if is_newer: # after the year\n if int(i.group(\"year\")) > int(year):\n dates.append(i[0])\n else: # before the year\n if int(i.group(\"year\")) < int(year):\n dates.append(i[0])\n if refs:\n return list(set(dates))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"file\", help=\"Txt file with dates relative path\")\n parser.add_argument(\"-y\", \"--year\", type=int, default=2012, help=\"Year relative to which to search\")\n parser.add_argument(\"-n\", \"--is_newer\", type=bool, default=True, help=\"Print newer years\")\n parser.add_argument(\"-o\", \"--is_older\", type=bool, default=False, help=\"Print older years\")\n\n args = parser.parse_args()\n\n print(solve(args.file, args.year, args.is_newer, args.is_older))\n","repo_name":"GentleSkiddie/FileSystem_Regex_Labs","sub_path":"Labs/2/12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11641416567","text":"\r\nimport pygame, sys, pygame.freetype\r\n \r\n\r\nmainClock = pygame.time.Clock()\r\nfrom pygame.locals import *\r\npygame.init()\r\npygame.display.set_caption('Tower Defense')\r\ndimensiones = (920, 512)\r\nscreen = pygame.display.set_mode(dimensiones)\r\n\r\nfont = pygame.font.SysFont(\"Bahnschrift SemiLight SemiConde\", 30)\r\n\r\ndef draw_text(text, font, color, surface, x, y):\r\n \r\n textobj = font.render(text, 1, color)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)\r\n \r\nclick = False\r\n \r\ndef menu_principal():\r\n while True:\r\n \r\n screen.fill((0, 0, 0))\r\n \"\"\"screen.blit(fondo, (0, 0))\"\"\"\r\n draw_text('Tower Defense', font, (255, 255, 255), screen, 400, 20)\r\n draw_text('Menu Principal', font, (255, 255, 255), screen, 50, 50)\r\n \r\n mx, my = pygame.mouse.get_pos()\r\n \r\n button_1 = pygame.Rect(50, 100, 150, 50)\r\n button_2 = pygame.Rect(50, 155, 150, 50)\r\n button_3 = pygame.Rect(30, 250, 150, 30)\r\n if button_1.collidepoint((mx, my)):\r\n if click:\r\n game()\r\n if button_2.collidepoint((mx, my)):\r\n if click:\r\n niveles()\r\n if button_3.collidepoint((mx, my)):\r\n if click:\r\n pygame.quit()\r\n\t\t\r\n pygame.draw.rect(screen, (0, 0, 0), button_1)\r\n pygame.draw.rect(screen, (0, 0, 0), button_2)\r\n pygame.draw.rect(screen, (0, 0, 0), button_3)\r\n \r\n draw_text('Jugar', font, (255, 255, 255), screen, 110, 120)\r\n draw_text('Niveles', font, (255, 255, 255), screen, 110, 170)\r\n draw_text('Salir', font, (255, 255, 255), screen, 110, 220)\r\n \r\n click = False\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n click = True\r\n \r\n pygame.display.update()\r\n mainClock.tick(60)\r\n \r\ndef jugar():\r\n running = True\r\n while running:\r\n screen.fill((0,0,0))\r\n \r\n draw_text('Jugar', font, (255, 255, 255), screen, 20, 20)\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n \r\n pygame.display.update()\r\n mainClock.tick(60)\r\n \r\ndef niveles():\r\n running = True\r\n while running:\r\n screen.fill((0,0,0))\r\n \r\n draw_text('Niveles', font, (255, 255, 255), screen, 20, 20)\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == KEYDOWN:\r\n if event.key == K_ESCAPE:\r\n running = False\r\n \r\n pygame.display.update()\r\n mainClock.tick(60)\r\n \r\ndef salir():\r\n\timport sys\r\n\tsys.exit(0)\r\n\t \r\nif __name__ == '__main__':\r\n \r\n salir = False\r\n opciones = [\r\n (\"Jugar\", jugar),\r\n (\"Niveles\", niveles),\r\n (\"Salir\", salir)\r\n ]\r\n\r\n pygame.font.init()\r\n screen = pygame.display.set_mode((320, 240))\r\n fondo = pygame.image.load(\"fondomenu.jpg\").convert()\r\n menu_principal = Menu(niveles)\r\n\r\n while not salir:\r\n\r\n for e in pygame.event.get():\r\n if e.type == QUIT:\r\n salir = True\r\n\r\n screen.blit(fondo, (0, 0))\r\n menu.actualizar()\r\n menu.imprimir(screen)\r\n\r\n pygame.display.flip()\r\n pygame.time.delay(10)\r\n\r\n \r\nmenu_principal()\r\n","repo_name":"Nicolas810/Juego-Programacion","sub_path":"Juego/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30260224575","text":"import os\nfrom PIL import Image\nimport torch\nimport pickle\nfrom torchvision import transforms\nfrom datasets.masking_generator import RandomMaskingGenerator\n\nfrom timm.data.constants import (\n IMAGENET_DEFAULT_MEAN,\n IMAGENET_DEFAULT_STD,\n IMAGENET_INCEPTION_MEAN,\n IMAGENET_INCEPTION_STD,\n)\nfrom timm.data import create_transform\nimport numpy as np\n\n# -----------------------dataset------------------------ #\nROOT_DIR = \"data/datasets\"\n\n\nclass MiniImagenetDataset(torch.utils.data.Dataset):\n def __init__(self, root_path, split, transform):\n if split == \"train\":\n split_tag = \"train_phase_train\"\n elif split == \"val\":\n split_tag = \"train_phase_val\"\n elif split == \"test\":\n split_tag = \"train_phase_test\"\n elif split == \"meta_val\":\n split_tag = \"val\"\n elif split == \"meta_test\":\n split_tag = \"test\"\n else:\n assert False, \"Dataset: 'split' name is wrong\"\n split_file = \"miniImageNet_category_split_{}.pickle\".format(split_tag)\n with open(os.path.join(root_path, split_file), \"rb\") as f:\n pack = pickle.load(f, encoding=\"latin1\")\n\n data = pack[\"data\"]\n label = pack[\"labels\"]\n data = [Image.fromarray(x) for x in data]\n min_label = min(label)\n label = [x - min_label for x in label]\n self.data = data\n self.label = label\n self.n_classes = max(self.label) + 1\n\n self.transform = transform\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.transform(self.data[i]), self.label[i]\n\n\nclass Cifar100Dataset(torch.utils.data.Dataset):\n def __init__(self, root_path, name, split, transform):\n if split == \"train\":\n split_tag = \"train\"\n elif split == \"meta_val\":\n split_tag = \"val\"\n elif split == \"meta_test\":\n split_tag = \"test\"\n else:\n assert False, \"Dataset: 'split' name is wrong\"\n assert name == \"CIFAR_FS\" or name == \"FC100\"\n split_file = name + \"_{}.pickle\".format(split_tag)\n with open(os.path.join(root_path, split_file), \"rb\") as f:\n pack = pickle.load(f, encoding=\"latin1\")\n\n data = pack[\"data\"]\n labels = pack[\"labels\"]\n\n cur_class = 0\n label2label = {}\n for idx, label in enumerate(labels):\n if label not in label2label:\n label2label[label] = cur_class\n cur_class += 1\n new_labels = []\n for idx, label in enumerate(labels):\n new_labels.append(label2label[label])\n data = [Image.fromarray(x) for x in data]\n\n self.data = data\n self.label = new_labels\n\n self.n_classes = len(set(self.label))\n\n self.transform = transform\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.transform(self.data[i]), self.label[i]\n\n\nclass CubDataset(torch.utils.data.Dataset):\n def __init__(self, root_path, name, split, transform):\n if split == \"train\":\n split_tag = \"train\"\n elif split == \"meta_val\":\n split_tag = \"val\"\n elif split == \"meta_test\":\n split_tag = \"test\"\n else:\n assert False, \"Dataset: 'split' name is wrong\"\n assert name == \"CUB\"\n\n IMAGE_PATH = root_path\n SPLIT_PATH = os.path.join(root_path, \"split/\")\n txt_path = os.path.join(SPLIT_PATH, split_tag + \".csv\")\n\n lines = [x.strip() for x in open(txt_path, \"r\").readlines()][1:]\n\n if split_tag == \"train\":\n lines.pop(5864) # this image file is broken\n\n data = []\n labels = []\n lb = -1\n\n self.wnids = []\n\n for l in lines:\n context = l.split(\",\")\n name = context[0]\n wnid = context[1]\n path = os.path.join(IMAGE_PATH, name)\n if wnid not in self.wnids:\n self.wnids.append(wnid)\n lb += 1\n\n data.append(path)\n labels.append(lb)\n\n cur_class = 0\n label2label = {}\n for idx, label in enumerate(labels):\n if label not in label2label:\n label2label[label] = cur_class\n cur_class += 1\n new_labels = []\n for idx, label in enumerate(labels):\n new_labels.append(label2label[label])\n\n data = [Image.open(path).convert(\"RGB\") for path in data]\n\n self.data = data\n self.label = new_labels\n\n self.n_classes = len(set(self.label))\n\n self.transform = transform\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.transform(self.data[i]), self.label[i]\n\n\nclass TieredDataset(torch.utils.data.Dataset):\n def __init__(self, root_path, name, split, transform):\n if split == \"train\":\n split_tag = \"train\"\n elif split == \"meta_val\":\n split_tag = \"val\"\n elif split == \"meta_test\":\n split_tag = \"test\"\n else:\n assert False, \"Dataset: 'split' name is wrong\"\n assert name == \"tiered\"\n\n THE_PATH = os.path.join(root_path, split_tag)\n\n data = []\n labels = []\n\n folders = [\n os.path.join(THE_PATH, label)\n for label in os.listdir(THE_PATH)\n if os.path.isdir(os.path.join(THE_PATH, label))\n ]\n folders.sort()\n\n for idx in range(len(folders)):\n this_folder = folders[idx]\n this_folder_images = os.listdir(this_folder)\n this_folder_images.sort()\n for image_path in this_folder_images:\n data.append(os.path.join(this_folder, image_path))\n labels.append(idx)\n\n cur_class = 0\n label2label = {}\n for idx, label in enumerate(labels):\n if label not in label2label:\n label2label[label] = cur_class\n cur_class += 1\n new_labels = []\n for idx, label in enumerate(labels):\n new_labels.append(label2label[label])\n\n self.data = data\n self.label = new_labels\n\n self.n_classes = len(set(self.label))\n\n self.transform = transform\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, i):\n return self.transform(Image.open(self.data[i]).convert(\"RGB\")), self.label[i]\n\n\n# sampler for few-shot learning\nclass CategoriesSampler:\n def __init__(self, labels, n_iteration, n_class, n_sample, n_episode=1):\n self.n_iteration = n_iteration\n self.n_class = n_class\n self.n_sample = n_sample\n self.n_episode = n_episode\n\n labels = np.array(labels)\n self.catlocs = []\n for c in range(max(labels) + 1):\n self.catlocs.append(np.argwhere(labels == c).reshape(-1))\n\n def __len__(self):\n return self.n_iteration\n\n def __iter__(self):\n for i_batch in range(self.n_iteration):\n batch = []\n for i_ep in range(self.n_episode):\n episode = []\n classes = np.random.choice(\n len(self.catlocs), self.n_class, replace=False\n )\n for c in classes:\n l = np.random.choice(self.catlocs[c], self.n_sample, replace=False)\n episode.append(torch.from_numpy(l))\n episode = torch.stack(episode) # n_class * n_sample\n batch.append(episode)\n batch = torch.stack(batch) # bs * n_class * n_sample\n yield batch.view(-1)\n\n\n# -------------------------pretrain------------------------------ #\nclass DataAugmentationForMAE(object):\n def __init__(self, args):\n imagenet_default_mean_and_std = args.imagenet_default_mean_and_std\n mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN\n std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD\n\n self.transform = transforms.Compose(\n [\n transforms.RandomResizedCrop(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),\n ]\n )\n\n self.masked_position_generator = RandomMaskingGenerator(\n args.window_size, args.mask_ratio\n )\n\n def __call__(self, image):\n return self.transform(image), self.masked_position_generator()\n\n def __repr__(self):\n repr = \"(DataAugmentationForBEiT,\\n\"\n repr += \" transform = %s,\\n\" % str(self.transform)\n repr += \" Masked position generator = %s,\\n\" % str(\n self.masked_position_generator\n )\n repr += \")\"\n return repr\n\n\ndef build_pretraining_dataset(args):\n root_dir = ROOT_DIR\n transform = DataAugmentationForMAE(args)\n print(\"Data Aug = %s\" % str(transform))\n dataset_name = args.dataset_name\n if dataset_name == \"mini\":\n root_path = os.path.join(root_dir, \"mini-imagenet\")\n return MiniImagenetDataset(root_path, split=\"train\", transform=transform)\n elif dataset_name == \"CUB\":\n root_path = os.path.join(root_dir, \"cub\")\n name = \"CUB\"\n return CubDataset(root_path, name, split=\"train\", transform=transform)\n elif dataset_name == \"tiered\":\n root_path = os.path.join(root_dir, \"tiered_imagenet\")\n name = \"tiered\"\n return TieredDataset(root_path, name, split=\"train\", transform=transform)\n elif dataset_name == \"CIFAR_FS\":\n root_path = os.path.join(root_dir, \"CIFAR_FS\")\n name = \"CIFAR_FS\"\n return Cifar100Dataset(root_path, name, split=\"train\", transform=transform)\n elif dataset_name == \"FC100\":\n root_path = os.path.join(root_dir, \"FC100\")\n name = \"FC100\"\n return Cifar100Dataset(root_path, name, split=\"train\", transform=transform)\n else:\n assert False\n\n\n# -----------------------finetune------------------------ #\ndef build_dataset(is_train, dataset_name, split, args):\n root_dir = ROOT_DIR\n transform = build_transform(is_train, args)\n\n print(\"Transform = \")\n if isinstance(transform, tuple):\n for trans in transform:\n print(\" - - - - - - - - - - \")\n for t in trans.transforms:\n print(t)\n else:\n for t in transform.transforms:\n print(t)\n print(\"---------------------------\")\n if dataset_name == \"mini\":\n root_path = os.path.join(root_dir, \"mini-imagenet\")\n dataset = MiniImagenetDataset(root_path, split, transform=transform)\n elif dataset_name == \"CUB\":\n root_path = os.path.join(root_dir, \"cub\")\n name = \"CUB\"\n dataset = CubDataset(root_path, name, split, transform=transform)\n elif dataset_name == \"tiered\":\n root_path = os.path.join(root_dir, \"tiered_imagenet\")\n name = \"tiered\"\n dataset = TieredDataset(root_path, name, split, transform=transform)\n elif dataset_name == \"CIFAR_FS\":\n root_path = os.path.join(root_dir, \"CIFAR_FS\")\n name = \"CIFAR_FS\"\n dataset = Cifar100Dataset(root_path, name, split, transform=transform)\n elif dataset_name == \"FC100\":\n root_path = os.path.join(root_dir, \"FC100\")\n name = \"FC100\"\n dataset = Cifar100Dataset(root_path, name, split, transform=transform)\n else:\n assert False\n nb_classes = dataset.n_classes\n print(\"Number of the class = %d\" % nb_classes)\n\n return dataset, nb_classes\n\n\ndef build_transform(is_train, args):\n resize_im = args.input_size > 32\n imagenet_default_mean_and_std = args.imagenet_default_mean_and_std\n mean = (\n IMAGENET_INCEPTION_MEAN\n if not imagenet_default_mean_and_std\n else IMAGENET_DEFAULT_MEAN\n )\n std = (\n IMAGENET_INCEPTION_STD\n if not imagenet_default_mean_and_std\n else IMAGENET_DEFAULT_STD\n )\n\n if is_train:\n # this should always dispatch to transforms_imagenet_train\n transform = create_transform(\n input_size=args.input_size,\n is_training=True,\n color_jitter=args.color_jitter,\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n re_prob=args.reprob,\n re_mode=args.remode,\n re_count=args.recount,\n mean=mean,\n std=std,\n )\n if not resize_im:\n # replace RandomResizedCropAndInterpolation with\n # RandomCrop\n transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)\n return transform\n\n t = []\n if resize_im:\n if args.crop_pct is None:\n if args.input_size < 384:\n args.crop_pct = 224 / 256\n else:\n args.crop_pct = 1.0\n size = int(args.input_size / args.crop_pct)\n t.append(\n transforms.Resize(\n size, interpolation=transforms.functional.InterpolationMode.BICUBIC\n ), # to maintain same ratio w.r.t. 224 images\n )\n t.append(transforms.CenterCrop(args.input_size))\n\n t.append(transforms.ToTensor())\n t.append(transforms.Normalize(mean, std))\n return transforms.Compose(t)\n","repo_name":"Li-hq1/TransVLAD","sub_path":"datasets/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":13326,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"10075427603","text":"# -*- coding: utf-8 -*-\n\nlines = int(input())\n\nn = [0 for i in range(lines)]\n\nfor i in range(lines):\n inp = input()\n if i == 0 and inp == '1':\n n[i] += 1\n if i != 0 and inp == '1':\n n[i - 1] += 1\n n[i] += 1\n if i != lines - 1 and inp == '1':\n n[i + 1] += 1\n \nprint(*n, sep=\"\\n\")\n","repo_name":"luam0oliveira/competitive_programming","sub_path":"beecrowd/2399.py","file_name":"2399.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18776243355","text":"import urllib, urllib2\nimport simplejson\n\ndef create_profile(sender, instance, signal, created, **kwargs):\n \"\"\"When user is created also create a matching profile.\"\"\"\n \n from jobs.main.models import Profile\n \n if created:\n Profile( user = instance, email = instance.email ).save()\n\n\ndef edit_profile( sender, instance, signal, created, **kwargs ):\n \"\"\"When profile is edited also create or update ATS candidate .\"\"\"\n\n profile = instance\n\n if not created and not instance.atsid: \n r = urllib2.Request(\n 'http://localhost/ats/api/candidate/new/'\n ,data = urllib.urlencode( [\n ( 'first_name', profile.first_name )\n ,( 'last_name', profile.last_name )\n ,( 'phone', profile.phone )\n ,( 'mobile', profile.mobile )\n ,( 'email_1', profile.email )\n ] )\n )\n y = urllib2.urlopen( r )\n response = y.read()\n y.close()\n \n result = simplejson.loads( response )\n profile.atsid = result['candidate']\n profile.save()\n\n elif not created:\n r = urllib2.Request(\n 'http://localhost/ats/api/candidate/%s/update/' % ( profile.atsid )\n ,data = urllib.urlencode( [\n ( 'first_name', profile.first_name )\n ,( 'last_name', profile.last_name )\n ,( 'phone',profile.phone )\n ,( 'mobile',profile.mobile )\n ,( 'email_1',profile.email )\n ] )\n )\n y = urllib2.urlopen( r )\n #response = y.read()\n #y.close()\n \n #result = simplejson.loads( response )\n \n","repo_name":"caroman/ats","sub_path":"jobs/main/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30903481758","text":"import codecs\nimport threading\nimport time\nfrom xml.sax.saxutils import escape\n\nimport arrow\nfrom six.moves import queue\n\nfrom slyguy import plugin, gui, userdata, inputstream, signals, settings\nfrom slyguy.log import log\nfrom slyguy.monitor import monitor\nfrom slyguy.session import Session\nfrom slyguy.util import get_system_arch\n\nfrom .api import API\nfrom .language import _\nfrom .constants import ZA_EPG_URL, LICENSE_COOLDOWN\n\napi = API()\n\n@signals.on(signals.BEFORE_DISPATCH)\ndef before_dispatch():\n api.new_session()\n plugin.logged_in = api.logged_in\n\n@plugin.route('')\ndef home(**kwargs):\n folder = plugin.Folder()\n\n if not plugin.logged_in:\n folder.add_item(label=_(_.LOGIN, _bold=True), path=plugin.url_for(login), bookmark=False)\n else:\n folder.add_item(label=_(_.LIVE_TV, _bold=True), path=plugin.url_for(live_tv))\n folder.add_item(label=_(_.SERIES, _bold=True), path=plugin.url_for(content, title=_.SERIES, tags='TV Shows'))\n folder.add_item(label=_(_.MOVIES, _bold=True), path=plugin.url_for(content, title=_.MOVIES, tags='Movies'))\n folder.add_item(label=_(_.SPORT, _bold=True), path=plugin.url_for(content, title=_.SPORT, tags='Sport'))\n folder.add_item(label=_(_.KIDS, _bold=True), path=plugin.url_for(content, title=_.KIDS, tags='Kids'))\n folder.add_item(label=_(_.SEARCH, _bold=True), path=plugin.url_for(search))\n\n if settings.getBool('bookmarks', True):\n folder.add_item(label=_(_.BOOKMARKS, _bold=True), path=plugin.url_for(plugin.ROUTE_BOOKMARKS), bookmark=False)\n\n folder.add_item(label=_.SELECT_PROFILE, path=plugin.url_for(select_profile), art={'thumb': userdata.get('avatar')}, info={'plot': userdata.get('profile_name')}, _kiosk=False, bookmark=False)\n folder.add_item(label=_.LOGOUT, path=plugin.url_for(logout), _kiosk=False, bookmark=False)\n\n folder.add_item(label=_.SETTINGS, path=plugin.url_for(plugin.ROUTE_SETTINGS), _kiosk=False, bookmark=False)\n\n return folder\n\n@plugin.route()\ndef live_tv(**kwargs):\n folder = plugin.Folder(_.LIVE_TV)\n\n show_events = 2\n\n now = arrow.utcnow()\n channels = api.channels(events=show_events)\n for channel in channels:\n plot = u''\n count = 0\n\n for event in channel.get('events', []):\n start = arrow.get(event['startDateTime'])\n end = arrow.get(event['endDateTime'])\n if (now > start and now < end) or start > now:\n plot += u'[{}] {}\\n'.format(start.to('local').format('h:mma'), event['title'])\n count += 1\n\n if count == show_events+1:\n break\n\n plot = plot.strip(u'\\n')\n\n folder.add_item(\n label = _(_.CHANNEL, channel_number=channel['number'], channel_name=channel['name']),\n info = {'plot': plot or channel['description']},\n art = {'thumb': channel['channelLogoPaths'].get('XLARGE')},\n path = plugin.url_for(play_channel, id=channel['id'], _is_live=True),\n playable = True\n )\n\n return folder\n\n@plugin.route()\n@plugin.pagination()\ndef content(title, tags, sort='az', category=None, page=1, **kwargs):\n folder = plugin.Folder(title)\n\n page = int(page)\n data = api.content(tags, sort, category=category, page=page, pagesize=24)\n\n if category is None:\n category = ''\n for section in data['subSections']:\n if section['name'].lower() != 'filter':\n continue\n\n for row in section['items']:\n split = row['endpoint'].split('filter')\n if len(split) == 1:\n category = ''\n else:\n category = split[1].split(';')[0].lstrip('=')\n\n folder.add_item(\n label = row['name'],\n path = plugin.url_for(content, title=title, tags=tags, sort=sort, category=category, page=page),\n )\n\n if not folder.items:\n items = _process_rows(data['items'])\n folder.add_items(items)\n return folder, data['total'] > ((data['pageSize'] * data['page']) + data['count'])\n else:\n return folder, False\n\ndef _process_rows(rows):\n items = []\n\n for row in rows:\n if 'program' in row:\n item = _process_program(row['program'])\n elif 'video' in row:\n item = _process_video(row['video'])\n else:\n continue\n\n items.append(item)\n\n return items\n\ndef _get_image(images, type='thumb', size='SMALL'):\n if type == 'thumb':\n keys = ['poster', 'play-image']\n elif type == 'fanart':\n keys = ['hero', ]\n\n for key in keys:\n if key in images and images[key]:\n image = images[key]\n return image.get(size) or image[list(image)[-1]]\n\n return None\n\ndef _process_program(program):\n return plugin.Item(\n label = program['title'],\n art = {'thumb': _get_image(program['images']), 'fanart': _get_image(program['images'], 'fanart')},\n info = {\n 'plot': program.get('synopsis'),\n 'genre': program.get('genres'),\n 'tvshowtitle': program['title'],\n 'mediatype': 'tvshow',\n },\n path = plugin.url_for(list_seasons, id=program['id']),\n )\n\ndef _process_video(video):\n if video.get('type') == 'Movie':\n media_type = 'movie'\n elif video.get('type') == 'Episode':\n media_type = 'episode'\n else:\n media_type = 'video'\n\n return plugin.Item(\n label = video['title'],\n info = {\n 'plot': video.get('synopsis'),\n 'year': video.get('yearOfRelease'),\n 'duration': video.get('durationInSeconds'),\n 'season': video.get('seasonNumber'),\n 'episode': video.get('seasonEpisode'),\n 'genre': video.get('genres'),\n 'dateadded': video.get('airDate'),\n 'tvshowtitle': video.get('displayTitle'),\n 'mediatype': media_type,\n },\n art = {'thumb': _get_image(video['images']), 'fanart': _get_image(video['images'], 'fanart')},\n path = plugin.url_for(play_asset, stream_url=video['videoAssets'][0]['url'], content_id=video['videoAssets'][0]['manItemId']),\n playable = True,\n )\n\n@plugin.route()\ndef list_seasons(id, **kwargs):\n series = api.series(id)\n\n # Flatten\n if len(series['seasons']) == 1 and settings.getBool('flatten_single_season', True):\n return _episodes(series, int(series['seasons'][0]['seasonNumber']))\n\n folder = plugin.Folder(series['title'])\n\n for row in series['seasons']:\n folder.add_item(\n label = 'Season {}'.format(row['seasonNumber']),\n info = {\n 'plot': row.get('synopsis'),\n 'tvshowtitle': series['title'],\n 'season': row.get('seasonNumber'),\n 'mediatype': 'season',\n },\n art = {'thumb': _get_image(series['images']), 'fanart': _get_image(series['images'], 'fanart')},\n path = plugin.url_for(episodes, series=id, season=row['seasonNumber']),\n )\n\n return folder\n\n@plugin.route()\ndef episodes(series, season, **kwargs):\n series = api.series(series)\n return _episodes(series, int(season))\n\ndef _episodes(series, season):\n folder = plugin.Folder(series['title'], fanart= _get_image(series['images'], 'fanart'))\n\n for row in series['seasons']:\n if int(row['seasonNumber']) != int(season):\n continue\n\n has_eps = len([x for x in row['videos'] if x['seasonEpisode']])\n for video in row['videos']:\n if has_eps and not video['seasonEpisode']:\n log.debug('Skipping info video item: {}'.format(video['title']))\n continue\n\n item = _process_video(video)\n folder.add_items(item)\n\n break\n\n return folder\n\n@plugin.route()\n@plugin.search()\ndef search(query, page, **kwargs):\n items = []\n for row in api.search(query):\n item = plugin.Item(\n label = row['title'],\n art = {'thumb': row['image'].get('LARGE')},\n info = {},\n )\n\n if row['editorialItemType'] == 'Program':\n item.path = plugin.url_for(list_seasons, id=row['id'])\n elif row['editorialItemType'] == 'Video':\n item.path = plugin.url_for(play_video, id=row['id'])\n item.playable = True\n else:\n continue\n\n items.append(item)\n\n return items, False\n\n@plugin.route()\ndef login(**kwargs):\n if not _device_link():\n return\n\n _select_profile()\n gui.refresh()\n\ndef _device_link():\n timeout = 600\n\n with api.device_login() as login_progress:\n with gui.progress(_(_.DEVICE_LINK_STEPS, code=login_progress.code), heading=_.DEVICE_LINK) as progress:\n for i in range(timeout):\n if progress.iscanceled() or not login_progress.is_alive() or monitor.waitForAbort(1):\n break\n\n progress.update(int((i / float(timeout)) * 100))\n\n login_progress.stop()\n return login_progress.result\n\n@plugin.route()\n@plugin.login_required()\ndef select_profile(**kwargs):\n _select_profile()\n gui.refresh()\n\ndef _select_profile():\n options = []\n values = []\n default = -1\n\n for index, profile in enumerate(api.profiles()):\n values.append(profile)\n options.append(plugin.Item(label=profile['alias'], art={'thumb': profile['avatar']['uri']}))\n\n if profile['id'] == userdata.get('profile'):\n default = index\n userdata.set('avatar', profile['avatar']['uri'])\n userdata.set('profile', profile['alias'])\n\n index = gui.select(_.SELECT_PROFILE, options=options, preselect=default, useDetails=True)\n if index < 0:\n return\n\n _set_profile(values[index])\n\ndef _set_profile(profile):\n userdata.set('profile', profile['id'])\n userdata.set('profile_name', profile['alias'])\n userdata.set('avatar', profile['avatar']['uri'])\n if profile['id']:\n gui.notification(_.PROFILE_ACTIVATED, heading=profile['alias'], icon=profile['avatar']['uri'])\n\n@plugin.route()\ndef logout(**kwargs):\n if not gui.yes_no(_.LOGOUT_YES_NO):\n return\n\n api.logout()\n userdata.delete('avatar')\n userdata.delete('profile')\n userdata.delete('profile_name')\n gui.refresh()\n\n@plugin.route()\n@plugin.plugin_request()\ndef license_request(license_url, _data, _path, _headers, **kwargs):\n resp = Session().post(license_url, data=_data, headers=_headers)\n data = resp.content\n\n if not resp.ok or not data:\n cooldown_left = _get_license_cooldown()\n if b'concurrent' in data and cooldown_left:\n msg = _(_.LICENSE_COOLDOWN_ERROR, cooldown_left=cooldown_left)\n else:\n try:\n msg = resp.json()['message']\n except:\n msg = data.decode('utf8')\n msg = _(_.WIDEVINE_ERROR, error=msg)\n\n log.error(msg)\n gui.ok(msg)\n else:\n userdata.set('last_license', int(time.time()))\n\n with open(_path, 'wb') as f:\n f.write(data)\n return {'url': _path, 'headers': dict(resp.headers)}\n\ndef _get_license_cooldown():\n if get_system_arch()[0] == 'Android':\n return 0\n\n last_license = userdata.get('last_license', 0)\n cooldown_left = int(last_license + LICENSE_COOLDOWN - time.time())\n return cooldown_left\n\n@plugin.route()\n@plugin.login_required()\ndef play_asset(stream_url, content_id, **kwargs):\n url, license_url, headers = api.play_asset(stream_url, content_id)\n license_url = plugin.url_for(license_request, license_url=license_url)\n\n return plugin.Item(\n inputstream = inputstream.Widevine(license_url),\n headers = headers,\n path = url,\n )\n\n@plugin.route()\n@plugin.login_required()\ndef play_video(id, **kwargs):\n url, license_url, headers = api.play_video(id)\n license_url = plugin.url_for(license_request, license_url=license_url)\n\n return plugin.Item(\n inputstream = inputstream.Widevine(license_url),\n headers = headers,\n path = url,\n )\n\n@plugin.route()\n@plugin.login_required()\ndef play_channel(id, **kwargs):\n url, license_url, headers = api.play_channel(id)\n license_url = plugin.url_for(license_request, license_url=license_url)\n\n return plugin.Item(\n inputstream = inputstream.Widevine(license_url, properties={'manifest_update_parameter': 'full'}),\n headers = headers,\n path = url,\n )\n\n@plugin.route()\n@plugin.merge()\ndef playlist(output, **kwargs):\n data = api.channels()\n epg_url = ZA_EPG_URL\n\n with codecs.open(output, 'w', encoding='utf-8') as f:\n f.write(u'#EXTM3U x-tvg-url=\"{}\"'.format(epg_url))\n\n for row in data:\n genres = row.get('genres', [])\n genres = ';'.join(genres) if genres else ''\n\n f.write(u'\\n#EXTINF:-1 tvg-id=\"{id}\" tvg-chno=\"{channel}\" tvg-name=\"{name}\" group-title=\"{group}\" tvg-logo=\"{logo}\",{name}\\n{url}'.format(\n id=row['id'], channel=row['number'], name=row['name'], logo=row['channelLogoPaths'].get('XLARGE', ''),\n group=genres, url=plugin.url_for(play_channel, id=row['id'], _is_live=True)))\n","repo_name":"matthuisman/slyguy.addons","sub_path":"plugin.video.dstv.now/resources/lib/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":13303,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"22"} +{"seq_id":"39114625978","text":"import socket\nimport common_ports\nimport ipaddress\n\ndef get_open_ports(target, port_range, verbose = False):\n open_ports = []\n if target.replace('.', '').isnumeric():\n try:\n host = ipaddress.ip_address(target)\n except ValueError:\n return \"Error: Invalid IP address\"\n else:\n try:\n host = socket.gethostbyname(target)\n except:\n return \"Error: Invalid hostname\"\n\n for port in range(port_range[0], port_range[-1] + 1):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = s.connect_ex((str(host), port))\n s.close()\n if result == 0:\n open_ports.append(port)\n\n if verbose == False: \n return open_ports\n else:\n if target.replace('.', '').isnumeric():\n try:\n host_name = socket.gethostbyaddr(target)[0]\n res = \"Open ports for \" + host_name + \" (\" + target + \")\\nPORT SERVICE\\n\"\n except:\n res = \"Open ports for \" + target + \"\\nPORT SERVICE\\n\"\n else:\n res = \"Open ports for \" + target + \" (\" + host + \")\\nPORT SERVICE\\n\"\n for port in open_ports:\n res += (str(port).ljust(9) + common_ports.ports_and_services.get(port, \"\"))\n if port != open_ports[-1]:\n res += '\\n'\n return res","repo_name":"hoangnam-nguyen/python-portscanner","sub_path":"port_scanner.py","file_name":"port_scanner.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14091842392","text":"'''\nnosetests picks up all python files that start or end with `test` in their name,\nand runs all methods that start or end with `test`.\n'''\n\n# pythonpath modification to make mypackage available \n# for import without requiring it to be installed\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\nimport mypackage.mymodule\n\ndef test_something():\n # simply use assertions to make sure your code behaves as expected\n assert(True)\n\ndef test_myclass_fibonacci():\n fibonacciComputer = mypackage.mymodule.MyClass(20)\n assert(fibonacciComputer.fibonacci(12) == 144)\n\ndef test_myclass_above_limit():\n fibonacciComputer = mypackage.mymodule.MyClass(20)\n try:\n fibonacciComputer.fibonacci(21)\n assert(False)\n except ValueError:\n assert(True)","repo_name":"ilastik/pytemplate","sub_path":"tests/test_mypackage.py","file_name":"test_mypackage.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74717188536","text":"import requests\nfrom itertools import chain\nfrom ServerAPIs.views import *\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef search(request):\n global searchedResults\n filter_dict = {}\n searchJson = json.loads(json.dumps(request.data))\n try:\n if searchJson['query']:\n nameMatched = Our_Filters.objects.filter(filter_name__iexact=searchJson['query']).values_list(\n 'filter_type', 'filter_name')\n keysMatched = Our_Filters.objects.filter(keywords__icontains=searchJson['query']).values_list('filter_type',\n 'filter_name')\n matched_data = list(chain(nameMatched, keysMatched))\n\n filter_types = list(set(Enumerable(matched_data).select(lambda x: x[0]).to_list()))\n for filter_type in filter_types:\n filter_dict[filter_type] = list(\n set(Enumerable(matched_data).where(lambda x: x[0] == filter_type).select(lambda x: x[1]).to_list()))\n\n print('dict_matched_data: ', filter_dict)\n context = {\n \"userid\": 0,\n \"page\": 0,\n \"products_per_page\": \"10\",\n \"name-asc\": False,\n \"name-desc\": False,\n \"price-desc\": False,\n \"price-asc\": False\n }\n\n if filter_dict:\n context = context | filter_dict\n else:\n filter_dict['name'] = [searchJson['query']]\n context = context | filter_dict\n reqHeader = {'Content-Type': 'application/json'}\n productUrl = 'http://localhost:8000/api/' + 'GetProductsListing'\n saveData = requests.post(url=productUrl, data=json.dumps(context), headers=reqHeader, timeout=6000,verify=False)\n searchedResults = json.loads(saveData.text)\n except Exception as e:\n context = {'data':\n {'Error': 'Error getting Searched Keywords', 'return_data': []}\n }\n ShowException(e)\n return Response(context)\n passed_to = {'passed_to': filter_dict}\n return Response(data=searchedResults | passed_to)\n\ndef ShowException(e):\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(e)","repo_name":"rizwaankhan/FashionADs","sub_path":"ServerAPIs/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26348676925","text":"import constants\nfrom button import ActionButton\nfrom graph import GraphFactory\nfrom .walkthrough_game_mode import WalkthroughGameMode, GameStep\n\n\nclass CompleteGraphMode(WalkthroughGameMode):\n def __init__(self, size_factor=1):\n super().__init__(size_factor)\n\n # init buttons\n margin_top = constants.GAME_MODE_MARGIN\n margin_right = constants.GAME_MODE_MARGIN\n size = (200, 40)\n pos_x = constants.GAME_MODE_SCREEN_SIZE[0] - margin_right - size[0]\n self.buttons.update({'regenerate': ActionButton('Regenerate', (pos_x, margin_top + 100), size, 'red',\n constants.GAME_MODE_HEAD_OFFSET, self.regenerate_graph)})\n\n # init steps\n self.init_game_steps([CompleteStep1(self, 5, 0),\n CompleteStep1(self, 6, 1),\n CompleteStep1(self, 8, 2),\n CompleteStep1(self, 10, 3)])\n\n # start with first step\n self.next_step()\n\n def regenerate_graph(self):\n self.current_step.regenerate_graph()\n\n\nclass CompleteStep1(GameStep):\n def __init__(self, game_mode, graph_size, step):\n super().__init__(game_mode)\n self.step = step\n self.has_finished = False\n self.size_factor = 1\n self.graph_size = graph_size\n if graph_size > 5:\n self.size_factor = 0.7\n self.graph = GraphFactory.generate_complete_graph(self.size_factor, graph_size)\n\n def regenerate_graph(self):\n self.graph = GraphFactory.generate_complete_graph(self.size_factor, self.graph_size)\n self.game_mode.active_graph = self.graph\n self.game_mode.active_graph.reset_to_one_group()\n self.game_mode.headline = self.game_mode.standard_headline\n self.has_finished = False\n self.game_mode.draw_necessary = True\n\n def enter(self):\n self.set_headline()\n self.game_mode.headline = self.game_mode.standard_headline\n self.game_mode.active_graph = self.graph\n self.game_mode.active_graph.reset_to_one_group()\n self.game_mode.change_all_buttons('show')\n self.game_mode.change_all_buttons('activate')\n if self.step == 0:\n self.game_mode.buttons['previous'].deactivate()\n self.game_mode.buttons['next'].deactivate()\n self.game_mode.show_points = True\n\n if self.has_finished and self.step < 3:\n self.game_mode.buttons['next'].activate()\n\n def set_headline(self):\n if self.step == 0:\n self.game_mode.standard_headline = (\"This is complete graph with 5 vertices. The special feature of \"\n \"complete graphs is that every vertex has an edge to every vertex.\"\n \" Try to solve it.\")\n elif self.step == 1:\n self.game_mode.standard_headline = (\"This is complete graph with 6 vertices. It might seem easy at first, \"\n \"but complete graphs get increasingly complex with more vertices.\")\n elif self.step == 2:\n self.game_mode.standard_headline = \"This complete graph has already 8 vertices\"\n elif self.step == 3:\n self.game_mode.standard_headline = \"This is the biggest complete graph in this game with 10 vertices.\"\n\n def is_finished(self):\n is_finished = self.game_mode.active_graph.is_solved()\n if self.has_finished:\n return False\n if is_finished:\n self.has_finished = True\n return is_finished\n\n def finish(self):\n if self.step < 3:\n self.game_mode.buttons['next'].activate()\n self.game_mode.headline = \"That's it. Good job!\"\n else:\n self.game_mode.headline = \"You solved them all. Try out another game mode!\"\n self.game_mode.active_graph.deactivated = True\n self.game_mode.draw_necessary = True\n","repo_name":"FrederikJW/MulticutGame","sub_path":"game_modes/complete_graph_mode.py","file_name":"complete_graph_mode.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23387936492","text":"\"\"\"Certbot command line argument & config processing.\"\"\"\n# pylint: disable=too-many-lines\nfrom __future__ import print_function\nimport logging\nimport logging.handlers\nimport argparse\nimport sys\nimport certbot._internal.plugins.selection as plugin_selection\nfrom certbot._internal.plugins import disco as plugins_disco\n\nfrom acme.magic_typing import Optional\n\n# pylint: disable=ungrouped-imports\nimport certbot\nfrom certbot._internal import constants\n\nimport certbot.plugins.enhancements as enhancements\n\n\nfrom certbot._internal.cli.cli_constants import (\n LEAUTO,\n old_path_fragment,\n new_path_prefix,\n cli_command,\n SHORT_USAGE,\n COMMAND_OVERVIEW,\n HELP_AND_VERSION_USAGE,\n ARGPARSE_PARAMS_TO_REMOVE,\n EXIT_ACTIONS,\n ZERO_ARG_ACTIONS,\n VAR_MODIFIERS\n)\n\nfrom certbot._internal.cli.cli_utils import (\n _Default,\n read_file,\n flag_default,\n config_help,\n HelpfulArgumentGroup,\n CustomHelpFormatter,\n _DomainsAction,\n add_domains,\n CaseInsensitiveList,\n _user_agent_comment_type,\n _EncodeReasonAction,\n parse_preferred_challenges,\n _PrefChallAction,\n _DeployHookAction,\n _RenewHookAction,\n nonnegative_int\n)\n\n# These imports depend on cli_constants and cli_utils.\nfrom certbot._internal.cli.report_config_interaction import report_config_interaction\nfrom certbot._internal.cli.verb_help import VERB_HELP, VERB_HELP_MAP\nfrom certbot._internal.cli.group_adder import _add_all_groups\nfrom certbot._internal.cli.subparsers import _create_subparsers\nfrom certbot._internal.cli.paths_parser import _paths_parser\nfrom certbot._internal.cli.plugins_parsing import _plugins_parsing\n\n# These imports depend on some or all of the submodules for cli.\nfrom certbot._internal.cli.helpful import HelpfulArgumentParser\n# pylint: enable=ungrouped-imports\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Global, to save us from a lot of argument passing within the scope of this module\nhelpful_parser = None # type: Optional[HelpfulArgumentParser]\n\n\ndef prepare_and_parse_args(plugins, args, detect_defaults=False):\n \"\"\"Returns parsed command line arguments.\n\n :param .PluginsRegistry plugins: available plugins\n :param list args: command line arguments with the program name removed\n\n :returns: parsed command line arguments\n :rtype: argparse.Namespace\n\n \"\"\"\n\n helpful = HelpfulArgumentParser(args, plugins, detect_defaults)\n _add_all_groups(helpful)\n\n # --help is automatically provided by argparse\n helpful.add(\n None, \"-v\", \"--verbose\", dest=\"verbose_count\", action=\"count\",\n default=flag_default(\"verbose_count\"), help=\"This flag can be used \"\n \"multiple times to incrementally increase the verbosity of output, \"\n \"e.g. -vvv.\")\n helpful.add(\n None, \"-t\", \"--text\", dest=\"text_mode\", action=\"store_true\",\n default=flag_default(\"text_mode\"), help=argparse.SUPPRESS)\n helpful.add(\n None, \"--max-log-backups\", type=nonnegative_int,\n default=flag_default(\"max_log_backups\"),\n help=\"Specifies the maximum number of backup logs that should \"\n \"be kept by Certbot's built in log rotation. Setting this \"\n \"flag to 0 disables log rotation entirely, causing \"\n \"Certbot to always append to the same log file.\")\n helpful.add(\n [None, \"automation\", \"run\", \"certonly\", \"enhance\"],\n \"-n\", \"--non-interactive\", \"--noninteractive\",\n dest=\"noninteractive_mode\", action=\"store_true\",\n default=flag_default(\"noninteractive_mode\"),\n help=\"Run without ever asking for user input. This may require \"\n \"additional command line flags; the client will try to explain \"\n \"which ones are required if it finds one missing\")\n helpful.add(\n [None, \"register\", \"run\", \"certonly\", \"enhance\"],\n constants.FORCE_INTERACTIVE_FLAG, action=\"store_true\",\n default=flag_default(\"force_interactive\"),\n help=\"Force Certbot to be interactive even if it detects it's not \"\n \"being run in a terminal. This flag cannot be used with the \"\n \"renew subcommand.\")\n helpful.add(\n [None, \"run\", \"certonly\", \"certificates\", \"enhance\"],\n \"-d\", \"--domains\", \"--domain\", dest=\"domains\",\n metavar=\"DOMAIN\", action=_DomainsAction,\n default=flag_default(\"domains\"),\n help=\"Domain names to apply. For multiple domains you can use \"\n \"multiple -d flags or enter a comma separated list of domains \"\n \"as a parameter. The first domain provided will be the \"\n \"subject CN of the certificate, and all domains will be \"\n \"Subject Alternative Names on the certificate. \"\n \"The first domain will also be used in \"\n \"some software user interfaces and as the file paths for the \"\n \"certificate and related material unless otherwise \"\n \"specified or you already have a certificate with the same \"\n \"name. In the case of a name collision it will append a number \"\n \"like 0001 to the file path name. (default: Ask)\")\n helpful.add(\n [None, \"run\", \"certonly\", \"register\"],\n \"--eab-kid\", dest=\"eab_kid\",\n metavar=\"EAB_KID\",\n help=\"Key Identifier for External Account Binding\"\n )\n helpful.add(\n [None, \"run\", \"certonly\", \"register\"],\n \"--eab-hmac-key\", dest=\"eab_hmac_key\",\n metavar=\"EAB_HMAC_KEY\",\n help=\"HMAC key for External Account Binding\"\n )\n helpful.add(\n [None, \"run\", \"certonly\", \"manage\", \"delete\", \"certificates\",\n \"renew\", \"enhance\"], \"--cert-name\", dest=\"certname\",\n metavar=\"CERTNAME\", default=flag_default(\"certname\"),\n help=\"Certificate name to apply. This name is used by Certbot for housekeeping \"\n \"and in file paths; it doesn't affect the content of the certificate itself. \"\n \"To see certificate names, run 'certbot certificates'. \"\n \"When creating a new certificate, specifies the new certificate's name. \"\n \"(default: the first provided domain or the name of an existing \"\n \"certificate on your system for the same domains)\")\n helpful.add(\n [None, \"testing\", \"renew\", \"certonly\"],\n \"--dry-run\", action=\"store_true\", dest=\"dry_run\",\n default=flag_default(\"dry_run\"),\n help=\"Perform a test run of the client, obtaining test (invalid) certificates\"\n \" but not saving them to disk. This can currently only be used\"\n \" with the 'certonly' and 'renew' subcommands. \\nNote: Although --dry-run\"\n \" tries to avoid making any persistent changes on a system, it \"\n \" is not completely side-effect free: if used with webserver authenticator plugins\"\n \" like apache and nginx, it makes and then reverts temporary config changes\"\n \" in order to obtain test certificates, and reloads webservers to deploy and then\"\n \" roll back those changes. It also calls --pre-hook and --post-hook commands\"\n \" if they are defined because they may be necessary to accurately simulate\"\n \" renewal. --deploy-hook commands are not called.\")\n helpful.add(\n [\"register\", \"automation\"], \"--register-unsafely-without-email\", action=\"store_true\",\n default=flag_default(\"register_unsafely_without_email\"),\n help=\"Specifying this flag enables registering an account with no \"\n \"email address. This is strongly discouraged, because in the \"\n \"event of key loss or account compromise you will irrevocably \"\n \"lose access to your account. You will also be unable to receive \"\n \"notice about impending expiration or revocation of your \"\n \"certificates. Updates to the Subscriber Agreement will still \"\n \"affect you, and will be effective 14 days after posting an \"\n \"update to the web site.\")\n helpful.add(\n [\"register\", \"update_account\", \"unregister\", \"automation\"], \"-m\", \"--email\",\n default=flag_default(\"email\"),\n help=config_help(\"email\"))\n helpful.add([\"register\", \"update_account\", \"automation\"], \"--eff-email\", action=\"store_true\",\n default=flag_default(\"eff_email\"), dest=\"eff_email\",\n help=\"Share your e-mail address with EFF\")\n helpful.add([\"register\", \"update_account\", \"automation\"], \"--no-eff-email\",\n action=\"store_false\", default=flag_default(\"eff_email\"), dest=\"eff_email\",\n help=\"Don't share your e-mail address with EFF\")\n helpful.add(\n [\"automation\", \"certonly\", \"run\"],\n \"--keep-until-expiring\", \"--keep\", \"--reinstall\",\n dest=\"reinstall\", action=\"store_true\", default=flag_default(\"reinstall\"),\n help=\"If the requested certificate matches an existing certificate, always keep the \"\n \"existing one until it is due for renewal (for the \"\n \"'run' subcommand this means reinstall the existing certificate). (default: Ask)\")\n helpful.add(\n \"automation\", \"--expand\", action=\"store_true\", default=flag_default(\"expand\"),\n help=\"If an existing certificate is a strict subset of the requested names, \"\n \"always expand and replace it with the additional names. (default: Ask)\")\n helpful.add(\n \"automation\", \"--version\", action=\"version\",\n version=\"%(prog)s {0}\".format(certbot.__version__),\n help=\"show program's version number and exit\")\n helpful.add(\n [\"automation\", \"renew\"],\n \"--force-renewal\", \"--renew-by-default\", dest=\"renew_by_default\",\n action=\"store_true\", default=flag_default(\"renew_by_default\"),\n help=\"If a certificate \"\n \"already exists for the requested domains, renew it now, \"\n \"regardless of whether it is near expiry. (Often \"\n \"--keep-until-expiring is more appropriate). Also implies \"\n \"--expand.\")\n helpful.add(\n \"automation\", \"--renew-with-new-domains\", dest=\"renew_with_new_domains\",\n action=\"store_true\", default=flag_default(\"renew_with_new_domains\"),\n help=\"If a \"\n \"certificate already exists for the requested certificate name \"\n \"but does not match the requested domains, renew it now, \"\n \"regardless of whether it is near expiry.\")\n helpful.add(\n \"automation\", \"--reuse-key\", dest=\"reuse_key\",\n action=\"store_true\", default=flag_default(\"reuse_key\"),\n help=\"When renewing, use the same private key as the existing \"\n \"certificate.\")\n\n helpful.add(\n [\"automation\", \"renew\", \"certonly\"],\n \"--allow-subset-of-names\", action=\"store_true\",\n default=flag_default(\"allow_subset_of_names\"),\n help=\"When performing domain validation, do not consider it a failure \"\n \"if authorizations can not be obtained for a strict subset of \"\n \"the requested domains. This may be useful for allowing renewals for \"\n \"multiple domains to succeed even if some domains no longer point \"\n \"at this system. This option cannot be used with --csr.\")\n helpful.add(\n \"automation\", \"--agree-tos\", dest=\"tos\", action=\"store_true\",\n default=flag_default(\"tos\"),\n help=\"Agree to the ACME Subscriber Agreement (default: Ask)\")\n helpful.add(\n [\"unregister\", \"automation\"], \"--account\", metavar=\"ACCOUNT_ID\",\n default=flag_default(\"account\"),\n help=\"Account ID to use\")\n helpful.add(\n \"automation\", \"--duplicate\", dest=\"duplicate\", action=\"store_true\",\n default=flag_default(\"duplicate\"),\n help=\"Allow making a certificate lineage that duplicates an existing one \"\n \"(both can be renewed in parallel)\")\n helpful.add(\n \"automation\", \"--os-packages-only\", action=\"store_true\",\n default=flag_default(\"os_packages_only\"),\n help=\"(certbot-auto only) install OS package dependencies and then stop\")\n helpful.add(\n \"automation\", \"--no-self-upgrade\", action=\"store_true\",\n default=flag_default(\"no_self_upgrade\"),\n help=\"(certbot-auto only) prevent the certbot-auto script from\"\n \" upgrading itself to newer released versions (default: Upgrade\"\n \" automatically)\")\n helpful.add(\n \"automation\", \"--no-bootstrap\", action=\"store_true\",\n default=flag_default(\"no_bootstrap\"),\n help=\"(certbot-auto only) prevent the certbot-auto script from\"\n \" installing OS-level dependencies (default: Prompt to install \"\n \" OS-wide dependencies, but exit if the user says 'No')\")\n helpful.add(\n \"automation\", \"--no-permissions-check\", action=\"store_true\",\n default=flag_default(\"no_permissions_check\"),\n help=\"(certbot-auto only) skip the check on the file system\"\n \" permissions of the certbot-auto script\")\n helpful.add(\n [\"automation\", \"renew\", \"certonly\", \"run\"],\n \"-q\", \"--quiet\", dest=\"quiet\", action=\"store_true\",\n default=flag_default(\"quiet\"),\n help=\"Silence all output except errors. Useful for automation via cron.\"\n \" Implies --non-interactive.\")\n # overwrites server, handled in HelpfulArgumentParser.parse_args()\n helpful.add([\"testing\", \"revoke\", \"run\"], \"--test-cert\", \"--staging\",\n dest=\"staging\", action=\"store_true\", default=flag_default(\"staging\"),\n help=\"Use the staging server to obtain or revoke test (invalid) certificates; equivalent\"\n \" to --server \" + constants.STAGING_URI)\n helpful.add(\n \"testing\", \"--debug\", action=\"store_true\", default=flag_default(\"debug\"),\n help=\"Show tracebacks in case of errors, and allow certbot-auto \"\n \"execution on experimental platforms\")\n helpful.add(\n [None, \"certonly\", \"run\"], \"--debug-challenges\", action=\"store_true\",\n default=flag_default(\"debug_challenges\"),\n help=\"After setting up challenges, wait for user input before \"\n \"submitting to CA\")\n helpful.add(\n \"testing\", \"--no-verify-ssl\", action=\"store_true\",\n help=config_help(\"no_verify_ssl\"),\n default=flag_default(\"no_verify_ssl\"))\n helpful.add(\n [\"testing\", \"standalone\", \"manual\"], \"--http-01-port\", type=int,\n dest=\"http01_port\",\n default=flag_default(\"http01_port\"), help=config_help(\"http01_port\"))\n helpful.add(\n [\"testing\", \"standalone\"], \"--http-01-address\",\n dest=\"http01_address\",\n default=flag_default(\"http01_address\"), help=config_help(\"http01_address\"))\n helpful.add(\n [\"testing\", \"nginx\"], \"--https-port\", type=int,\n default=flag_default(\"https_port\"),\n help=config_help(\"https_port\"))\n helpful.add(\n \"testing\", \"--break-my-certs\", action=\"store_true\",\n default=flag_default(\"break_my_certs\"),\n help=\"Be willing to replace or renew valid certificates with invalid \"\n \"(testing/staging) certificates\")\n helpful.add(\n \"security\", \"--rsa-key-size\", type=int, metavar=\"N\",\n default=flag_default(\"rsa_key_size\"), help=config_help(\"rsa_key_size\"))\n helpful.add(\n \"security\", \"--must-staple\", action=\"store_true\",\n dest=\"must_staple\", default=flag_default(\"must_staple\"),\n help=config_help(\"must_staple\"))\n helpful.add(\n [\"security\", \"enhance\"],\n \"--redirect\", action=\"store_true\", dest=\"redirect\",\n default=flag_default(\"redirect\"),\n help=\"Automatically redirect all HTTP traffic to HTTPS for the newly \"\n \"authenticated vhost. (default: Ask)\")\n helpful.add(\n \"security\", \"--no-redirect\", action=\"store_false\", dest=\"redirect\",\n default=flag_default(\"redirect\"),\n help=\"Do not automatically redirect all HTTP traffic to HTTPS for the newly \"\n \"authenticated vhost. (default: Ask)\")\n helpful.add(\n [\"security\", \"enhance\"],\n \"--hsts\", action=\"store_true\", dest=\"hsts\", default=flag_default(\"hsts\"),\n help=\"Add the Strict-Transport-Security header to every HTTP response.\"\n \" Forcing browser to always use SSL for the domain.\"\n \" Defends against SSL Stripping.\")\n helpful.add(\n \"security\", \"--no-hsts\", action=\"store_false\", dest=\"hsts\",\n default=flag_default(\"hsts\"), help=argparse.SUPPRESS)\n helpful.add(\n [\"security\", \"enhance\"],\n \"--uir\", action=\"store_true\", dest=\"uir\", default=flag_default(\"uir\"),\n help='Add the \"Content-Security-Policy: upgrade-insecure-requests\"'\n ' header to every HTTP response. Forcing the browser to use'\n ' https:// for every http:// resource.')\n helpful.add(\n \"security\", \"--no-uir\", action=\"store_false\", dest=\"uir\", default=flag_default(\"uir\"),\n help=argparse.SUPPRESS)\n helpful.add(\n \"security\", \"--staple-ocsp\", action=\"store_true\", dest=\"staple\",\n default=flag_default(\"staple\"),\n help=\"Enables OCSP Stapling. A valid OCSP response is stapled to\"\n \" the certificate that the server offers during TLS.\")\n helpful.add(\n \"security\", \"--no-staple-ocsp\", action=\"store_false\", dest=\"staple\",\n default=flag_default(\"staple\"), help=argparse.SUPPRESS)\n helpful.add(\n \"security\", \"--strict-permissions\", action=\"store_true\",\n default=flag_default(\"strict_permissions\"),\n help=\"Require that all configuration files are owned by the current \"\n \"user; only needed if your config is somewhere unsafe like /tmp/\")\n helpful.add(\n [\"manual\", \"standalone\", \"certonly\", \"renew\"],\n \"--preferred-challenges\", dest=\"pref_challs\",\n action=_PrefChallAction, default=flag_default(\"pref_challs\"),\n help='A sorted, comma delimited list of the preferred challenge to '\n 'use during authorization with the most preferred challenge '\n 'listed first (Eg, \"dns\" or \"http,dns\"). '\n 'Not all plugins support all challenges. See '\n 'https://certbot.eff.org/docs/using.html#plugins for details. '\n 'ACME Challenges are versioned, but if you pick \"http\" rather '\n 'than \"http-01\", Certbot will select the latest version '\n 'automatically.')\n helpful.add(\n \"renew\", \"--pre-hook\",\n help=\"Command to be run in a shell before obtaining any certificates.\"\n \" Intended primarily for renewal, where it can be used to temporarily\"\n \" shut down a webserver that might conflict with the standalone\"\n \" plugin. This will only be called if a certificate is actually to be\"\n \" obtained/renewed. When renewing several certificates that have\"\n \" identical pre-hooks, only the first will be executed.\")\n helpful.add(\n \"renew\", \"--post-hook\",\n help=\"Command to be run in a shell after attempting to obtain/renew\"\n \" certificates. Can be used to deploy renewed certificates, or to\"\n \" restart any servers that were stopped by --pre-hook. This is only\"\n \" run if an attempt was made to obtain/renew a certificate. If\"\n \" multiple renewed certificates have identical post-hooks, only\"\n \" one will be run.\")\n helpful.add(\"renew\", \"--renew-hook\",\n action=_RenewHookAction, help=argparse.SUPPRESS)\n helpful.add(\n \"renew\", \"--no-random-sleep-on-renew\", action=\"store_false\",\n default=flag_default(\"random_sleep_on_renew\"), dest=\"random_sleep_on_renew\",\n help=argparse.SUPPRESS)\n helpful.add(\n \"renew\", \"--deploy-hook\", action=_DeployHookAction,\n help='Command to be run in a shell once for each successfully'\n ' issued certificate. For this command, the shell variable'\n ' $RENEWED_LINEAGE will point to the config live subdirectory'\n ' (for example, \"/etc/letsencrypt/live/example.com\") containing'\n ' the new certificates and keys; the shell variable'\n ' $RENEWED_DOMAINS will contain a space-delimited list of'\n ' renewed certificate domains (for example, \"example.com'\n ' www.example.com\"')\n helpful.add(\n \"renew\", \"--disable-hook-validation\",\n action=\"store_false\", dest=\"validate_hooks\",\n default=flag_default(\"validate_hooks\"),\n help=\"Ordinarily the commands specified for\"\n \" --pre-hook/--post-hook/--deploy-hook will be checked for\"\n \" validity, to see if the programs being run are in the $PATH,\"\n \" so that mistakes can be caught early, even when the hooks\"\n \" aren't being run just yet. The validation is rather\"\n \" simplistic and fails if you use more advanced shell\"\n \" constructs, so you can use this switch to disable it.\"\n \" (default: False)\")\n helpful.add(\n \"renew\", \"--no-directory-hooks\", action=\"store_false\",\n default=flag_default(\"directory_hooks\"), dest=\"directory_hooks\",\n help=\"Disable running executables found in Certbot's hook directories\"\n \" during renewal. (default: False)\")\n helpful.add(\n \"renew\", \"--disable-renew-updates\", action=\"store_true\",\n default=flag_default(\"disable_renew_updates\"), dest=\"disable_renew_updates\",\n help=\"Disable automatic updates to your server configuration that\"\n \" would otherwise be done by the selected installer plugin, and triggered\"\n \" when the user executes \\\"certbot renew\\\", regardless of if the certificate\"\n \" is renewed. This setting does not apply to important TLS configuration\"\n \" updates.\")\n helpful.add(\n \"renew\", \"--no-autorenew\", action=\"store_false\",\n default=flag_default(\"autorenew\"), dest=\"autorenew\",\n help=\"Disable auto renewal of certificates.\")\n\n # Populate the command line parameters for new style enhancements\n enhancements.populate_cli(helpful.add)\n\n _create_subparsers(helpful)\n _paths_parser(helpful)\n # _plugins_parsing should be the last thing to act upon the main\n # parser (--help should display plugin-specific options last)\n _plugins_parsing(helpful, plugins)\n\n if not detect_defaults:\n global helpful_parser # pylint: disable=global-statement\n helpful_parser = helpful\n return helpful.parse_args()\n\n\ndef set_by_cli(var):\n \"\"\"\n Return True if a particular config variable has been set by the user\n (CLI or config file) including if the user explicitly set it to the\n default. Returns False if the variable was assigned a default value.\n \"\"\"\n detector = set_by_cli.detector # type: ignore\n if detector is None and helpful_parser is not None:\n # Setup on first run: `detector` is a weird version of config in which\n # the default value of every attribute is wrangled to be boolean-false\n plugins = plugins_disco.PluginsRegistry.find_all()\n # reconstructed_args == sys.argv[1:], or whatever was passed to main()\n reconstructed_args = helpful_parser.args + [helpful_parser.verb]\n detector = set_by_cli.detector = prepare_and_parse_args( # type: ignore\n plugins, reconstructed_args, detect_defaults=True)\n # propagate plugin requests: eg --standalone modifies config.authenticator\n detector.authenticator, detector.installer = ( # type: ignore\n plugin_selection.cli_plugin_requests(detector))\n\n if not isinstance(getattr(detector, var), _Default):\n logger.debug(\"Var %s=%s (set by user).\", var, getattr(detector, var))\n return True\n\n for modifier in VAR_MODIFIERS.get(var, []):\n if set_by_cli(modifier):\n logger.debug(\"Var %s=%s (set by user).\",\n var, VAR_MODIFIERS.get(var, []))\n return True\n\n return False\n\n\n# static housekeeping var\n# functions attributed are not supported by mypy\n# https://github.com/python/mypy/issues/2087\nset_by_cli.detector = None # type: ignore\n\n\ndef has_default_value(option, value):\n \"\"\"Does option have the default value?\n\n If the default value of option is not known, False is returned.\n\n :param str option: configuration variable being considered\n :param value: value of the configuration variable named option\n\n :returns: True if option has the default value, otherwise, False\n :rtype: bool\n\n \"\"\"\n if helpful_parser is not None:\n return (option in helpful_parser.defaults and\n helpful_parser.defaults[option] == value)\n return False\n\n\ndef option_was_set(option, value):\n \"\"\"Was option set by the user or does it differ from the default?\n\n :param str option: configuration variable being considered\n :param value: value of the configuration variable named option\n\n :returns: True if the option was set, otherwise, False\n :rtype: bool\n\n \"\"\"\n return set_by_cli(option) or not has_default_value(option, value)\n\n\ndef argparse_type(variable):\n \"\"\"Return our argparse type function for a config variable (default: str)\"\"\"\n # pylint: disable=protected-access\n if helpful_parser is not None:\n for action in helpful_parser.parser._actions:\n if action.type is not None and action.dest == variable:\n return action.type\n return str\n","repo_name":"norbusan/certbot-debian","sub_path":"certbot/_internal/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":25261,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"22"} +{"seq_id":"75101905334","text":"#!/usr/bin/env python3\nimport optparse\nimport random\nimport sys\n\nfrom breezy import (\n _known_graph_py,\n _known_graph_pyx,\n branch,\n commands,\n graph,\n osutils,\n trace,\n ui,\n)\nfrom breezy.ui import text\n\np = optparse.OptionParser()\np.add_option('--quick', default=False, action='store_true')\np.add_option('--max-combinations', default=500, type=int)\np.add_option('--lsprof', default=None, type=str)\nopts, args = p.parse_args(sys.argv[1:])\n\ntrace.enable_default_logging()\nui.ui_factory = text.TextUIFactory()\n\nbegin = osutils.perf_counter()\nif len(args) >= 1:\n b = branch.Branch.open(args[0])\nelse:\n b = branch.Branch.open('.')\nwith b.lock_read():\n g = b.repository.get_graph()\n parent_map = dict(p for p in g.iter_ancestry([b.last_revision()])\n if p[1] is not None)\nend = osutils.perf_counter()\n\nprint(f'Found {len(parent_map)} nodes, loaded in {end - begin:.3f}s')\n\ndef all_heads_comp(g, combinations):\n h = []\n with ui.ui_factory.nested_progress_bar() as pb:\n for idx, combo in enumerate(combinations):\n if idx & 0x1f == 0:\n pb.update('proc', idx, len(combinations))\n h.append(g.heads(combo))\n return h\n\ncombinations = []\n# parents = parent_map.keys()\n# for p1 in parents:\n# for p2 in random.sample(parents, 10):\n# combinations.append((p1, p2))\n# Times for random sampling of 10x1150 of bzrtools\n# Graph KnownGraph\n# 96.1s vs 25.7s :)\n# Times for 500 'merge parents' from bzr.dev\n# 25.6s vs 45.0s :(\n\nfor _revision_id, parent_ids in parent_map.iteritems():\n if parent_ids is not None and len(parent_ids) > 1:\n combinations.append(parent_ids)\n# The largest portion of the graph that has to be walked for a heads() check\n# combinations = [('john@arbash-meinel.com-20090312021943-tu6tcog48aiujx4s',\n# 'john@arbash-meinel.com-20090312130552-09xa2xsitf6rilzc')]\nif opts.max_combinations > 0 and len(combinations) > opts.max_combinations:\n combinations = random.sample(combinations, opts.max_combinations)\n\nprint(f' {len(combinations)} combinations')\n\ndef combi_graph(graph_klass, comb):\n # DEBUG\n graph._counters[1] = 0\n graph._counters[2] = 0\n\n begin = osutils.perf_counter()\n g = graph_klass(parent_map)\n if opts.lsprof is not None:\n heads = commands.apply_lsprofiled(opts.lsprof, all_heads_comp, g, comb)\n else:\n heads = all_heads_comp(g, comb)\n end = osutils.perf_counter()\n return {'elapsed': (end - begin), 'graph': g, 'heads': heads}\n\ndef report(name, g):\n print(f\"{name}: {g['elapsed']:.3f}s\")\n counters_used = False\n for c in graph._counters:\n if c:\n counters_used = True\n if counters_used:\n print(f' {graph._counters}')\n\nknown_python = combi_graph(_known_graph_py.KnownGraph, combinations)\nreport('Known', known_python)\n\nknown_pyrex = combi_graph(_known_graph_pyx.KnownGraph, combinations)\nreport('Known (pyx)', known_pyrex)\n\ndef _simple_graph(parent_map):\n return graph.Graph(graph.DictParentsProvider(parent_map))\n\nif opts.quick:\n print(f\"ratio: {known_python['elapsed'] / known_pyrex['elapsed']:.1f}:1 faster\")\nelse:\n orig = combi_graph(_simple_graph, combinations)\n report('Orig', orig)\n\n print(f\"ratio: {orig['elapsed'] / known_pyrex['elapsed']:.1f}:1 faster\")\n","repo_name":"breezy-team/breezy","sub_path":"tools/time_graph.py","file_name":"time_graph.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"22"} +{"seq_id":"2612353555","text":"\"\"\"\nThe script splits an IOB2 file into batches of 10 sentences each, up\nto N (--number) sentences in total.\n\"\"\"\nimport argparse\nimport sys\nimport random\n\nSENTENCES = 1500\n\n\ndef parse_args():\n \"\"\"Parses script arguments\"\"\"\n description = (\"Split an IOB2 file into batches of 10 sentences \"\n \"each, up to N sentences in total.\")\n\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n 'dataset',\n type=str,\n help='IOB2 file',\n )\n parser.add_argument(\n '-n',\n '--number',\n type=int,\n default=SENTENCES,\n help='total number of sentences (total batches = N//10)',\n )\n\n return parser.parse_args()\n\n\ndef read_sentences_from_file(ifile):\n raw_sentence = \"\"\n try:\n with open(ifile) as fhi:\n for line in fhi:\n if line == \"\\n\":\n if raw_sentence == \"\":\n continue\n yield raw_sentence\n raw_sentence = \"\"\n continue\n\n if line:\n raw_sentence += line\n\n if raw_sentence:\n yield raw_sentence\n except IOError as err:\n print(err, file=sys.stderr)\n sys.exit()\n\n\nargs = parse_args()\nsents = []\n\nfor sent in read_sentences_from_file(args.dataset):\n sents.append(sent)\n\nassert len(sents) >= args.number, (\n \"The value {args.number} must be <= the \"\n f\"number of sentences in the corpus ({len(sents)})\"\n)\nrandom_sents = random.sample(sents, k=args.number)\n\nnum = 0\nfor num in range(args.number//10):\n print(f'random_batch_{num}.iob')\n with open(f'random_batch_{num}.iob', 'w') as ofile:\n ofile.write('\\n'.join(random_sents[num * 10:(num*10)+10]) + '\\n')\n\nremain = args.number % 10\nif remain != 0:\n filename = f\"random_batch_{num + 1 if num != 0 else 0}.iob\"\n print(filename)\n with open(filename, 'w') as ofile:\n ofile.write('\\n'.join(random_sents[-remain:]) + '\\n')\n","repo_name":"sdocio/NER-experiments","sub_path":"utils/random_iob.py","file_name":"random_iob.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72198933177","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, svm\nfrom sklearn.decomposition import PCA\n\niris = datasets.load_iris()\norigData = iris.data\npca = PCA(n_components=2)\ndata = pca.fit_transform(origData)\nprint(data.shape) # breakpoint here, compare data/origData\n\ndatamax = data.max(axis=0) + 0.5 # y-axis, column\nprint(datamax)\n# foo = data.max(axis=1) # x-axis, row\n# print(foo)\ndatamin = data.min(axis=0) + 0.5\nn = 2000\nX, y = np.meshgrid(np.linspace(datamin[0], datamax[0], n),\n np.linspace(datamin[1], datamax[1], n))\n\nsvc = svm.SVC() # default kernel: RBF\nsvc.fit(data, iris.target)\nZ = svc.predict(np.c_[X.ravel(), y.ravel()])\nprint(np.unique(Z))\nplt.contour(X, y, Z.reshape(X.shape),\n levels=[-0.5, 0.5, 1.5, 2.5],\n colors=['r', 'g', 'b', 'k'])\n\nfor i, c in zip([0, 1, 2], ['r', 'g', 'b']):\n d = data[iris.target == i]\n plt.scatter(d[:, 0], d[:, 1], c=c, marker='.')\nplt.show()\n","repo_name":"leviliangtw/PYKT-MLLab","sub_path":"demo28.py","file_name":"demo28.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"32646289629","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom django.template import Context, Template\nfrom django.utils.safestring import mark_safe\nfrom .utils.model_dependent import get_model_obj_by_slug\nfrom writing.views.article import each_article\nfrom writing.views.blog import each_blog\nfrom .short_code import get_short_code_list, Decoder as ShortCodeDecoder\n\n\ndef slugified_page(request, slug):\n data = get_model_obj_by_slug(slug)\n if not data.get('model_type'):\n raise Http404()\n elif data.get('model_type') == 'article':\n return each_article(request, model_obj=data.get('model_obj'))\n elif data.get('model_type') == 'blog':\n return each_blog(request, model_obj=data.get('model_obj'))\n\n page = data.get('model_obj')\n\n short_code_list = get_short_code_list(page.content)\n context_for_rendering = {}\n for sc in short_code_list:\n SCD = ShortCodeDecoder(sc)\n rendered_html = SCD.get_html_as_str()\n context_for_rendering.update({SCD.code: mark_safe(rendered_html)})\n # print(\"\\n================\\n{}\\n=======\\n\".format(SCD.get_html_as_str()))\n\n page_content_template = Template(page.content)\n page_content_html = page_content_template.render(\n Context(context_for_rendering)\n )\n\n ctx = {\n \"page\": page,\n \"page_content\": page_content_html,\n }\n\n return render(request, 'distinct_pages/' + page.template_file, ctx)\n","repo_name":"wall-e-08/qturbo","sub_path":"packages/distinct_pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20962928215","text":"import pytest\nfrom pageObject.emailReceiptBoxPage import emailReceiptBoxPage\n\n\n\nclass TestLackSenderWriteMail():\n\n\n def test_lackSenderWriteMail(self,login003):\n self.driver = emailReceiptBoxPage(login003)\n self.driver.run_lackSenderWriteMail_case()\n\nif __name__ == '__main__':\n pytest.main([\"-v\",\"test_lackSenderWriteMail.py\"])\n","repo_name":"wuxiaokan/PycharmProjects","sub_path":"UIAutotest/emailUItest2/case/test_lackSenderWriteMail.py","file_name":"test_lackSenderWriteMail.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40695342600","text":"\"\"\"Nested Cross-Validation for scikit-learn using MPI.\n\nThis package provides nested cross-validation similar to scikit-learn's\nGridSearchCV but uses the Message Passing Interface (MPI)\nfor parallel computing.\n\"\"\"\n\nimport logging\nimport numpy\nfrom mpi4py import MPI\nimport pandas\nfrom sklearn.base import BaseEstimator\nfrom sklearn.model_selection import check_cv as _check_cv\nfrom sklearn.metrics.scorer import check_scoring\nfrom sklearn.base import is_classifier\nfrom sklearn.model_selection._search import _check_param_grid\nfrom sklearn.utils import check_X_y\n\nfrom palladio.MPIGridSearchCV import (MPIGridSearchCVMaster,\n MPIGridSearchCVSlave)\n\n\n__all__ = ('NestedGridSearchCV')\n\nLOG = logging.getLogger(__package__)\n\nMPI_TAG_RESULT = 3\n\nMPI_MSG_TERMINATE = 0\nMPI_MSG_CV = 1\nMPI_MSG_TEST = 2\nMPI_TAG_TRAIN_TEST_DATA = 5\n\ncomm = MPI.COMM_WORLD\ncomm_size = comm.Get_size()\ncomm_rank = comm.Get_rank()\n\n\ndef _get_best_parameters(fold_results, param_names):\n \"\"\"Get best setting of parameters from grid search.\n\n Parameters\n ----------\n fold_results : pandas.DataFrame\n Contains performance measures as well as hyper-parameters\n as columns. Must contain a column 'fold'.\n\n param_names : list\n Names of the hyper-parameters. Each name should be a column\n in ``fold_results``.\n\n Returns\n -------\n max_performance : pandas.Series\n Maximum performance and its hyper-parameters\n \"\"\"\n if pandas.isnull(fold_results.loc[:, 'score']).all():\n raise ValueError(\"Results are all NaN\")\n\n # average across inner folds\n grouped = fold_results.drop('fold', axis=1).groupby(param_names)\n mean_performance = grouped.mean()\n # highest average across performance measures\n max_idx = mean_performance.loc[:, 'score'].idxmax()\n\n # best parameters\n max_performance = pandas.Series({'score':\n mean_performance.loc[max_idx, 'score']})\n if len(param_names) == 1:\n key = param_names[0]\n max_performance[key] = max_idx\n else:\n # index has multiple levels\n for i, name in enumerate(mean_performance.index.names):\n max_performance[name] = max_idx[i]\n\n return max_performance\n\n\ndef _fit_and_score_with_parameters(X, y, cv, best_parameters):\n \"\"\"Distribute work of non-nested cross-validation across slave nodes.\"\"\"\n # tell slaves testing phase is next\n _task_desc = numpy.empty(2, dtype=int)\n _task_desc[1] = MPI_MSG_TEST\n\n comm.Bcast([_task_desc, MPI.INT], root=0)\n comm.bcast((X, y), root=0)\n\n # Compability with sklearn > 0.18 TODO\n _splitted_cv = [(a, b) for a, b in cv.split(X, y)]\n\n assert comm_size >= len(_splitted_cv)\n\n for i, (train_index, test_index) in enumerate(_splitted_cv):\n fold_id = i + 1\n LOG.info(\"Testing fold %d\", fold_id)\n\n parameters = best_parameters.loc[fold_id, :].to_dict()\n work_item = (fold_id, train_index, test_index, parameters)\n\n comm.send(work_item, dest=fold_id, tag=MPI_TAG_TRAIN_TEST_DATA)\n\n scores = {}\n for i in range(len(_splitted_cv)):\n fold_id, test_result = comm.recv(source=MPI.ANY_SOURCE,\n tag=MPI_TAG_RESULT)\n scores[fold_id] = test_result\n\n # Tell all nodes to terminate\n for i in range(len(_splitted_cv), comm_size):\n comm.send((0, None), dest=i, tag=MPI_TAG_TRAIN_TEST_DATA)\n\n return pandas.Series(scores)\n\n\nclass NestedGridSearchCV(BaseEstimator):\n \"\"\"Cross-validation with nested parameter search for each training fold.\n\n The data is first split into ``cv`` train and test sets. For each training\n set a grid search over the specified set of parameters is performed\n (inner cross-validation). The set of parameters that achieved the highest\n average score across all inner folds is used to re-fit a model on the\n entire training set of the outer cross-validation loop. Finally, results on\n the test set of the outer loop are reported.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n A object of that type is instantiated for each grid point.\n\n param_grid : dict or list of dictionaries\n Dictionary with parameters names (string) as keys and lists of\n parameter settings to try as values, or a list of such\n dictionaries, in which case the grids spanned by each dictionary\n in the list are explored. This enables searching over any sequence\n of parameter settings.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n See sklearn.metrics.get_scorer for details.\n\n fit_params : dict, optional\n Parameters to pass to the fit method.\n\n cv : integer or cross-validation generator, default=3\n If an integer is passed, it is the number of folds.\n Specific cross-validation objects can be passed, see\n sklearn.cross_validation module for the list of possible objects\n\n inner_cv : integer or callable, default=3\n If an integer is passed, it is the number of folds.\n If callable, the function must have the signature\n ``inner_cv_func(X, y)`` and return a cross-validation object,\n see sklearn.model_selection module for the list of possible objects.\n\n multi_output : boolean, default=False\n Allow multi-output y, as for multivariate regression.\n\n Attributes\n ----------\n best_params_ : pandas.DataFrame\n Contains selected parameter settings for each fold.\n The validation score refers to average score across all folds of the\n inner cross-validation, the test score to the score on the test set\n of the outer cross-validation loop.\n\n grid_scores_ : list of pandas.DataFrame\n Contains full results of grid search for each training set of the\n outer cross-validation loop.\n\n scorer_ : function\n Scorer function used on the held out data to choose the best\n parameters for the model.\n \"\"\"\n\n def __init__(self, estimator, param_grid, scoring=None, fit_params=None,\n cv=None, inner_cv=None, multi_output=False):\n self.scoring = scoring\n self.estimator = estimator\n self.param_grid = param_grid\n self.scoring = scoring\n self.fit_params = fit_params\n self.cv = cv\n self.inner_cv = inner_cv\n self.multi_output = multi_output\n\n def _grid_search(self, train_X, train_y):\n if callable(self.inner_cv):\n # inner_cv = self.inner_cv(train_X, train_y)\n inner_cv = self.inner_cv.split(train_X, train_y)\n else:\n # inner_cv = _check_cv(self.inner_cv, train_X, train_y,\n # classifier=is_classifier(self.estimator))\n inner_cv = _check_cv(self.inner_cv, train_y,\n classifier=is_classifier(\n self.estimator)).split(train_X, train_y)\n\n master = MPIGridSearchCVMaster(self.param_grid, inner_cv,\n self.estimator, self.scorer_,\n self.fit_params)\n return master.run(train_X, train_y)\n\n def _fit_master(self, X, y, cv):\n param_names = list(self.param_grid.keys())\n\n best_parameters = []\n grid_search_results = []\n for i, (train_index, test_index) in enumerate(cv.split(X, y)):\n LOG.info(\"Training fold %d\", i + 1)\n\n train_X = X[train_index, :]\n train_y = y[train_index]\n\n grid_results = self._grid_search(train_X, train_y)\n grid_search_results.append(grid_results)\n\n max_performance = _get_best_parameters(grid_results, param_names)\n LOG.info(\"Best performance for fold %d:\\n%s\", i + 1,\n max_performance)\n max_performance['fold'] = i + 1\n best_parameters.append(max_performance)\n\n best_parameters = pandas.DataFrame(best_parameters)\n best_parameters.set_index('fold', inplace=True)\n best_parameters['score (Test)'] = 0.0\n best_parameters.rename(columns={'score': 'score (Validation)'},\n inplace=True)\n\n scores = _fit_and_score_with_parameters(\n X, y, cv, best_parameters.loc[:, param_names])\n best_parameters['score (Test)'] = scores\n\n self.best_params_ = best_parameters\n self.grid_scores_ = grid_search_results\n\n def _fit_slave(self):\n slave = MPIGridSearchCVSlave(\n self.estimator, self.scorer_, self.fit_params)\n slave.run()\n\n def fit(self, X, y):\n \"\"\"Fit the model to the training data.\"\"\"\n X, y = check_X_y(X, y, force_all_finite=False,\n multi_output=self.multi_output)\n _check_param_grid(self.param_grid)\n\n # cv = _check_cv(self.cv, X, y, classifier=is_classifier(self.estimator))\n cv = _check_cv(self.cv, y, classifier=is_classifier(self.estimator))\n\n self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)\n\n if comm_rank == 0:\n self._fit_master(X, y, cv)\n else:\n self._fit_slave()\n\n return self\n","repo_name":"slipguru/palladio","sub_path":"palladio/MPINestedGridSearchCV.py","file_name":"MPINestedGridSearchCV.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"22"} +{"seq_id":"19163541576","text":"import math\nimport sys\nsys.setrecursionlimit(10**6)\n\n\n#Undirected, unweighted, 1-indexed\nclass Tree:\n def __init__(self, n, tree):\n self.n = n\n self.tree = tree\n self.log = math.ceil(math.log(n, 2))\n self.memo = [[-1 for i in range(self.log + 1)] for j in range(n+1)]\n self.lev = [0 for i in range(n + 1)]\n self.dfss(1, 1, self.memo, self.lev, self.log, tree)\n self.size = [0 for _ in range(n+1)]\n\n\n def dfss(self, u, p):\n self.memo[u][0] = p\n for i in range(1, self.log + 1):\n self.memo[u][i] = self.memo[self.memo[u][i - 1]][i - 1]\n \n for v in self.tree[u]:\n if v != p:\n self.lev[v] = self.lev[u] + 1\n self.dfs(v, u)\n \n def lca(self, u, v):\n if self.lev[u] < self.lev[v]:\n u, v = v, u\n for i in range(self.log, -1, -1):\n if (self.lev[u] - pow(2, i)) >= self.lev[v]:\n u = self.memo[u][i] \n if u == v:\n return v\n for i in range(self.log, -1, -1):\n if self.memo[u][i] != self.memo[v][i]:\n u = self.memo[u][i]\n v = self.memo[v][i] \n return self.memo[u][0]\n\n def get_dist(self, u, v):\n lc = self.lca(u, v)\n ans = self.lev[u] + self.lev[v] - (2 * self.lev[lc])\n return ans\n\n\n def find(self, u, p):\n self.size[u] = 1\n for v in self.tree[u]:\n if v != p:\n self.find(v, u)\n self.size[u] += self.size[v]\n\n def centroid(self, u, p, sz):\n for v in self.tree[u]:\n if p != v:\n if self.size[v] * 2 > sz:\n return self.centroid(v, u, sz)\n return u\n\n def get_centroid(self):\n self.find(1, -1) \n ans = self.centroid(1, -1, self.n)\n\n\n\n def tour(self, root):\n self.timer = 0\n self.start = [10**7 for _ in range(self.n)]\n self.end = [10**7 for _ in range(self.n)]\n self.dfst(root, -1)\n return self.start, self.end\n\n def dfst(self, node, parent):\n self.start[node] = self.timer\n self.timer += 1\n for neigh in self.tree[node]:\n if neigh != parent:\n self.dfst(neigh, node)\n self.end[node] = self.timer - 1\n\n","repo_name":"martarel/AlgoM","sub_path":"AlgoPack/AlgoM/Tree/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11268789694","text":"import pygame\nfrom sys import exit\n\npygame.init()\nscreen = pygame.display.set_mode((800,400))\nclock = pygame.time.Clock()\n\ntest_surface = pygame.Surface((100,200))\n\n\nsky_surface = pygame.image.load('./bg_desert.png')\nground_surface = pygame.image.load('./grasshalf.png')\n#text_surface = test_font.render('my game',False,'Green')\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n screen.blit(sky_surface,(0,0))\n screen.blit(ground_surface(0,300))\n\n pygame.display.update()\n clock.tick(60)","repo_name":"brentshierk/intensive_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39762133103","text":"class Solution:\n def pivotIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n size = len(nums)\n if not nums:\n return -1\n if size == 1 or (sum(nums)-nums[0] == 0):\n return 0\n left = [0 for _ in range(len(nums))]\n right = [0 for _ in range(len(nums))]\n left[0] = nums[0]\n right[0] = nums[size-1]\n for i in range(1, size):\n left[i] = nums[i] + left[i-1]\n right[i] = nums[size-1-i] + right[i-1]\n for i in range(1, size):\n if left[i] == right[size-1-i]:\n return i\n return -1\n \n# if not nums:\n# return -1\n# if len(nums) == 1 or (sum(nums)-nums[0] == 0):\n# return 0\n# for i in range(1, len(nums)):\n# left = right = 0\n# for j in range(i-1, -1, -1):\n# left += nums[j]\n# for k in range(i+1,len(nums)):\n# right += nums[k]\n \n# if left == right:\n# return i\n# return -1\n ","repo_name":"teohiho/exercise-abx","sub_path":"45-724-FindPivotIndex.py","file_name":"45-724-FindPivotIndex.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"47578482239","text":"from ...abstasks import AbsTaskSTS, CrosslingualTask\n\n\n_LANGUAGES = {\n \"en\": \"en\",\n \"de\": \"de\",\n \"es\": \"es\",\n \"pl\": \"pl\",\n \"tr\": \"tr\",\n \"ar\": \"ar\",\n \"ru\": \"ru\",\n \"zh\": \"zh\",\n \"fr\": \"fr\",\n \"de-en\": \"de-en\",\n \"es-en\": \"es-en\",\n \"it\": \"it\",\n \"pl-en\": \"pl-en\",\n \"zh-en\": \"zh-en\",\n \"es-it\": \"es-it\",\n \"de-fr\": \"de-fr\",\n \"de-pl\": \"de-pl\",\n \"fr-pl\": \"fr-pl\",\n}\n\n\nclass STS22CrosslingualSTS(AbsTaskSTS, CrosslingualTask):\n @property\n def description(self):\n return {\n \"name\": \"STS22\",\n \"hf_hub_name\": \"mteb/sts22-crosslingual-sts\",\n \"description\": \"SemEval 2022 Task 8: Multilingual News Article Similarity\",\n \"reference\": \"https://competitions.codalab.org/competitions/33835\",\n \"type\": \"STS\",\n \"category\": \"p2p\",\n \"eval_splits\": [\"test\"],\n \"eval_langs\": _LANGUAGES,\n \"main_score\": \"cosine_spearman\",\n \"min_score\": 1,\n \"max_score\": 4,\n \"revision\": \"6d1ba47164174a496b7fa5d3569dae26a6813b80\",\n }\n","repo_name":"xlang-ai/instructor-embedding","sub_path":"evaluation/MTEB/mteb/tasks/STS/STS22CrosslingualSTS.py","file_name":"STS22CrosslingualSTS.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1419,"dataset":"github-code","pt":"22"} +{"seq_id":"20911253318","text":"import copy\n\n\nclass VectorEnv:\n def __init__(self, env, n):\n self.envs = [copy.copy(env) for _ in range(n)]\n self.observation_space = self.envs[0].observation_space\n self.action_space = self.envs[0].action_space\n\n # Call this only once at the beginning of training (optional):\n def seed(self, seeds):\n assert len(self.envs) == len(seeds)\n return tuple(env.seed(s) for env, s in zip(self.envs, seeds))\n\n # Call this only once at the beginning of training:\n def reset(self):\n l = [env.reset() for env in self.envs]\n return l\n\n # Call this on every timestep:\n def step(self, actions):\n assert len(self.envs) == len(actions)\n observations = []\n rewards = []\n dones = []\n infos = []\n for env, a in zip(self.envs, actions):\n observation, reward, done, info = env.step(a)\n if done:\n observation = env.reset()\n observations.append(observation)\n rewards.append(reward)\n dones.append(done)\n infos.append(info)\n return observations, rewards, dones, infos\n\n # Call this at the end of training:\n def close(self):\n for env in self.envs:\n env.close()\n\ndef make_envs(env, kwargs):\n def make_final():\n env_ex = env(**kwargs)\n return env_ex\n return make_final\n","repo_name":"nathangrinsztajn/DAG-scheduling","sub_path":"ppo/vectorized_env.py","file_name":"vectorized_env.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"22"} +{"seq_id":"34942129231","text":"\nfrom flask import render_template, url_for\nimport logging\nimport re\nfrom typing import List, Optional\n\nfrom spaceship import email, achievements\nfrom spaceship.models import Invitation, Team, User\n\nlog = logging.getLogger('spaceship.invite')\n\nDEFAULT_SUBJECT = \"Join my Spaceship Earth crew!\"\n\n# an email is anything containing an `@` character, ending with whitespace and/or commas\nEMAIL_REGEX = re.compile(r\"\"\"[^@]+@[^\\s,]+\"\"\")\n\ndef default_message(inviter: User, team: Team):\n return render_template('invite_crew.html', inviter=inviter, team=team)\n\ndef send(inviter: User, team_id: int, subject: str, message: str, emails: List[str]) -> Optional[str]:\n team = Team.query.get(team_id)\n if not team or team not in inviter.teams:\n return f'You are not authorized to invite crew onto team {team_id}'\n\n subject = subject if subject else DEFAULT_SUBJECT\n message = message if message else default_message(inviter, team)\n\n # quilljs needs


    to show line breaks even though everything is in a paragraph\n # this creates extra line breaks in gmail so just strip it out\n message = message.replace('


    ', '')\n\n if not emails:\n return f'List a few folks to invite!'\n\n # find non-overlapping instances of an email address\n for invited_email in EMAIL_REGEX.findall(emails):\n iv = Invitation(\n inviter=inviter,\n team=team,\n invited_email=invited_email,\n message=message,\n status='sent')\n iv.save()\n\n # each recipient sees a unique invite link\n invite_url = url_for('accept_invitation', key=iv.key_for_sharing, _external=True)\n if 'href=\"join\"' in message:\n html_content = message.replace('href=\"join\"', f'href=\"{invite_url}\"')\n else:\n # if the user deleted the invite link, put it at the bottom\n html_content = f'{message}\\n

    Click here to join

    '\n\n try:\n email.send.delay(\n to_emails=invited_email,\n subject=subject,\n html_content=html_content,\n )\n except Exception as e:\n log.exception(e.args)\n return \"Failed to send email; try again?\"\n\n achievements.invite_crew(inviter)\n","repo_name":"spaceshipearth/pyspaceship","sub_path":"spaceship/team_invite.py","file_name":"team_invite.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"27773053264","text":"from pynput import mouse, keyboard\nfrom time import time\nimport json\nimport os\n#mouse listener, starts manually because otherwise the listeners would run sequentially instead of simulteaneously is a global so both can be stopped by escape key at once\nmouse_listener = None\n#declare start time globally so all callbacks can reference it\nstart_time = time()\n\nunreleased_keys=[]\n#storing all inputs\ninput_events=[]\n\nOUTPUT_FILENAME= 'runefarm2'\n\n\nclass EventType():\n KEYDOWN = 'keyDown'\n KEYUP = 'keyUp'\n CLICK = 'click'\n MOVE = 'move'\n\n\ndef main():\n runListeners()\n print(\"Recording duration {} seconds.\".format(elapsed_time()))\n global input_events\n print(json.dumps(input_events))\n \n script_dir = os.path.dirname(__file__)\n filepath= os.path.join(script_dir, 'recordings', '{}.json'.format(OUTPUT_FILENAME))\n \n with open(filepath, 'w') as outfile:\n json.dump(input_events, outfile, indent=4)\n \ndef elapsed_time():\n global start_time\n return time()-start_time\n\ndef record_event(event_type, event_time, button,pos=None):\n global input_events\n input_events.append({\n 'time': event_time,\n 'type':event_type,\n 'button': str(button),\n 'pos': pos})\n \ndef on_move(x, y):\n record_event(EventType.MOVE, elapsed_time(), None,(x,y))\n\ndef on_click(x, y, button, pressed):\n if not pressed:\n try:\n record_event(EventType.CLICK, elapsed_time(),button)\n except AttributeError:\n record_event(EventType.CLICK, elapsed_time(), button, pos)\n # if not pressed:\n # Stop listener\n # return False sdfasdfasdf\n \n \n# asdsdfsd\n \n\n \ndef on_press(key):\n #we only want to record first key press and how long it was held. So we add it to unreleased_keys we can exit sdfsdssasd\n global unreleased_keys\n if key in unreleased_keys:\n return\n else:\n unreleased_keys.append(key)\n \n try:\n record_event(EventType.KEYDOWN, elapsed_time(),key.char)\n except AttributeError:\n record_event(EventType.KEYDOWN, elapsed_time(), key)\n\ndef on_release(key):\n # remove the key from the golabl unreleased keys asda\n global unreleased_keys\n try: \n unreleased_keys.remove(key)\n except ValueError:\n print('ERROR: {} not in unreleased_keys '.format(key))\n try:\n record_event(EventType.KEYUP, elapsed_time(),key.char)\n except AttributeError:\n record_event(EventType.KEYUP, elapsed_time(), key)\n\n if key == keyboard.Key.esc:\n #Stop mouse listner\n global mouse_listener\n mouse_listener.stop()\n # Stop keyboard listener\n return False\n \ndef runListeners():\n global mouse_listener\n \n mouse_listener = mouse.Listener(on_click=on_click)# on_move=on_move) Removed for now\n mouse_listener.start()\n mouse_listener.wait() # makes mouse_listener wait until the main listener is started\n #Simple Keyboard listener\n with keyboard.Listener( on_press=on_press,\n on_release=on_release) as listener:\n global start_time\n start_time=time()\n listener.join()\n \nif __name__==\"__main__\":\n main()\n","repo_name":"Stoyko-The-Creator/Keylogger","sub_path":"keylog.py","file_name":"keylog.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72364739576","text":"# # python3\r\n# cnt = input()\r\n# Alpha = [0, 0, 0] # 甲乙\r\n# Beta = [0, 0, 0]\r\n\r\n# # count the succeed hand\r\n# Suc_Alpha = []\r\n# Suc_Beta = []\r\n\r\n# states = [(1, -1), (-1, 1), (0, 0)] # >, <, =\r\n\r\n# Table = [[(\"C\", \"J\"), states[0]], [(\"C\", \"B\"), states[1]],\r\n # [(\"J\", \"C\"), states[1]], [(\"B\", \"C\"), states[0]],\r\n # [(\"B\", \"J\"), states[1]], [(\"J\", \"B\"), states[0]]]\r\n\r\n\r\n# def jCB(a, b):\r\n # if a == b:\r\n # return states[2]\r\n # else:\r\n # for i in range(len(Table)):\r\n # if a == Table[i][0][0] and b == Table[i][0][1]:\r\n # return Table[i][1]\r\n\r\n# def maxNum(j, c, b):\r\n # # no equal condition\r\n # if j > b and j > c:\r\n # return \"J\"\r\n # elif c > j and c > b:\r\n # return \"C\"\r\n\t # # elif b > c and b > j:\t\t\r\n # else:\r\n # return \"B\"\r\n\r\n# def sucessGeture(suc):\r\n # #\r\n # j, c, b = [suc.count(G) for G in [\"J\", \"C\", \"B\"]]\r\n # if j == b == c:\r\n # return \"B\"\r\n # elif j == c:\r\n # if b > j:\r\n # return \"B\"\r\n # else:\r\n # return \"C\"\r\n # elif c == b:\r\n # if j > c:\r\n # return \"J\"\r\n # else:\r\n # return \"B\"\r\n # elif j == b:\r\n # if c > j:\r\n # return \"C\"\r\n # else:\r\n # return \"B\"\r\n # else:\r\n # return MaxNum(j, c, b)\r\n\r\n# def myPrint(array):\r\n # length= len(array)\r\n # for i in range(length):\r\n # print(array[i], end=\"\")\r\n # if i < (length - 1):\r\n # print(\" \", end=\"\")\r\n # print(\"\")\r\n\r\n# for j in range(int(cnt)):\r\n # Gestures = input()\r\n # A, B = Gestures[0], Gestures[2]\r\n # # 1 0 -1\r\n # Ans_a, Ans_b = jCB(A, B)[0], jCB(A, B)[1]\r\n # # succeed\r\n # if Ans_a == 1:\r\n # Alpha[0] += 1\r\n # Beta[2] += 1\r\n # Suc_Alpha.append(A) # count success gesture\r\n # elif Ans_b == 1:\r\n # Beta[0] += 1\r\n # Alpha[2] += 1 # failed\r\n # Suc_Beta.append(B)\r\n # # equal\r\n # elif Ans_a == 0:\r\n # Alpha[1] += 1\r\n # Beta[1] += 1\r\n\r\n# myPrint(Alpha)\r\n# myPrint(Beta)\r\n# print(sucessGeture(Suc_Alpha), sucessGeture(Suc_Beta))\r\n\r\n# python3\r\ndef jCB(a, b):\r\n if a == b:\r\n return states[2]\r\n else:\r\n return Table[(a, b)]\r\n\r\ndef sucessGeture(suc):\r\n\tmax_cnt = max(list(suc.values()))\r\n\tmax_gess = [key for key in suc.keys() if suc[key] == max_cnt]\r\n\treturn sorted(max_gess)[0]\r\n\r\ndef myPrint(array):\r\n\tprint(\"{} {} {}\".format(array[0], array[1], array[2]))\r\n\r\ncnt = input()\r\nAlpha, Beta = [0, 0, 0], [0, 0, 0] # 甲乙\r\nSuc_Alpha = {ges:0 for ges in [\"J\", \"C\", \"B\"]} # count the succeed hand\r\nSuc_Beta = {ges:0 for ges in [\"J\", \"C\", \"B\"]}\r\n\r\n\r\nstates = [(1, -1), (-1, 1), (0, 0)] # >, <, =\r\nTable = { (\"C\", \"J\"):states[0], (\"C\", \"B\"):states[1],\r\n (\"J\", \"C\"):states[1], (\"B\", \"C\"):states[0],\r\n (\"B\", \"J\"):states[1], (\"J\", \"B\"):states[0]} \r\n\r\nfor j in range(int(cnt)):\r\n Gestures = input()\r\n A, B = Gestures[0], Gestures[2]\r\n Ans_a, Ans_b = jCB(A, B)[0], jCB(A, B)[1] # 1 0 -1\r\n if Ans_a == 1:\r\n Alpha[0] += 1 # succeed\r\n Beta[2] += 1\r\n Suc_Alpha[A] += 1 # count success gesture\r\n elif Ans_b == 1:\r\n Beta[0] += 1 # failed\r\n Alpha[2] += 1 \r\n Suc_Beta[B] += 1\r\n elif Ans_a == 0: # equal\r\n Alpha[1] += 1\r\n Beta[1] += 1\r\n\r\nmyPrint(Alpha)\r\nmyPrint(Beta)\r\nprint(sucessGeture(Suc_Alpha), sucessGeture(Suc_Beta))\r\n","repo_name":"DayDreamChaser/OnlineJudge","sub_path":"PTA/B_level/1018_HammerScissors.py","file_name":"1018_HammerScissors.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29705003194","text":"import numpy as np\n\nfrom deap.helpers import bisect_min, getOutputShape\nfrom deap.photonics import MRRTransferFunction\nfrom deap.photonics import MRMTransferFunction\nfrom deap.photonics import PWB\nfrom deap.photonics import LaserDiodeArray\nfrom deap.photonics import ModulatorArray\nfrom deap.photonics import PWBArray\nfrom deap.photonics import PhotonicConvolver\n\n\nclass PWBMapper:\n \"\"\"\n Class that that maps a set of weights to an equivalent\n photonic pwb.\n \"\"\"\n def _precomputeDropputToPhaseMapping(precision):\n \"\"\"\n Creates a mapping between phase and dropput by index. For a given\n precision.\n\n I.e. At a an index i, a phase shift of phi[i] will correspond to a\n of dropput[i] and vice-versa.\n \"\"\"\n mrr = MRRTransferFunction()\n dropput = np.linspace(1.1e-4, 1, precision)\n phase = mrr.phaseFromDropput(dropput)\n return phase, dropput\n\n _precision = 127\n\n # Precomputed phase and dropput values for a given precision.\n # These will be populated when a pwb is created.\n _phase = None\n _dropput = None\n\n def setPrecision(newPrecision):\n assert 1 <= newPrecision\n if newPrecision % 2 == 0:\n newPrecision -= 1\n\n PWBMapper._precision = newPrecision\n PWBMapper._phase = None\n PWBMapper._dropput = None\n\n def computePhaseShifts(weights):\n if PWBMapper._phase is None or PWBMapper._dropput is None:\n # Compute phase and dropput mapping if it hasn't been done\n # already.\n PWBMapper._phase, PWBMapper._dropput = \\\n PWBMapper._precomputeDropputToPhaseMapping(\n PWBMapper._precision)\n\n weights = np.asarray(weights)\n maxElement = np.amax(np.abs(weights))\n\n # Normalize the weights and compute the post-optical gain.\n outputGain = max(maxElement, 1)\n normalized_weights = weights / outputGain\n\n # Determine what dropput values are needed using the formula:\n # Td - Tp = w => Td - (1 - Td) = w => Td = (w + 1) / 2\n desiredDropputs = (normalized_weights + 1) / 2\n\n phaseShifts = np.zeros(weights.size)\n for i, desiredDropput in enumerate(desiredDropputs):\n index = bisect_min(PWBMapper._dropput, desiredDropput)\n phaseShifts[i] = PWBMapper._phase[index]\n\n return phaseShifts, outputGain\n\n def build(weights):\n \"\"\"\n Creates a new photonic pwb from a set of weights\n \"\"\"\n phaseShifts, outputGain = \\\n PWBMapper.computePhaseShifts(weights)\n return PWB(phaseShifts, outputGain)\n\n def updateWeights(photonicNeuron, weights):\n \"\"\"\n Updates an existing photonic pwb from a set of weights\n \"\"\"\n phaseShifts, outputGain = \\\n PWBMapper.computePhaseShifts(weights)\n photonicNeuron._update(phaseShifts, outputGain)\n\n\nclass LaserDiodeArrayMapper:\n \"\"\"\n Class that maps a convolution size and input size to an array of laser\n diodes.\n \"\"\"\n def build(inputShape, outputShape, power=1):\n # Create adjacency list for input and output shape.\n grid = np.indices(outputShape)\n rows = grid[0] % inputShape[0]\n cols = grid[1] % inputShape[1]\n connections = np.dstack((rows, cols))\n\n return LaserDiodeArray(inputShape, outputShape, connections, power)\n\n\nclass ModulatorArrayMapper:\n \"\"\"\n Class that maps a relative intenstiy matrix to an array of optical\n modulators.\n \"\"\"\n _mrm = MRMTransferFunction()\n\n def computePhaseShifts(intenstiyMatrix, normval):\n assert not np.any(intenstiyMatrix < 0)\n if normval is None:\n normval = max(np.amax(intenstiyMatrix), 1)\n normalized = intenstiyMatrix / normval\n return ModulatorArrayMapper._mrm.phaseFromThroughput(\n normalized)\n\n def build(inputs, normval=None):\n phaseShifts = \\\n ModulatorArrayMapper.computePhaseShifts(inputs, normval)\n return ModulatorArray(phaseShifts)\n\n def updateInputs(modulatorArray, inputs, normval=None):\n phaseShifts = \\\n ModulatorArrayMapper.computePhaseShifts(inputs, normval)\n modulatorArray._update(phaseShifts)\n\n\nclass PWBArrayMapper:\n \"\"\"\n Class that maps a convolved matrix using photonic pwbs\n \"\"\"\n\n def _createConnectionGraph(\n inputShape, kernel, stride, outputShape):\n connections = np.full(\n outputShape + (kernel.shape[0] * kernel.shape[1], 2),\n fill_value=-1)\n counts = np.zeros(inputShape)\n filterSize = kernel.shape[0]\n\n for row in range(connections.shape[0]):\n for col in range(connections.shape[1]):\n rowStart = row * stride\n colStart = col * stride\n colEnd = min(colStart + filterSize, inputShape[1])\n rowEnd = min(rowStart + filterSize, inputShape[0])\n R, C = np.mgrid[rowStart:rowEnd, colStart:colEnd]\n counts[rowStart:rowEnd, colStart:colEnd] += 1\n conn = np.column_stack((R.ravel(), C.ravel()))\n connections[row, col, :conn.shape[0], :] = conn\n\n return connections, counts\n\n def _setWeights(\n pwbs, outputShape, connections, kernel, sharedCounts, stride):\n for row in range(outputShape[0]):\n for col in range(outputShape[1]):\n conn = connections[row, col]\n # Get the number of times the inupts were shared\n count = sharedCounts[conn[:, 0], conn[:, 1]].ravel()\n\n # Assign the weights using the kernel\n rDiff = row * stride\n cDiff = col * stride\n weights = count * \\\n kernel[conn[:, 0] - rDiff, conn[:, 1] - cDiff] \\\n .ravel()\n\n if pwbs[row, col] is None:\n pwbs[row, col] = \\\n PWBMapper.build(weights)\n else:\n PWBMapper.updateWeights(\n pwbs[row, col], weights)\n return pwbs\n\n def updateKernel(pwbArray, newKernel):\n PWBArrayMapper._setWeights(\n pwbArray.pwbs,\n pwbArray.connections.shape[:2],\n pwbArray.connections,\n newKernel,\n pwbArray.sharedCounts,\n pwbArray.stride)\n\n def build(inputShape, kernel, stride=1):\n assert kernel.ndim == 2 or kernel.ndim == 3\n assert kernel.shape[0] == kernel.shape[1]\n\n outputShape = getOutputShape(inputShape, kernel.shape, 0, stride)[:-1]\n connections, sharedCounts = \\\n PWBArrayMapper._createConnectionGraph(\n inputShape, kernel, stride, outputShape)\n\n pwbs = PWBArrayMapper. \\\n _setWeights(\n np.full(outputShape, fill_value=None, dtype=object),\n outputShape, connections, kernel, sharedCounts, stride)\n\n return PWBArray(\n inputShape,\n connections,\n pwbs,\n sharedCounts,\n stride)\n\n\nclass PhotonicConvolverMapper:\n \"\"\"\n Class that builds an entire photonic convolver that is capabale of\n performing a full convolution.\n \"\"\"\n\n def build(image=None, kernel=None, stride=1, power=1,\n imageShape=None, kernelShape=None, normval=None):\n\n if image is None:\n assert imageShape is not None\n image = np.zeros(imageShape)\n\n if kernel is None:\n assert kernelShape is not None\n kernel = np.zeros(kernelShape)\n\n laserDiodeArray = LaserDiodeArrayMapper.build(\n kernel.shape, image.shape, power)\n modulatorArray = ModulatorArrayMapper.build(\n image, normval=normval)\n pwbArray = PWBArrayMapper.build(\n image.shape, kernel, stride)\n\n return PhotonicConvolver(\n laserDiodeArray,\n modulatorArray,\n pwbArray)\n","repo_name":"Shastri-Lab/DEAP","sub_path":"deap/mappers.py","file_name":"mappers.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"7116395404","text":"from lib.card import Card\nfrom lib.turn import Turn\nfrom lib.deck import Deck\nfrom lib.round import Round\n\ncard_1 = Card(\"In what state is Greendale Community College?\", \"colorado\", 'Community Trivia')\ncard_2 = Card(\"What is the first name of the character that wins Dungeons and Dragons in the first D&D episode?\", \"pierce\", 'Community Trivia')\ncard_3 = Card(\"What is the slogan of the STD fair?\", \"catch knowledge\", 'Community Trivia')\ncard_4 = Card('According to Greek mythology, who was the first woman on Earth?', 'pandora', 'Random Trivia')\ncard_5 = Card('Two US States dont recognize daylight savings. Hawaii is one. What is the other?', 'arizona', 'Random Trivia')\ncard_6 = Card('The ____ is the loudest animal on Earth.', 'sperm whale', 'Random Trivia')\n\ncards = [card_1, card_2, card_3, card_4, card_5, card_6]\ndeck = Deck(cards)\nround = Round(deck)\ntotal_cards = len(cards)\ncategories = deck.all_categories()\n\nintro = \"Welcome to Flashcards! You are playing with %s cards\" % total_cards\nprint(intro)\n\nwhile len(cards) > 0:\n print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')\n print('This is card number %s out of %s' % (total_cards - len(cards) + 1, total_cards))\n print('Question: %s' % round.current_card().question)\n\n guess = input('>>').lower()\n \n guess = round.take_turn(guess)\n answer = guess.card.answer\n\n print(\"%s\" % round.turns[-1].feedback())\n\n\n if guess.correct() == False:\n print('The answer is %s' % answer)\n\nprint(\"****** Game over! ******\")\nprint(\"Here is how you did overall:\")\nprint('You had %s guesses correct out of %s questions for an overall score of %s' % (round.number_correct(), total_cards, round.percent_correct()))\nprint('------------------------------')\nprint('Here is how you did by category:')\n\nfor category in categories:\n print('%s: %s' % (category, round.percent_correct_by_category(category)))","repo_name":"jordanholtkamp/python_flash_cards","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1555546795","text":"from flask import session\nfrom db import db\n\ndef get_attendance_info(event_id):\n user_id = session.get(\"user_id\")\n try:\n sql = \"\"\"SELECT attending FROM Attendance WHERE event_id=:event_id AND user_id=:user_id\"\"\"\n attending_old = db.session.execute(sql, {\"event_id\":event_id, \"user_id\":user_id}).fetchone()[0]\n except:\n return None\n return attending_old\n\ndef add_attendance_info(event_id, attending):\n user_id = session.get(\"user_id\")\n attending_old = get_attendance_info(event_id)\n if attending_old in (False, True):\n try:\n sql = \"\"\"UPDATE Attendance SET attending=:attending WHERE event_id=:event_id AND user_id=:user_id\"\"\"\n db.session.execute(sql, {\"attending\":attending, \"event_id\":event_id, \"user_id\":user_id})\n db.session.commit()\n except:\n return False\n else:\n try:\n sql = \"\"\"INSERT INTO Attendance (event_id, user_id, attending) VALUES (:event_id, :user_id, :attending)\"\"\"\n db.session.execute(sql, {\"event_id\":event_id, \"user_id\":user_id, \"attending\":attending})\n db.session.commit()\n except:\n return False\n return True\n\ndef get_attendees(event_id):\n try:\n sql = \"\"\"SELECT a.user_id, u.username FROM Attendance a, Users u \n WHERE a.event_id=:event_id AND attending=True AND a.user_id=u.user_id\"\"\"\n attendees = db.session.execute(sql, {\"event_id\":event_id}).fetchall()\n except:\n return []\n return attendees","repo_name":"ellisrnm/tapahtumasovellus","sub_path":"attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4764315895","text":"from common.srdhelper import bitpack\nimport json\nimport sigrokdecode as srd\nimport struct\n\n'''\nOUTPUT_PYTHON format:\n\nPacket:\n[, ]\n\nThis is a list of s and their respective values:\n - 'RAW': The data is a tuple of bit count and bit pattern (a number,\n assuming unsigned integer presentation of the input data bit pattern).\n - 'NUMBER': The data is the conversion result of the bit pattern.\n - 'ENUM': The data is a tuple of the raw number and its mapped text.\n'''\n\n# TODO Better raise the number of channels to 32. This allows access to\n# IEEE754 single precision numbers, and shall cover most busses, _and_\n# remains within most logic analyzers' capabilities, and keeps the UI\n# dialog somewhat managable. What's a good default for the number of\n# enum slots (which translate to annotation rows)? Notice that 2 to the\n# power of the channel count is way out of the question. :)\n_max_channels = 16\n_max_enum_slots = 32\n\nclass ChannelError(Exception):\n pass\n\nclass Pin:\n CLK, BIT_0 = range(2)\n BIT_N = BIT_0 + _max_channels\n\nclass Ann:\n RAW, NUM = range(2)\n ENUM_0 = NUM + 1\n ENUM_OVR = ENUM_0 + _max_enum_slots\n ENUMS = range(ENUM_0, ENUM_OVR)\n WARN = ENUM_OVR + 1\n\n @staticmethod\n def enum_indices():\n return [i for i in range(Ann.ENUMS)]\n\n @staticmethod\n def get_enum_idx(code):\n if code in range(_max_enum_slots):\n return Ann.ENUM_0 + code\n return Ann.ENUM_OVR\n\ndef _channel_decl(count):\n return tuple([\n {'id': 'bit{}'.format(i), 'name': 'Bit{}'.format(i), 'desc': 'Bit position {}'.format(i)\n #, 'idn':'dec_numbers_and_state_Bit{}'.format(i)\n }\n for i in range(count)\n ])\n\ndef _enum_cls_decl(count):\n return tuple([\n ('enum{}'.format(i), 'Enumeration slot {}'.format(i))\n for i in range(count)\n ] + [('enumovr', 'Enumeration overflow')])\n\ndef _enum_rows_decl(count):\n return tuple([\n ('enums{}'.format(i), 'Enumeration slots {}'.format(i), (Ann.ENUM_0 + i,))\n for i in range(count)\n ] + [('enumsovr', 'Enumeration overflows', (Ann.ENUM_OVR,))])\n\nclass Decoder(srd.Decoder):\n api_version = 3\n id = 'numbers_and_state'\n name = 'Numbers and State'\n longname = 'Interpret bit patters as numbers or state enums'\n desc = 'Interpret bit patterns as different kinds of numbers (integer, float, enum).'\n license = 'gplv2+'\n inputs = ['logic']\n outputs = ['numbers_and_state']\n tags = ['Encoding', 'Util']\n optional_channels = (\n {'id': 'clk', 'name': 'Clock', 'desc': 'Clock', 'idn':'dec_numbers_and_state_chan_clk'},\n ) + _channel_decl(_max_channels)\n options = (\n {'id': 'clkedge', 'desc': 'Clock edge', 'default': 'rising',\n 'values': ('rising', 'falling', 'either'), 'idn':'dec_numbers_and_state_opt_clkedge'},\n {'id': 'count', 'desc': 'Total bits count', 'default': 0, 'idn':'dec_numbers_and_state_opt_count'},\n {'id': 'interp', 'desc': 'Interpretation', 'default': 'unsigned',\n 'values': ('unsigned', 'signed', 'fixpoint', 'fixsigned', 'ieee754', 'enum'), 'idn':'dec_numbers_and_state_opt_interp'},\n {'id': 'fracbits', 'desc': 'Fraction bits count', 'default': 0, 'idn':'dec_numbers_and_state_opt_fracbits'},\n {'id': 'mapping', 'desc': 'Enum to text map file',\n 'default': 'enumtext.json', 'idn':'dec_numbers_and_state_opt_mapping'},\n {'id': 'format', 'desc': 'Number format', 'default': '-',\n 'values': ('-', 'bin', 'oct', 'dec', 'hex'), 'idn':'dec_numbers_and_state_opt_format'},\n )\n annotations = (\n ('raw', 'Raw pattern'),\n ('number', 'Number'),\n ) + _enum_cls_decl(_max_enum_slots) + (\n ('warning', 'Warning'),\n )\n annotation_rows = (\n ('raws', 'Raw bits', (Ann.RAW,)),\n ('numbers', 'Numbers', (Ann.NUM,)),\n ) + _enum_rows_decl(_max_enum_slots) + (\n ('warnings', 'Warnings', (Ann.WARN,)),\n )\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n pass\n\n def start(self):\n self.out_ann = self.register(srd.OUTPUT_ANN)\n self.out_python = self.register(srd.OUTPUT_PYTHON)\n\n def putg(self, ss, es, cls, data):\n self.put(ss, es, self.out_ann, [cls, data])\n\n def putpy(self, ss, es, ptype, pdata):\n self.put(ss, es, self.out_python, (ptype, pdata))\n\n def grab_pattern(self, pins):\n '''Get a bit pattern from potentially incomplete probes' values.'''\n\n # Pad and trim the input data, to achieve the user specified\n # total number of bits. Map all unassigned signals to 0 (low).\n # Return raw number (unsigned integer interpreation).\n bits = pins + (None,) * self.bitcount\n bits = bits[:self.bitcount]\n bits = [b if b in (0, 1) else 0 for b in bits]\n pattern = bitpack(bits)\n return pattern\n\n def handle_pattern(self, ss, es, pattern):\n fmt = '{{:0{}b}}'.format(self.bitcount)\n txt = fmt.format(pattern)\n self.putg(ss, es, Ann.RAW, [txt])\n self.putpy(ss, es, 'RAW', (self.bitcount, pattern))\n\n try:\n value = self.interpreter(ss, es, pattern)\n except:\n value = None\n if value is None:\n return\n self.putpy(ss, es, 'NUMBER', value)\n try:\n formatted = self.formatter(ss, es, value)\n except:\n formatted = None\n if formatted:\n self.putg(ss, es, Ann.NUM, formatted)\n if self.interpreter == self.interp_enum:\n cls = Ann.get_enum_idx(pattern)\n self.putg(ss, es, cls, formatted)\n self.putpy(ss, es, 'ENUM', (value, formatted))\n\n def interp_unsigned(self, ss, es, pattern):\n value = pattern\n return value\n\n def interp_signed(self, ss, es, pattern):\n if not 'signmask' in self.interp_state:\n self.interp_state.update({\n 'signmask': 1 << (self.bitcount - 1),\n 'signfull': 1 << self.bitcount,\n })\n is_neg = pattern & self.interp_state['signmask']\n if is_neg:\n value = -(self.interp_state['signfull'] - pattern)\n else:\n value = pattern\n return value\n\n def interp_fixpoint(self, ss, es, pattern):\n if not 'fixdiv' in self.interp_state:\n self.interp_state.update({\n 'fixsign': self.options['interp'] == 'fixsigned',\n 'fixdiv': 2 ** self.options['fracbits'],\n })\n if self.interp_state['fixsign']:\n value = self.interp_signed(ss, es, pattern)\n else:\n value = self.interp_unsigned(ss, es, pattern)\n value /= self.interp_state['fixdiv']\n return value\n\n def interp_ieee754(self, ss, es, pattern):\n if not 'ieee_has_16bit' in self.interp_state:\n self.interp_state.update({\n 'ieee_fmt_int_16': '=H',\n 'ieee_fmt_flt_16': '=e',\n 'ieee_fmt_int_32': '=L',\n 'ieee_fmt_flt_32': '=f',\n 'ieee_fmt_int_64': '=Q',\n 'ieee_fmt_flt_64': '=d',\n })\n try:\n fmt = self.interp_state.update['ieee_fmt_flt_16']\n has_16bit_support = 8 * struct.calcsize(fmt) == 16\n except:\n has_16bit_support = False\n self.interp_state['ieee_has_16bit'] = has_16bit_support\n if self.bitcount == 16:\n if not self.interp_state['ieee_has_16bit']:\n return None\n buff = struct.pack(self.interp_state['ieee_fmt_int_16'], pattern)\n value, = struct.unpack(self.interp_state['ieee_fmt_flt_16'], buff)\n return value\n if self.bitcount == 32:\n buff = struct.pack(self.interp_state['ieee_fmt_int_32'], pattern)\n value, = struct.unpack(self.interp_state['ieee_fmt_flt_32'], buff)\n return value\n if self.bitcount == 64:\n buff = struct.pack(self.interp_state['ieee_fmt_int_64'], pattern)\n value, = struct.unpack(self.interp_state['ieee_fmt_flt_64'], buff)\n return value\n return None\n\n def interp_enum(self, ss, es, pattern):\n if not 'enum_map' in self.interp_state:\n self.interp_state.update({\n 'enum_fn': self.options['mapping'],\n 'enum_map': {},\n 'enum_have_map': False,\n })\n try:\n fn = self.interp_state['enum_fn']\n # TODO Optionally try in several locations? Next to the\n # decoder implementation? Where else? Expect users to\n # enter absolute paths?\n with open(fn, 'r') as f:\n maptext = f.read()\n maptable = {}\n if fn.endswith('.js') or fn.endswith('.json'):\n # JSON requires string literals on the LHS, so the\n # table is written \"in reverse order\".\n js_table = json.loads(maptext)\n for k, v in js_table.items():\n maptable[v] = k\n elif fn.endswith('.py'):\n # Expect a specific identifier at the Python module\n # level, and assume that it's a dictionary.\n py_table = {}\n exec(maptext, py_table)\n maptable.update(py_table['enumtext'])\n self.interp_state['enum_map'].update(maptable)\n self.interp_state['enum_have_map'] = True\n except:\n # Silently ignore failure. This happens while the user\n # is typing the filename, and is non-fatal. If the file\n # exists and is not readable or not valid or of unknown\n # format, the worst thing that can happen is that the\n # decoder implementation keeps using \"anonymous\" phrases\n # until a mapping has become available. No harm is done.\n # This decoder cannot tell intermediate from final file\n # read attempts, so we cannot raise severity here.\n pass\n value = self.interp_state['enum_map'].get(pattern, None)\n if value is None:\n value = pattern\n return value\n\n def format_native(self, ss, es, value):\n return ['{}'.format(value),]\n\n def format_bin(self, ss, es, value):\n if not self.format_string:\n self.format_string = '{{:0{}b}}'.format(self.bitcount)\n return [self.format_string.format(value)]\n\n def format_oct(self, ss, es, value):\n if not self.format_string:\n self.format_string = '{{:0{}o}}'.format((self.bitcount + 3 - 1) // 3)\n return [self.format_string.format(value)]\n\n def format_dec(self, ss, es, value):\n if not self.format_string:\n self.format_string = '{:d}'\n return [self.format_string.format(value)]\n\n def format_hex(self, ss, es, value):\n if not self.format_string:\n self.format_string = '{{:0{}x}}'.format((self.bitcount + 4 - 1) // 4)\n return [self.format_string.format(value)]\n\n def decode(self):\n channels = [ch for ch in range(_max_channels) if self.has_channel(ch)]\n have_clk = Pin.CLK in channels\n if have_clk:\n channels.remove(Pin.CLK)\n if not channels:\n raise ChannelError(\"Need at least one bit channel.\")\n if have_clk:\n clkedge = {\n 'rising': 'r',\n 'falling': 'f',\n 'either': 'e',\n }.get(self.options['clkedge'])\n wait_cond = {Pin.CLK: clkedge}\n else:\n wait_cond = [{ch: 'e'} for ch in channels]\n\n bitcount = self.options['count']\n if not bitcount:\n bitcount = channels[-1] - Pin.BIT_0 + 1\n self.bitcount = bitcount\n\n self.interpreter = {\n 'unsigned': self.interp_unsigned,\n 'signed': self.interp_signed,\n 'fixpoint': self.interp_fixpoint,\n 'fixsigned': self.interp_fixpoint,\n 'ieee754': self.interp_ieee754,\n 'enum': self.interp_enum,\n }.get(self.options['interp'])\n self.interp_state = {}\n self.formatter = {\n '-': self.format_native,\n 'bin': self.format_bin,\n 'oct': self.format_oct,\n 'dec': self.format_dec,\n 'hex': self.format_hex,\n }.get(self.options['format'])\n self.format_string = None\n\n bFirst = True\n cur_cond = None\n \n while True:\n (clk, d0, d1, d2, d3, d4, d5, d6, d7,d8, d9,d10 ,d11 ,d12 ,d13 ,d14 ,d15) = self.wait(cur_cond)\n pins = (clk, d0, d1, d2, d3, d4, d5, d6, d7,d8, d9, d10, d11, d12,d13 ,d14 ,d15)\n\n if bFirst:\n bFirst = False\n ss = self.samplenum\n prev_pattern = self.grab_pattern(pins[Pin.BIT_0:])\n cur_cond = wait_cond\n continue\n\n es = self.samplenum\n pattern = self.grab_pattern(pins[Pin.BIT_0:])\n if pattern == prev_pattern:\n continue\n self.handle_pattern(ss, es, prev_pattern)\n ss = es\n prev_pattern = pattern\n","repo_name":"DreamSourceLab/DSView","sub_path":"libsigrokdecode4DSL/decoders/numbers_and_state/pd.py","file_name":"pd.py","file_ext":"py","file_size_in_byte":13339,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"22"} +{"seq_id":"37474807272","text":"import DriverPantalla as dp\n\n\n\ndp.Config_Pins()\ndp.SPI_TFT_Reset()\n\nfor i in range(240):\n\tfor j in range(320):\n\t\tdp.SPI_TFT_pixel(i,j,0)\ndp.Free_Pins()\n\n","repo_name":"Javiuzu/DSBM-","sub_path":"Raspberry Pi/Practica3/TestPantalla.py","file_name":"TestPantalla.py","file_ext":"py","file_size_in_byte":153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44317607832","text":"import telebot\nfrom telebot import types\nimport random\nimport omdb_parse, tmdb_reccomendator\n\nbot = telebot.TeleBot('1200425822:AAFE_ACXYmtXXvB-m4lK-xu8p_yBUc9t3QE')\n\n\nclass ListOfMoviesToShow:\n id_list_main = []\n current_element = 0\n latest_message = None\n\n def set_latest_message(self, mes):\n self.latest_message = mes\n\n def __init__(self): # Инициализация\n self.id_list_main = []\n self.current_element = 0\n self.latest_message = None\n\n def set_list(self, id_list): # Сеттер\n self.id_list_main = id_list\n self.current_element = 0\n\n def set_to_next_elem(self, message): # Следующий элемент\n if self.current_element + 1 >= len(self.id_list_main):\n self.current_element = 0\n else:\n self.current_element += 1\n self.send_info_1(message, 'existing')\n\n def set_to_prev_elem(self, message): # Предыдущий элемент\n if self.current_element -1 < 0:\n self.current_element = len(self.id_list_main) - 1\n else:\n self.current_element += 1\n self.send_info_1(message, 'existing')\n\n def if_list_empty(self):\n if len(self.id_list_main) == 0:\n return True\n else:\n return False\n\n def get_element_text_1(self): # Текст описания\n return omdb_parse.construct_movie_description(self.id_list_main[self.current_element])\n\n def get_element_text_2(self): # Последующая инфа\n return omdb_parse.construct_following_info(self.id_list_main[self.current_element])\n\n def get_menu_1(self):\n keyboard_1 = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n key_1 = types.KeyboardButton('⬅️')\n key_2 = types.KeyboardButton('➡️')\n key_3 = types.KeyboardButton('Further info')\n keyboard_1.add(key_1, key_2, key_3)\n return keyboard_1\n\n def get_menu_2(self):\n keyboard_2 = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)\n key_1 = types.KeyboardButton('Back to main info')\n keyboard_2.add(key_1)\n return keyboard_2\n\n def send_info_1(self, message, type): # types = ['new', 'existing']\n if self.if_list_empty():\n self.latest_message = bot.send_message(message.chat.id, 'The list of results is empty, try to start search')\n else:\n text = 'Result %i / %i \\n' % (self.current_element + 1, len(self.id_list_main))\\\n + self.get_element_text_1()\n self.latest_message = bot.send_message(message.chat.id, text, reply_markup=self.get_menu_1())\n\n def send_info_2(self, message):\n if self.if_list_empty():\n self.latest_message = bot.send_message(message.chat.id, 'The list of results is empty, try to start search')\n else:\n text = 'Result %i / %i \\n' % (self.current_element + 1, len(self.id_list_main)) +\\\n self.get_element_text_2()\n self.latest_message = bot.send_message(message.chat.id, text, reply_markup=self.get_menu_2())\n\n def start_to_show(self, message):\n self.send_info_1(message, 'new')\n self.current_element=0\n\n\nmy_list = ListOfMoviesToShow()\n\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n my_list.latest_message = bot.send_message(message.chat.id,\n 'oh hi Harry, wanna watch some movies, huh? \\n'\n 'Send me one of those:\\n'\n ' /movie_title + \\n'\n ' or /movie_title_year + ',\n )\n print(message)\n\n\n@bot.message_handler(content_types=['text'])\ndef handle_text_messages(message):\n if message.text == '⬅️':\n my_list.set_to_prev_elem(message)\n if message.text == '➡️':\n my_list.set_to_next_elem(message)\n if message.text == 'Further info':\n my_list.send_info_2(message)\n if message.text == 'Back to main info':\n my_list.send_info_1(message, 'existing')\n\n if '/movie_title' in message.text:\n query = message.text[13:]\n id_list = omdb_parse.title_search(query)\n my_list.set_list(id_list)\n my_list.start_to_show(message)\n\n if '/movie_title_year' in message.text:\n query = message.text[17:]\n id_list = omdb_parse.title_year_search(query, query)\n my_list.set_list(id_list)\n my_list.start_to_show(message)\n\n if 'popular_now' in message.text:\n id_list_popular = tmdb_reccomendator.popular_list()\n #print(id_list_popular)\n my_list.set_list(id_list_popular)\n my_list.start_to_show(message)\n\"\"\"\n if '/movie_for_family' in message.text:\n id_list = tmdb_reccomendator.top_family_list()\n print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_comedy' in message.text:\n id_list = tmdb_reccomendator.top_comedies_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_romance' in message.text:\n id_list = tmdb_reccomendator.top_romantic_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_horror' in message.text:\n id_list = tmdb_reccomendator.top_horror_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_documentary' in message.text:\n id_list = tmdb_reccomendator.top_documentaries_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_scifi' in message.text:\n id_list = tmdb_reccomendator.top_scifi_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\n if '/movie_war' in message.text:\n id_list = tmdb_reccomendator.top_war_list()\n #print(id_list)\n my_list.set_list(id_list[:20])\n my_list.start_to_show(message)\n\"\"\"\n\n@bot.message_handler(commands=['movie_title'])\ndef movie_title_search(message):\n query = message.text[13:]\n id_list = omdb_parse.title_search(query)\n my_list.set_list(id_list)\n my_list.start_to_show(message)\n\n\n@bot.message_handler(commands=['movie_title_year'])\ndef movie_title_year_search(message):\n query = message.text[17:]\n\n a = query.str.split(separator=[',', ' '])\n\n if len(a) < 2:\n bot.send_message(message.chat.id, 'Incorrect input text, please try again')\n else:\n year = a[-1]\n title = a[:-1]\n id_list = omdb_parse.title_year_search(title, year)\n my_list.set_list(id_list)\n my_list.start_to_show(message)\n\n\ndef listener(messages):\n for m in messages:\n print(str(m), '\\n'*5)\n\n\nbot.set_update_listener(listener)\nbot.polling()\n\n# БУМАЖКА С ОПИСАНИЕМ АРХИТЕКТУРЫ\n# ВСЕ_ЕЩЕ НАДО СДЕЛАТЬ ОТОБРАЖЕНИЕ ОПИСАНИЯ, КНОПКИ ДЛЯ ВЫБОРА\n# ПЛЮС СДЕЛАТЬ ЕЩЕ ФИШЕЧКИ text=\"тру-ту-ту\", reply_markup=key )","repo_name":"EgorAbrosimov-creativity/movie_reccomendator_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29057799954","text":"'''\nName: DominantColors.py\n\nVersion: 1.0\n\nSummary: Extract dominant colors of an image\n \nAuthor: suxing liu\n\nAuthor-email: suxingliu@gmail.com\n\nCreated: 2023-02-29\n\nUSAGE:\n\n time python3 DominantColors.py -p ~/example/Tara_data/test/ -ft jpg \n\n'''\n\nimport argparse\nimport os\nimport glob\n\nimport cv2\nfrom sklearn.cluster import KMeans\n\n\nclass DominantColors:\n \n CLUSTERS = None\n IMAGE = None\n COLORS = None\n LABELS = None\n\n def __init__(self, image, clusters):\n self.CLUSTERS = clusters\n self.IMAGE = image\n\n\n def dominantColors(self):\n # read image\n img_src = cv2.imread(self.IMAGE)\n\n # calculate the 50 percent of original dimensions\n width = int(img_src.shape[1])\n height = int(img_src.shape[0])\n\n # dsize\n dsize = (width, height)\n\n # convert to rgb from bgr\n img = cv2.cvtColor(img_src, cv2.COLOR_BGR2RGB)\n\n # reshaping to a list of pixels\n img = img.reshape((img.shape[0] * img.shape[1], 3))\n\n # save image after operations\n self.IMAGE = img\n\n # using k-means to cluster pixels\n kmeans = KMeans(n_clusters=self.CLUSTERS, random_state=0, n_init=\"auto\")\n kmeans.fit(img)\n\n # the cluster centers are our dominant colors.\n self.centroid = kmeans.cluster_centers_\n\n # save labels\n self.label = kmeans.labels_\n\n # returning after converting to integer from float\n return self.centroid.astype(int), self.label\n \n \n \n def cluster_ratio(self):\n \n labels=list(self.label)\n \n percent=[]\n for i in range(len(self.centroid)):\n j = labels.count(i)\n j = j/(len(labels))\n percent.append(j)\n \n print(percent)\n return percent\n \n \n \n def optimal_n(self):\n \n #Elbow Method\n md=[]\n for i in range(1,21):\n kmeans=KMeans(n_clusters=i, random_state=0, n_init=\"auto\")\n kmeans.fit(self.IMAGE)\n o=kmeans.inertia_\n md.append(o)\n print(md)\n\n\n\n \n \n \n \nif __name__ == '__main__':\n \n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--path\", required = True, help=\"path to image file\")\n ap.add_argument(\"-ft\", \"--filetype\", required=True, help=\"Image filetype\")\n ap.add_argument('-n', \"--n_cluster\", type = int, required = False, default = 5, help = 'Number of clusters for K-means clustering (default 2, min 2).')\n args = vars(ap.parse_args())\n \n \n # setting path to model file\n file_path = args[\"path\"]\n ext = args[\"filetype\"]\n n_cluster = args[\"n_cluster\"]\n \n #accquire image file list\n filetype = '*.' + ext\n image_file_path = file_path + filetype\n \n #accquire image file list\n imgList = sorted(glob.glob(image_file_path))\n \n \n \n \n #loop execute\n for image_id, image in enumerate(imgList):\n \n dc = DominantColors(image, n_cluster) \n \n colors = dc.dominantColors()\n \n print(colors)\n \n print(dc.cluster_ratio())\n \n #print(dc.optimal_n())\n","repo_name":"lsx1980/plant-image-analysis","sub_path":"DominantColors.py","file_name":"DominantColors.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"41417806749","text":"\"\"\"给出第一个词 first 和第二个词 second,考虑在某些文本 text 中可能以 \"first second third\" 形式出现的情况,其中 second 紧随 first 出现,third 紧随 second 出现。\n\n对于每种这样的情况,将第三个词 \"third\" 添加到答案中,并返回答案。\n\n示例 1:\n\n输入:text = \"alice is a good girl she is a good student\", first = \"a\", second = \"good\"\n输出:[\"girl\",\"student\"]\n示例 2:\n\n输入:text = \"we will we will rock you\", first = \"we\", second = \"will\"\n输出:[\"we\",\"rock\"]\n\"\"\"\n\n\nclass Solution:\n def findOcurrences(self, text: str, first: str, second: str):\n \"\"\"\n :param text: str\n :param first: str\n :param second: str\n :return: list[str]\n \"\"\"\n rt = []\n s = text.split()\n if len(s)>=3:\n for i in range(len(s) - 2):\n if s[i] == first and s[i + 1] == second:\n rt.append(s[i + 2])\n return rt\n\n\nss = Solution()\nprint(ss.findOcurrences(text=\"alice is a good girl she is a good student\", first=\"a\", second=\"good\"\n ))\nprint(ss.findOcurrences(text = \"we will we will rock you\", first = \"we\", second = \"will\"\n ))\n","repo_name":"nicefuu/leetCode-python3","sub_path":"1078.py","file_name":"1078.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"10985989990","text":"import smtplib\nfrom random import randint\nimport datetime as dt\n\n\ndef send_email(to_email, body, subject):\n email = \"xxxxxxxxxx@gmail.com\"\n pwd = \"xxxxxxxxxx\"\n\n with smtplib.SMTP(host=\"smtp.gmail.com\", port=587) as connection:\n connection.starttls()\n connection.login(user=email, password=pwd)\n connection.sendmail(\n from_addr=email,\n to_addrs=to_email,\n msg=f\"Subject:{subject}\\n\\n{body}\"\n )\n\n\ndef get_random_quote():\n with open(\"quotes.txt\") as quotes_file:\n quotes = quotes_file.readlines()\n return quotes[randint(0, len(quotes) - 1)]\n\n\nnow = dt.datetime.now()\nif now.weekday() == 0:\n quote = get_random_quote()\n send_email(to_email=\"xxxxxxxxxxxxx@gmail.com\", subject=\"Monday Motivational Quote\", body=get_random_quote())\n","repo_name":"iRipul/python-learning","sub_path":"quote-mailer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42610566199","text":"import heapq\nimport math\nimport numpy as np\n\nfrom STAstar_utils import Config\n\n\nMAX_THETA = np.deg2rad(60.0) # [rad] maximum steering angle\nL = 3.0\nN_STEER = 10\nMAX_ACC = 5.0\nMIN_ACC = -10.0\nN_ACC = 15\nN_THATA = 20 # number of steer command\n\ndt = T_GRID_RESOLUTION = 0.1\nXY_GRID_RESOLUTION = 0.3\nYAW_GRID_RESOLUTION = np.deg2rad(15.0)\nV_GRID_RESOLUTION = 1.0\nMAX_T = 5.0 # plan time\nMAX_V = 10.0\n\nN_T = int(MAX_T/T_GRID_RESOLUTION)\n\n\nclass Node:\n def __init__(self, t_ind, x_ind, y_ind, yaw_ind, v_ind,\n t, x, y, yaw, v, theta=0.0, acc=0.0,\n parent_index=None, cost=None):\n self.t_index = t_ind\n self.x_index = x_ind\n self.y_index = y_ind\n self.yaw_index = yaw_ind\n self.v_index = v_ind\n self.t = t\n self.x = x\n self.y = y\n self.yaw = yaw\n self.v = v\n self.theta = theta\n self.acc = acc\n self.parent_index = parent_index\n self.cost = cost\n\n\nclass Path:\n def __init__(self, t_list, x_list, y_list, yaw_list, v_list, cost):\n self.t_list = t_list\n self.x_list = x_list\n self.y_list = y_list\n self.yaw_list = yaw_list\n self.v_list = v_list\n self.cost = cost\n\n\ndef pi_2_pi(angle):\n return (angle + math.pi) % (2 * math.pi) - math.pi\n\n\ndef calc_motion_inputs():\n for theta in np.concatenate((np.linspace(-MAX_THETA, MAX_THETA, N_THATA), [0.0])):\n for acc in np.concatenate((np.linspace(MIN_ACC, MAX_ACC, N_ACC), [0.0])):\n yield [theta, acc]\n\n\ndef move(x, y, yaw, v, theta, acc):\n v += acc*T_GRID_RESOLUTION\n yaw += theta\n x += v * math.cos(yaw) * T_GRID_RESOLUTION\n y += v * math.sin(yaw) * T_GRID_RESOLUTION\n return x, y, yaw, v\n\n\ndef verify_index(node, c):\n t_ind, x_ind, y_ind, v_ind = node.t, node.x_index, node.y_index, node.v_index\n if (0 <= x_ind < c.x_w-1) and (0 <= y_ind < c.y_w-1) and (0 <= t_ind < c.t_w-2) and (0 <= v_ind < c.v_w-1):\n return True\n\n return False\n\n\ndef calc_index(node, c):\n ind = (node.t_index * c.x_w * c.y_w * c.yaw_w * c.v_w +\n node.v_index * c.x_w * c.y_w * c.yaw_w +\n node.yaw_index * c.x_w * c.y_w +\n node.y_index * c.x_w +\n node.x_index)\n\n if ind < 0:\n print(\"Error(calc_index):\", ind)\n\n return ind\n\n\ndef isNodeFree(node, st_grid_map, c):\n t_ind, x_ind, y_ind = node.t_index, node.x_index, node.y_index\n if (0 <= x_ind < c.x_w-1) and (5 <= y_ind < c.y_w-5) and (0 <= t_ind < c.t_w):\n if st_grid_map[t_ind, x_ind, y_ind] > 0:\n # if (node.y > 3.0 or node.y < -3.0):\n # print(st_grid_map[t_ind, x_ind, y_ind], t_ind, x_ind, y_ind)\n return True\n return False\n\n\ndef get_neighbors(current, config, st_grid_map):\n for theta, acc in calc_motion_inputs():\n node = calc_next_node(current, theta, acc, config)\n if node and verify_index(node, config) and isNodeFree(node, st_grid_map, config):\n yield node\n\n\ndef calc_next_node(current, theta, acc, config):\n x, y, yaw, v = current.x, current.y, current.yaw, current.v\n x, y, yaw, v = move(x, y, yaw, v, theta, acc)\n if np.cos(yaw) < 0.0 or v < 0.0 or v > MAX_V: return None\n x_ind = round(x / XY_GRID_RESOLUTION) - config.min_x\n y_ind = round(y / XY_GRID_RESOLUTION) - config.min_y\n yaw_ind = round(yaw / YAW_GRID_RESOLUTION) - config.min_yaw\n v_ind = round(v / V_GRID_RESOLUTION) - config.min_v\n\n e = pow((current.x - x)**2+(current.y - y)**2, 0.5)\n c = abs(v - MAX_V)\n d = current.x - x\n cost = 0.65*e + c + 5.0*d\n node = Node(current.t_index+1, x_ind, y_ind, yaw_ind, v_ind,\n current.t+T_GRID_RESOLUTION, x, y, yaw, v, theta=theta, acc=acc,\n parent_index=calc_index(current, config),\n cost=cost)\n return node\n\n\ndef is_same_grid(n1, n2):\n if ((n1.t_index == n2.t_index)\n and (n1.x_index == n2.x_index)\n and (n1.y_index == n2.y_index)\n and (n1.yaw_index == n2.yaw_index)\n and (n1.v_index == n2.v_index)):\n return True\n return False\n\n\ndef culcontrolpoints(p1, p2):\n control_points = np.zeros((2, 4))\n control_points[:,0] = p1[0:2]\n control_points[:,3] = p2[0:2]\n dx = p1[0] - p2[0]\n dy = p1[1] - p2[1]\n d = np.sqrt(dx*dx+dy*dy)/3.0\n control_points[:,1] = np.array([p1[0]+d*np.cos(p1[2]), p1[1]+d*np.sin(p1[2])])\n control_points[:,2] = np.array([p2[0]-d*np.cos(p2[2]), p2[1]-d*np.sin(p2[2])])\n return control_points\n\n\ndef recursive(control_points, T, B0=0, B1=0, dB0=0, dB1=0, ddB0=0, ddB1=0):\n if len(control_points) == 1:\n return control_points[0], -B0+(1-T)*dB0+B1+T*dB1, -2*dB0+(1-T)*ddB0+2*dB1+T*ddB1\n else:\n B0, dB0, ddB0 = recursive(control_points[0:-1], T)\n B1, dB1, ddB1 = recursive(control_points[1:], T)\n return (1-T)*B0 + T*B1 , -B0+(1-T)*dB0+B1+T*dB1, -2*dB0+(1-T)*ddB0+2*dB1+T*ddB1\n\n\ndef cullengthofcurve(x, y):\n s = 0\n for i in range(len(x)-1):\n dx = x[i+1] - x[i]\n dy = y[i+1] - y[i]\n s += np.sqrt(dx*dx+dy*dy)\n return s\n\n\ndef calc_cost(n, goal_node, c):\n h = abs(n.y-goal_node.y)\n return n.cost + h\n\n\ndef get_final_path(closed, goal_node):\n reversed_t = []\n reversed_x = []\n reversed_y = []\n reversed_yaw = []\n reversed_v = []\n\n nid = goal_node.parent_index\n final_cost = goal_node.cost\n while nid:\n n = closed[nid]\n reversed_t.append(n.t)\n reversed_x.append(n.x)\n reversed_y.append(n.y)\n reversed_yaw.append(n.yaw)\n reversed_v.append(n.v)\n nid = n.parent_index\n reversed_t = list(reversed(reversed_t))\n reversed_x = list(reversed(reversed_x))\n reversed_y = list(reversed(reversed_y))\n reversed_yaw = list(reversed(reversed_yaw))\n reversed_v = list(reversed(reversed_v))\n\n path = Path(reversed_t, reversed_x, reversed_y, reversed_yaw, reversed_v, final_cost)\n\n return path\n\n\ndef STAStar_planning(start, goal, st_grid_map, xy_resolution, yaw_resolution):\n start[2], goal[2] = pi_2_pi(start[2]), pi_2_pi(goal[2])\n config = Config(0.0, 50.0, -4.5, 4.5, xy_resolution, yaw_resolution)\n start_node = Node(0, round(start[0] / xy_resolution) - config.min_x,\n round(start[1] / xy_resolution) - config.min_y,\n round(start[2] / yaw_resolution) - config.min_yaw,\n round(start[3] / V_GRID_RESOLUTION) - config.min_v,\n 0.0, start[0], start[1], start[2], start[3], cost=0.0)\n goal_node = Node(config.t_w-1, round(goal[0] / xy_resolution) - config.min_x,\n round(goal[1] / xy_resolution) - config.min_y,\n round(goal[2] / yaw_resolution) - config.min_yaw,\n round(goal[3] / V_GRID_RESOLUTION) - config.min_v,\n MAX_T, goal[0], goal[1], goal[2], goal[3], cost=0.0)\n openList, closedList = {}, {}\n pq = []\n openList[calc_index(start_node, config)] = start_node\n heapq.heappush(pq, (calc_cost(start_node, goal_node, config),\n calc_index(start_node, config)))\n final_path = None\n ite = 0\n while ite < 50000:\n if not openList:\n print(\"Error: Cannot find path, No open set\")\n return [], [], []\n\n cost, c_id = heapq.heappop(pq)\n if c_id in openList:\n current = openList.pop(c_id)\n closedList[c_id] = current\n else: continue\n\n if ((current.t_index == goal_node.t_index)\n and (current.y_index == goal_node.y_index)):\n print(\"path found\")\n goal_node.t_index = current.t_index\n goal_node.t = current.t\n goal_node.cost = current.cost\n goal_node.parent_index = calc_index(current, config)\n break\n for neighbor in get_neighbors(current, config, st_grid_map):\n neighbor_index = calc_index(neighbor, config)\n if neighbor_index in closedList.keys():\n continue\n if (neighbor_index not in openList.keys()) or (openList[neighbor_index].cost > neighbor.cost):\n heapq.heappush(pq, (calc_cost(neighbor, goal_node, config), neighbor_index))\n openList[neighbor_index] = neighbor\n ite += 1\n\n path = get_final_path(closedList, goal_node)\n return path","repo_name":"Feng-Kaijun/safety_filter","sub_path":"src/python/PySafetyFilter/STAstar.py","file_name":"STAstar.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72023483255","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 14 23:03:49 2023\n\n@author: osman\n\"\"\"\n\n\nimport pandas as pd \nimport numpy as np\ndf = pd.read_csv('./BT_outputs_new/test_results_fold_3_updated.csv')\n\nobjs = df['avg_objs'].values\npred = df['pred_beam_5'].values\nactual = df['gt_beam_5'].values\nabs_index = df['abs_index'].values\nacc = df['Acc_diff'].values\nbeam = df['beam_diff'].values\n# acc = []\n# for val in range(len(abs_index)):\n# acc.append(np.abs(pred[val]- actual[val]))\n \n# df['Acc_diff'] = acc\n# df.to_csv('./BT_output/CV_output_1/test_results_fold_0_updated.csv',index=False)\n\nfirst = 0\nsecond = 0\nthird = 0\nforth = 0\nfive =0\n\nfirst_all = 0\nsecond_all = 0\nthird_all = 0\nforth_all = 0\nfive_all =0\n\nfirst_5 = 0\nsecond_5 = 0\nthird_5 = 0\nforth_5 = 0\nfive_5 =0\nother_5 =0\n\nsix = 0\nother =0\nother_all =0\nfor val in range(len(abs_index)):\n if beam[val] ==0 and acc[val] == 0:\n first_all+=1\n elif beam[val] in range(1,2) and acc[val] ==0:\n second_all+=1\n elif beam[val] in range(2,3) and acc[val] == 0:\n third_all+=1 \n elif beam[val] in range(3,11) and acc[val] == 0:\n forth_all+=1 \n # elif beam[val] in range(5,10) and acc[val] == 0:\n # five_all+=1\n# elif beam[val] in range(10,16) and acc[val] == 0:\n# six+=1\n elif beam[val] in range(11,np.max(beam) )and acc[val] == 0:\n other_all+=1\n \n \nfor val in range(len(abs_index)):\n if beam[val] ==0 and acc[val] <=5:\n first_5+=1\n elif beam[val] in range(1,2) and acc[val] <=5:\n second_5+=1\n elif beam[val] in range(2,3) and acc[val] <= 5:\n third_5+=1 \n elif beam[val] in range(3,11) and acc[val] <= 5:\n forth_5+=1 \n # elif beam[val] in range(5,10) and acc[val] <= 5:\n # five_5+=1\n# elif beam[val] in range(10,16) and acc[val] == 0:\n# six+=1\n elif beam[val] in range(11,np.max(beam) )and acc[val] <= 5:\n other_5+=1\n \n \n \nfor val in range(len(abs_index)):\n if beam[val] ==0:\n first+=1\n elif beam[val] in range(1,2) :\n second+=1\n elif beam[val] in range(2,3) :\n third+=1 \n elif beam[val] in range(3,11) :\n forth+=1 \n # elif beam[val] in range(5,10) :\n # five+=1\n # elif beam[val] in range(10,16) :\n # six+=1\n elif beam[val] in range(11,np.max(beam)+1):\n other+=1\n\n# for val in range(len(abs_index)):\n# if objs[val] in range(6):\n# first+=1\n# elif objs[val] in range(5,11) :\n# second+=1\n# elif objs[val] in range(11,16) :\n# third+=1 \n# elif objs[val] in range(16,21) :\n# forth+=1 \n# elif objs[val] in range(21,28) :\n# five+=1\n# else:\n# other+=1\n\ndf1 = pd.DataFrame()\ndf1['classes'] = ['first','second','third','forth','other']\ndf1['beam_acc_count'] = [first_all,second_all,third_all,forth_all,other_all]\ndf1['beam_acc_count_5'] = [first_5,second_5,third_5,forth_5,other_5]\ndf1['beam_count'] = [first,second,third,forth,other]\ndf1.to_csv('./BT_outputs_new/test_results_fold_3_updated_plot_beam_count_1.csv',index=False) \n","repo_name":"tmosman/V2V-Vision-Beam-Tracking","sub_path":"scripts/Plot_results/beam_difference_plot.py","file_name":"beam_difference_plot.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37753448401","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nimport new_featureEngeneer.config as config\n\n\ndef featureAcross(df_list, names, method):\n '''\n 连续变量的分箱\n :param df_list: \n :param methods_list: {'bin':{'names':'houseArea', 'bin_dot':[100,500,1000]},\n }\n :return: \n '''\n if method not in ('bin','decision tree','cluster'):\n raise ValueError(\"Input a method in ('bin','decision tree','cluster')\")\n\n if method == 'bin':\n for col in names:\n bin = get_quantile_based_boundaries(df_list[col], num_buckets=5)\n df_list[col + '_cut_frequency'] = pd.cut(df_list[col], bin=bin)\n\n if method == 'decision_tree':\n for col in names:\n bin = optimal_binning_boundary(df_list[col],df_list.target)\n df_list[col+'_cut_decisiontree'] = pd.cut(df_list[col],bin=bin)\n\n if method == 'cluster':\n for col in names:\n k = config()\n cluster_label = get_cluter_boundaries(df_list[col],k)\n df_list[col+'_cut_cluster'] = cluster_label\n\ndef get_quantile_based_boundaries(feature_values, num_buckets):\n boundaries = np.arange(1.0, num_buckets) / num_buckets\n quantiles = feature_values.quantile(boundaries)\n return [quantiles[q] for q in quantiles.keys()]\n\n\ndef optimal_binning_boundary(x: pd.Series, y: pd.Series, nan: float = -999.) -> list:\n '''\n 利用决策树获得最优分箱的边界值列表\n '''\n boundary = [] # 待return的分箱边界值列表\n\n x = x.fillna(nan).values # 填充缺失值\n y = y.values\n\n clf = DecisionTreeClassifier(criterion='entropy', # “信息熵”最小化准则划分\n max_leaf_nodes=6, # 最大叶子节点数\n min_samples_leaf=0.05) # 叶子节点样本数量最小占比\n\n clf.fit(x.reshape(-1, 1), y) # 训练决策树\n\n n_nodes = clf.tree_.node_count\n children_left = clf.tree_.children_left\n children_right = clf.tree_.children_right\n threshold = clf.tree_.threshold\n\n for i in range(n_nodes):\n if children_left[i] != children_right[i]: # 获得决策树节点上的划分边界值\n boundary.append(threshold[i])\n\n boundary.sort()\n\n min_x = x.min()\n max_x = x.max() + 0.1 # +0.1是为了考虑后续groupby操作时,能包含特征最大值的样本\n boundary = [min_x] + boundary + [max_x]\n\n return boundary\n\ndef get_cluter_boundaries(data,k):\n '''\n 聚类分箱\n :param data: \n :param k: \n :return: \n '''\n cluster= KMeans(data, k)\n return cluster.labels_\n","repo_name":"rwbfd/OpenCompetition","sub_path":"src/backup/feature_engineering/new_featureEngeneer/feature_across.py","file_name":"feature_across.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"16590688385","text":"def found_palindroms(text, list_of_palindroms):\n if word == \"\".join(reversed(word)):\n palindromes.append(word)\n return [palindromes]\n\n\n# some Input\n\nstrings = input().split(\" \")\nsearched_palindrome = input()\npalindromes = []\n\nfor word in strings:\n found_palindroms(word, palindromes)\n\nprint(f\"{palindromes}\")\nprint(f\"Found palindrome {palindromes.count(searched_palindrome)} times\")\n","repo_name":"DKolev1978/SoftUni-Python","sub_path":"fundamentals/lists_advanced/palindrome_strings_2.py","file_name":"palindrome_strings_2.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31196487229","text":"import requests\nfrom datetime import datetime\n\nmy_loc = {'lat': 13.117363, 'long': 80.101572, 'formatted': 0}\n\nresponse = requests.get('https://api.sunrise-sunset.org/json', params=my_loc)\nresponse.raise_for_status()\nprint(response.json())\n\nsunrise_hour = response.json()['results']['sunrise'].split('T')[\n 1].split(':')[0]\n\nsunset_hour = response.json()['results']['sunset'].split('T')[1].split(':')[0]\n\nprint(f\"Sunrise: {sunrise_hour}, Sunset: {sunset_hour}\")\n\nnow = datetime.now()\nhour_now = now.hour\nprint(f\"Current time: {hour_now}\")\n","repo_name":"tonydavidx/100-days-of-code-python","sub_path":"day_33_api/2_sunrise.py","file_name":"2_sunrise.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"13860185144","text":"\nmx_chain = 1\nnumber = 1\ndic = {1:1}\n\ndef itera(num):\n global mx_chain\n global number\n \n li = []\n while num!=1 and dic.get(num, 0)==0:\n li.append(num)\n if num%2:\n num = 3*num + 1\n else:\n num = num//2\n \n prev = num\n while li:\n nm = li.pop()\n dic[nm] = dic[prev] + 1\n if mx_chain < dic[nm]:\n mx_chain = dic[nm]\n number = nm\n prev = nm\nn = 2\nlimit = 10**6\nwhile n < limit:\n itera(n)\n n+=1\n\nprint(number)\n","repo_name":"r-tron18/ProjectEuler","sub_path":"euler14.py","file_name":"euler14.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16943046016","text":"import random\r\n\r\nglobal kodi\r\nglobal shuma\r\n\r\ndef gjenerim():\r\n\r\n germat = [\"G\", \"H\", \"I\", \"J\", \"K\", \"L\"]\r\n a = random.choice(germat)\r\n b = random.choice(germat)\r\n \r\n nr1 = random.randint(2,8)\r\n nr2 = random.randint(2,8)\r\n nr3 = random.randint(2,8)\r\n kodi = a, b, nr1, nr2, nr3\r\n x=''\r\n x=x.join(str(i) for i in kodi)\r\n \r\n a = germat.index(a)+10\r\n b = germat.index(b)+10 \r\n\r\n shuma = a + b + nr1 + nr2 + nr3\r\n \r\n if shuma % 10 == 0:\r\n print (x)\r\n print(shuma)\r\n return shuma\r\n\r\n \r\nn = int(input(\"Numri i iteracioneve: \"))\r\ni = 0\r\nwhile i < n:\r\n gjenerim()\r\n\r\n i+=1\r\n \r\ninput('Shtyp ENTER per ta mbyllur') \r\n \r\n \r\n \r\n\r\n\r\n","repo_name":"aldo-arch/Software-Testing","sub_path":"Samples/code generator/a0.py","file_name":"a0.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29933513320","text":"import argparse\nimport re\nimport sys\n\nimport git\n\nfrom src.modules.config_reader import ConfigReader\nfrom src.modules.utils import Arguments, dir_path, file_path\nfrom src.modules.version.__init__ import increase_version, Version\n\n\ndef parse_args() -> Arguments:\n parser = argparse.ArgumentParser(description='GitAutomaton Client.')\n parser.add_argument('-r', '--repo', metavar='repository path', type=dir_path, default='.')\n parser.add_argument('-c', '--config', metavar='configuration file', type=file_path, default='default.yaml')\n parser.add_argument('-p', '--profile', metavar='profile', type=str, default='default')\n parser.add_argument('-f', '--feature-branch', metavar='use feature branch', action=argparse.BooleanOptionalAction)\n parser.add_argument('-i', '--increment-version', metavar='increment version', type=str,\n choices=[Version.MAJOR.value, Version.MINOR.value, Version.PATCH.value])\n\n args = parser.parse_args()\n print(\"Opening Path : %s with config: %s\" % (args.repo, args.config))\n\n return Arguments(config=args.config,\n repo=args.repo,\n profile=args.profile,\n increment_version=args.increment_version,\n feature_branch=args.feature_branch)\n\n\ndef get_ticket_id(repo_reader: ConfigReader, branch: git.SymbolicReference) -> str:\n m = re.search(repo_reader.repo_config.ticket_structure, str(branch))\n\n return m.group(1) if m is not None else None\n\n\ndef init_git(git_folder: str, use_feature_branch: bool, repo_reader: ConfigReader):\n g = git.Repo(git_folder)\n\n remote_from_config = [remote for remote in g.remotes if remote.name == repo_reader.repo_config.remote]\n if len(remote_from_config) == 0:\n raise Exception(\"There is no remote called %s, possibles values are: %s\" % (\n repo_reader.repo_config.remote, ','.join(list(map(lambda x: x.name, g.remotes))[:5])))\n remote = next(iter(remote_from_config))\n ticket_id = get_ticket_id(repo_reader, g.active_branch)\n if ticket_id is None:\n sys.exit(\"You are not inside of a feature branch or your ticket config is invalid!\")\n\n print(\"Using remote: %s\" % remote)\n print(\"Current Branch: %s, ticket: %s\" % (str(g.active_branch), ticket_id))\n\n for submodule in g.submodules:\n latest_branches = list()\n for remote in submodule.module().remotes:\n remote.update()\n\n # find feature branch with ticket id\n if use_feature_branch:\n find_feature_branch_commit(latest_branches, remote, submodule, ticket_id)\n else:\n find_default_branch_commit(remote, latest_branches, submodule, ticket_id)\n\n if len(latest_branches) == 0:\n raise Exception(\"No branch found related to given ticket\")\n\n latest_branch = max(latest_branches, key=lambda x: x.committed_date)\n\n if latest_branch.binsha != submodule.binsha:\n submodule.binsha = latest_branch.binsha\n g.index.add([submodule])\n g.index.commit(reader.repo_config.commit_messages.update_submodule.format(ticket_id=ticket_id))\n print(\"Updating Submodule to SHA: %s\" % str(latest_branch))\n else:\n print(\"Submodule is already up to date!\")\n\n\ndef find_default_branch_commit(remote, latest_branches, submodule, ticket_id):\n remotes = list(filter(lambda x: x.name.endswith(reader.repo_config.default_branch), remote.refs))\n if len(remotes) == 0:\n raise NameError(\"There is no default branch with the name '%s'\\nPossible branches are: %s\" % (\n reader.repo_config.default_branch, list(map(lambda x: x.name, remote.refs))[:5]))\n for remote in remotes:\n find_ticket_in_branch(latest_branches, remote, submodule, ticket_id)\n\n\ndef find_feature_branch_commit(latest_branches, remote, submodule, ticket_id):\n for remote_ref in remote.refs:\n if ticket_id in remote_ref.name:\n # print(\"Found ticket in branch; \" + remote_ref.name)\n find_ticket_in_branch(latest_branches, remote_ref, submodule, ticket_id)\n\n\ndef find_ticket_in_branch(latest_branches, remote_ref, submodule, ticket_id):\n for commit in submodule.module().iter_commits(rev=remote_ref):\n if ticket_id in commit.message:\n latest_branches.append(commit)\n break\n\n\nif __name__ == '__main__':\n arguments = parse_args()\n reader = ConfigReader(arguments.config, arguments.repo)\n reader.active_config = arguments.profile\n\n if arguments.increment_version is not None:\n increase_version(Version(arguments.increment_version), reader.repo_config)\n\n init_git(arguments.repo, arguments.feature_branch, reader)\n","repo_name":"SetZero/GitAutomaton","sub_path":"gauto.py","file_name":"gauto.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8962779213","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author: Donny You(yas@meitu.com)\n\n\nfrom __future__ import division\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils import data\nfrom model import FCN \nfrom datasets import CSDataSet\nfrom loss import CrossEntropy2d, CrossEntropyLoss2d\nfrom transform import ReLabel, ToLabel, ToSP, Scale, Augment, CocoLabel, ReLabel\nfrom torchvision.transforms import Compose, CenterCrop, Normalize, ToTensor\nfrom PIL import Image\nimport numpy as np\n\nimport utils\nfrom image_augmentor import ImageAugmentor\n\nimage_augmentor = ImageAugmentor()\n\nNUM_CLASSES = 6\nMODEL_NAME = \"seg-model\"\n\ninput_transform = Compose([\n Scale((512, 256), Image.BILINEAR),\n Augment(0, image_augmentor),\n ToTensor(),\n Normalize([.485, .456, .406], [.229, .224, .225]),\n\n])\ntarget_transform = Compose([\n Scale((512, 256), Image.NEAREST),\n ToLabel(),\n ReLabel(),\n])\n\ntrainloader = data.DataLoader(CSDataSet(\"/root/group-incubation-bj\", split=\"train\",\n img_transform=input_transform, label_transform=target_transform),\n batch_size=10, shuffle=True, pin_memory=True)\n\nvalloader = data.DataLoader(CSDataSet(\"/root/group-incubation-bj\", split=\"val\",\n img_transform=input_transform, label_transform=target_transform),\n batch_size=1, pin_memory=True)\n\nif torch.cuda.is_available():\n model = torch.nn.DataParallel(FCN(NUM_CLASSES))\n model.cuda()\n\nepoches = 8\nlr = 1e-3\n\ncriterion = CrossEntropyLoss2d()\noptimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))\n\n# pretrained_dict = torch.load(\"./pth/fcn-deconv-40.pth\")\n# model_dict = model.state_dict()\n# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n# model_dict.update(pretrained_dict)\n# model.load_state_dict(model_dict)\n\n# model.load_state_dict(torch.load(\"./pth/seg-norm-2.pth\"))\n\nmodel.train()\n\nx_index = 1\n\nfor epoch in range(epoches):\n # epoch = epoch_ + 2\n running_loss = 0.0\n iter_loss = 0.0\n for i, (images, labels) in enumerate(trainloader):\n if torch.cuda.is_available():\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n else:\n images = Variable(image)\n labels = Variable(labels)\n\n optimizer.zero_grad()\n outputs = model(images)\n loss = criterion(outputs, labels) \n loss.backward()\n optimizer.step()\n running_loss += loss.data[0]\n iter_loss += loss.data[0]\n if (i + 1) % 100 == 0:\n print(\"Iter [%d] Loss: %.4f\" % (i+1, iter_loss/100.0))\n iter_loss = 0.0\n\n if (i + 1) % 300 == 0:\n utils.plot(MODEL_NAME + \"-train_loss\", x_index, running_loss/300.0)\n print(\"Epoch [%d] Loss: %.4f\" % (x_index, running_loss/300.0))\n running_loss = 0\n\n val_loss = 0.0\n for j, (images, labels) in enumerate(valloader):\n if torch.cuda.is_available():\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n else:\n images = Variable(image)\n labels = Variable(labels)\n \n outputs = model(images)\n loss = criterion(outputs, labels)\n \n val_loss += loss.data[0]\n \n print(\"Val [%d] Loss: %.4f\" % (x_index, val_loss/len(valloader)))\n utils.plot(MODEL_NAME + \"-val_loss\", x_index, val_loss/len(valloader))\n x_index += 1\n val_loss = 0\n\n if (epoch+1) % 1 == 0:\n if (epoch + 1) % 3 == 0:\n lr /= 10.0\n \n optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))\n\n torch.save(model.state_dict(), \"./pth/\" + MODEL_NAME + (\"-%d.pth\" % (epoch+1)))\n\n\ntorch.save(model.state_dict(), \"./pth/\" + MODEL_NAME + \".pth\")\n","repo_name":"donnyyou/AttentionModule","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"22"} +{"seq_id":"73917567735","text":"# 插入排序\n# 最优时间复杂度:O(n)\n# 最坏时间复杂度:O(n^2)\n# 稳定性:稳定\n\n\ndef insertion_sort(nums):\n len_nums = len(nums)\n for sorted_idx in range(1, len_nums):\n for idx in range(sorted_idx-1, -1, -1):\n if nums[idx] > nums[idx+1]:\n nums[idx], nums[idx+1] = nums[idx+1], nums[idx]\n else:\n break\n\n return None\n\n\ninput_list = [54, 26, 93, 77, 44, 31, 44, 55, 20]\nprint('原列表为: %s' % input_list)\ninsertion_sort(input_list)\nprint('新列表为: %s' % input_list)\n","repo_name":"CaiZhongheng1987/algorithm_questions_and_codes","sub_path":"sort/Insertion_Sort.py","file_name":"Insertion_Sort.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26688980575","text":"#!/usr/bin/env python\n#Author:TangHu\nimport hashlib\n#利用md5进行用户登陆网站进行注册之后密码加密的基本事例,加深理解\n\ndef md5(arg): #这是加密函数,将传进来的函数加密\n md5_pwd=hashlib.md5(bytes('abc',encoding='utf-8'))\n md5_pwd.update(bytes(arg,encoding='utf-8'))\n return md5_pwd.hexdigest()\n\ndef log(user,pwd): #登录时候的函数,由于md5不能反解,因此登录的时候用正解\n with open('db.config','r',encoding='utf-8') as f:\n for line in f:\n u,p = line.strip().split('|')\n if u==user and p == md5(pwd): #登录时验证用户名以及加密的密码跟之前的保存是否一样\n return True\n\ndef register(user,pwd): #注册的时候把用户名和加密的密码写进文件,保存起来\n with open('db.config','a',encoding='utf-8') as f:\n temp=user+'|'+md5(pwd)\n f.write(temp)\n\ni=input('1.表示登录,2.表示注册')\nif i == '2':\n user=input('用户名:')\n pwd=input('密码:')\n register(user,pwd)\nelif i == '1':\n user=input('输入登录用户名:')\n pwd=input('输入登录密码:')\n r=log(user,pwd)\n if r==True:\n print(\"登录成功!\")\n else:\n print(\"登陆失败\")\nelse:\n print(\"账号不存在!\")","repo_name":"tanghulu9312/mygithub_testrepro","sub_path":"pytest1/day6/hashlib_test.py","file_name":"hashlib_test.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31977394230","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport math\n\n\ndef get_digit_sum(num):\n result = 0\n while num != 0:\n result += num % 10\n num /= 10\n return result\n\n\ndef main():\n print(get_digit_sum(math.factorial(100)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"white1033/ProjectEuler","sub_path":"Python/020.py","file_name":"020.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43686633278","text":"# Import required libraries\nimport discord\nfrom datetime import datetime\nfrom discord.ext import commands\n\n# Set atributes for the help command\nattributes = {\n 'name': \"help\",\n 'aliases': [\"h\"],\n 'description': \"Get help with commands.\"\n}\n\n\n# Create class for help message based off the default one, here I'm overwriting existing functions.\nclass MyHelp(commands.HelpCommand):\n \"\"\"Get help with commands!\"\"\"\n # ^ Docstring.\n \n # Used to get the 'signature'/usage of a command, i.e. `_help`\n def get_command_signature(self, command):\n # Checks if there are required/optional arguments\n if command.signature == '':\n return '`{0.clean_prefix}{1.qualified_name}`'.format(self, command)\n else:\n return '`{0.clean_prefix}{1.qualified_name}` `{1.signature}`'.format(self, command)\n \n # Used when someone only does `sc!help`\n async def send_bot_help(self, mapping):\n # Generate embed\n embed = discord.Embed(title=\"Help\", color=discord.Color.from_rgb(241,210,231))\n # For each cog and it's associated list of commands in `mapping.items`...\n for cog, commands in mapping.items():\n # Filter out the commands that the user isn't allowed to use and sort them alphabetically\n filtered = await self.filter_commands(commands, sort=True)\n # Put the signatures of those commands into an array (search 'List Comprehension' for this format)\n command_signatures = [self.get_command_signature(command) for command in filtered]\n # If a value is returned (since `None` will be returned if nothing is found)\n if command_signatures:\n # Get the attribute `qualified_name` of cog, putting \"Miscellaneous\" if not found\n cog_name = getattr(cog, \"qualified_name\", \"❓ Miscellaneous\")\n # Adds an inline field with title of cog name and value of command signatures.\n if cog_name == \"❓ Miscellaneous\":\n pass\n else:\n embed.add_field(name=f'{cog_name}', value=\"\\n\".join(command_signatures), inline=True)\n # More embed generation\n embed.set_author(name='Help', icon_url=self.context.author.avatar_url)\n # Send the embed as a reply\n await self.context.reply(embed=embed)\n \n # Used when someone does \"sc!help \"\n async def send_cog_help(self, cog):\n # Get an array of commands\n commands = [command for command in cog.get_commands()]\n # Get a filtered version of commands\n filtered = await self.filter_commands(commands, sort=True)\n # Get an array of command signatures\n command_signatures = [self.get_command_signature(c) for c in filtered]\n # Sets embed description to `None`\n desc = None\n # Checks if anything was returned\n if command_signatures:\n # Joins the array line by line\n desc = '\\n'.join(command_signatures)\n # Embed generation\n embed = discord.Embed(title=f\"{cog.qualified_name[cog.qualified_name.find(' ')+1:]} Help\", description=f'**{cog.__doc__}**\\n\\n{desc}', colour=discord.Color.from_rgb(241,210,231))\n embed.set_author(name='Help', icon_url=self.context.author.avatar_url)\n # Send embed as a reply\n await self.context.reply(embed=embed)\n # If nothing was returned, i.e. user doesn't have access to use that cog\n else:\n await self.context.reply(f'No category called \"{cog.qualified_name}\" found.')\n \n # Used when someone does \"sc!help \", such as \"sc!help genshin\".\n async def send_group_help(self, group):\n # Get an array of command signatures. There is no filter check since the only groups I plan to use will be available to everyone\n command_signatures = [self.get_command_signature(command) for command in group.commands]\n # Gets the part to remove from each signature to only get the subcommand name\n tostrip = f'`sc!{group.name} '\n # Since subcommands are required, begin the arguments (``) with \"<\"\n arg_subcmds = ''\n # For each element in the array\n for i in command_signatures:\n # Remove all backticks (`) and replace the occuring value of tostrip with nothing. Add it to the string as well as a |\n arg_subcmds += f\"`{i[len(tostrip):].replace('`', '')}`|\"\n # Remove the last \"|\" and add the last \">\"\n arg_subcmds = arg_subcmds[:-1]\n # If there were no subcommands\n if arg_subcmds == '':\n arg_subcmds = None\n # Checks if the group has aliases\n if len(group.aliases) == 0:\n aliases = None\n else:\n aliases = []\n # For each element in group.aliases\n for i in group.aliases:\n # Adds all of the aliases to a new list with backticks around each individual element\n aliases.append(f'`{i}`')\n # Joins the array with \", \" between each element\n aliases = ', '.join(aliases)\n # Embed generation\n embed = discord.Embed(title=f'Help for {group.name}', description=f'Displaying help for {group}.\\n`<>` marks required parameters.\\n`[]` marks optional parameters.', color=discord.Color.from_rgb(241,210,231))\n embed.add_field(name=f'Description', value=group.description, inline=False)\n embed.add_field(name=f'Aliases', value=aliases, inline=False)\n # Different depending on if there were subcommands\n if arg_subcmds:\n embed.add_field(name=f'Usage', value=f'`sc!{group.name}` {arg_subcmds}', inline=False)\n else:\n embed.add_field(name=f'Usage', value=f'`sc!{group}`', inline=False)\n embed.set_author(name='Help', icon_url=self.context.author.avatar_url)\n # Send as a reply\n await self.context.reply(embed=embed)\n\n \n # Used when someone does \"sc!help \"\n async def send_command_help(self, command):\n # If the user has access to use the command\n if await self.filter_commands([command], sort=True):\n # Embed generation\n embed = discord.Embed(title=f\"Help for {command.name}\", description=f'Displaying help for {command.name}.\\n`<>` marks required parameters.\\n`[]` marks optional parameters.', color=discord.Color.from_rgb(241,210,231))\n embed.add_field(name='Description', value=command.description, inline=False)\n # For aliases, same as above\n if len(command.aliases) == 0:\n aliases = None\n else:\n aliases = []\n for i in range(len(command.aliases)):\n aliases.append(f'`{command.aliases[i]}`')\n aliases = ', '.join(aliases)\n embed.add_field(name='Aliases', value=aliases, inline=False)\n embed.add_field(name='Usage', value=self.get_command_signature(command), inline=False)\n embed.set_author(name='Help', icon_url=self.context.author.avatar_url)\n # Send embed as a reply\n await self.context.reply(embed=embed)\n # If user doesn't have permission to use command\n else:\n await self.get_destination().send(f'No command called \"{command.name}\" found.')\n\n # If an error occurs in the help message\n async def on_help_command_error(self, ctx, error):\n # If it's a bad argument\n if isinstance(error, commands.BadArgument):\n embed = discord.Embed(title=\"Error\", description=str(error))\n embed.set_author(name='Help', icon_url=self.context.author.avatar_url)\n await ctx.reply(embed=embed)\n # Otherwise, raise the error\n else:\n raise error\n\n# Create the cog for the help command to be in the bot\nclass Help(commands.Cog, name='🤔 Help'):\n \"\"\"Get help with commands!\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n # Setting the cog for the help\n help_command = MyHelp(command_attrs=attributes)\n help_command.cog = self # Instance of YourCog class\n bot.help_command = help_command\n\n\n# Allows cog to be loaded\ndef setup(client):\n client.add_cog(Help(client))\n","repo_name":"Sarmqn/Sumi-Chan-Discord-Bot","sub_path":"cogs/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":8295,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"22458210620","text":"#!/usr/bin/env python\nimport sys\nsys.path.insert(0, '/usr/local/share/silverlining/lib')\nimport os\nimport optparse\nimport shutil\nfrom silversupport.appconfig import AppConfig\nfrom silversupport import transfermethods\n\nparser = optparse.OptionParser(\n usage='%prog LOCAL_BACKUP LOCATION')\nparser.add_option(\n '--keep', action='store_true',\n help=\"Keep the archive after restoring\")\n\n\ndef main():\n options, args = parser.parse_args()\n local_backup = args[0]\n location = args[1]\n app_config = AppConfig.from_location(location)\n if transfermethods.is_archive(local_backup):\n dir = transfermethods.make_temp_name()\n transfermethods.unarchive(local_backup, dir)\n if not options.keep:\n os.unlink(local_backup)\n options.keep = False\n local_backup = dir\n app_config.restore_services(local_backup)\n if not options.keep:\n shutil.rmtree(local_backup)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ianb/silverlining","sub_path":"silverlining/mgr-scripts/restore-services.py","file_name":"restore-services.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"7758084234","text":"from turtle import width\nfrom pyautogui import *\nimport pyautogui\nimport time\nimport keyboard\nimport win32api, win32con\nimport mss\nfrom PIL import Image\nimport time\nimport neat\nimport pickle as pickle\nimport os\nfrom pathlib import Path\nimport math\n\npyautogui.FAILSAFE = False\n# ^^^^ Careful with this one ^^^^\n\nSCORE = 0\nGENERATION = 0\nMAX_FITNESS = float('-inf')\nMAX_FITNESS_THIS_GEN = float('-inf')\nMAX_FITNESS_LAST_GEN = float('-inf')\nBEST_GENOME = 0\nsleepTime = .005\n\n\ndef pressSpace():\n '''\n one press, one release.\n accepts as many arguments as you want. e.g. press('left_arrow', 'a','b').\n '''\n win32api.keybd_event(0x20,0,0,0)\n time.sleep(sleepTime)\n win32api.keybd_event(0x20,0,win32con.KEYEVENTF_KEYUP,0)\n\ndef game(genome, config):\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n\n global SCORE\n\n reg1=915\n reg2=400\n reg3=245\n reg4=450\n running = True\n pipeSeenCounter = 0\n pipeScoreCounter = 0\n autoPilot = True\n floorY = int(200)\n prevFloorY = floorY\n floorYDebounce = [floorY,floorY]\n floorX = int(reg3)\n birdY = int(160)\n birdX = int(32)\n tryCounter = 0\n afterSpaceCount = 0\n time.sleep(2)\n start = time.time()\n # birdLastSeen = time.time()\n # birdFlewAway = False\n lastPressed = time.time()\n end = time.time()\n pipeBottomImage = Image.open('assets/PipeBottom.png')\n pic = pyautogui.screenshot(region=(reg1,reg2,reg3,reg4))\n highestScore = 0\n pipeScored = True\n totalJumpedPerRound = 0\n fitness = 0\n gameOver = False\n\n while keyboard.is_pressed('q') == False and running:\n # if tryCounter > 0 and time.time()-birdLastSeen>1.25:\n # end = time.time()\n # birdFlewAway = True\n # sleep(2)\n pic = pyautogui.screenshot(region=(reg1,reg2,reg3,reg4))\n width, height = pic.size\n r,g,b=pic.getpixel((25,350))\n if((r == 23 and g == 166 and b == 76)or(r == 22 and g == 159 and b == 73)):\n autoPilot = True\n # if not birdFlewAway:\n # end = time.time()\n if pipeScoreCounter>highestScore:\n highestScore = pipeScoreCounter\n if tryCounter > 0:\n print('GAME OVER! Score: %s Highest: %s Time: %s'%(pipeScoreCounter,highestScore,end-start))\n # if(birdFlewAway):\n # print('After %s jumps the bird flew away...'%(totalJumpedPerRound))\n # fitness = float('-inf')\n # birdFlewAway = False\n # if(totalJumpedPerRound<1):\n # print('After not even trying bird went splat...')\n # fitness = float('-inf')\n if tryCounter < 1:\n running = True\n tryCounter = tryCounter + 1\n time.sleep(.5)\n pyautogui.moveTo(50, 50)\n pyautogui.click(950, 750)\n pyautogui.moveTo(50, 50)\n time.sleep(1)\n # birdLastSeen = time.time()\n # birdFlewAway = False\n start = time.time()\n lastPressed = time.time()\n pressSpace()\n totalJumpedPerRound = 0\n else:\n tryCounter = 0\n gameOver = True\n running = False\n pipeScoreCounter = 0\n pipeSeenCounter = 0\n floorY = int(200)\n prevFloorY = floorY\n floorYDebounce = [floorY,floorY]\n floorX = int(reg3)\n birdY = int(160)\n birdX = int(32)\n pipeScored = True\n afterSpaceCount = 0\n if (not gameOver):\n gameOver = True\n for y in range(height):\n r,g,b=pic.getpixel((birdX,y))\n if r<180:\n gameOver = False\n if((r == 250 and g == 250 and b == 250) or (r == 221 and g == 221 and b == 221)):\n birdY = y\n gameOver = False\n # birdLastSeen = time.time()\n break\n if not gameOver:\n pipeLocate = pyautogui.locate(pipeBottomImage,pic)\n if pipeLocate != None:\n floorYDebounce[1] = floorYDebounce[0]\n floorYDebounce[0] = pipeLocate[1]\n prevFloorY = floorY\n floorX = pipeLocate[0]\n if (pipeLocate[1] != prevFloorY and floorYDebounce[0] == floorYDebounce[1]):\n floorY = pipeLocate[1]\n autoPilot = False\n pipeSeenCounter = pipeSeenCounter + 1\n pipeScored = False\n print('Floor Change %s from %s to %s'%(pipeSeenCounter, prevFloorY, floorY))\n else:\n if(not pipeScored and floorX < birdX):\n pipeScored = True\n pipeScoreCounter = pipeScoreCounter + 1\n SCORE = pipeScoreCounter * 1000\n if autoPilot:\n if floorY < birdY+80 and afterSpaceCount > 4:\n afterSpaceCount = 0\n lastPressed = time.time()\n pressSpace()\n else: \n afterSpaceCount = 1 + afterSpaceCount\n time.sleep(sleepTime)\n else:\n midYWithOffset = floorY-70\n # pyautogui.moveTo(floorX+reg1, midYWithOffset+reg2,_pause=False)\n distanceFromOptimalY = math.dist([midYWithOffset],[birdY])\n fitness = SCORE - distanceFromOptimalY + (time.time()-start) * (1+pipeScoreCounter)\n if birdY < 85:\n fitness = fitness - 300\n if totalJumpedPerRound < 1:\n fitness = fitness - 300\n inp = (birdY,math.dist([floorX,floorY],[birdX,birdY]),math.dist([floorX+140,floorY],[birdX,birdY])) #,afterSpaceCount,time.time()-lastPressed)\n output = net.activate(inp)\n if (output[0]>=0.5):\n afterSpaceCount = 0\n lastPressed = time.time()\n pressSpace()\n totalJumpedPerRound = totalJumpedPerRound + 1\n else: \n afterSpaceCount = 1 + afterSpaceCount\n time.sleep(sleepTime)\n return(fitness)\n\n# def eval_genomes(genomes, config):\n# global SCORE\n# global GENERATION, MAX_FITNESS, BEST_GENOME, MAX_FITNESS_THIS_GEN, MAX_FITNESS_LAST_GEN\n\n# MAX_FITNESS_LAST_GEN = MAX_FITNESS_THIS_GEN\n# MAX_FITNESS_THIS_GEN = float('-inf')\n# GENERATION += 1\n# i = 0\n# for genome_id, genome in genomes:\n# i+=1\n# genome.fitness = game(genome, config)\n# if genome.fitness is None:\n# genome.fitness = float('-inf') #fixes errors on early termination\n# print(\"Gen : {} Genome # : {} Fitness : {} Max Fitness : {}\".format(GENERATION,i,genome.fitness,MAX_FITNESS))\n# if (genome.fitness):\n# if genome.fitness >= MAX_FITNESS:\n# MAX_FITNESS = genome.fitness\n# BEST_GENOME = genome\n# if genome.fitness >= MAX_FITNESS_THIS_GEN:\n# MAX_FITNESS_THIS_GEN = genome.fitness\n# SCORE = 0\n# print(\"GEN COMPLETE : {} Best Fitness : {} Improvement Over Last : {}\".format(GENERATION, MAX_FITNESS_THIS_GEN, MAX_FITNESS_THIS_GEN - MAX_FITNESS_LAST_GEN))\n\nconfig = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n 'config')\n\n# pop = neat.Population(config)\n# stats = neat.StatisticsReporter()\n# pop.add_reporter(stats)\n\n# winner = pop.run(eval_genomes, 100)\n\n# print(winner)\n\n\n\n# outputDir = os.getcwd() + '/bestGenomes'\n# Path(outputDir).mkdir(parents =True, exist_ok=True)\n# os.chdir(outputDir)\n# serialNo = len(os.listdir(outputDir))+1\n# outputFile = open(str(serialNo)+'_'+str(int(MAX_FITNESS))+'.p','wb' )\n\n# pickle.dump(winner, outputFile)\n\n\n\ngenomeDir = os.getcwd() + '/bestGenomes'\ngenomeFile = '%s/3_6064.p'%(genomeDir)\ngenome = pickle.load(open(genomeFile,'rb'))\n\nfitnessScores = []\n\nfor i in range(10):\n\tfitness = game(genome, config)\n\tSCORE = 0\n\tprint('Fitness is %f'% fitness)\n\tfitnessScores.append(fitness)\n\n","repo_name":"dthulin/flappy-bot","sub_path":"RunBot.py","file_name":"RunBot.py","file_ext":"py","file_size_in_byte":8500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73425403255","text":"import discord\nfrom time import sleep\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nimport os\nimport re\nimport cogs\n\nload_dotenv()\n\nintents = discord.Intents().all()\nbot = commands.Bot(command_prefix=\";\", intents=intents)\n\n\n@bot.event\nasync def on_ready():\n print(f\"Logged in as {bot.user}\")\n\n\ndef is_spam(message):\n full_of_spaces = re.compile(r'(.*)(\\s{5})(.*)', re.IGNORECASE)\n repeating_the_same_word = re.compile(\n r'\\b(\\w+)\\n\\1\\b', re.IGNORECASE)\n is_full_of_spaces = re.search(\n full_of_spaces, message.content)\n is_repeating_the_same_word = re.search(\n repeating_the_same_word, message.content)\n\n is_spam = True if is_full_of_spaces or is_repeating_the_same_word else False\n\n return is_spam\n\n\n@bot.event\nasync def on_message(message):\n if is_spam(message):\n await message.delete()\n\n request_channel_id = register_channel(message)\n\n if message.author.id != bot.user.id:\n bot_member = await message.guild.fetch_member(bot.user.id)\n if message.channel.id == request_channel_id:\n\n try:\n voice_client = await connect(message)\n await bot_member.edit(deafen=True)\n\n except discord.errors.ClientException:\n members_on_channel = bot_member.voice.channel.members\n\n if len(members_on_channel) > 1 and message.author.voice.channel != bot_member.voice.channel:\n await message.channel.send(\n content='Eu já estou sendo usado em outra chamada, entre nela para ouvir junto com os outros :)')\n elif len(members_on_channel) == 1:\n voice_client = message.guild.voice_client\n await disconnect(message)\n await connect(message)\n await bot_member.edit(deafen=True)\n finally:\n voice_client.play(discord.FFmpegPCMAudio(\n executable=\"C:/ffmpeg/bin/ffmpeg.exe\", source=music_path))\n\n await bot.process_commands(message)\n\n\n@bot.command()\nasync def tempban(ctx, member, time=10):\n split = member.split('@!')\n member = ctx.guild.get_member(int(split[1][:-1]))\n tempban_role = discord.utils.get(ctx.guild.roles, name=\"tempban\")\n\n if member.top_role.permissions.administrator:\n return\n\n if discord.utils.get(ctx.guild.roles, name=\"tempban\") not in ctx.guild.roles:\n await ctx.guild.create_role(\n name='tempban', permissions=discord.Permissions(send_messages=False))\n\n roles = {}\n roles[f'{member}'] = []\n\n for role in member.roles:\n if 'everyone' in role.name:\n continue\n\n roles[f'{member}'].append(role)\n await member.remove_roles(role)\n\n await member.add_roles(tempban_role)\n\n sleep(int(time))\n await member.remove_roles(tempban_role)\n for role in roles[f'{member}']:\n await member.add_roles(role)\n\n\nbot.add_cog(cogs.Music(bot))\nbot.add_cog(cogs.Wiki(bot))\n\nbot.run(os.getenv('TOKEN'))\n","repo_name":"gCarvalhoF/Zezinho-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72185481657","text":"# -*- coding: utf-8 -*-\r\nr\"\"\"\r\n mail\r\n ~~~~~~~\r\n\r\n Utility for generating verify-code.\r\n\r\n :copyright: (c) 2013 by Harvey Wang.\r\n\"\"\"\r\n\r\nimport time\r\nimport base64\r\nimport hashlib\r\nfrom flask.ext.mail import Mail, Message\r\nfrom flask import current_app, render_template\r\nfrom threading import Thread\r\n\r\n# 利用当前管理员的信息生成验证码\r\ndef create_verify_token(user):\r\n timestamp = int(time.time())\r\n secret = current_app.secret_key\r\n token = '%s%s%s%s' % (secret, timestamp, user.accid, user.token)\r\n hsh = hashlib.sha1(token).hexdigest()\r\n return str(base64.b32encode('%s|%s%s' % (timestamp, user.accid, hsh)))[0:16]\r\n\r\ndef send_async_mail(msg):\r\n mail.send(msg)\r\n\r\ndef send_email(subject, sender, recipients, text_body, html_body):\r\n msg = Message(subject, sender = sender, recipients = recipients)\r\n msg.body = text_body\r\n msg.html = html_body\r\n thr = Thread(target = send_async_mail, args = [msg])\r\n thr.start()\r\n\r\ndef send_verify_code(username, useremail, verifycode, sender):\r\n send_email(\"您的UE坐标邀请码\",\r\n sender[0],\r\n [useremail],\r\n render_template(\"user/verify_code.txt\",\r\n username = username,\r\n verifycode = verifycode),\r\n render_template(\"user/verify_code.html\",\r\n username = username,\r\n verifycode = verifycode),\r\n )\r\n\r\nmail = Mail()","repo_name":"harveyqing/ue_rear","sub_path":"ueBackstage/utils/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20621470552","text":"import os\nfrom setuptools import setup\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name=\"anno_goats\",\n version=\"0.1.0\", # Keep in sync with __init__.py\n author=\"sqwishy\",\n author_email=\"somebody@froghat.ca\",\n description=(),\n license=\"GPLv3\",\n packages=[\"anno_goats\"],\n package_data={},\n long_description=read(\"README.md\"),\n classifiers=[],\n install_requires=[\"PySide6\",\"lxml\"],\n entry_points={\n \"console_scripts\": [\n \"anno-goats=anno_goats.__main__:main\",\n \"anno-goats-ui=anno_goats.ui.__main__:main\",\n ],\n },\n)\n","repo_name":"sqwishy/anno_goats","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14235975360","text":"from a_Xcel_functions import *\nfrom Stock_functions import float_entry\nimport Xcel_files\n\n# ********************** Add your excel file path here ********************** #\nCommon_excel_file_name = f\"{Xcel_files.file_path}{Xcel_files.file1_name}\"\nJasi_excel_file_name = f\"{Xcel_files.file_path}{Xcel_files.file2_name}\"\nsheet_name_for_common_and_jasi = \"P&L writing\"\n# ********************** Add your excel file path here ********************** #\n# For reading & writing intraday list from & to excel file\nintraday_list_row_start = 11\nintraday_list_row_ending = 67\nintraday_list_column_start = 2\nintraday_list_column_ending = 6\nintraday_blank_list_format = ['', '', '', '', '']\n\n# For reading & writing delivery buy list from excel file\ndelivery_list_row_start = 73\ndelivery_list_row_ending = 198\ndelivery_list_column_start = 2\ndelivery_list_column_ending = 8\ndelivery_blank_list_format = [None, None, None, None, None, None, None]\n\n\ndef to_write_current_month_profit_for_recording(excel_filename):\n current_month_pnl = float_entry('Prev month P&L to be added : ')\n print(\"Processing.....\")\n row_number_to_write = iteration_to_find_a_filled_till(excel_filename, sheet_name_for_common_and_jasi, 18, 66,\n 16, 16, [None])\n # print(excel_filename, sheet_name_for_common_and_jasi, row_number_to_write, 16,\n # current_month_pnl)\n over_writing_a_cell_to_excel(excel_filename, sheet_name_for_common_and_jasi, row_number_to_write, 16,\n current_month_pnl)\n # input(f'Copy \"{month} {year} P&L\" and paste in the excel file and press enter')\n # over_writing_a_cell_to_excel(excel_filename, sheet_name_for_common_and_jasi, row_number_to_write, 15,\n # f\"{Month} {Year} P&L\")\n\n\ndef intraday_trade_resetting(excel_filename):\n list_for_resetting = []\n for y in range(56):\n list_for_resetting.append(intraday_blank_list_format)\n over_write_list_of_list_to_excel(excel_filename, sheet_name_for_common_and_jasi, intraday_list_row_start,\n intraday_list_column_start, list_for_resetting)\n\n\ndef delivery_trade_resetting(excel_filename):\n delivery_list_of_list = reading_list_from_excel(excel_filename, sheet_name_for_common_and_jasi,\n delivery_list_row_start, delivery_list_row_ending,\n delivery_list_column_start, delivery_list_column_ending)\n x = 0\n new_list_of_list = []\n for individual_list in delivery_list_of_list:\n if individual_list[0:4] != [None, None, None] and individual_list[4:7] == [None, None, None]:\n new_list_of_list.append(individual_list)\n else:\n x += 1\n for y in range(x):\n new_list_of_list.append([None, None, None, None, None, None, None])\n delivery_list_of_list = new_list_of_list\n over_write_list_of_list_to_excel(excel_filename, sheet_name_for_common_and_jasi,\n delivery_list_row_start, intraday_list_column_start, delivery_list_of_list)\n\n\ndef to_reset_excel_file():\n file_name = Common_excel_file_name\n if True:\n if file_name != Jasi_excel_file_name and file_name == Common_excel_file_name:\n to_write_current_month_profit_for_recording(file_name)\n intraday_trade_resetting(file_name)\n delivery_trade_resetting(file_name)\n print(f'\"{Xcel_files.file1_name}\" is reset successfully!!')\n if file_name == Jasi_excel_file_name:\n pass\n else:\n to_jasi = input(\"\\nTo reset Jasi's file also: press 'y':\")\n if to_jasi == \"y\":\n file_name = Jasi_excel_file_name\n to_write_current_month_profit_for_recording(file_name)\n intraday_trade_resetting(file_name)\n delivery_trade_resetting(file_name)\n print(f'\"{Xcel_files.file2_name}\" is reset successfully!!\\n')\n\n# ******************************************************************************************************\n# ************************************** END OF CODE ***************************************************\n# ******************************************************************************************************\n\n# ******************************************************************************************************\n# *********************************TEST CODES BELOW ***************************************************\n# ******************************************************************************************************\n\n\n# to_reset_excel_file()\n\n# intraday_trade_resetting(f\"{Xcel_files.file_path}{Xcel_files.file1_name}\")\n# print('first done')\n# delivery_trade_resetting(f\"{Xcel_files.file_path}{Xcel_files.file1_name}\")\n# print('second done')\n\n# to_write_current_month_profit_for_recording(f\"{Xcel_files.file_path}{Xcel_files.file1_name}\")\n","repo_name":"afsalmpm/pythonProject","sub_path":"stock_with_list_process/Resetting_the_excel_for_next_month.py","file_name":"Resetting_the_excel_for_next_month.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36196669913","text":"DEFAULT_NAMESPACE = \"default\"\n\nAPI_VERSION_V1 = \"v1\"\nAPPS_API_VERSION_V1 = \"apps/v1\"\nDEPLOYMENT_KIND = \"Deployment\"\nNAMESPACE_KIND = \"Namespace\"\nPOD_KIND = \"Pod\"\nPERSISTENT_VOLUME_CLAIM_KIND = \"PersistentVolumeClaim\"\nSECRET_KIND = \"Secret\"\nSECRET_TYPE_DOCKER_CONFIG = \"kubernetes.io/dockerconfigjson\"\nSECRET_DATA_DOCKER_CONFIG = {\n '.dockerconfigjson': 'eyJhdXRocyI6eyJzd3IuY24tbm9ydGgtNC5teWh1YXdlaWNsb3VkLmNvbSI6eyJ1c2VybmFtZSI6ImNuLW5vcnRoLTRAWlhYSlVGTENDNEJFMU5IR0FWSzQiLCJwYXNzd29yZCI6ImZjOWZmYTU3ZjI4YzdhNzZjMjgzNTE4Njk4NTBmZTg2MjU0MjNlYjBjZmY2NThhYzNiYTBjMGY0ZTUxZWIyYmYiLCJhdXRoIjoiWTI0dGJtOXlkR2d0TkVCYVdGaEtWVVpNUTBNMFFrVXhUa2hIUVZaTE5EcG1ZemxtWm1FMU4yWXlPR00zWVRjMll6STRNelV4T0RZNU9EVXdabVU0TmpJMU5ESXpaV0l3WTJabU5qVTRZV016WW1Fd1l6Qm1OR1UxTVdWaU1tSm0ifX19'}\nSECRET_NAME_DOCKER_CONFIG = \"huaweicloud-bj-registry\"\n\nIMAGE_PULL_POLICY_ALWAYS = \"always\"\nIMAGE_PULL_POLICY_IF_NOT_PRESENT = \"IfNotPresent\"\n\nRESTART_POLICY_NEVER = \"Never\"\n\n# resource requirement fields\nRESOURCE_REQUIREMENTS_CPU = \"cpu\"\nRESOURCE_REQUIREMENTS_MEMORY = \"memory\"\nRESOURCE_REQUIREMENTS_GPU = \"nvidia.com/gpu\"\nRESOURCE_REQUIREMENTS_STORAGE = \"storage\"\nRESOURCE_REQUIREMENTS_ATTRIBUTE_SETS = frozenset(\n [RESOURCE_REQUIREMENTS_CPU, RESOURCE_REQUIREMENTS_MEMORY, RESOURCE_REQUIREMENTS_GPU])\n\n# storage mount fields\nVOLUME_MOUNT_NAME = \"name\"\nVOLUME_MOUNT_PATH = \"mount_path\"\nVOLUME_MOUNT_ATTRIBUTE_SETS = frozenset([VOLUME_MOUNT_NAME, VOLUME_MOUNT_PATH])\n\nVOLUME_ACCESS_MODE_READ_WRITE_MANY = \"ReadWriteMany\"\nVOLUME_STORAGE_CLASS_JUICEFS = \"juicefs-mvp\"\n","repo_name":"ClaytonWang/huanghe","sub_path":"source/services/cluster/k8s/const/workloads_const.py","file_name":"workloads_const.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3494191961","text":"from carro import VeiculoLocacao\nfrom script import tela_inicial\nfrom cliente import Cliente\nfrom reserva import Reserva\n \n \ndef main():\n carro1 = VeiculoLocacao(\"Creta\", \"Hyundai\", \"SUV\", \"Manual\", \"Gasolina\", \"2003\", \"AAA-1111\", 111)\n carro2 = VeiculoLocacao(\"Marea\", \"Fiat\", \"Wagon\", \"Automatico\", \"gasolina\", \"2005\", \"BBB-2222\", 222)\n carro3 = VeiculoLocacao(\"Gol\", \"Volkswagen\", \"Popular\", \"Automatico\", \"flex\", \"2012\", \"CCC-3333\", 333)\n\n cliente1 = Cliente(\"cookie\", \"Cookie Monster\", \"1234\", \"1234\")\n cliente2 = Cliente(\"aladin\", \"Aladin The King\", \"0987\", \"0987\")\n cliente3 = Cliente(\"cesar\", \"Cesar Labs\", \"2424\", \"2424\")\n cliente4 = Cliente(\"gato\", \"Garfield The Cat\", \"0000\", \"0000\")\n\n\n\n tela_inicial()\n\nif __name__ == \"__main__\":\n main()","repo_name":"helenamagaldi/locadora_acme","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33515861812","text":"\"\"\"Breach The Door\"\"\"\ndef decode(sentence, newbox):\n \"\"\"Func. decode for decoding this problem\"\"\"\n box = sentence.split(\" \")\n for i in box:\n if len(i) > 6:\n if i.isalpha():\n newbox.append(i)\n elif i.isalnum():\n newbox.append(i.replace(\"0\", \"\").replace(\"1\", \"\").replace(\"2\", \"\").replace(\"3\", \"\")\\\n .replace(\"4\", \"\").replace(\"5\", \"\").replace(\"6\", \"\").replace(\"7\", \"\")\\\n .replace(\"8\", \"\").replace(\"9\", \"\"))\n else:\n text = ''\n for j in i:\n if j.isalpha():\n text += j\n else:\n j = ''\n text += j\n newbox.append(text)\n for i in newbox:\n if len(i) > 6:\n if i.isalpha():\n print(i, end=\" \")\ndecode(input(), [])\n","repo_name":"AnTznimalz/python_prepro","sub_path":"btd.py","file_name":"btd.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9819449975","text":"import sys\nsys.stdin = open('input_2439.txt', 'r')\n\n# 백준 단계별 풀이 - for문 - 2439 문제\n# 별을 찍되, 오른쪽 정렬시키기\n\nN = int(input())\nstar = '*'\nblank = ' '\n\nfor i in range(1, N+1):\n print((blank*(N-i)) + (star*i))\n","repo_name":"YunyLee/BaekJoon","sub_path":"3. for문/for문_별찍기2.py","file_name":"for문_별찍기2.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16833737870","text":"import numpy as np\nimport pandas as pd\n\ntolerancia = 1e-6\nxl = 0\nxu = 4\n\n#b = 0.618*0.618*xu - xl\n\ndata = [] # Lista para almacenar los valores en cada iteración\ndef f(x):\n return 2 * np.sin(x) - x**2 / 10\n\ndef dorada(xl, xu):\n error = abs(xu - xl)\n d = 0.618*(xu - xl)\n x1 = xl + d \n x2 = xu - d\n fx1 = f(x1)\n fx2 = f(x2)\n\n \n \n if error <= tolerancia:\n df = pd.DataFrame(data, columns=[\"xu\", \"xl\", \"x1\", \"x2\", \"error\"])\n return df\n else:\n if fx2 > fx1:\n xu = x2\n data.append([xu, xl, x1, x2, error])\n return dorada(xl, xu) \n elif fx2 < fx1:\n xl = x1\n data.append([xu, xl, x1, x2, error])\n return dorada(xl, xu) \n else:\n \n return x1\n \n\n\nroot = dorada(xl, xu)\nprint(root)\n","repo_name":"Neltrin22/optimizacionudea","sub_path":"razondorada.py","file_name":"razondorada.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6531003068","text":"########################################################################################\n#\n# This run script encapsulates the training and evaluation of a speaker\n# recognition model defined by the hydra configuration.\n#\n# Author(s): Nik Vaessen\n########################################################################################\n\nimport os\nimport time\nimport hydra\n\nfrom dotenv import load_dotenv\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom src.util.hydra_resolvers import (\n division_resolver,\n integer_division_resolver,\n random_uuid,\n random_experiment_id,\n)\n\n################################################################################\n# set custom resolvers\n\nOmegaConf.register_new_resolver(\"divide\", division_resolver)\nOmegaConf.register_new_resolver(\"idivide\", integer_division_resolver)\nOmegaConf.register_new_resolver(\"random_uuid\", random_uuid)\nOmegaConf.register_new_resolver(\"random_name\", random_experiment_id)\n\n################################################################################\n# wrap around main hydra script\n\n\n@hydra.main(config_path=\"config\", config_name=\"train_speaker\")\ndef run(cfg: DictConfig):\n # we import here such that tab-completion in bash\n # does not need to import everything (which slows it down\n # significantly)\n from src.main import main\n\n return main(cfg)\n\n\n################################################################################\n# execute hydra application\n\nif __name__ == \"__main__\":\n load_dotenv()\n\n env_var = os.environ\n if \"SLURM_ARRAY_TASK_ID\" in env_var:\n job_id = int(env_var[\"SLURM_ARRAY_TASK_ID\"])\n sleep_sec = 2 * int(job_id) + 1\n print(f\"detected slurm array job: sleeping for {sleep_sec} sec\")\n time.sleep(sleep_sec)\n\n run()\n","repo_name":"nikvaessen/w2v2-speaker-few-samples","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"28070696392","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nimport math\nNUM_EPOCHS = 17\nBATCH_SIZE = 256\nCHANNEL_SIZE = 4\nUSE_CUDA = True\nDOUBLE_N = 7\nM = 2**CHANNEL_SIZE\n# bit / channel_use\ncommunication_rate = 4/7\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass RadioTransformerNetwork(nn.Module):\n def __init__(self, in_channels, compressed_dim):\n super(RadioTransformerNetwork, self).__init__()\n\n self.in_channels = in_channels\n\n self.encoder = nn.Sequential(\n nn.Linear(in_channels, in_channels),\n nn.ReLU(inplace=True),\n nn.Linear(in_channels, compressed_dim),\n nn.BatchNorm1d(compressed_dim)\n )\n\n self.decoder = nn.Sequential(\n nn.Linear(compressed_dim, compressed_dim),\n nn.ReLU(inplace=True),\n nn.Linear(compressed_dim, in_channels)\n )\n\n def forward(self, x):\n x = self.encoder(x)\n\n # Normalization.\n #x = (self.in_channels ** 2) * (x / x.norm(dim=-1)[:, None])\n\n # 7dBW to SNR.\n training_signal_noise_ratio = 5.01187\n\n # Simulated Gaussian noise.\n noise = torch.autograd.Variable(torch.randn(*x.size()) / math.sqrt(2 * communication_rate * training_signal_noise_ratio))\n if USE_CUDA: noise = noise.cuda()\n x += noise\n\n x = self.decoder(x)\n\n return x\n\n\nclass Encoder(nn.Module):\n def __init__(self,in_channels, compressed_dim):\n super(Encoder, self).__init__()\n\n self.in_channels = in_channels\n\n self.encoder = nn.Sequential(\n nn.Linear(in_channels, in_channels),\n nn.ReLU(inplace=True),\n nn.Linear(in_channels, compressed_dim),\n nn.BatchNorm1d(compressed_dim)\n )\n\n\n def forward(self, x):\n x = self.encoder(x)\n\n # Normalization.\n #x = (self.in_channels ** 2) * (x / x.norm(dim=-1)[:, None])\n\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, in_channels, compressed_dim):\n super(Decoder, self).__init__()\n self.decoder = nn.Sequential(\n nn.Linear(compressed_dim, compressed_dim),\n nn.ReLU(inplace=True),\n nn.Linear(compressed_dim, in_channels)\n )\n\n def forward(self, x):\n x = self.decoder(x)\n\n return x\n\nclass TensorDataset(Dataset):\n\n def __init__(self, data_name, label_name,transform = None):\n self.data_all = data_name\n print(self.data_all.shape)\n\n self.label_all = label_name\n print(self.label_all.shape)\n\n self.transform = transform\n\n def __len__(self):\n return len(self.label_all)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n data = self.data_all[idx,:]\n label = self.label_all[idx]\n\n sample = {'signal':data, 'label':label}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\nif __name__ == \"__main__\":\n import numpy as np\n import torch.optim as optim\n model = RadioTransformerNetwork(M, compressed_dim=DOUBLE_N)\n if USE_CUDA: model = model.cuda()\n\n train_labels = (torch.rand(10000) * M).long()\n train_data = torch.sparse.torch.eye(M).index_select(dim=0, index=train_labels)\n\n test_labels = (torch.rand(45000) * M).long()\n test_data = torch.sparse.torch.eye(M).index_select(dim=0, index=test_labels)\n\n optimizer = optim.Adam(model.parameters())\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.1)\n loss_fn = nn.CrossEntropyLoss()\n\n trainset = TensorDataset(train_data, train_labels)\n trainloader = DataLoader(trainset, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=4)\n\n testset = TensorDataset(test_data, test_labels)\n testloader = DataLoader(testset, batch_size=BATCH_SIZE,\n shuffle=True, num_workers=4)\n\n Loss_list = []\n Accuracy_list = []\n\n for epoch in range(NUM_EPOCHS):\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n inputs, labels = data['signal'], data['label']\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n outputs = model(inputs)\n loss = loss_fn(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels)#.squeeze()\n c = c.cpu()\n c = c.sum()\n c = c.numpy()\n\n Accuracy_list.append(c / BATCH_SIZE) if i % 40 != 39 else Accuracy_list.append(c / 16)\n\n Loss_list.append(loss.item() / BATCH_SIZE)\n\n if i % 40 == 39:\n print('[%d, %5d] loss: %.3f acc: %.3f' %\n (epoch + 1, i + 1, running_loss / 39, Accuracy_list[-2]))\n running_loss = 0.0\n\n print('Finished Training')\n\n class_correct = list(0. for i in range(M))\n class_total = list(0. for i in range(M))\n with torch.no_grad():\n for data in testloader:\n # print(data.shape)\n inputs, labels = data['signal'], data['label']\n\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, predicted = torch.max(outputs, 1)\n\n c = (predicted == labels)\n c = c.cpu()\n #c = c.numpy()\n for i in range(len(labels)):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n for i in range(M):\n print('Accuracy of %5s : %2d %2d ' % (\n i, class_correct[i], class_total[i]))\n\n PATH = './E2E.pth'\n torch.save(model.state_dict(), PATH)\n\n encoder = Encoder(M, DOUBLE_N)\n encoder.load_state_dict(torch.load(PATH), strict=False)\n #encoder.cuda()\n\n decoder = Decoder(M, DOUBLE_N)\n decoder.load_state_dict(torch.load(PATH), strict=False)\n #decoder.cuda()\n\n EbNodB_range = list(i for i in np.arange(-4.0, 8.5, 0.5))\n #print(EbNodB_range)\n ber = [None] * len(EbNodB_range)\n\n for n in range(len(EbNodB_range)):\n EbNo = 10.0 ** (EbNodB_range[n] / 10.0)\n noise_std = np.sqrt(1 / (2 * communication_rate * EbNo))\n all_errors = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = data['signal'], data['label']\n\n encoded_signal = encoder(inputs)\n noise = torch.autograd.Variable(torch.randn(*encoded_signal.size()) / ((2 * communication_rate *EbNo) ** 0.5))\n #print(encoded_signal.shape, len(noise))\n final_signal = encoded_signal + noise\n final_signal = final_signal.float()\n #print(encoded_signal, final_signal)\n #print(final_signal.dtype)\n _, outputs = torch.max(decoder(final_signal), 1)\n errors = (outputs != labels)\n errors = errors.numpy().sum()\n all_errors += errors\n\n ber[n] = all_errors/45000\n print(\"SNR:\", EbNodB_range[n], \"BER:\", ber[n])\n\n import matplotlib.pyplot as plt\n\n plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(7,4)')\n # plt.plot(list(EbNodB_range), ber_theory, 'ro-',label='BPSK BER')\n plt.yscale('log')\n plt.xlabel('SNR Range')\n plt.ylabel('Block Error Rate')\n plt.grid()\n plt.legend(loc='upper right', ncol=1)\n\n plt.savefig('AutoEncoder_7_4_BER_matplotlib')\n plt.show()","repo_name":"wkzza1830/radio-transformer-networks-master","sub_path":"RTN_noTorchnet.py","file_name":"RTN_noTorchnet.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8183061194","text":"import random\nfrom django.shortcuts import render, redirect\nfrom ..forms import GraczeForm, GraczeWTurniejuForm\nfrom ..models import Gracze, GraczeWTurnieju, Turnieje, Mecze\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required(login_url=\"login\")\ndef gracze_view(request):\n print(request.user)\n gracze = Gracze.objects.all()\n data = {\n 'gracze': gracze,\n 'name': request.user,\n 'title': 'Lista graczy'\n }\n return render(request, 'lista_graczy.html', data)\n\n\n@login_required(login_url=\"login\")\ndef dodaj_gracza_view(request):\n print(request.user)\n form = GraczeForm(request.GET or None)\n if request.POST:\n form = GraczeForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(gracze_view)\n data = {\n 'form': form,\n 'name': request.user,\n 'title': 'Dodaj gracza'\n }\n return render(request, 'dodaj_gracza.html', data)\n\n\n@login_required(login_url=\"login\")\ndef usun_gracza_view(request, gracz_id):\n print(request.user)\n gracz = Gracze.objects.get(id=gracz_id)\n gracz.delete()\n return redirect(gracze_view)\n\n\n@login_required(login_url=\"login\")\ndef dodaj_gracza_do_turnieju_view(request, turniej_id):\n print(request.user)\n form = GraczeWTurniejuForm(request.POST or None)\n if form.is_valid():\n form.save()\n return redirect('/lista_turniejow/' + str(turniej_id))\n data = {\n 'form': form,\n 'name': request.user,\n 'title': 'Dodaj gracza do turnieju'\n }\n return render(request, 'dodaj_gracza.html', data)\n\n\n@login_required(login_url=\"login\")\ndef usun_gracza_z_turnieju_view(request, id, turniej_id):\n print(request.user)\n gracz_w_turnieju = GraczeWTurnieju.objects.get(id=id)\n gracz_w_turnieju.delete()\n return redirect('/lista_turniejow/' + str(turniej_id))\n\n\n@login_required(login_url=\"login\")\ndef paruj_graczy_turnieju_view(request, turniej_id):\n print(request.user)\n turniej = Turnieje.objects.get(id=turniej_id)\n gracze_w_turnieju = GraczeWTurnieju.objects.filter(turniej=turniej_id)\n lista_graczy = []\n for gracz in gracze_w_turnieju:\n lista_graczy.append(gracz)\n if turniej.ilosc_graczy > len(lista_graczy):\n liczba_meczy = len(lista_graczy)//2\n else:\n liczba_meczy = turniej.ilosc_graczy//2\n for mecz in range(liczba_meczy):\n para = random.sample(set(lista_graczy), 2)\n Mecze.objects.create(id_turnieju=turniej, faza=1, id_gracza1=para[0].gracz, id_gracza2=para[1].gracz)\n lista_graczy.remove(para[0])\n lista_graczy.remove(para[1])\n return redirect('/lista_turniejow/' + str(turniej_id))\n","repo_name":"aleksandrazb/Django-Turnieje","sub_path":"project_turnieje/app_turnieje/Views/gracze.py","file_name":"gracze.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12387106081","text":"from apscheduler.schedulers.background import BackgroundScheduler\nimport atexit\nfrom app.reco_fetcher import RecoFetcher\nfrom app.history_manager import HistoryManager\nfrom app.email_manager import EmailManager\nfrom app.utils import Constants\n\n\nclass Scheduling:\n def __init__(self):\n self.scheduler = BackgroundScheduler()\n\n def run(self):\n self.scheduler.add_job(func=self.check_recommendations, trigger=\"interval\",\n seconds=30)\n self.scheduler.start()\n atexit.register(lambda: self.scheduler.shutdown())\n\n def check_recommendations(self):\n print(\"checking recommendations!\")\n rf = RecoFetcher()\n www = rf.get_www()\n reco_list_new = rf.get_reco_list(www)\n\n # history\n hm = HistoryManager()\n history = hm.get_from_file()\n new_only = hm.compare_lists_and_choose_unseen(history, reco_list_new)\n hm.save_new_reco_to_file(new_only)\n\n # send email\n if len(new_only) > 0:\n em = EmailManager()\n message = em.prepare(new_only)\n subject = '[MDM] New note!'\n em.send(login=Constants.LOGIN, password=Constants.PASSWORD,\n recipient=Constants.MY_EMAIL, subject=subject, message=message)\n\n\n\n","repo_name":"rafaluk/RecoAlerter","sub_path":"app/scheduling.py","file_name":"scheduling.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16461635630","text":"\"\"\"\ncreated: 2023/02/22 13:24:46 \n@author: seraph★vega\ncontact: admin@pythonspecialops.com\nproject: Bank Management System (BMS)\nmetadoc: BMS that connects to a sqlite3 database\nlicense: MIT\n\"\"\"\n\nfrom bms_connection import BMSConnection\nfrom bms_application import BMSApplication\n\n\ndef main():\n conn = BMSConnection()\n app = BMSApplication(conn)\n while True:\n app.display_menu()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"seraph776/bank-management-system","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74678397816","text":"import ui\n\nclass StopWatch(ui.View):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.value = 0\n self.state = 'stop'\n self.update_interval = .1\n \n def draw(self):\n t0 = (self.value//(600*60), self.value//600, self.value//10)\n t1 = (t0[0], t0[1]%60, t0[2]%60)\n ui.draw_string(\"{:02}:{:02}:{:02}\".format(*t1),\n font=('Helvetica', 20),\n rect=(150, 0, 0, 0),\n color='black',\n alignment=ui.ALIGN_CENTER)\n \n def update(self):\n if self.state == 'run':\n self.value += 1\n self.set_needs_display()\n\n\n \ndef button_action(sender):\n v1 = sender.superview['view1'] \n if sender.title == 'Reset':\n v1.value = 0\n v1.state = 'stop'\n elif sender.title == 'Start':\n v1.value = 0\n v1.state = 'run'\n elif sender.title == 'Stop':\n v1.state = 'stop'\n \n \nv = ui.load_view()\nv.present('sheet') \n\n","repo_name":"encela95dus/ios_pythonista_examples","sub_path":"stopwatch1.py","file_name":"stopwatch1.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"22"} +{"seq_id":"28051575953","text":"from pyspark import SparkContext, SparkConf,SQLContext\nimport pyspark\nfrom zipcode import zipCode,zipCodeTree\nimport time\nimport os\nimport boto3\nimport PIL\nfrom PIL import Image\nimport tempfile\nimport numpy as np\nfrom itertools import islice\nfrom satelliteImage import satelliteImage\nfrom SparktomySQL import MySQLConnector\nfrom imageProcessor import ImageProcessor\nimport pyspark.sql.functions\nfrom solarPanels import SolarPanels\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import StructType, StructField, IntegerType, FloatType, StringType, TimestampType, LongType\n\n\n\ndef fetchzipcodes():\n zipcodes = sc.textFile('s3a://ideanalytics/zcta2010.csv')\n zc =zipcodes.mapPartitionsWithIndex(lambda idx, it: islice(it, 1, None) if idx == 0 else it)\n return zc.collect()\n\n\ndef processBatchEncloser(data):\n global nbThread\n imageProcessor = ImageProcessor(zipsearch,nbThread)\n return imageProcessor.processBatch(data)\n\nif __name__=='__main__':\n\n #Configuring spark\n conf = SparkConf().setAppName('test')\n sc = SparkContext(conf = conf, pyFiles=['zipcode.py', 'solarirradiance.py','satelliteImage.py','SparktomySQL.py','imageProcessor.py','solarPanels.py'])\n sqlContext = SQLContext(sc)\n\n #Set up S3 access keys for spark\n sc._jsc.hadoopConfiguration().set(\"fs.s3a.access.key\", os.environ[\"AWS_ACCESS_KEY\"])\n sc._jsc.hadoopConfiguration().set(\"fs.s3a.secret.key\", os.environ[\"AWS_SECRET_KEY\"])\n\n #Create a mysqlclient\n sqlhost='10.0.0.8'\n sqluser='ubuntu'\n sqlpassword = os.environ[\"MYSQL_SECRET_KEY\"]\n sqldatabase='zipcloud'\n mysqlConnector = MySQLConnector(sqlContext,host=sqlhost, user=sqluser, password=sqlpassword,databaseName=sqldatabase)\n \n # Agreage solar panels by zipcode location return a dataFrame and Uudate the mysqldatabase\n #solarUpdate = SolarPanels(sqlContext,'s3a://ideanalytics/solar_panel_install.csv','s3a://ideanalytics/zcta2010.csv')\n #solarUpdate.updateDataBase(mysqlConnector,table='solarPanels', mode='overwrite')\n\n #Create a zipCode search tree\n zipcodeList = fetchzipcodes()\n start_time = time.time()\n zipsearch = zipCodeTree(list(map(zipCode,zipcodeList)))\n print('zipCodeTree init :', time.time() - start_time)\n \n #imagelist = sc.textFile('s3a://ideanalytics/scene_list_LandSat_Data.csv').mapPartitionsWithIndex(lambda idx, it: islice(it, 1, None) if idx == 0 else it)\n imagelist = sqlContext.read.csv('s3a://ideanalytics/scene_list_LandSat_Data.csv',header='true')\n \n #Remove duplicates\n columns = imagelist.columns\n aggregationdic = {column : 'max' for column in columns}\n imagelist = imagelist.groupBy(imagelist[\"entityId\"]).agg(aggregationdic).select([pyspark.sql.functions.col('max('+column+')').alias(column) for column in columns])\n \n #Remove image already processed\n #Download the csv file containing satellite images metadata\n proccessedImages = mysqlConnector.loadImageIDInDatabase()\n imagelist = imagelist.join(proccessedImages, imagelist[\"entityId\"] == proccessedImages[\"id\"],\"leftanti\")\n \n #Remove images not intersecting any zipcode\n \n imagelist = imagelist.rdd\n imagelist = imagelist.filter(lambda x: len(zipsearch.intersects(list(map(float,x[7:11]))))>0)\n\n \n \n #Group rdd elements by small batches to speed up download time\n nbThread = 10\n\n imlen = imagelist.count()\n nbThread = min(nbThread,imlen) \n imagelist = imagelist.map(lambda x: ','.join(x))\n imagelist = imagelist.zipWithIndex().map(lambda x: (x[1]%(imlen//nbThread),x[0]))\n #print(imagelist.take(10))\n \n imagelist = imagelist.groupByKey().mapValues(list).map(lambda x: x[1])\n print(imagelist.take(10))\n\n #ProcessImages\n imagelist = imagelist.flatMap(processBatchEncloser)\n \n #Push cloudcoverage data to mysql database\n DFRow = Row(\"id\",\"zipcode\",\"timestamp\",\"cloudcoverage\",\"area\",\"zipcodearea\")\n cloudcoveragedf = imagelist.map(lambda x: DFRow(*x)).toDF()\n mysqlConnector.writeToMySQL('cloudcoverage',cloudcoveragedf)\n \n \n","repo_name":"biby/InsightProject","sub_path":"src/spark/SolarInsight.py","file_name":"SolarInsight.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9979354333","text":"#!/usr/bin/env python\nimport rospkg\n\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\n\nrospack = rospkg.RosPack()\npkg_path = rospack.get_path('stopsign')\n\nIMAGE_RATE = 11 # hz\n\nBULK_DATA_FILE = '%s/data/003_manual_labels/all.csv' % (pkg_path,)\n\nstart_image_id = 0\nend_image_id = 2189\n\nIMAGE_BASE_STRING = '%s/data/002_original_images/%s' % (pkg_path, 'frame%04d.jpg')\n\ndescriptors = []\nfor i in range(32):\n descriptors.append('descr%02d' % (i,))\n\nklass = ['class'.ljust(7)]\n\ndef load_data(seed=None):\n df = pd.read_csv(BULK_DATA_FILE, header=0)\n # mutate data back from stored form\n df['class '] = df['class '].apply(lambda cls: cls / 1000.0)\n df['angle '] = df['angle '].apply(lambda ang: ang / 1000.0)\n df['respons'] = df['respons'].apply(lambda res: res / 100000000.0)\n\n # split into class, features\n X = df[descriptors]\n y = df[klass]\n print(y.describe())\n\n # use mask to split into test, train\n if seed is not None:\n np.random.seed(seed)\n msk = np.random.rand(len(df)) < 0.8\n train_X = X[msk].as_matrix()\n test_X = X[~msk].as_matrix()\n train_y = y[msk].as_matrix().ravel()\n test_y = y[~msk].as_matrix().ravel()\n return train_X, train_y, test_X, test_y\n\ndef subsample_data(X, y, ratio=0.5, seed=None):\n size = 1100\n rus = RandomUnderSampler(\n ratio={\n 0: int(size * ratio),\n 1: int(size * (1 - ratio)),\n },\n random_state=seed)\n return rus.fit_sample(X, y)\n\ndata = {}\n\ndef fit_predict_time_accuracy_precision_recall(classifier, classifier_id, train_X, train_y, test_X, test_y):\n acc_accum = 0\n pre_accum = 0\n rec_accum = 0\n tim_accum = 0\n for seed in range(0, 10):\n print('round %4d/%4d' % (seed, num_tests))\n train_X, train_y = subsample_data(train_X, train_y, 0.5, seed+9001)\n # print('begin fitting')\n classifier.fit(train_X, train_y)\n # print('end fitting')\n\n # print('begin pred')\n stime = dt.datetime.now()\n y_pred = classifier.predict(test_X)\n time_accum += (dt.datetime.now() - stime).total_seconds()\n # print('end pred')\n # print('begin scoring')\n acc_accum += accuracy_score(y_true=test_y, y_pred=y_pred)\n pre_accum += precision_score(y_true=test_y, y_pred=y_pred)\n rec_accum += recall_score(y_true=test_y, y_pred=y_pred)\n\n acc = acc_accum / 10.0\n pre = pre_accum / 10.0\n rec = rec_accum / 10.0\n time = time_accum / 10.0\n\n global data\n data[classifier_id] = {\n 'classifier': str(type(classifier)),\n 'classifier_id': str(classifier_id),\n 'accuracy': acc,\n 'precision': pre,\n 'recall': rec,\n 'predict time': time,\n }\n\nfptapr = fit_predict_time_accuracy_precision_recall\n\nif __name__ == '__main__':\n ### Begin the whole process ###\n\n '''\n Matrix of factors:\n List of Algorithms\n List of tuning parameters for each algorithm\n\n Output\n For each classifier:\n The list of tuning parameters sorted by accuracy, precision, recall, prediction time\n Each algorithm is then represented by its best parameter set for each of accuracy, precision, recall, time\n\n \n '''\n\n # load data from csv, split into training and test sets\n print('begin loading data')\n train_X, train_y, test_X, test_y = load_data(12345)\n\n Klassifiers = [\n (GradientBoostingClassifier, [\n {'loss': 'deviance'},\n {'loss': 'exponential'},\n ]), \n # GaussianProcessClassifier, # This gave a MemoryError on round 0/6\n (SGDClassifier, []), \n (KNeighborsClassifier, []), \n (MLPClassifier, []),\n (SVC, []),\n (DecisionTreeClassifier, []),\n ]\n num_tests = 10\n for index, Klassifier in enumerate(Klassifiers):\n acc = []\n pre = []\n rec = []\n for num_neighbors in range(0, 7):\n print('num neighbors %d' % (num_neighbors + 1,))\n acc_accum = 0\n pre_accum = 0\n rec_accum = 0\n for seed in range(0, num_tests):\n print('round %4d/%4d' % (seed, num_tests))\n train_X, train_y = subsample_data(train_X, train_y, 0.5, seed+9001)\n # print('begin fitting')\n classifier = Klassifier(n_neighbors=num_neighbors+1)\n classifier.fit(train_X, train_y)\n # print('end fitting')\n\n # print('begin pred')\n y_pred = classifier.predict(test_X)\n # print('end pred')\n # print('begin scoring')\n acc_accum += accuracy_score(y_true=test_y, y_pred=y_pred)\n pre_accum += precision_score(y_true=test_y, y_pred=y_pred)\n rec_accum += recall_score(y_true=test_y, y_pred=y_pred)\n # print('end scoring')\n acc.append(acc_accum / num_tests)\n pre.append(pre_accum / num_tests)\n rec.append(rec_accum / num_tests)\n print(Klassifier)\n print('a: %.4f (percent correctly classified)' % (sum(acc)/len(acc),))\n print('p: %.4f (percent of correct positives)' % (sum(pre)/len(pre),))\n print('r: %.4f (percent of positive results found)' % (sum(rec)/len(rec),))\n\n","repo_name":"cwrucutter/stopsign","sub_path":"src/v1/parallel_ml_compare.py","file_name":"parallel_ml_compare.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72224066617","text":"def ope(x, y):\n ad = x + y\n su = x - y\n mu = x * y\n di = x / y\n return ad, su, mu, di\n\nprint('Inicio do programa')\na = int(input('Digite um numero: '))\nb = int(input('Digite outro numero: '))\ns = ope(a, b)\nprint(f'Soma: {s[0]}, Subritração: {s[1]}, Multiplicação: {s[2]}, Divisão: {s[3]}')\n\nlista = ['Lista', 'é mutável' , 'pode receber novos valores', 'e é sequencial inicia em 0 e vai a n - 1']\nlista1 = [] # pode ser criada sem valor\nlista2 = list(2*x for x in range(10)) # pode ser criada usando list() comprehension, com for in, Sua sintaxe básica é:\n# [item for item in lista]\nprint(lista, lista1, lista2)\ntupla = ('Tuplas', 'Imutável ', 'Não permite insersão ', 'mas pode ser acessado pela posição ', 'na sequência')\nprint(tupla)\nset = {'Set', 'permite adicionar', 'valores usando a função add() ', 'mas não permite acesso pela posição'}\nprint(set)\ndicionario = {'nome': 'João', 'idade': 30, 'cidade': 'São Paulo'} # dicionários dict, é mutável, pode ser adicionado nos valores,\n# Para acessar um valor em um dicionário, basta digitar: nome_dicionario[chave],\n# E, para atribuir um novo valor, use: nome_dicionario[chave] = novo_valor","repo_name":"andreluizdsantos/Curso_ADS","sub_path":"python/ex/exfunc.py","file_name":"exfunc.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72348209656","text":"\"\"\"Helper methods for computing activation.\n\n--- NOTATION ---\n\nThe following letters are used throughout this module.\n\nT = number of input tensors to the model\nE = number of examples (storm objects)\n\"\"\"\n\nimport pickle\nimport numpy\nfrom keras import backend as K\nfrom gewittergefahr.gg_io import storm_tracking_io as tracking_io\nfrom gewittergefahr.gg_utils import file_system_utils\nfrom gewittergefahr.gg_utils import error_checking\nfrom gewittergefahr.deep_learning import model_interpretation\n\nFULL_IDS_KEY = tracking_io.FULL_IDS_KEY\nSTORM_TIMES_KEY = tracking_io.STORM_TIMES_KEY\nMODEL_FILE_NAME_KEY = 'model_file_name'\nCOMPONENT_TYPE_KEY = 'component_type_string'\nTARGET_CLASS_KEY = 'target_class'\nLAYER_NAME_KEY = 'layer_name'\nNEURON_INDICES_KEY = 'neuron_index_matrix'\nCHANNEL_INDICES_KEY = 'channel_indices'\n\nHIT_INDICES_KEY = 'hit_indices'\nMISS_INDICES_KEY = 'miss_indices'\nFALSE_ALARM_INDICES_KEY = 'false_alarm_indices'\nCORRECT_NULL_INDICES_KEY = 'correct_null_indices'\n\n\ndef check_metadata(\n component_type_string, target_class=None, layer_name=None,\n neuron_index_matrix=None, channel_indices=None):\n \"\"\"Error-checks metadata for activation calculations.\n\n C = number of model components (classes, neurons, or channels) for which\n activations were computed\n\n :param component_type_string: Component type (must be accepted by\n `model_interpretation.check_component_type`).\n :param target_class: See doc for `get_class_activation_for_examples`.\n :param layer_name: See doc for `get_neuron_activation_for_examples` or\n `get_channel_activation_for_examples`.\n :param neuron_index_matrix: [used only if component_type_string = \"neuron\"]\n C-by-? numpy array, where neuron_index_matrix[j, :] contains array\n indices of the [j]th neuron whose activation was computed.\n :param channel_indices: [used only if component_type_string = \"channel\"]\n length-C numpy array, where channel_indices[j] is the index of the\n [j]th channel whose activation was computed.\n :return: num_components: Number of model components (classes, neurons, or\n channels) whose activation was computed.\n \"\"\"\n\n model_interpretation.check_component_type(component_type_string)\n\n if (component_type_string ==\n model_interpretation.CLASS_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer(target_class)\n error_checking.assert_is_geq(target_class, 0)\n num_components = 1\n\n if component_type_string in [\n model_interpretation.NEURON_COMPONENT_TYPE_STRING,\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING\n ]:\n error_checking.assert_is_string(layer_name)\n\n if (component_type_string ==\n model_interpretation.NEURON_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer_numpy_array(neuron_index_matrix)\n error_checking.assert_is_geq_numpy_array(neuron_index_matrix, 0)\n error_checking.assert_is_numpy_array(\n neuron_index_matrix, num_dimensions=2)\n num_components = neuron_index_matrix.shape[0]\n\n if (component_type_string ==\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer_numpy_array(channel_indices)\n error_checking.assert_is_geq_numpy_array(channel_indices, 0)\n num_components = len(channel_indices)\n\n return num_components\n\n\ndef get_class_activation_for_examples(\n model_object, target_class, list_of_input_matrices):\n \"\"\"For each input example, returns predicted probability of target class.\n\n :param model_object: Instance of `keras.models.Model`.\n :param target_class: Predictions will be returned for this class. Must be\n an integer in 0...(K - 1), where K = number of classes.\n :param list_of_input_matrices: length-T list of numpy arrays, comprising\n one or more examples (storm objects). list_of_input_matrices[i] must\n have the same dimensions as the [i]th input tensor to the model.\n :return: activation_values: length-E numpy array, where activation_values[i]\n is the activation (predicted probability or logit) of the target class\n for the [i]th example.\n \"\"\"\n\n check_metadata(\n component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,\n target_class=target_class\n )\n\n if isinstance(model_object.input, list):\n list_of_input_tensors = model_object.input\n else:\n list_of_input_tensors = [model_object.input]\n\n num_output_neurons = model_object.layers[-1].output.get_shape().as_list()[\n -1]\n\n if num_output_neurons == 1:\n error_checking.assert_is_leq(target_class, 1)\n if target_class == 1:\n output_tensor = model_object.layers[-1].output[..., 0]\n else:\n output_tensor = 1. - model_object.layers[-1].output[..., 0]\n else:\n error_checking.assert_is_less_than(target_class, num_output_neurons)\n output_tensor = model_object.layers[-1].output[..., target_class]\n\n activation_function = K.function(\n list_of_input_tensors + [K.learning_phase()],\n [output_tensor])\n\n return activation_function(list_of_input_matrices + [0])[0]\n\n\ndef get_neuron_activation_for_examples(\n model_object, layer_name, neuron_indices, list_of_input_matrices):\n \"\"\"For each input example, returns the activation of one neuron.\n\n :param model_object: Instance of `keras.models.Model`.\n :param layer_name: Name of layer containing the relevant neuron.\n :param neuron_indices: 1-D numpy array with indices of the relevant neuron.\n Must have length K - 1, where K = number of dimensions in layer output.\n The first dimension of the layer output is the example dimension, for\n which all indices from 0...(E - 1) are used.\n :param list_of_input_matrices: See doc for\n `get_class_activation_for_examples`.\n :return: activation_values: length-E numpy array, where activation_values[i]\n is the activation of the given neuron by the [i]th example.\n \"\"\"\n\n check_metadata(\n component_type_string=model_interpretation.NEURON_COMPONENT_TYPE_STRING,\n layer_name=layer_name,\n neuron_index_matrix=numpy.expand_dims(neuron_indices, axis=0)\n )\n\n if isinstance(model_object.input, list):\n list_of_input_tensors = model_object.input\n else:\n list_of_input_tensors = [model_object.input]\n\n activation_function = K.function(\n list_of_input_tensors + [K.learning_phase()],\n [model_object.get_layer(name=layer_name).output[..., neuron_indices]])\n\n return activation_function(list_of_input_matrices + [0])[0]\n\n\ndef get_channel_activation_for_examples(\n model_object, layer_name, channel_index, list_of_input_matrices,\n stat_function_for_neuron_activations):\n \"\"\"For each input example, returns the activation of one channel.\n\n :param model_object: Instance of `keras.models.Model`.\n :param layer_name: Name of layer containing the relevant channel.\n :param channel_index: Index of the relevant channel. This method computes\n activations for the [j]th output channel of `layer_name`, where\n j = `channel_index`.\n :param list_of_input_matrices: See doc for\n `get_class_activation_for_examples`.\n :param stat_function_for_neuron_activations: Function used to process neuron\n activations (needed because a channel generally has many neurons). This\n function must take a Keras tensor (containing neuron activations) and\n return a single number. Some examples are `keras.backend.max` and\n `keras.backend.mean`.\n :return: activation_values: length-E numpy array, where activation_values[i]\n is stat_function_for_neuron_activations(channel_activations) for the\n [i]th example.\n \"\"\"\n\n check_metadata(\n component_type_string=\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING,\n layer_name=layer_name,\n channel_indices=numpy.array([channel_index], dtype=int)\n )\n\n if isinstance(model_object.input, list):\n list_of_input_tensors = model_object.input\n else:\n list_of_input_tensors = [model_object.input]\n\n activation_function = K.function(\n list_of_input_tensors + [K.learning_phase()],\n [stat_function_for_neuron_activations(\n model_object.get_layer(name=layer_name).output[..., channel_index])\n ]\n )\n\n return activation_function(list_of_input_matrices + [0])[0]\n\n\ndef get_hilo_activation_examples(\n storm_activations, num_high_activation_examples,\n num_low_activation_examples, unique_storm_cells,\n full_storm_id_strings=None):\n \"\"\"Finds examples (storm objects) with highest and lowest activations.\n\n E = number of examples\n\n :param storm_activations: length-E numpy array of model activations.\n :param num_high_activation_examples: Number of high-activation examples to\n return.\n :param num_low_activation_examples: Number of low-activation examples to\n return.\n :param unique_storm_cells: Boolean flag. If True, each set will contain no\n more than one example per storm cell. If False, each set may contain\n multiple examples from the same storm cell.\n :param full_storm_id_strings: [used only if `unique_storm_cells == True`]\n length-E list of full storm IDs.\n :return: low_indices: 1-D numpy array with indices of low-activation\n examples.\n :return: high_indices: 1-D numpy array with indices of high-activation\n examples.\n \"\"\"\n\n error_checking.assert_is_numpy_array(storm_activations, num_dimensions=1)\n error_checking.assert_is_boolean(unique_storm_cells)\n num_examples = len(storm_activations)\n\n if unique_storm_cells:\n expected_dim = numpy.array([num_examples], dtype=int)\n\n error_checking.assert_is_string_list(full_storm_id_strings)\n error_checking.assert_is_numpy_array(\n numpy.array(full_storm_id_strings), exact_dimensions=expected_dim\n )\n\n error_checking.assert_is_integer(num_high_activation_examples)\n error_checking.assert_is_geq(num_high_activation_examples, 0)\n error_checking.assert_is_integer(num_low_activation_examples)\n error_checking.assert_is_geq(num_low_activation_examples, 0)\n error_checking.assert_is_greater(\n num_high_activation_examples + num_low_activation_examples, 0\n )\n\n num_low_activation_examples = min([\n num_low_activation_examples, num_examples\n ])\n num_high_activation_examples = min([\n num_high_activation_examples, num_examples\n ])\n\n low_indices = numpy.array([], dtype=int)\n high_indices = numpy.array([], dtype=int)\n\n if num_low_activation_examples > 0:\n sort_indices = numpy.argsort(storm_activations)\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n low_indices = sort_indices[:num_low_activation_examples]\n\n if num_high_activation_examples > 0:\n sort_indices = numpy.argsort(-1 * storm_activations)\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n high_indices = sort_indices[:num_high_activation_examples]\n\n return high_indices, low_indices\n\n\ndef get_contingency_table_extremes(\n storm_activations, storm_target_values, num_hits, num_misses,\n num_false_alarms, num_correct_nulls, unique_storm_cells,\n full_storm_id_strings=None):\n \"\"\"Returns \"contingency-table extremes\".\n\n Specifically, this method returns the following:\n\n - best hits (positive examples with the highest activations)\n - worst misses (positive examples with the lowest activations)\n - worst false alarms (negative examples with the highest activations)\n - best correct nulls (negative examples with the lowest activations)\n\n DEFINITIONS\n\n One \"example\" is one storm object.\n A \"negative example\" is a storm object with target = 0.\n A \"positive example\" is a storm object with target = 1.\n The target variable must be binary.\n\n E = number of examples\n\n :param storm_activations: length-E numpy array of model activations.\n :param storm_target_values: length-E numpy array of target values. These\n must be integers from 0...1.\n :param num_hits: Number of best hits.\n :param num_misses: Number of worst misses.\n :param num_false_alarms: Number of worst false alarms.\n :param num_correct_nulls: Number of best correct nulls.\n :param unique_storm_cells: See doc for `get_hilo_activation_examples`.\n :param full_storm_id_strings: Same.\n :return: ct_extreme_dict: Dictionary with the following keys.\n ct_extreme_dict['hit_indices']: 1-D numpy array with indices of best hits.\n ct_extreme_dict['miss_indices']: 1-D numpy array with indices of worst\n misses.\n ct_extreme_dict['false_alarm_indices']: 1-D numpy array with indices of\n worst false alarms.\n ct_extreme_dict['correct_null_indices']: 1-D numpy array with indices of\n best correct nulls.\n \"\"\"\n\n error_checking.assert_is_numpy_array(storm_activations, num_dimensions=1)\n error_checking.assert_is_boolean(unique_storm_cells)\n\n num_examples = len(storm_activations)\n expected_dim = numpy.array([num_examples], dtype=int)\n\n if unique_storm_cells:\n error_checking.assert_is_string_list(full_storm_id_strings)\n error_checking.assert_is_numpy_array(\n numpy.array(full_storm_id_strings), exact_dimensions=expected_dim\n )\n\n error_checking.assert_is_integer_numpy_array(storm_target_values)\n error_checking.assert_is_geq_numpy_array(storm_target_values, 0)\n error_checking.assert_is_leq_numpy_array(storm_target_values, 1)\n error_checking.assert_is_numpy_array(\n storm_target_values, exact_dimensions=expected_dim)\n\n error_checking.assert_is_integer(num_hits)\n error_checking.assert_is_geq(num_hits, 0)\n error_checking.assert_is_integer(num_misses)\n error_checking.assert_is_geq(num_misses, 0)\n error_checking.assert_is_integer(num_false_alarms)\n error_checking.assert_is_geq(num_false_alarms, 0)\n error_checking.assert_is_integer(num_correct_nulls)\n error_checking.assert_is_geq(num_correct_nulls, 0)\n error_checking.assert_is_greater(\n num_hits + num_misses + num_false_alarms + num_correct_nulls, 0\n )\n\n positive_indices = numpy.where(storm_target_values == 1)[0]\n negative_indices = numpy.where(storm_target_values == 0)[0]\n\n num_hits = min([num_hits, len(positive_indices)])\n num_misses = min([num_misses, len(positive_indices)])\n num_false_alarms = min([num_false_alarms, len(negative_indices)])\n num_correct_nulls = min([num_correct_nulls, len(negative_indices)])\n\n hit_indices = numpy.array([], dtype=int)\n miss_indices = numpy.array([], dtype=int)\n false_alarm_indices = numpy.array([], dtype=int)\n correct_null_indices = numpy.array([], dtype=int)\n\n if num_hits > 0:\n these_indices = numpy.argsort(-1 * storm_activations[positive_indices])\n sort_indices = positive_indices[these_indices]\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n hit_indices = sort_indices[:num_hits]\n\n if num_misses > 0:\n these_indices = numpy.argsort(storm_activations[positive_indices])\n sort_indices = positive_indices[these_indices]\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n miss_indices = sort_indices[:num_misses]\n\n if num_false_alarms > 0:\n these_indices = numpy.argsort(-1 * storm_activations[negative_indices])\n sort_indices = negative_indices[these_indices]\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n false_alarm_indices = sort_indices[:num_false_alarms]\n\n if num_correct_nulls > 0:\n these_indices = numpy.argsort(storm_activations[negative_indices])\n sort_indices = negative_indices[these_indices]\n\n if unique_storm_cells:\n these_id_strings = numpy.array(full_storm_id_strings)[sort_indices]\n _, these_unique_indices = numpy.unique(\n these_id_strings, return_index=True)\n\n these_unique_indices = numpy.sort(these_unique_indices)\n sort_indices = sort_indices[these_unique_indices]\n\n correct_null_indices = sort_indices[:num_correct_nulls]\n\n return {\n HIT_INDICES_KEY: hit_indices,\n MISS_INDICES_KEY: miss_indices,\n FALSE_ALARM_INDICES_KEY: false_alarm_indices,\n CORRECT_NULL_INDICES_KEY: correct_null_indices\n }\n\n\ndef write_file(\n pickle_file_name, activation_matrix, full_id_strings,\n storm_times_unix_sec, model_file_name, component_type_string,\n target_class=None, layer_name=None, neuron_index_matrix=None,\n channel_indices=None):\n \"\"\"Writes activations to Pickle file.\n\n E = number of examples (storm objects)\n C = number of model components (classes, neurons, or channels) for which\n activations were computed\n\n :param pickle_file_name: Path to output file.\n :param activation_matrix: E-by-C numpy array of activations, where\n activation_matrix[i, j] = activation of the [j]th model component for\n the [i]th example.\n :param full_id_strings: length-E list of full storm IDs.\n :param storm_times_unix_sec: length-E numpy array of storm times.\n :param model_file_name: Path to file with trained model.\n :param component_type_string: See doc for `check_metadata`.\n :param target_class: Same.\n :param layer_name: Same.\n :param neuron_index_matrix: Same.\n :param channel_indices: Same.\n \"\"\"\n\n num_components = check_metadata(\n component_type_string=component_type_string, target_class=target_class,\n layer_name=layer_name, neuron_index_matrix=neuron_index_matrix,\n channel_indices=channel_indices)\n error_checking.assert_is_string(model_file_name)\n\n error_checking.assert_is_string_list(full_id_strings)\n error_checking.assert_is_numpy_array(\n numpy.array(full_id_strings), num_dimensions=1)\n num_examples = len(full_id_strings)\n\n error_checking.assert_is_integer_numpy_array(storm_times_unix_sec)\n error_checking.assert_is_numpy_array(\n storm_times_unix_sec, exact_dimensions=numpy.array([num_examples]))\n\n error_checking.assert_is_numpy_array_without_nan(activation_matrix)\n error_checking.assert_is_numpy_array(\n activation_matrix,\n exact_dimensions=numpy.array([num_examples, num_components]))\n\n metadata_dict = {\n FULL_IDS_KEY: full_id_strings,\n STORM_TIMES_KEY: storm_times_unix_sec,\n MODEL_FILE_NAME_KEY: model_file_name,\n COMPONENT_TYPE_KEY: component_type_string,\n TARGET_CLASS_KEY: target_class,\n LAYER_NAME_KEY: layer_name,\n NEURON_INDICES_KEY: neuron_index_matrix,\n CHANNEL_INDICES_KEY: channel_indices,\n }\n\n file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)\n pickle_file_handle = open(pickle_file_name, 'wb')\n pickle.dump(activation_matrix, pickle_file_handle)\n pickle.dump(metadata_dict, pickle_file_handle)\n pickle_file_handle.close()\n\n\ndef read_file(pickle_file_name):\n \"\"\"Reads activations from Pickle file.\n\n :param pickle_file_name: Path to input file.\n :return: activation_matrix: See doc for `write_file`.\n :return: metadata_dict: Dictionary with the following keys.\n metadata_dict['full_id_strings']: See doc for `write_file`.\n metadata_dict['storm_times_unix_sec']: Same.\n metadata_dict['model_file_name']: Same.\n metadata_dict['component_type_string']: Same.\n metadata_dict['target_class']: Same.\n metadata_dict['layer_name']: Same.\n metadata_dict['neuron_index_matrix']: Same.\n metadata_dict['channel_indices']: Same.\n \"\"\"\n\n pickle_file_handle = open(pickle_file_name, 'rb')\n activation_matrix = pickle.load(pickle_file_handle)\n metadata_dict = pickle.load(pickle_file_handle)\n pickle_file_handle.close()\n\n return activation_matrix, metadata_dict\n","repo_name":"thunderhoser/GewitterGefahr","sub_path":"gewittergefahr/deep_learning/model_activation.py","file_name":"model_activation.py","file_ext":"py","file_size_in_byte":21441,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"22"} +{"seq_id":"16731894557","text":"#!/usr/bin/env python3.8\nN, M = map(int, input().split())\nA = []\nfor i in range(N):\n a = input()\n A.append(a)\n\nB = []\nfor i in range(M):\n b = input()\n B.append(b)\n\n# Aを[0,N-M]の範囲で探索する\nfor i in range(N - M + 1):\n for j in range(N - M + 1):\n is_inside = True\n # Bの値と照合していき、不一致があればFalse\n for di in range(M):\n for dj in range(M):\n if B[di][dj] != A[i + di][j + dj]:\n is_inside = False\n if is_inside:\n print('Yes')\n exit()\nprint('No')","repo_name":"harukaeru/CompetitiveProgramming","sub_path":"abc054/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"12145484992","text":"import logging\nfrom datetime import datetime\nfrom tools.optscale_exceptions.common_exc import (\n ConflictException, WrongArgumentsException)\nfrom rest_api.rest_api_server.controllers.discovery_info import DiscoveryInfoController\nfrom rest_api.rest_api_server.controllers.base_async import BaseAsyncControllerWrapper\nfrom rest_api.rest_api_server.exceptions import Err\nfrom rest_api.rest_api_server.models.models import DiscoveryInfo\nfrom rest_api.rest_api_server.utils import check_list_attribute\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.sql import and_, exists\n\nLOG = logging.getLogger(__name__)\n\n\nclass DiscoveryInfosBulkController(DiscoveryInfoController):\n\n def _validate(self, item, is_new=True, **kwargs):\n if is_new:\n self.check_cloud_acc_and_org(item.cloud_account_id)\n resource_type = kwargs.get('resource_type')\n query = self.session.query(exists().where(\n and_(DiscoveryInfo.cloud_account_id == item.cloud_account_id,\n DiscoveryInfo.resource_type == resource_type,\n DiscoveryInfo.deleted.is_(False))))\n di_exist = query.scalar()\n if di_exist:\n raise ConflictException(Err.OE0518,\n [kwargs.get('resource_type'),\n item.cloud_account_id])\n\n def create(self, cloud_account_id, **kwargs):\n result = []\n discovery_infos = kwargs['discovery_info']\n model_type = self._get_model_type()\n try:\n for di_params in discovery_infos:\n di_params['cloud_account_id'] = cloud_account_id\n self.check_create_restrictions(**di_params)\n item = model_type(**di_params)\n self._validate(item, True, **di_params)\n self.session.add(item)\n result.append(item)\n self.session.commit()\n except IntegrityError as ex:\n self.session.rollback()\n raise WrongArgumentsException(Err.OE0003, [str(ex)])\n return result\n\n def delete(self, cloud_account_id, **kwargs):\n self.check_cloud_acc_and_org(cloud_account_id)\n discovery_infos_ids = kwargs['discovery_info']\n now = int(datetime.utcnow().timestamp())\n self.session.query(DiscoveryInfo).filter(\n DiscoveryInfo.id.in_(discovery_infos_ids),\n DiscoveryInfo.deleted.is_(False)\n ).update({DiscoveryInfo.deleted_at: now},\n synchronize_session=False)\n self.session.commit()\n\n\nclass DiscoveryInfosAsyncBulkController(BaseAsyncControllerWrapper):\n def _get_controller_class(self):\n return DiscoveryInfosBulkController\n","repo_name":"hystax/optscale","sub_path":"rest_api/rest_api_server/controllers/discovery_info_bulk.py","file_name":"discovery_info_bulk.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":646,"dataset":"github-code","pt":"22"} +{"seq_id":"74357664375","text":"num = int(input(\"Digite o numero de temperaturas registradas: \"))\nsoma = maior = menor = float(input('Digite a 1 temperatura: '))\n\nfor i in range(2, num+1):\n temp = float(input(\"Digite a %d temperatura: \"%i))\n soma += temp\n\n if temp > maior:\n maior = temp\n if temp < menor:\n menor = temp\nmedia = (soma/num)\nprint(\"A maior temperatura é: %.2f\"%maior)\nprint(\"A menor temperatura é: %.2f\"%menor)\nprint(\"A média das temperaturas é: %.2f\"%media)\n","repo_name":"WagnerSteffen/Aulas-Python","sub_path":"FuncoesBasicas/Maior e menor temperatura.py","file_name":"Maior e menor temperatura.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42061827551","text":"class Solution1:\r\n \"\"\"解法一: 二分查找法【左闭右闭】\r\n \"\"\"\r\n\r\n def mySqrt(self, x: int) -> int:\r\n if x <= 1:\r\n return x\r\n left, right = 0, x\r\n while left <= right:\r\n middle = left + (right - left) // 2\r\n if middle * middle <= x < (middle + 1) * (middle + 1):\r\n return middle\r\n elif x < middle * middle:\r\n right = middle - 1\r\n else:\r\n left = middle + 1\r\n\r\n\r\nclass Solution2:\r\n \"\"\"解法二: 二分查找法【左闭右开】\r\n \"\"\"\r\n\r\n def mySqrt(self, x: int) -> int:\r\n if x <= 1:\r\n return x\r\n left, right = 0, x + 1\r\n while left < right:\r\n middle = left + (right - left) // 2\r\n if middle * middle <= x < (middle+1) * (middle+1):\r\n return middle\r\n elif x < middle * middle:\r\n right = middle\r\n else:\r\n left = middle + 1\r\n\r\n\r\nclass Solution3:\r\n \"\"\"解法三: 牛顿迭代法\r\n\r\n 牛顿迭代法是一种求解无约束优化问题的方法,基于泰勒级数展开以及牛顿-莱布尼茨公式。在求解平方根的问题上,它可以被用来找到满足 f(y) = y^2 - x = 0 的 y 值。\r\n\r\n 牛顿迭代法的基本思想是,首先猜测一个值 y,并不断更新这个猜测值,直到找到一个 y 使得 f(y) 足够接近 0。更新猜测值的公式如下:\r\n\r\n y_new = y - f(y) / f'(y)\r\n\r\n 其中 f'(y) 是 f(y) 对 y 的导数。在这个问题中,f(y) = y^2 - x,因此 f'(y) = 2y。将这些代入更新公式中,我们可以得到:\r\n\r\n y_new = y - (y^2 - x) / 2y\r\n = y/2 + x / (2y)\r\n = (y + x / y) / 2\r\n\r\n 这就是我们在代码中使用的公式 y = (y + x / y) // 2。\r\n\r\n 公式 y_new = y - f(y) / f'(y) 的意思是:新的猜测值 y_new 是当前猜测值 y 减去 f(y) 与 f'(y) 的比值。\r\n\r\n 在这个公式中:\r\n\r\n y 是当前的猜测值\r\n f(y) 是方程 f 在 y 处的值\r\n f'(y) 是方程 f 在 y 处的导数值\r\n\r\n 这个公式来源于泰勒级数的一阶近似。它利用了方程在当前猜测值附近的局部线性性质,来更新当前猜测值,使得新的猜测值更接近方程的根。\r\n \"\"\"\r\n\r\n def mySqrt(self, x: int) -> int:\r\n if x <= 1:\r\n return x\r\n y = x // 2\r\n while y * y > x:\r\n y = (y + x / y) // 2\r\n return int(y)\r\n","repo_name":"lzzzzl/leetcode","sub_path":"array/4-sqrtx/sqrtx.py","file_name":"sqrtx.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7016281759","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom flipper.app import App\nfrom flipper.assets.icon import file2image\n\nICONS_SUPPORTED_FORMATS = [\"png\"]\n\nICONS_TEMPLATE_H_HEADER = \"\"\"#pragma once\n\n#include \n\n\"\"\"\nICONS_TEMPLATE_H_ICON_NAME = \"extern const Icon {name};\\n\"\n\nICONS_TEMPLATE_C_HEADER = \"\"\"#include \"{assets_filename}.h\"\n\n#include \n\n\"\"\"\nICONS_TEMPLATE_C_FRAME = \"const uint8_t {name}[] = {data};\\n\"\nICONS_TEMPLATE_C_DATA = \"const uint8_t* const {name}[] = {data};\\n\"\nICONS_TEMPLATE_C_ICONS = \"const Icon {name} = {{.width={width},.height={height},.frame_count={frame_count},.frame_rate={frame_rate},.frames=_{name}}};\\n\"\n\n\nclass Main(App):\n def init(self):\n # command args\n self.subparsers = self.parser.add_subparsers(help=\"sub-command help\")\n self.parser_icons = self.subparsers.add_parser(\n \"icons\", help=\"Process icons and build icon registry\"\n )\n self.parser_icons.add_argument(\"input_directory\", help=\"Source directory\")\n self.parser_icons.add_argument(\"output_directory\", help=\"Output directory\")\n self.parser_icons.add_argument(\n \"--filename\",\n help=\"Base filename for file with icon data\",\n required=False,\n default=\"assets_icons\",\n )\n\n self.parser_icons.set_defaults(func=self.icons)\n\n self.parser_manifest = self.subparsers.add_parser(\n \"manifest\", help=\"Create directory Manifest\"\n )\n self.parser_manifest.add_argument(\"local_path\", help=\"local_path\")\n self.parser_manifest.add_argument(\n \"--timestamp\",\n help=\"timestamp value to embed\",\n default=0,\n type=int,\n required=False,\n )\n self.parser_manifest.set_defaults(func=self.manifest)\n\n self.parser_copro = self.subparsers.add_parser(\n \"copro\", help=\"Gather copro binaries for packaging\"\n )\n self.parser_copro.add_argument(\"cube_dir\", help=\"Path to Cube folder\")\n self.parser_copro.add_argument(\"output_dir\", help=\"Path to output folder\")\n self.parser_copro.add_argument(\n \"--cube_ver\", dest=\"cube_ver\", help=\"Cube version\", required=True\n )\n self.parser_copro.add_argument(\n \"--stack_type\", dest=\"stack_type\", help=\"Stack type\", required=True\n )\n self.parser_copro.add_argument(\n \"--stack_file\",\n dest=\"stack_file\",\n help=\"Stack file name in copro folder\",\n required=True,\n )\n self.parser_copro.add_argument(\n \"--stack_addr\",\n dest=\"stack_addr\",\n help=\"Stack flash address, as per release_notes\",\n type=lambda x: int(x, 16),\n default=0,\n required=False,\n )\n self.parser_copro.set_defaults(func=self.copro)\n\n self.parser_dolphin = self.subparsers.add_parser(\n \"dolphin\", help=\"Assemble dolphin resources\"\n )\n self.parser_dolphin.add_argument(\n \"-s\",\n \"--symbol-name\",\n help=\"Symbol and file name in dolphin output directory\",\n default=None,\n )\n self.parser_dolphin.add_argument(\n \"input_directory\", help=\"Dolphin source directory\"\n )\n self.parser_dolphin.add_argument(\n \"output_directory\", help=\"Dolphin output directory\"\n )\n self.parser_dolphin.set_defaults(func=self.dolphin)\n\n def _icon2header(self, file):\n image = file2image(file)\n return image.width, image.height, image.data_as_carray()\n\n def _iconIsSupported(self, filename):\n extension = filename.lower().split(\".\")[-1]\n return extension in ICONS_SUPPORTED_FORMATS\n\n def icons(self):\n self.logger.debug(\"Converting icons\")\n icons_c = open(\n os.path.join(self.args.output_directory, f\"{self.args.filename}.c\"),\n \"w\",\n newline=\"\\n\",\n )\n icons_c.write(\n ICONS_TEMPLATE_C_HEADER.format(assets_filename=self.args.filename)\n )\n icons = []\n # Traverse icons tree, append image data to source file\n for dirpath, dirnames, filenames in os.walk(self.args.input_directory):\n self.logger.debug(f\"Processing directory {dirpath}\")\n dirnames.sort()\n filenames.sort()\n if not filenames:\n continue\n if \"frame_rate\" in filenames:\n self.logger.debug(\"Folder contains animation\")\n icon_name = \"A_\" + os.path.split(dirpath)[1].replace(\"-\", \"_\")\n width = height = None\n frame_count = 0\n frame_rate = 0\n frame_names = []\n for filename in sorted(filenames):\n fullfilename = os.path.join(dirpath, filename)\n if filename == \"frame_rate\":\n frame_rate = int(open(fullfilename, \"r\").read().strip())\n continue\n elif not self._iconIsSupported(filename):\n continue\n self.logger.debug(f\"Processing animation frame {filename}\")\n temp_width, temp_height, data = self._icon2header(fullfilename)\n if width is None:\n width = temp_width\n if height is None:\n height = temp_height\n assert width == temp_width\n assert height == temp_height\n frame_name = f\"_{icon_name}_{frame_count}\"\n frame_names.append(frame_name)\n icons_c.write(\n ICONS_TEMPLATE_C_FRAME.format(name=frame_name, data=data)\n )\n frame_count += 1\n assert frame_rate > 0\n assert frame_count > 0\n icons_c.write(\n ICONS_TEMPLATE_C_DATA.format(\n name=f\"_{icon_name}\", data=f'{{{\",\".join(frame_names)}}}'\n )\n )\n icons_c.write(\"\\n\")\n icons.append((icon_name, width, height, frame_rate, frame_count))\n else:\n # process icons\n for filename in filenames:\n if not self._iconIsSupported(filename):\n continue\n self.logger.debug(f\"Processing icon {filename}\")\n icon_name = \"I_\" + \"_\".join(filename.split(\".\")[:-1]).replace(\n \"-\", \"_\"\n )\n fullfilename = os.path.join(dirpath, filename)\n width, height, data = self._icon2header(fullfilename)\n frame_name = f\"_{icon_name}_0\"\n icons_c.write(\n ICONS_TEMPLATE_C_FRAME.format(name=frame_name, data=data)\n )\n icons_c.write(\n ICONS_TEMPLATE_C_DATA.format(\n name=f\"_{icon_name}\", data=f\"{{{frame_name}}}\"\n )\n )\n icons_c.write(\"\\n\")\n icons.append((icon_name, width, height, 0, 1))\n # Create array of images:\n self.logger.debug(\"Finalizing source file\")\n for name, width, height, frame_rate, frame_count in icons:\n icons_c.write(\n ICONS_TEMPLATE_C_ICONS.format(\n name=name,\n width=width,\n height=height,\n frame_rate=frame_rate,\n frame_count=frame_count,\n )\n )\n icons_c.write(\"\\n\")\n icons_c.close()\n\n # Create Public Header\n self.logger.debug(\"Creating header\")\n icons_h = open(\n os.path.join(self.args.output_directory, f\"{self.args.filename}.h\"),\n \"w\",\n newline=\"\\n\",\n )\n icons_h.write(ICONS_TEMPLATE_H_HEADER)\n for name, width, height, frame_rate, frame_count in icons:\n icons_h.write(ICONS_TEMPLATE_H_ICON_NAME.format(name=name))\n icons_h.close()\n self.logger.debug(\"Done\")\n return 0\n\n def manifest(self):\n from flipper.assets.manifest import Manifest\n\n directory_path = os.path.normpath(self.args.local_path)\n if not os.path.isdir(directory_path):\n self.logger.error(f'\"{directory_path}\" is not a directory')\n exit(255)\n manifest_file = os.path.join(directory_path, \"Manifest\")\n old_manifest = Manifest()\n if os.path.exists(manifest_file):\n self.logger.info(\"Manifest is present, loading to compare\")\n old_manifest.load(manifest_file)\n self.logger.info(\n f'Creating temporary Manifest for directory \"{directory_path}\"'\n )\n new_manifest = Manifest(self.args.timestamp)\n new_manifest.create(directory_path)\n\n self.logger.info(\"Comparing new manifest with existing\")\n only_in_old, changed, only_in_new = Manifest.compare(old_manifest, new_manifest)\n for record in only_in_old:\n self.logger.info(f\"Only in old: {record}\")\n for record in changed:\n self.logger.info(f\"Changed: {record}\")\n for record in only_in_new:\n self.logger.info(f\"Only in new: {record}\")\n if any((only_in_old, changed, only_in_new)):\n self.logger.warning(\"Manifests are different, updating\")\n new_manifest.save(manifest_file)\n else:\n self.logger.info(\"Manifest is up-to-date!\")\n\n self.logger.info(\"Complete\")\n\n return 0\n\n def copro(self):\n from flipper.assets.copro import Copro\n\n self.logger.info(\"Bundling coprocessor binaries\")\n copro = Copro()\n try:\n self.logger.info(\"Loading CUBE info\")\n copro.loadCubeInfo(self.args.cube_dir, self.args.cube_ver)\n self.logger.info(\"Bundling\")\n copro.bundle(\n self.args.output_dir,\n self.args.stack_file,\n self.args.stack_type,\n self.args.stack_addr,\n )\n except Exception as e:\n self.logger.error(f\"Failed to bundle: {e}\")\n return 1\n self.logger.info(\"Complete\")\n\n return 0\n\n def dolphin(self):\n from flipper.assets.dolphin import Dolphin\n\n self.logger.info(\"Processing Dolphin sources\")\n dolphin = Dolphin()\n self.logger.info(\"Loading data\")\n dolphin.load(self.args.input_directory)\n self.logger.info(\"Packing\")\n dolphin.pack(self.args.output_directory, self.args.symbol_name)\n self.logger.info(\"Complete\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n Main()()\n","repo_name":"DarkFlippers/unleashed-firmware","sub_path":"scripts/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":10866,"program_lang":"python","lang":"en","doc_type":"code","stars":12119,"dataset":"github-code","pt":"22"} +{"seq_id":"71226539577","text":"import os.path\nfrom typing import Union\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom src.arguments.env_args import EnvArgs\nfrom src.arguments.wm_key_args import WatermarkingKeyArgs\nfrom src.utils.highlited_print import bcolors\n\n\nclass WatermarkingKey(nn.Module):\n\n def __init__(self, wm_key_args: WatermarkingKeyArgs, env_args: EnvArgs = None):\n \"\"\" The base class for a watermarking key. \"\"\"\n super().__init__()\n self.wm_key_args = wm_key_args\n self.env_args = EnvArgs() if env_args is None else env_args # assume default env arguments if none are given.\n\n @staticmethod\n def bits_to_str(bits: torch.Tensor) -> str:\n \"\"\" Convert bits to human-readable string \"\"\"\n if isinstance(bits, str):\n return bits\n msg = \"\"\n for i in range(int(np.ceil(len(bits) / 5))):\n index = [str(x.item()) for x in bits[i * 5:(i + 1) * 5]]\n index = ''.join(map(str, index))\n index = int(index, 2)\n msg += WatermarkingKeyArgs.ALPHABET[index]\n return msg\n\n @staticmethod\n def str_to_bits(msg: str) -> torch.Tensor:\n \"\"\" Convert human-readable string to bits \"\"\"\n if isinstance(msg, torch.Tensor):\n return msg\n msg = msg.upper() # only uppercase\n print(f\"> Converting message '{bcolors.OKGREEN}{msg}{bcolors.ENDC}' to bits.\")\n bits = torch.zeros(size=(len(msg) * 5,))\n for i, letter in enumerate(msg):\n try:\n pos = WatermarkingKeyArgs.ALPHABET.index(letter)\n except ValueError:\n raise ValueError(f\"Letter '{letter}' is not in the alphabet ('{bcolors.OKGREEN}{WatermarkingKeyArgs.ALPHABET}{bcolors.ENDC}').\")\n bitstr = '{0:05b}'.format(pos)\n for j, x in enumerate(bitstr):\n bits[i * 5 + j] = int(x)\n return bits\n\n def save(self, ckpt_fn: str = None) -> dict:\n \"\"\" Saves a key to a single '*.pt' file. If no ckpt_fn is given, only returns the save dict.\"\"\"\n save_dict = {\n WatermarkingKeyArgs.WM_KEY_ARGS_KEY: self.wm_key_args\n }\n if ckpt_fn is not None:\n print(f\"> Saving Watermarking Decoder checkpoint to '{bcolors.OKGREEN}{os.path.abspath(ckpt_fn)}{bcolors.ENDC}'\")\n torch.save(save_dict, ckpt_fn)\n return save_dict\n\n def load(self, ckpt=None):\n \"\"\" Loads a key from a '*.pt' file. \"\"\"\n raise NotImplementedError\n\n def gen_msg(self, n: int) -> torch.Tensor:\n \"\"\" Generate n random binary messages. \"\"\"\n return torch.randint(0, 2, size=(n, self.wm_key_args.bitlen))\n\n def extract(self, x: torch.Tensor, sigmoid=True, **kwargs):\n \"\"\"\n Extracts a embedded_message from one or more images.\n Note: Sigmoid can be turned off if used to compute the loss.\n \"\"\"\n raise NotImplementedError\n\n def validate(self, x: torch.Tensor, msg: Union[str, torch.Tensor]):\n \"\"\"\n Extracts a embedded_message from one or more images and computes the mean bit accuracy.\n \"\"\"\n\n if isinstance(msg, str):\n msg = WatermarkingKey.str_to_bits(msg).unsqueeze(0).repeat([x.shape[0], 1])\n\n msg_pred = self.extract(x, sigmoid=True)\n msg_pred[msg_pred >= 0.5] = 1\n msg_pred[msg_pred < 0.5] = 0\n bitwise_acc = (msg_pred == msg[:, :self.wm_key_args.bitlen].to(x.device)).float().mean(dim=1).mean().item()\n return bitwise_acc\n\n def loss(self, x: torch.Tensor, msg: torch.Tensor):\n \"\"\"\n Given images and a embedded_message, compute the loss.\n \"\"\"\n extracted_msg = self.extract(x, sigmoid=False)\n bitlength = min(self.wm_key_args.bitlen, msg.shape[1])\n return nn.BCEWithLogitsLoss()(extracted_msg[:, :bitlength], msg[:, :bitlength].to(extracted_msg.device))\n\n\n","repo_name":"nilslukas/gan-watermark","sub_path":"src/watermarking_key/wm_key.py","file_name":"wm_key.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"18879316258","text":"#!/usr/bin/env python\n\nimport socket\nimport sys\nimport pickle\n\nHOST, PORT = \"localhost\", 9999\n\n\ncom_object = {\"command\": \"HAHAHA\"}\n\ndata = pickle.dumps(com_object)\n\n# Create a socket (SOCK_STREAM means a TCP socket)\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n\t# Connect to server and send data\n\tsock.connect((HOST, PORT))\n\tsock.sendall(data)\n\n\t# Receive data from the server and shut down\n\treceived = sock.recv(1024)\n\tkalle = pickle.loads(received)\n\tfor i in kalle:\n\t\tprint(i[\"Name\"])\nfinally:\n\tsock.close()\n\nprint((\"Sent:\t {}\".format(data)))\n","repo_name":"tomplast/topqt","sub_path":"netclient.py","file_name":"netclient.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"23160195621","text":"def dig_pow(n, p):\n\n #Convert n to str and then to an array\n digitString = str(n)\n digitArray = []\n for digit in digitString:\n digitArray.append(int(digit))\n\n #check the condition in a loop\n total = 0\n for i in range(0, len(digitArray), 1):\n foo = digitArray[i] ** (p + i)\n total += foo\n k = float(total) / n\n foo = int(k)\n if k == foo:\n return k\n\n return -1","repo_name":"rohituppalapati/codewars","sub_path":"playingwithdigits.py","file_name":"playingwithdigits.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37077333042","text":"import numpy as np\n\nfrom qiskit import QuantumCircuit, assemble\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.providers.aer.library import (\n SaveStatevector, SaveDensityMatrix, SaveStabilizer,\n SaveMatrixProductState)\n\n\nclass QasmSaveStateTests:\n \"\"\"QasmSimulator SaveState instruction tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n BACKEND_OPTS = {}\n\n def test_save_state(self):\n \"\"\"Test save_amplitudes instruction\"\"\"\n\n REFERENCE_SAVE = {\n 'automatic': SaveStabilizer,\n 'stabilizer': SaveStabilizer,\n 'statevector': SaveStatevector,\n 'statevector_gpu': SaveStatevector,\n 'statevector_thrust': SaveStatevector,\n 'density_matrix': SaveDensityMatrix,\n 'density_matrix_gpu': SaveDensityMatrix,\n 'density_matrix_thrust': SaveDensityMatrix,\n 'matrix_product_state': SaveMatrixProductState\n }\n REFERENCE_LABEL = {\n 'automatic': 'stabilizer',\n 'stabilizer': 'stabilizer',\n 'statevector': 'statevector',\n 'statevector_gpu': 'statevector',\n 'statevector_thrust': 'statevector',\n 'density_matrix': 'density_matrix',\n 'density_matrix_gpu': 'density_matrix',\n 'density_matrix_thrust': 'density_matrix',\n 'matrix_product_state': 'matrix_product_state'\n }\n\n opts = self.BACKEND_OPTS.copy()\n method = opts.get('method', 'automatic')\n\n if method in REFERENCE_SAVE:\n\n # Stabilizer test circuit\n num_qubits = 4\n target_instr = REFERENCE_SAVE[method](num_qubits, label='target')\n circ = QuantumCircuit(num_qubits)\n circ.h(0)\n for i in range(1, num_qubits):\n circ.cx(i - 1, i)\n circ.save_state()\n circ.append(target_instr, range(num_qubits))\n label = REFERENCE_LABEL[method]\n\n # Run\n qobj = assemble(circ, self.SIMULATOR)\n result = self.SIMULATOR.run(qobj, **opts).result()\n self.assertTrue(result.success)\n data = result.data(0)\n self.assertIn(label, data)\n self.assertIn('target', data)\n value = data[label]\n target = data['target']\n if method == 'matrix_product_state':\n for val, targ in zip(value[0], target[0]):\n self.assertTrue(np.allclose(val, targ))\n for val, targ in zip(value[1], target[1]):\n self.assertTrue(np.allclose(val, targ))\n else:\n self.assertTrue(np.all(value == target))\n","repo_name":"LaurinFischer/qiskit-aer","sub_path":"test/terra/backends/qasm_simulator/qasm_save_state.py","file_name":"qasm_save_state.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"14958450181","text":"from flask_rebar.swagger_generation import swagger_words as sw\n\n\nclass ExternalDocumentation:\n \"\"\"Represents a Swagger \"External Documentation Object\"\n\n :param str url: The URL for the target documentation. Value MUST be in the format of a URL\n :param str description: A short description of the target documentation\n \"\"\"\n\n def __init__(self, url, description=None):\n self.url = url\n self.description = description\n\n def as_swagger(self):\n \"\"\"Create a Swagger representation of this object\n\n :rtype: dict\n \"\"\"\n doc = {sw.url: self.url}\n if self.description:\n doc[sw.description] = self.description\n return doc\n\n\nclass Tag:\n \"\"\"Represents a Swagger \"Tag Object\"\n\n :param str name: The name of the tag\n :param str description: A short description for the tag\n :param ExternalDocumentation external_docs: Additional external documentation for this tag\n \"\"\"\n\n def __init__(self, name, description=None, external_docs=None):\n self.name = name\n self.description = description\n self.external_docs = external_docs\n\n def as_swagger(self):\n \"\"\"Create a Swagger representation of this object\n\n :rtype: dict\n \"\"\"\n doc = {sw.name: self.name}\n if self.description:\n doc[sw.description] = self.description\n if self.external_docs:\n doc[sw.external_docs] = self.external_docs.as_swagger()\n return doc\n\n\nclass ServerVariable:\n \"\"\"Represents a Swagger \"Server Variable Object\"\n\n :param str default:\n :param str description:\n :param list[str] enum:\n \"\"\"\n\n def __init__(self, default, description=None, enum=None):\n self.default = default\n self.description = description\n self.enum = enum\n\n def as_swagger(self):\n \"\"\"Create a Swagger representation of this object\n\n :rtype: dict\n \"\"\"\n doc = {sw.default: self.default}\n if self.description:\n doc[sw.description] = self.description\n if self.enum:\n doc[sw.enum] = self.enum\n return doc\n\n\nclass Server:\n \"\"\"Represents a Swagger \"Server Object\"\n\n :param str url:\n :param str description:\n :param dict[str, ServerVariable] variables:\n \"\"\"\n\n def __init__(self, url, description=None, variables=None):\n self.url = url\n self.description = description\n self.variables = variables\n\n def as_swagger(self):\n \"\"\"Create a Swagger representation of this object\n\n :rtype: dict\n \"\"\"\n doc = {sw.url: self.url}\n if self.description:\n doc[sw.description] = self.description\n if self.variables:\n doc[sw.variables] = {k: v.as_swagger() for k, v in self.variables.items()}\n return doc\n","repo_name":"plangrid/flask-rebar","sub_path":"flask_rebar/swagger_generation/swagger_objects.py","file_name":"swagger_objects.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":229,"dataset":"github-code","pt":"22"} +{"seq_id":"73812679735","text":"import sys\r\n\r\ninput = lambda: sys.stdin.readline().rstrip()\r\n\r\ndef get_path(n, dist):\r\n path = {}\r\n for _ in range(n):\r\n f, t, l = map(int, input().split())\r\n if t <= dist:\r\n if t in path:\r\n path[t].append((f, l))\r\n else:\r\n path[t] = [(f, l)]\r\n\r\n return path\r\n\r\n\r\ndef dp(dist, path):\r\n d = [0] * (dist + 1)\r\n for i in range(1, dist + 1):\r\n if i in path:\r\n route = [d[i - 1] + 1]\r\n for f, length in path[i]:\r\n route.append(d[f] + length)\r\n d[i] = min(route)\r\n else:\r\n d[i] = d[i - 1] + 1\r\n\r\n return d[dist]\r\n\r\n\r\nn, dist = map(int, input().split())\r\npath = get_path(n, dist)\r\nresult = dp(dist, path)\r\n\r\nprint(result)","repo_name":"lepetitprinz/coding-challenge-auto-push","sub_path":"백준/Silver/1446. 지름길/지름길.py","file_name":"지름길.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2098367100","text":"\"\"\"Takes care of all of the grunt work of obtaining and parsing the\ndata and fixtures for all of the leagues. We also implement a script to\nanalyse upcoming fixtures using the past data of the current season.\n\"\"\"\n\nfrom IPython.display import display, HTML\nimport urllib.error\nimport urllib.request\nimport os\nimport datetime\nimport csv\nimport matplotlib.pyplot as plot\nimport numpy\nimport itertools\nimport collections\n\n\n# Make the graphs twice as big.\nplot.rcParams['savefig.dpi'] = 2 * plot.rcParams['savefig.dpi']\n\n\nclass Match(object):\n\n \"\"\"Holds a parsed match object, each line of a data file is parsed\n into one Match object.\n \"\"\"\n\n def opponent(self, team):\n if team == self.HomeTeam:\n return self.AwayTeam\n else:\n assert team == self.AwayTeam\n return self.HomeTeam\n\n def get_quick_title(self):\n return '{0} vs {1}'.format(self.HomeTeam, self.AwayTeam)\n\n @property\n def home_shots(self):\n return self.HS\n\n @property\n def away_shots(self):\n return self.AS\n\n @property\n def home_tsr(self):\n return clean_ratio(self.HS, self.HS + self.AS)\n\n @property\n def away_tsr(self):\n return clean_ratio(self.AS, self.HS + self.AS)\n\n @property\n def home_sotr(self):\n return clean_ratio(self.HST, self.HST + self.AST)\n\n @property\n def away_sotr(self):\n return clean_ratio(self.AST, self.HST + self.AST)\n\n @property\n def home_tsotr(self):\n return self.home_target_ratio - self.away_target_ratio\n\n @property\n def away_tsotr(self):\n return self.away_target_ratio - self.home_target_ratio\n\n @property\n def home_shooting_ratio(self):\n # The 'max' is here because occassionally teams score without\n # getting a shot on target, this obviously does not entirely solve that\n # problem but does allow a sensible result in the case that the number\n # of goals is higher than the number of shots on target.\n return clean_ratio(self.FTHG, max(self.FTHG, self.HST))\n\n @property\n def away_shooting_ratio(self):\n # See home_shooting_percentage for explanation of max.\n return clean_ratio(self.FTAG, max(self.FTAG, self.AST))\n\n @property\n def home_save_ratio(self):\n return 1.0 - self.away_shooting_ratio\n\n @property\n def away_save_ratio(self):\n return 1.0 - self.home_shooting_ratio\n\n @property\n def home_pdo(self):\n return self.home_shooting_ratio + self.home_save_ratio\n\n @property\n def away_pdo(self):\n return self.away_shooting_ratio + self.away_save_ratio\n\n @property\n def home_target_ratio(self):\n return clean_ratio(self.HST, self.HS + self.HST)\n\n @property\n def away_target_ratio(self):\n return clean_ratio(self.AST, self.AS + self.AST)\n\n @property\n def home_team_rating(self):\n return get_team_rating(self.home_pdo, self.home_tsotr, self.home_tsr)\n\n @property\n def away_team_rating(self):\n return get_team_rating(self.away_pdo, self.away_tsotr, self.away_tsr)\n\n @property\n def home_booking_points(self):\n return (self.HY * 10) + (self.HR * 25)\n\n @property\n def away_booking_points(self):\n return (self.AY * 10) + (self.AR * 25)\n\n @property\n def winning_odds(self):\n return {'H': self.BbAvH, 'A': self.BbAvA, 'D': self.BbAvD}[self.FTR]\n\nint_fields = ['FTHG', 'FTAG', 'HTHG', 'HTAG', 'HS', 'AS', 'HST', 'AST', 'HHW',\n 'AHW', 'HC', 'AC', 'HF', 'AF', 'HO', 'AO', 'HY', 'AY', 'HR', 'AR',\n 'HBP', 'ABP', 'Bb1X2']\n\nfloat_fields = ['BbMxH', 'BbAvH', 'BbMxD',\n 'BbAvD', 'BbMxA', 'BbAvA']\n\n\ndef create_match(field_names, row):\n match = Match()\n for index, name in enumerate(field_names):\n value = row[index]\n if name in int_fields:\n value = int(value)\n if name in float_fields:\n value = float(value)\n setattr(match, name, value)\n return match\n\n\ndef is_home_game(team, game):\n return team == game.HomeTeam\n\n\ndef is_away_game(team, game):\n return team == game.AwayTeam\n\n\ndef involved_in_game(team, game):\n return is_home_game(team, game) or is_away_game(team, game)\n\n\ndef points_in_game(team, game):\n assert involved_in_game(team, game)\n assert game.FTR in ['H', 'D', 'A']\n if game.FTR == 'H' and is_home_game(team, game):\n return 3\n elif game.FTR == 'A' and is_away_game(team, game):\n return 3\n elif game.FTR == 'D':\n return 1\n else:\n return 0\n\n\ndef stats_for_in_game(team, game, home_stat, away_stat):\n assert involved_in_game(team, game)\n if is_home_game(team, game):\n return home_stat\n else:\n return away_stat\n\n\ndef stats_against_in_game(team, game, home_stat, away_stat):\n # A slightly cheeky way to implement this\n return stats_for_in_game(team, game, away_stat, home_stat)\n\n\ndef shots_for_in_game(team, game):\n return stats_for_in_game(team, game, game.HS, game.AS)\n\n\ndef shots_against_in_game(team, game):\n return stats_against_in_game(team, game, game.HS, game.AS)\n\n\ndef goals_for_in_game(team, game):\n return stats_for_in_game(team, game, game.FTHG, game.FTAG)\n\n\ndef goals_against_in_game(team, game):\n return stats_against_in_game(team, game, game.FTHG, game.FTAG)\n\n\ndef sot_for_in_game(team, game):\n return stats_for_in_game(team, game, game.HST, game.AST)\n\n\ndef sot_against_in_game(team, game):\n return stats_against_in_game(team, game, game.HST, game.AST)\n\n\ndef booking_points_for_in_game(team, game):\n return stats_for_in_game(team, game, game.home_booking_points,\n game.away_booking_points)\n\n\ndef booking_points_against_in_game(team, game):\n return stats_against_in_game(team, game, game.home_booking_points,\n game.away_booking_points)\n\n\ndef yellow_cards_in_game(team, game):\n return stats_for_in_game(team, game, game.HY, game.AY)\n\n\ndef red_cards_in_game(team, game):\n return stats_for_in_game(team, game, game.HR, game.AR)\n\n\ndef get_team_rating(pdo, tsotr, tsr):\n \"\"\"Essentially tsr * tsott * pdo, but not weighted equally, James Grayson\n gives it as: Rating = (0.5+(TSR-0.5)*0.732^0.5) *\n (1.0+(%TSOTt-1.0)*0.166^0.5) *\n (1000+(PDO-1000)*0.176^0.5)\n But we have normalised the three values to average at 0. Note that by\n doing this we really shouldn't multipy. Instead we will add, but we will\n add only 0.82 of tsr, 0.45 of tsott and 0.4 of pdo because these appear\n to be the repeatable portions.\n \"\"\"\n normalised_tsr = (tsr - 0.5) * 2.0\n rating = (0.82 * normalised_tsr) + (0.45 * tsotr) + (0.4 * pdo)\n return rating\n\n\ndef clean_ratio(sub, total, default=0.0):\n \"\"\"Returns the ratio of sub to total, assuming that sub is included within\n total. So for example sub may be the shots on target and total may be\n all shots. We return default in the case that total is zero. The default\n likely does not come up much for the stats we're looking at here, you\n would have to have a small sample of games and even then the stats tend\n not to be zero for the total for even a sample of one game. Eg. the\n total number of shots taken is rarely zero for even one game. This is\n just a warning that if you have your default wrongly set, then you\n likely won't notice this and may mess up, say, at the start of the\n season.\n \"\"\"\n return sub / total if total else default\n\n\nclass TeamStats(object):\n\n \"\"\"Note that this is intended to only be used with a set number of games,\n if you change the set of games, then you pretty much have to recalculate\n all of the stats.\n \"\"\"\n\n def __init__(self, teamname, games):\n self.teamname = teamname\n self.games = games\n\n def sum_stat(stat_fun):\n return sum(stat_fun(teamname, game) for game in games)\n\n self.num_games = len(games)\n self.points = sum_stat(points_in_game)\n self.shots_for = sum_stat(shots_for_in_game)\n self.shots_against = sum_stat(shots_against_in_game)\n total_shots = self.shots_for + self.shots_against\n self.tsr = clean_ratio(self.shots_for, total_shots, default=0.5)\n self.goals_for = sum_stat(goals_for_in_game)\n self.goals_against = sum_stat(goals_against_in_game)\n self.sot_for = sum_stat(sot_for_in_game)\n self.sot_against = sum_stat(sot_against_in_game)\n total_sot = self.sot_for + self.sot_against\n self.sotr = clean_ratio(self.sot_for, total_sot, default=0.5)\n self.sot_for_ratio = clean_ratio(self.sot_for, self.shots_for,\n default=0.0)\n self.sot_against_ratio = clean_ratio(self.sot_against,\n self.shots_against, default=0.0)\n self.tsotr = self.sot_for_ratio - self.sot_against_ratio\n self.goals_sot_for_ratio = clean_ratio(self.goals_for, self.sot_for,\n default=0.0)\n self.goals_sot_against_ratio = clean_ratio(self.goals_against,\n self.sot_against,\n default=0.0)\n self.pdo = self.goals_sot_for_ratio - self.goals_sot_against_ratio\n self.yellows = sum_stat(yellow_cards_in_game)\n self.reds = sum_stat(red_cards_in_game)\n self.booking_points_for = sum_stat(booking_points_for_in_game)\n self.booking_points_against = sum_stat(booking_points_against_in_game)\n self.team_rating = get_team_rating(self.pdo, self.tsotr, self.tsr)\n\n self.current_winning_run = 0\n self.current_unbeaten_run = 0\n self.current_winless_run = 0\n self.current_losing_run = 0\n for game in self.games:\n points = points_in_game(self.teamname, game)\n if points == 3:\n self.current_winning_run += 1\n self.current_unbeaten_run += 1\n self.current_winless_run = 0\n self.current_losing_run = 0\n elif points == 1:\n self.current_winning_run = 0\n self.current_unbeaten_run += 1\n self.current_winless_run += 1\n self.current_losing_run = 0\n else:\n assert points == 0\n self.current_winning_run = 0\n self.current_unbeaten_run = 0\n self.current_winless_run += 1\n self.current_losing_run += 1\n\n def average_stat(self, stat_name):\n return getattr(self, stat_name) / float(self.num_games)\n\n def get_stat_from_label(self, label):\n stat_name = label.replace(' ', '_').lower()\n return getattr(self, stat_name)\n\n\ninteresting_stats = ['Shots For', 'Shots Against', 'TSR', 'SOTR', 'Goals For',\n 'Goals Against', 'SOT For', 'SOT Against', 'SOT For Ratio',\n 'SOT Against Ratio', 'TSOTR', 'Goals SOT For Ratio',\n 'Goals SOT Against Ratio', 'PDO', 'Team Rating'\n ]\n\n\ndef compare_home_away_form(league, team, stat_names=None):\n home_stats = league.home_team_stats[team]\n away_stats = league.away_team_stats[team]\n if stat_names is None:\n stat_names = interesting_stats\n headings = ['Stat', 'Home', 'Away']\n rows = [[s, home_stats.get_stat_from_label(s),\n away_stats.get_stat_from_label(s)]\n for s in stat_names]\n per_game_stats = ['points', 'goals_for', 'goals_against']\n per_game_rows = [[s + \"-avg\", home_stats.average_stat(s),\n away_stats.average_stat(s)]\n for s in per_game_stats]\n display_table(headings, per_game_rows + rows)\n\n\ndef last_modified_date(filepath):\n modification_timestamp = os.path.getmtime(filepath)\n modification_date = datetime.date.fromtimestamp(modification_timestamp)\n return modification_date\n\n\ndef needs_refreshing(filepath):\n \"\"\"Basically we assume that if the file in question is for a season\n before the current one, then the data has not been updated and we do\n not need to refresh it. If it is from the current season, then we\n check whether we have downloaded the file previously today and if\n not we re-download it. Note, that this assumes the file does exist.\n \"\"\"\n today = datetime.date.today()\n year = today.year - 2000 # Obviously does not work prior to 2000\n if today.month <= 6:\n current_season = str(year - 1) + str(year)\n else:\n current_season = str(year) + str(year + 1)\n return (current_season in filepath and\n last_modified_date(filepath) != today)\n\n\ndef download_if_stale(filepath, fileurl):\n \"\"\"Given a file to download we check if there exists a file in\n the filesystem that was downloaded today, if so we do not download\n it again, otherwise we download it afresh.\n \"\"\"\n if not os.path.exists(filepath) or needs_refreshing(filepath):\n try:\n urllib.request.urlretrieve(fileurl, filepath)\n except urllib.error.HTTPError:\n print('The {0} is not reachable'.format(fileurl))\n\n\n# We sometimes call this from within the 'blog/posts' directory and\n# sometimes from the parent directory.\ndata_dir_base = 'data/' if os.path.isdir('data/') else '../../data/'\n\n\nclass League(object):\n\n def __init__(self, short_title, fixtures_directory, year, title=None):\n self.title = title if title is not None else fixtures_directory\n data_dir_url = 'http://www.football-data.co.uk/mmz4281/' + year\n data_file_basename = short_title + \".csv\"\n self.data_url = data_dir_url + '/' + data_file_basename\n self.data_dir = data_dir_base + year\n self.data_file = self.data_dir + '/' + data_file_basename\n fixtures_base_url = \"http://www.bbc.co.uk/sport/football/\"\n self.fixtures_url = fixtures_base_url + \\\n fixtures_directory + \"/fixtures\"\n self.fixtures_file = \"{0}/{1}-fixtures.html\".format(self.data_dir,\n short_title)\n self._retrieve_data()\n self._retrieve_fixtures()\n self._parse_league_data()\n self._calculate_statistics()\n\n def _retrieve_data(self):\n if not os.path.isdir(self.data_dir):\n os.makedirs(self.data_dir)\n download_if_stale(self.data_file, self.data_url)\n\n def _retrieve_fixtures(self):\n download_if_stale(self.fixtures_file, self.fixtures_url)\n\n def display_title(self):\n display(HTML(\"

    \" + self.title + \"

    \"))\n\n def _parse_league_data(self):\n with open(self.data_file, newline='') as csvfile:\n cvsreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n self.field_names = next(cvsreader)\n self.matches = []\n for row in cvsreader:\n try:\n match = create_match(self.field_names, row)\n self.matches.append(match)\n except ValueError:\n # Slightly dodgy in that we assume the problem is that\n # 'football-data' have input the stub of a game without\n # actually filling in the data yet, as it does.\n pass\n # We assume you're in the league if you play at least one game, that is\n # you're the home or the away side at least once.\n home_teams = {m.HomeTeam for m in self.matches}\n away_teams = {m.AwayTeam for m in self.matches}\n self.teams = list(home_teams.union(away_teams))\n\n def get_game(self, home, away, date):\n games = [m for m in self.matches\n if (is_home_game(home, m) and\n is_away_game(away, m) and\n m.Date == date)]\n if len(games) == 1:\n return games[0]\n else:\n return None\n\n def get_stats(self, filter_fun):\n def get_team_stats(team):\n games = [game for game in self.matches if filter_fun(team, game)]\n return TeamStats(team, games)\n return {team: get_team_stats(team) for team in self.teams}\n\n def _calculate_statistics(self):\n self.team_stats = self.get_stats(involved_in_game)\n self.home_team_stats = self.get_stats(is_home_game)\n self.away_team_stats = self.get_stats(is_away_game)\n self._calculate_league_shot_stats()\n\n def _calculate_league_shot_stats(self):\n # Now shot and shots on target per goal.\n def sum_attribute(attr):\n return float(sum(getattr(m, attr) for m in self.matches))\n\n self.home_goals = sum_attribute('FTHG')\n self.away_goals = sum_attribute('FTAG')\n self.all_goals = self.home_goals + self.away_goals\n\n self.home_shots = sum_attribute('HS')\n self.away_shots = sum_attribute('AS')\n self.all_shots = self.home_shots + self.away_shots\n\n self.home_sot = sum_attribute('HST')\n self.away_sot = sum_attribute('AST')\n self.all_sot = self.home_sot + self.away_sot\n\n self.shots_per_goal = self.all_shots / self.all_goals\n self.sot_per_goal = self.all_sot / self.all_goals\n\n self.home_spg = self.home_shots / self.home_goals\n self.home_sotpg = self.home_sot / self.home_goals\n\n self.away_spg = self.away_shots / self.away_goals\n self.away_sotpg = self.away_sot / self.away_goals\n\n def compare_game_against_opponents(self, home, away, date):\n \"\"\" Does not compare against the league average game, but\n compares against the two team's average game. So if the match\n is between Leicester and Arsenal it compares how well Leicester\n have done against Arsenal compared to the average team against\n Arsenal, and vice-versa.\n \"\"\"\n match = get_match(self, home, away, date)\n home_stats = self.team_stats[match.HomeTeam]\n away_stats = self.team_stats[match.AwayTeam]\n display_table(['Measure',\n 'Average Per Game for',\n 'This game for',\n 'Average Per Game against opponents'],\n [['{0} Shots'.format(home),\n home_stats.average_stat('shots_for'),\n shots_for_in_game(home, match),\n away_stats.average_stat('shots_against')],\n ['{0} Shots'.format(away),\n away_stats.average_stat('shots_for'),\n shots_for_in_game(away, match),\n home_stats.average_stat('shots_against')],\n\n ['{0} SOT'.format(home),\n home_stats.average_stat('sot_for'),\n sot_for_in_game(home, match),\n away_stats.average_stat('sot_against')],\n ['{0} SOT'.format(away),\n away_stats.average_stat('sot_for'),\n sot_for_in_game(away, match),\n home_stats.average_stat('sot_against')]])\n\n def compare_average_team_games(self, home, away, date):\n match = get_match(self, home, away, date)\n home_stats = self.team_stats[match.HomeTeam]\n home_home_stats = self.home_team_stats[match.HomeTeam]\n away_stats = self.team_stats[match.AwayTeam]\n away_away_stats = self.away_team_stats[match.AwayTeam]\n headers = ['Game Type', 'Shots For', 'Shots Against',\n 'SOT For', 'SOT Against']\n\n def make_row(title, stats):\n return [title,\n stats.average_stat('shots_for'),\n stats.average_stat('shots_against'),\n stats.average_stat('sot_for'),\n stats.average_stat('sot_against')]\n rows = [['This {0} game'.format(home),\n shots_for_in_game(home, match),\n shots_against_in_game(home, match),\n sot_for_in_game(home, match),\n sot_against_in_game(home, match)],\n make_row('Avg {0} game'.format(home), home_stats),\n make_row('Avg {0} game'.format(away), away_stats),\n make_row('Avg {0} home game'.format(home),\n home_home_stats),\n make_row('Avg {0} away game'.format(away),\n away_away_stats)]\n display_table(headers, rows)\n\n\nclass Year(object):\n\n def __init__(self, year):\n self.year_name = year\n self.epl_league = League(\"E0\", \"premier-league\", year)\n self.ech_league = League(\"E1\", \"championship\", year)\n self.elo_league = League(\"E2\", \"league-one\", year)\n self.elt_league = League(\"E3\", \"league-two\", year)\n self.spl_league = League(\"SC0\", \"scottish-premiership\", year)\n # No shots data for the scottish championship.\n self.all_leagues = [self.epl_league, self.ech_league,\n self.elo_league, self.elt_league,\n self.spl_league]\n\n def get_all_matches(self, leagues=None, filter_fun=None):\n if leagues is None:\n leagues = self.all_leagues\n else:\n leagues = [getattr(self, league) for league in leagues]\n match_lists = (league.matches for league in leagues)\n matches_iter = itertools.chain.from_iterable(match_lists)\n if filter_fun is None:\n return matches_iter\n else:\n return filter(filter_fun, matches_iter)\n\n\nyear_201011 = Year('1011')\nyear_201112 = Year('1112')\nyear_201213 = Year('1213')\nyear_201314 = Year('1314')\nyear_201415 = Year('1415')\nyear_201516 = Year('1516')\nall_years = [year_201011, year_201112, year_201213,\n year_201314, year_201415, year_201516]\nall_leagues = list(itertools.chain.from_iterable(\n y.all_leagues for y in all_years))\nall_epl_leagues = [year.epl_league for year in all_years]\n\n\ncurrent_year = year_201516\nepl = current_year.epl_league\nchampionship = current_year.ech_league\nleague_one = current_year.elo_league\nleague_two = current_year.elt_league\nspl = current_year.spl_league\n\n\ndef get_match(league, home, away, date):\n def filter_fun(match):\n return (match.HomeTeam == home and\n match.AwayTeam == away and match.Date == date)\n return next(m for m in league.matches if filter_fun(m))\n\n\ndef get_matches_between(leagues, home, away):\n def filter_fun(match):\n return (match.HomeTeam in [home, away] and\n match.AwayTeam in [home, away])\n match_lists = (league.matches for league in leagues)\n matches_iter = itertools.chain.from_iterable(match_lists)\n return filter(filter_fun, matches_iter)\n\n\ndef get_all_matches(years=None, leagues=None, filter_fun=None):\n if years is None:\n years = all_years\n match_lists = (year.get_all_matches(leagues=leagues,\n filter_fun=filter_fun)\n for year in years)\n return itertools.chain.from_iterable(match_lists)\n\n\ndef count_matches(filter_fun, matches):\n return len([m for m in matches if filter_fun(m)])\n\n\ndef get_fraction_of_matches(filter_fun, matches=None):\n if matches is None:\n matches = get_all_matches()\n num_matches = 0\n num_filtered = 0\n for m in matches:\n num_matches += 1\n if filter_fun(m):\n num_filtered += 1\n return (num_filtered, num_matches)\n\n\ndef match_to_html(match):\n template = \"\"\"\n \n \n \n \n \n \n {8}\n
    HomeAway
    Team{0}{1}
    Goals{2}{3}
    Shots{4}{5}
    SOT{6}{7}
    \n \"\"\"\n if hasattr(match, 'HHW') and hasattr(match, 'AHW'):\n woodwork_tmpl = \"Woodwork{0}{1}\"\n woodwork = woodwork_tmpl.format(match.HHW, match.AHW)\n else:\n woodwork = \"\"\n html = template.format(match.HomeTeam, match.AwayTeam,\n match.FTHG, match.FTAG,\n match.HS, match.AS,\n match.HST, match.AST,\n woodwork)\n return html\n\n\ndef create_inline_block(html):\n return '
    {0}
    '.format(html)\n\n\ndef display_pairs(pairs, inline_block=True):\n row_template = \"{0}{1}\"\n rows = [row_template.format(k, e) for k, e in pairs]\n html_rows = \"\\n\".join(rows)\n html = \"\\n\".join([\"\", html_rows, \"
    \"])\n if inline_block:\n html = create_inline_block(html)\n display(HTML(html))\n\n\ndef display_dictionary(dictionary):\n pairs = sorted(dictionary.items(), key=lambda p: p[1], reverse=True)\n display_pairs(pairs)\n\n\ndef html_table(header_data, row_data):\n \"\"\"Create an html table given the headers and the row data.\"\"\"\n def make_header_cell(s):\n return '{}'.format(s)\n\n def make_cell(s):\n return '{}'.format(s)\n\n def make_row(s):\n return '{}'.format(s)\n headers = \" \".join([make_header_cell(h) for h in header_data])\n header_row = make_row(headers)\n rows = [make_row(\" \".join([make_cell(c) for c in row]))\n for row in row_data]\n rows = \"\\n\".join(rows)\n html = '' + header_row + rows + '
    '\n return html\n\n\ndef display_table(header_data, row_data):\n html = html_table(header_data, row_data)\n display(HTML(html))\n\n\ndef date_from_string(date_string):\n date_fields = date_string.split('/')\n day, month, year = [int(f) for f in date_fields]\n # We allow you to specify the year as a two-digit number, we assume\n # that such a number which is greater than 50 refers to the 20th\n # century and one that is less than 50 refers to the 21st century,\n # it seems unlikely I will still be using this script in 2050. So,\n # 01/02/16 is the first of February 2016\n # 01/02/95 is the first of Feburary 1995\n if year < 50:\n year += 2000\n elif year < 100:\n year += 1900\n return datetime.date(year, month, day)\n\n\ndef html_blocks(blocks):\n inline_blocks = [create_inline_block(b) for b in blocks]\n html = \"\\n\".join(inline_blocks)\n return html\n\n\ndef display_given_matches(matches):\n \"\"\"Display a given set of matches.\"\"\"\n html_matches = [match_to_html(m) for m in matches]\n html = html_blocks(html_matches)\n display(HTML(html))\n\n\ndef date_in_range(start_date, datestring, end_date):\n date = date_from_string(datestring)\n return start_date <= date and date <= end_date\n\n\ndef get_matches(league, starting_date, ending_date,\n home_team=None, away_team=None, team_involved=None):\n start_date = date_from_string(starting_date)\n end_date = date_from_string(ending_date)\n\n def filter_fun(m):\n if home_team is not None and m.HomeTeam != home_team:\n return False\n if away_team is not None and m.AwayTeam != away_team:\n return False\n if (team_involved is not None and\n not involved_in_game(team_involved, m)):\n return False\n return date_in_range(start_date, m.Date, end_date)\n matches = [m for m in league.matches if filter_fun(m)]\n return matches\n\n\ndef display_match(league, home_team, away_team, date):\n\n match = get_match(league, home_team, away_team, date)\n match_html = match_to_html(match)\n title = '{0} {1} - {2} {3}'.format(match.HomeTeam, match.FTHG,\n match.FTAG, match.AwayTeam)\n html = '

    {0}

    {1}'.format(title, match_html)\n display(HTML(html))\n\n\ndef display_matches(league, starting_date, ending_date):\n \"\"\"Display all matches within a league between the given dates.\"\"\"\n matches = get_matches(league, starting_date, ending_date)\n display_given_matches(matches)\n\n\ndef display_shots_per_goal_info(years=None):\n if years is None:\n years = all_years\n\n def get_data_row(league_short_name):\n if league_short_name == 'Overall':\n leagues = [l for y in years for l in y.all_leagues]\n else:\n league_name = league_short_name + '_league'\n leagues = [getattr(y, league_name) for y in years]\n\n def sum_attribute(attribute):\n return sum(getattr(l, attribute) for l in leagues)\n\n home_goals = sum_attribute('home_goals')\n away_goals = sum_attribute('away_goals')\n all_goals = sum_attribute('all_goals')\n\n home_shots = sum_attribute('home_shots')\n home_spg = home_shots / home_goals\n away_shots = sum_attribute('away_shots')\n away_spg = away_shots / away_goals\n all_shots = sum_attribute('all_shots')\n shots_per_goal = all_shots / all_goals\n\n home_sot = sum_attribute('home_sot')\n home_sotpg = home_sot / home_goals\n away_sot = sum_attribute('away_sot')\n away_sotpg = away_sot / away_goals\n all_sot = sum_attribute('all_sot')\n sot_per_goal = all_sot / all_goals\n\n return [league_short_name, shots_per_goal, sot_per_goal,\n home_spg, home_sotpg, away_spg, away_sotpg]\n\n leagues = ['epl', 'ech', 'elo', 'elt', 'spl', 'Overall']\n data_rows = [get_data_row(league) for league in leagues]\n header_row = ['league', 'shots per goal', 'sot per goal',\n 'home spg', 'home sotpg', 'away spg', 'away sotpg']\n display_table(header_row, data_rows)\n\n\ndef collect_after_game_dicts(league, start_date, end_date):\n \"\"\"Returns a dictionary of dictionaries. The outer dictionary has\n intergers as keys. The integer represents the number of games. The\n value associated with a number of games is a dictionary which maps\n the teams of the league to team stats. So if you want to find out a\n team's statistics after x number of games you call this function and\n then:\n dictionaries = collect_after_game_dicts(...)\n team_stats = dictionaries[x][team]\n 'team_stats' will now old a TeamStats object represents the\n statistics for 'team' after 'x' games.\n This method allows you to do things such as plot how a team's stat\n has changed over the course of a season.\n \"\"\"\n after_game_no_dicts = collections.defaultdict(dict)\n\n def add_team_stats(team, after_game_no, stat):\n stat_dict = after_game_no_dicts[after_game_no]\n stat_dict[team] = stat\n\n for team in league.teams:\n matches = get_matches(league, start_date, end_date, team_involved=team)\n for x in range(1, len(matches) + 1):\n stats = TeamStats(team, matches[:x])\n add_team_stats(team, x, stats)\n\n for x, dictionary in after_game_no_dicts.items():\n if len(dictionary) != len(league.teams):\n del after_game_no_dicts[x]\n return after_game_no_dicts\n\n\ndef get_stats_rankings(stats_dictionary, stat_name):\n pairs = stats_dictionary.items()\n key_fun = lambda p: getattr(p[1], stat_name)\n sorted_pairs = sorted(pairs, key=key_fun, reverse=True)\n return sorted_pairs\n\n\ndef rank_sorted_pairs(sorted_pairs):\n def get_rows(sorted_pairs):\n latest_value = None\n latest_position = None\n for position, (key, value) in enumerate(sorted_pairs, start=1):\n if value == latest_value:\n position = latest_position\n position_string = '-'\n else:\n latest_position = position\n position_string = str(position)\n latest_value = value\n yield [position_string, key, value]\n return get_rows(sorted_pairs)\n\n# TODO: There is definitely some overlap between 'display_ranked_table'\n# and 'display_stats_table', but note that display_stats_tables allows\n# for more columns than the one that is sorted on.\n\n\ndef create_ranked_table(headers, pairs, reverse=None):\n if reverse is None:\n reverse = True\n sorted_pairs = sorted(pairs, key=lambda r: r[1], reverse=reverse)\n rows = rank_sorted_pairs(sorted_pairs)\n return html_table(['Position'] + headers, rows)\n\n\ndef display_ranked_table(headers, pairs, reverse=None):\n display(HTML(create_ranked_table(headers, pairs, reverse=reverse)))\n\n\ndef display_ranked_tables(tables_data):\n ranked_tables = [create_ranked_table(h, p, r) for h, p, r in tables_data]\n html = html_blocks(ranked_tables)\n display(HTML(html))\n\n\ndef rank_teams_single_matches(matches, stat_suffix, stat_header_name=None):\n if stat_header_name is None:\n stat_header_name = stat_suffix\n\n def get_pairs():\n for match in matches:\n yield (match.HomeTeam, getattr(match, 'home_' + stat_suffix))\n yield (match.AwayTeam, getattr(match, 'away_' + stat_suffix))\n display_ranked_table(['Team', stat_header_name], get_pairs())\n\n\ndef display_stats_table(after_game_no_dicts, stat_names):\n \"\"\"To get the argument you can simply call the above\n 'collect_after_game_dicts', this means that we will give the table\n after a set number of games, which will mean all teams will have\n played the same number. This allows us to give a meaningful table\n for something like 'goals', or 'shots' which are cumulative.\n \"\"\"\n latest_dict = after_game_no_dicts[len(after_game_no_dicts)]\n first_stat_name = stat_names[0]\n sorted_pairs = get_stats_rankings(latest_dict, first_stat_name)\n\n def get_rows(sorted_pairs):\n latest_stat = None\n latest_position = None\n for position, (team, stats) in enumerate(sorted_pairs, start=1):\n this_stat = getattr(stats, first_stat_name)\n if latest_stat == this_stat:\n position = latest_position\n else:\n latest_position = position\n stat_cells = [getattr(stats, name) for name in stat_names]\n row = [position, team] + stat_cells\n yield_row\n rows = get_rows(sorted_pairs)\n display_table(['Position', 'Team'] + stat_names, rows)\n\n\ndef display_statistic_rankings(league, stat_name):\n def get_rows(stat_rankings, stat_name):\n latest_stat = None\n for position, (team, stats) in enumerate(stat_rankings, start=1):\n stat = getattr(stats, stat_name)\n position_string = '-' if stat == latest_stat else str(position)\n latest_stat = stat\n row = [position_string, team, stat]\n yield row\n\n stat_rankings = get_stats_rankings(league.team_stats, stat_name)\n rows = get_rows(stat_rankings, stat_name)\n display_table(['Position', 'Team', stat_name], rows)\n\n\ndef get_stat_pairs(stats_list, stat_name):\n return [(stats.teamname, getattr(stats, stat_name))\n for stats in stats_list]\n\n\ndef get_stat_table_data(team_stats, stat_header, stat_name, reverse):\n pairs = get_stat_pairs(team_stats, stat_name)\n return (['Team', stat_header], pairs, reverse)\n\ndef header_stat_tables(league, stats=None):\n if stats is None:\n stats = league.team_stats.values()\n tables_data = [get_stat_table_data(stats, 'Points', 'points', True),\n get_stat_table_data(stats, 'Team Rating', 'team_rating', True),\n get_stat_table_data(stats, 'PDO', 'pdo', True),\n ]\n ranked_tables = [create_ranked_table(h, p, r) for h, p, r in tables_data]\n return html_blocks(ranked_tables)\n\ndef last_x_game_stats(league, x):\n return [TeamStats(ts.teamname, ts.games[-x:])\n for ts in league.team_stats.values()]\n\ndef html_games(games):\n return html_blocks(match_to_html(m) for m in games)\n\n\ndef blog_weekly_header(league, start_date, end_date):\n weekend_matches = get_matches(league, start_date, end_date)\n html_matches = html_blocks([match_to_html(m) for m in weekend_matches])\n html_tables = header_stat_tables(league)\n return \"
    {}
    {}
    \".format(html_matches, html_tables)\n\n\ndef display_current_runs(league):\n stats = league.team_stats.values()\n tables_data = [get_stat_table_data(stats, 'Winning Run', 'current_winning_run', True),\n get_stat_table_data(stats, 'Unbeaten Run', 'current_unbeaten_run', True),\n get_stat_table_data(stats, 'Winless', 'current_winless_run', True),\n get_stat_table_data(stats, 'Losing', 'current_losing_run', True),\n ]\n display_ranked_tables(tables_data)\n\n\nteam_line_colors = {'Sunderland': ('DarkGreen', '--'),\n 'Crystal Palace': ('Crimson', '-'),\n 'Southampton': ('Red', '--'),\n 'West Ham': ('MediumTurquoise', '--'),\n 'Liverpool': ('Red', '-'),\n 'West Brom': ('Black', '-'),\n 'Man City': ('LightSkyBlue', '-'),\n 'Chelsea': ('Blue', '-'),\n 'Everton': ('Blue', '--'),\n 'Leicester': ('Blue', ':'),\n 'Swansea': ('Black', '--'),\n 'Watford': ('Gold', '-'),\n 'Man United': ('Red', ':'),\n 'Aston Villa': ('MediumTurquoise', '-'),\n 'Newcastle': ('Black', ':'),\n 'Norwich': ('Gold', '--'),\n 'Tottenham': ('DarkBlue', '-.'),\n 'Arsenal': ('Red', '-.'),\n 'Stoke': ('DarkRed', '-'),\n 'Bournemouth': ('DarkRed', ':')}\n\n\ndef plot_changing_stats(league, after_game_no_dicts,\n stat_name, teams=None, rankings=False,\n y_axis_lims=None):\n if teams is None:\n teams = league.teams\n plot.xlabel('Game Number')\n\n if rankings:\n plot.title('Rank in {0} after game #'.format(stat_name))\n plot.ylabel('Rank in {0}'.format(stat_name))\n\n def get_team_rank(ranking_table, team_name):\n latest_position = 1\n latest_stat = None\n for position, (team, stats) in enumerate(ranking_table, start=1):\n stat = getattr(stats, stat_name)\n if latest_stat != stat:\n latest_position = position\n latest_stat = stat\n if team == team_name:\n return latest_position\n else:\n raise KeyError\n\n ranking_tables = [get_stats_rankings(d, stat_name)\n for d in after_game_no_dicts.values()]\n get_ys = lambda team: [get_team_rank(table, team)\n for table in ranking_tables]\n plot.gca().set_ylim(len(league.teams) + 1, 1)\n else:\n plot.title('{0} after game #'.format(stat_name))\n plot.ylabel(stat_name)\n get_ys = lambda team: [getattr(after_game_no_dicts[x][team],\n stat_name)\n for x in xs]\n\n if y_axis_lims is not None:\n plot.gca().set_ylim(*y_axis_lims)\n xs = range(1, len(after_game_no_dicts) + 1)\n for team in teams:\n ys = get_ys(team)\n color, line_style = team_line_colors.get(team, (None, None))\n plot.plot(xs, ys, label=team, color=color, linestyle=line_style)\n plot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plot.xticks(xs)\n plot.show()\n\n\ndef scatter_stats(league, title='', xlabel='', ylabel='', teams=None,\n get_x_stat=None, get_y_stat=None, annotate_teams=None):\n \"\"\"By default all teams are annotated, to annotate none pass in '[]' as the\n list of teams to annotate.\n \"\"\"\n if teams is None:\n teams = league.teams\n if annotate_teams is None:\n annotate_teams = league.teams\n\n plot.title(title)\n plot.xlabel(xlabel)\n plot.ylabel(ylabel)\n xs = []\n ys = []\n for team in teams:\n x_stat = get_x_stat(league, team)\n xs.append(x_stat)\n y_stat = get_y_stat(league, team)\n ys.append(y_stat)\n plot.scatter(x_stat, y_stat)\n\n if team in annotate_teams:\n plot.annotate(team, xy=(x_stat, y_stat),\n xytext=(40, -20),\n textcoords='offset points',\n ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5',\n fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->',\n connectionstyle='arc3,rad=0'))\n\n coefficients = numpy.polyfit(xs, ys, 1)\n polynomial = numpy.poly1d(coefficients)\n ys = polynomial(xs)\n plot.plot(xs, ys)\n plot.show()\n plot.close()\n display(HTML('line of best fit: ' + str(polynomial)))\n\n\ndef graph_leagues(x_label, y_label, leagues=None, get_x_stat=None,\n get_y_stat=None, annotate_teams=None):\n \"\"\"Produce a scatter plot for each team in each of the provided\n leagues. Will error if `leagues` is not specified.\n \"\"\"\n def get_stat_from_label(label):\n stat_name = label.replace(' ', '_').lower()\n return lambda league, team: getattr(league.team_stats[team], stat_name)\n if get_x_stat is None:\n get_x_stat = get_stat_from_label(x_label)\n if get_y_stat is None:\n get_y_stat = get_stat_from_label(y_label)\n for league in leagues:\n title = '{0}: {1}/{2}'.format(league.title, x_label, y_label)\n scatter_stats(league, title=title,\n xlabel=x_label, ylabel=y_label,\n get_x_stat=get_x_stat, get_y_stat=get_y_stat,\n annotate_teams=annotate_teams\n )\n\n\ndef scatter_match_stats(matches, xlabel='', ylabel='', title='',\n get_x_stat=None, get_y_stat=None,\n annotate=True):\n plot.title(title)\n plot.xlabel(xlabel)\n plot.ylabel(ylabel)\n xs = []\n ys = []\n for match in matches:\n x_stat = get_x_stat(match)\n xs.append(x_stat)\n y_stat = get_y_stat(match)\n ys.append(y_stat)\n plot.scatter(x_stat, y_stat)\n\n if annotate:\n annotation = match.HomeTeam + ' v ' + match.AwayTeam\n plot.annotate(annotation, xy=(x_stat, y_stat),\n xytext=(40, -20),\n textcoords='offset points',\n ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5',\n fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->',\n connectionstyle='arc3,rad=0'))\n\n coefficients = numpy.polyfit(xs, ys, 1)\n polynomial = numpy.poly1d(coefficients)\n ys = polynomial(xs)\n plot.plot(xs, ys)\n plot.show()\n plot.close()\n display(HTML('line of best fit: ' + str(polynomial)))\n\n\ndef get_adjusted_stat(matches, team, stat_home_name, stat_away_name, reverse_stat_dict):\n matches = [m for m in matches if involved_in_game(team, m)]\n\n def get_adjusted_stat(match):\n if match.HomeTeam == team:\n opponent = match.AwayTeam\n team_stat = getattr(match, stat_home_name)\n else:\n opponent = match.HomeTeam\n team_stat = getattr(match, stat_away_name)\n opponent_avg_stat = reverse_stat_dict[opponent]\n return team_stat - opponent_avg_stat\n sum_diff = sum(get_adjusted_stat(m) for m in matches)\n diff_per_game = sum_diff / float(len(matches))\n return diff_per_game\n\n\ndef get_adjusted_stat_dictionary(league, stat_home_name,\n stat_away_name, reverse_stat_name):\n def get_reverse_stat(team):\n stats = league.team_stats[team]\n return getattr(stats, reverse_stat_name) / float(stats.num_games)\n reverse_stat_dict = {t: get_reverse_stat(t) for t in league.teams}\n adjusted_stats = {t: get_adjusted_stat(league.matches, t,\n stat_home_name,\n stat_away_name,\n reverse_stat_dict)\n for t in league.teams}\n return adjusted_stats\n\n\nclass BaseAnalyser(object):\n\n def __init__(self, ignore_matches=None):\n self.leagues = [year.epl_league for year in all_years]\n self.reset_per_league_stats()\n self.ignore_matches = ignore_matches\n\n def valid_match(self, home_stats, away_stats):\n \"\"\"Determines whether a match should be evaluated\"\"\"\n # If either of the statistics is None then you cannot evaluate the\n # match\n if home_stats is None or away_stats is None:\n return False\n # Otherwise if we have not specified a number of matches to ignore then\n # this match can be evaluated.\n if self.ignore_matches is None:\n return True\n # If we have specified a number of matches to ignore, check both teams\n # have played at least that many:\n if (len(home_stats.games) < self.ignore_matches or\n len(away_stats.games) < self.ignore_matches):\n return False\n return True\n\n def analyse_matches_with_contemporary_stats(self, league):\n team_stats = dict()\n\n def update_stats(match, team, stats):\n stats = team_stats.get(team, None)\n if stats is None:\n games = [match]\n else:\n games = stats.games + [match]\n team_stats[team] = TeamStats(team, games)\n for match in league.matches:\n home_stats = team_stats.get(match.HomeTeam, None)\n away_stats = team_stats.get(match.AwayTeam, None)\n if self.valid_match(home_stats, away_stats):\n self.evaluate_match(match, home_stats, away_stats)\n\n update_stats(match, match.HomeTeam, home_stats)\n update_stats(match, match.AwayTeam, away_stats)\n\n def reset_per_league_stats(self):\n pass\n\n def display_per_league_results(self):\n pass\n\n def analyse_leagues(self):\n for league in self.leagues:\n self.reset_per_league_stats()\n self.analyse_matches_with_contemporary_stats(league)\n self.display_per_league_results()\n\n\nclass HistoricalBetAnalyser(BaseAnalyser):\n\n def __init__(self, get_bet, commission=0.05, ignore_matches=None):\n self.leagues = [year.epl_league for year in all_years]\n self.get_bet = get_bet\n self.commission = commission\n self.total_profit_loss = 0.0\n self.reset_per_league_stats()\n self.ignore_matches = ignore_matches\n\n def reset_per_league_stats(self):\n self.counter = {'H': 0, 'A': 0, 'D': 0}\n self.pl_counter = {'H': 0.0, 'A': 0.0, 'D': 0.0}\n self.wins_counter = {'H': 0, 'A': 0, 'D': 0}\n self.league_profit_loss = 0.0\n\n def match_profit_loss(self, match, bet):\n if bet == match.FTR:\n return (match.winning_odds - 1.0) * (1.0 - self.commission)\n else:\n return -1.0\n\n def evaluate_match(self, match, home_stats, away_stats):\n bet = self.get_bet(match, home_stats, away_stats)\n if bet is None:\n return\n self.counter[bet] += 1\n\n profit_loss = self.match_profit_loss(match, bet)\n self.pl_counter[bet] += profit_loss\n if profit_loss > 0.0:\n self.wins_counter[bet] += 1\n self.league_profit_loss += profit_loss\n self.total_profit_loss += profit_loss\n\n def display_per_league_results(self):\n print(\"League profit/loss: {0}\".format(self.league_profit_loss))\n for result in ['H', 'D', 'A']:\n msg_format = \"{0} bets wins {1} out of {2} for profit/loss: {3}\"\n print(msg_format.format(result, self.wins_counter[result],\n self.counter[result],\n self.pl_counter[result]))\n\n def display_results(self):\n print('Total profit loss for all leagues: {0}'.format(\n self.total_profit_loss))\n\n def historical_betting(self, get_bet):\n self.analyse_leagues()\n self.display_results()\n\n\ndef result_count_factory():\n return {'H': 0, 'D': 0, 'A': 0}\n\n\nclass HistoricalStatAnalyser(BaseAnalyser):\n\n def __init__(self, stat_name, leagues=None):\n self.leagues = all_leagues if leagues is None else leagues\n self.reset_per_league_stats()\n self.buckets = collections.defaultdict(result_count_factory)\n # [-1.0, -0.9 ... 0.0, 0.1, ... 1.0]\n self.bucket_keys = [x / 10.0 for x in range(-10, 10)]\n self.ignore_matches = 4\n self.stat_name = stat_name\n\n def reset_per_league_stats(self):\n pass\n\n def get_bucket(self, diff):\n for bucket in self.bucket_keys:\n if diff < bucket:\n return bucket\n else:\n assert False\n\n def get_stat_diff(self, home_stats, away_stats):\n return (getattr(home_stats, self.stat_name) -\n getattr(away_stats, self.stat_name))\n\n def evaluate_match(self, match, home_stats, away_stats):\n stat_diff = self.get_stat_diff(home_stats, away_stats)\n bucket = self.get_bucket(stat_diff)\n self.buckets[bucket][match.FTR] += 1\n\n def display_per_league_results(self):\n pass\n\n def display_results(self):\n for bucket in self.bucket_keys:\n result_counts = self.buckets[bucket]\n print('Bucket {0} to {1}'.format(bucket - 0.1, bucket))\n total_matches = sum(r for r in result_counts.values())\n if total_matches == 0:\n (print('no such matches'))\n continue\n proportions = self.get_proportions(result_counts)\n for result in ['H', 'D', 'A']:\n number = result_counts[result]\n proportion = proportions[result]\n print(\" '{0}': {1}, {2}\".format(\n result, number, proportion))\n\n def get_proportions(self, result_counts):\n total_matches = sum(r for r in result_counts.values())\n\n return {result: float(result_counts[result]) / float(total_matches)\n for result in result_counts.keys()}\n\n def get_reasonable_result_counts(self, stat_diff):\n \"\"\" Just gets the bucket related to the stat_diff, however,\n if the bucket has fewer than 100 results, we get the closest bucket\n that has 100 or more results \"\"\"\n result_counts = self.buckets[self.get_bucket(stat_diff)]\n num_results = sum(result_counts.values())\n if num_results < 100:\n if stat_diff < 0:\n new_stat_diff = stat_diff + 0.1\n else:\n new_stat_diff = stat_diff - 0.1\n return self.get_reasonable_result_counts(new_stat_diff)\n return result_counts\n\n def get_implied_odds(self, home_stats, away_stats):\n stat_diff = self.get_stat_diff(home_stats, away_stats)\n result_counts = self.get_reasonable_result_counts(stat_diff)\n proportions = self.get_proportions(result_counts)\n implied_odds = {r: 1.0 / p for r, p in proportions.items()}\n return implied_odds\n\n def historically_bet(self):\n def get_bet(match, home_stats, away_stats):\n implied_odds = self.get_implied_odds(home_stats, away_stats)\n differences = [(r, getattr(match, 'BbAv' + r) - implied_odds[r])\n for r in ['H', 'A', 'D']]\n sorted_differences = sorted(\n differences, reverse=True, key=lambda p: p[1])\n bet, gain = sorted_differences[1]\n if gain > 0.0:\n return bet\n else:\n return None\n self.historical_betting(get_bet)\n\nfrom bs4 import BeautifulSoup\n# The teams on the left here, that is the keys of the dictionary are\n# team names from sources other than the data files. So in particular\n# from the fixture list, but also betfair etc. The idea is that we can\n# lookup a team name from any source in the data files by first using\n# this dictionary via 'alias_team'.\nteam_aliases = {'Dundee Utd': 'Dundee United',\n 'Inverness CT': 'Inverness C',\n 'Partick Thistle': 'Partick',\n 'Man Utd': 'Man United',\n 'Sheff Wed': 'Sheffield Weds',\n 'Nottm Forest': \"Nott'm Forest\",\n 'Sheff Utd': 'Sheffield United',\n 'MK Dons': 'Milton Keynes Dons',\n 'Fleetwood': 'Fleetwood Town',\n 'Peterborough': 'Peterboro',\n 'Crawley': 'Crawley Town',\n 'Newport': 'Newport County',\n 'Dag & Red': 'Dag and Red',\n 'Oxford Utd': 'Oxford',\n 'Wimbledon': 'AFC Wimbledon',\n 'Bristol Rovers': 'Bristol Rvs',\n 'Cambridge Utd': 'Cambridge',\n 'York City': 'York',\n 'Notts Co': 'Notts County',\n 'Accrington S': 'Accrington',\n 'C Palace': 'Crystal Palace',\n 'Ross Co': 'Ross County',\n }\n\n\ndef alias_team(team):\n return team_aliases.get(team, team)\n\n\ndef get_match_teams(match_details):\n home_team_span = match_details.find('span', class_='team-home teams')\n home_team = home_team_span.a.contents[0]\n away_team_span = match_details.find('span', class_='team-away teams')\n away_team = away_team_span.a.contents[0]\n return (alias_team(home_team), alias_team(away_team))\n\n\nmonth_strings = {'January': 1,\n 'February': 2,\n 'March': 3,\n 'April': 4,\n 'May': 5,\n 'June': 6,\n 'July': 7,\n 'August': 8,\n 'September': 9,\n 'October': 10,\n 'November': 11,\n 'December': 12}\n\n\ndef fixtures_date_on_or_before(datestring, date):\n # An example datestring 'Saturday 9th April 2016'\n fields = [f for f in datestring.split(' ') if f not in ['', '\\n']]\n day_string = fields[1]\n day = int(day_string[:len(day_string) - 2])\n month = month_strings[fields[2]]\n year = int(fields[3])\n return datetime.date(year, month, day) <= date\n\n\ndef get_fixtures(fixtures_page, end_date, team=None):\n with open(fixtures_page, encoding='utf-8') as fixtures_file:\n soup = BeautifulSoup(fixtures_file)\n dates = soup.find_all('h2', class_='table-header')\n fixtures = []\n for date in dates:\n if end_date is None or fixtures_date_on_or_before(date.string, end_date):\n table = date.next_sibling.next_sibling\n match_details_list = table.find_all('td', class_='match-details')\n matches = [get_match_teams(md) for md in match_details_list]\n fixtures.extend(matches)\n else:\n break\n return fixtures\n\ndef get_team_fixtures(league, team):\n fixtures = ((alias_team(h), alias_team(a))\n for (h,a) in get_fixtures(league.fixtures_file, None))\n fixtures = [(h, a) for (h,a) in fixtures if team in [h,a]]\n return fixtures\n\ndef get_fixture_string(fixture, team):\n home, away = fixture\n if team == home:\n return \"{} (H)\".format(away)\n else:\n assert team == away\n return \"{} (A)\".format(home)\n\ndef compare_fixtures(league, teams):\n def get_fixture_strings(team):\n return [get_fixture_string(f, team) for f in get_team_fixtures(league, team)]\n fixtures = [get_fixture_strings(team) for team in teams]\n headers = teams\n rows = zip(*fixtures)\n display_table(headers, rows)\n\n\ndef last_x_matches(league, team, x):\n matches = [m for m in league.matches if involved_in_game(team, m)]\n start_index = max(0, len(matches) - x)\n matches = matches[start_index:]\n for match in matches:\n output_template = ' {0}/{1}/{2}/{3} vs {4}/{5}/{6}/{7}'\n output = output_template.format(match.HomeTeam, match.FTHG,\n match.HS, match.HST,\n match.AwayTeam, match.FTAG,\n match.AS, match.AST)\n print(output)\n\n\ncount_dict = dict()\ncount_dict['H'] = 0\ncount_dict['A'] = 0\ncount_dict['D'] = 0\n\n\ndef analyse_fixtures(league, end_date, stat_analysers):\n fixtures = get_fixtures(league.fixtures_file, end_date)\n fixtures = [(alias_team(h), alias_team(a)) for h, a in fixtures]\n\n adjusted_shots_for_per_game = get_adjusted_stat_dictionary(\n league, 'HS', 'AS', 'shots_against')\n adjusted_shots_against_per_game = get_adjusted_stat_dictionary(\n league, 'AS', 'HS', 'shots_for')\n\n def get_adjusted_tsr(team):\n shots_for = adjusted_shots_for_per_game[team]\n shots_against = adjusted_shots_against_per_game[team]\n return shots_for - shots_against\n\n adjusted_sot_for_per_game = get_adjusted_stat_dictionary(\n league, 'HST', 'AST', 'sot_against')\n adjusted_sot_against_per_game = get_adjusted_stat_dictionary(\n league, 'AST', 'HST', 'sot_for')\n\n def get_adjusted_sotr(team):\n shots_for = adjusted_sot_for_per_game[team]\n shots_against = adjusted_sot_against_per_game[team]\n return shots_for - shots_against\n\n def get_avg_adjusted_stat(stats, stat_name):\n opponents = (match.opponent(stats.teamname) for match in stats.games)\n opponents_stats = [\n getattr(league.team_stats[opp], stat_name) for opp in opponents]\n avg_opp_stat = sum(opponents_stats) / len(opponents_stats)\n team_stat = getattr(stats, stat_name)\n adj_stat = team_stat + avg_opp_stat - 0.5\n return adj_stat\n\n for home_team, away_team in fixtures:\n def print_statline(attribute):\n home = getattr(home_stats, attribute)\n away = getattr(away_stats, attribute)\n print(' {0}: {1} vs {2}'.format(attribute, home, away))\n home_stats = league.team_stats[home_team]\n home_stats.adjsr = get_adjusted_tsr(home_team)\n home_stats.adjsotr = get_adjusted_sotr(home_team)\n away_stats = league.team_stats[away_team]\n away_stats.adjsr = get_adjusted_tsr(away_team)\n away_stats.adjsotr = get_adjusted_sotr(away_team)\n\n home_stats.avgadjtsr = get_avg_adjusted_stat(home_stats, 'tsr')\n home_stats.avgadjsotr = get_avg_adjusted_stat(home_stats, 'sotr')\n away_stats.avgadjtsr = get_avg_adjusted_stat(away_stats, 'tsr')\n away_stats.avgadjsotr = get_avg_adjusted_stat(away_stats, 'sotr')\n\n suggested_bet = 'D'\n adjsr_diff = home_stats.adjsr - away_stats.adjsr\n adjsotr_diff = home_stats.adjsotr - away_stats.adjsotr\n\n if adjsr_diff > 1.5 and adjsotr_diff > 0.68:\n suggested_bet = 'H'\n elif adjsr_diff < -2.5 and adjsotr_diff < -1.1:\n suggested_bet = 'A'\n\n home_bet_threshold = 2.4 - adjsotr_diff\n away_bet_threshold = 4.4 + adjsotr_diff\n\n avgadjtsr_diff = home_stats.avgadjtsr - away_stats.avgadjtsr\n avgadjsotr_diff = home_stats.avgadjsotr - away_stats.avgadjsotr\n home_sotr_odds = 1.0 / (max(0.2, 0.45 + avgadjsotr_diff))\n away_sotr_odds = 1.0 / (max(0.1, 0.25 - (0.8 * avgadjsotr_diff)))\n if avgadjsotr_diff > -0.2 and avgadjsotr_diff < 0.1:\n draw_probability = 0.27\n else:\n draw_probability = 0.2\n draw_sotr_odds = 1.0 / draw_probability\n\n # Team Rating stuff\n tr_analyser = stat_analysers['team_rating']\n tr_implied_odds = tr_analyser.get_implied_odds(home_stats, away_stats)\n\n tr_diff = home_stats.team_rating - away_stats.team_rating\n tr_home_win_proportion = (0.7799322440116623 * tr_diff) + 0.4433888763201811\n tr_home_win_odds = 1.0 / tr_home_win_proportion\n tr_away_win_proportion = (-0.6267394935044425 * tr_diff) + 0.3460323970831241\n tr_away_win_odds = 1.0 / tr_away_win_proportion\n tr_draw_proportion = 1 - (tr_home_win_proportion + tr_away_win_proportion)\n tr_draw_odds = 1.0 / tr_draw_proportion\n\n def get_2_degree_odds(tr_diff, x2, x1, x0):\n proportion = ((tr_diff ** 2) * x2) + (tr_diff * x1) + x0\n return 1.0 / proportion\n\n tr_home_win_pf_odds = get_2_degree_odds(tr_diff, 0.264179061438, 0.754584559712, 0.411753380201)\n tr_away_win_pf_odds = get_2_degree_odds(tr_diff, 0.263604481914, -0.650789277595, 0.314691118779)\n tr_draw_pf_odds = get_2_degree_odds(tr_diff, 0.527783543352, 0.103795282117, 0.273555501021)\n\n pf_total = (1.0 / tr_home_win_pf_odds) + (1.0/tr_away_win_pf_odds) + (1.0/tr_draw_pf_odds)\n\n # Do the same again, but for the last ten games:\n home_last_ten_stats = TeamStats(home_stats.teamname, home_stats.games[-10:])\n away_last_ten_stats = TeamStats(away_stats.teamname, away_stats.games[-10:])\n lt_tr_diff = home_last_ten_stats.team_rating - away_last_ten_stats.team_rating\n # Of course we should calculate these co-efficients for last ten game stats\n # rather than just use the coefficients above.\n lt_tr_home_win_pf_odds = get_2_degree_odds(lt_tr_diff, 0.264179061438, 0.754584559712, 0.411753380201)\n lt_tr_away_win_pf_odds = get_2_degree_odds(lt_tr_diff, 0.263604481914, -0.650789277595, 0.314691118779)\n lt_tr_draw_pf_odds = get_2_degree_odds(lt_tr_diff, 0.527783543352, 0.103795282117, 0.273555501021)\n lt_pf_total = (1.0 / lt_tr_home_win_pf_odds) + (1.0/lt_tr_away_win_pf_odds) + (1.0/lt_tr_draw_pf_odds)\n\n\n print('{0} vs {1}'.format(home_team, away_team))\n last_x_matches(league, home_team, 3)\n last_x_matches(league, away_team, 3)\n print_statline('points')\n print_statline('tsr')\n # print_statline('adjsr')\n print_statline('avgadjtsr')\n print_statline('sotr')\n # print_statline('adjsotr')\n print_statline('avgadjsotr')\n print_statline('pdo')\n print_statline('tsotr')\n print_statline('team_rating')\n # print(\" Adj TSR diff = {0}\".format(avgadjtsr_diff))\n # print(\" Adj Sotr diff = {0}\".format(avgadjsotr_diff))\n print(\" home_sotr_odds: {0}\".format(home_sotr_odds))\n print(\" away_sotr_odds: {0}\".format(away_sotr_odds))\n print(\" draw_sotr_odds: {0}\".format(draw_sotr_odds))\n print(\" old home_tr_odds: {0}\".format(tr_implied_odds['H']))\n print(\" old away_tr_odds: {0}\".format(tr_implied_odds['A']))\n print(\" old draw_tr_odds: {0}\".format(tr_implied_odds['D']))\n print(\" home_tr_odds: {0}\".format(tr_home_win_odds))\n print(\" away_tr_odds: {0}\".format(tr_away_win_odds))\n print(\" draw_tr_odds: {0}\".format(tr_draw_odds))\n print(\" home_tr_pf_odds: {0}\".format(tr_home_win_pf_odds))\n print(\" away_tr_pf_odds: {0}\".format(tr_away_win_pf_odds))\n print(\" draw_tr_pf_odds: {0}\".format(tr_draw_pf_odds))\n print(\" pf total proportion: {}\".format(pf_total))\n print(\" lt_home_tr_pf_odds: {0}\".format(lt_tr_home_win_pf_odds))\n print(\" lt_away_tr_pf_odds: {0}\".format(lt_tr_away_win_pf_odds))\n print(\" lt_draw_tr_pf_odds: {0}\".format(lt_tr_draw_pf_odds))\n print(\" lt_pf total proportion: {}\".format(lt_pf_total))\n\n\nif __name__ == '__main__':\n import sys\n try:\n date_string = sys.argv[1]\n date = date_from_string(date_string)\n except IndexError:\n date = datetime.date.today() + datetime.timedelta(days=3)\n\n tr_analyser = HistoricalStatAnalyser('team_rating')\n tr_analyser.analyse_leagues()\n\n stat_analysers = {'team_rating': tr_analyser}\n for league in reversed(current_year.all_leagues):\n if league.title != 'premier-league':\n continue\n print(league.title)\n try:\n analyse_fixtures(league, date, stat_analysers)\n except urllib.error.HTTPError:\n print('Fixtures not reachable')\n","repo_name":"allanderek/football-analysis","sub_path":"blog/posts/league_analysis.py","file_name":"league_analysis.py","file_ext":"py","file_size_in_byte":64057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10185451288","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n__author__ = \"Heinrich Widmann\"\n\n# system relevant modules:\nimport os, glob, sys\nimport time, datetime, subprocess\n\n# program relevant modules:\nimport logging\nimport traceback\nimport re\nfrom output import Output\n\n# needed for MAPPER :\nimport codecs\nimport xml.etree.ElementTree as ET\nimport simplejson as json\nimport io\nfrom pyparsing import *\nimport Levenshtein as lvs\nimport iso639\nfrom collections import OrderedDict, Iterable, Counter\n\nPY2 = sys.version_info[0] == 2\nif PY2:\n from urllib2 import urlopen\n from urllib2 import HTTPError,URLError\nelse:\n from urllib.request import urlopen\n from urllib.error import HTTPError,URLError\n\nclass Mapper(object):\n \"\"\"\n ### MAPPER - class\n # Parameters:\n # -----------\n # Public Methods:\n # ---------------\n # map(request) - maps records according to request on B21FIND schema\n # using mapfiles in md-mapping and stores resulting files in subdirectory '../json'\n #\n \"\"\"\n\n def __init__ (self, OUT, base_outdir,fromdate):\n ##HEW-D logging = logging.getLogger()\n self.base_outdir = base_outdir\n self.fromdate = fromdate\n self.logger = logging.getLogger('root')\n self.OUT = OUT\n # Read in B2FIND metadata schema and fields\n schemafile = '%s/mapfiles/b2find_schema.json' % (os.getcwd())\n with open(schemafile, 'r') as f:\n self.b2findfields=json.loads(f.read(), object_pairs_hook=OrderedDict)\n\n ## settings for pyparsing\n nonBracePrintables = ''\n if PY2:\n unicodePrintables = u''.join(unichr(c) for c in range(65536)\n if not unichr(c).isspace())\n else:\n unicodePrintables = u''.join(chr(c) for c in range(65536)\n if not chr(c).isspace())\n \n for c in unicodePrintables: ## printables:\n if c not in '(){}[]':\n nonBracePrintables = nonBracePrintables + c\n\n self.enclosed = Forward()\n value = Combine(OneOrMore(Word(nonBracePrintables) ^ White(' ')))\n nestedParens = nestedExpr('(', ')', content=self.enclosed)\n nestedBrackets = nestedExpr('[', ']', content=self.enclosed)\n nestedCurlies = nestedExpr('{', '}', content=self.enclosed)\n self.enclosed << OneOrMore(value | nestedParens | nestedBrackets | nestedCurlies)\n\n class cv_disciplines(object):\n \"\"\"\n This class represents the closed vocabulary used for the mapoping of B2FIND discipline mapping\n Copyright (C) 2014 Heinrich Widmann.\n\n \"\"\"\n def __init__(self):\n self.discipl_list = self.get_list()\n\n @staticmethod\n def get_list():\n import csv\n import os\n disctab = []\n discipl_file = '%s/mapfiles/b2find_disciplines.json' % (os.getcwd())\n with open(discipl_file) as f:\n disctab = json.load(f)['disciplines']\n return disctab\n\n class cv_geonames(object):\n \"\"\"\n This class represents the closed vocabulary used for the mapoping of B2FIND spatial coverage to coordinates\n Copyright (C) 2016 Heinrich Widmann.\n\n \"\"\"\n def __init__(self):\n self.geonames_list = self.get_list()\n\n @staticmethod\n def get_list():\n import csv\n import os\n geonames_file = '%s/mapfiles/b2find_geonames.tab' % (os.getcwd())\n geonamestab = []\n with open(geonames_file, 'r') as f:\n ## define csv reader object, assuming delimiter is tab\n tsvfile = csv.reader(f, delimiter='\\t')\n\n ## iterate through lines in file\n for line in tsvfile:\n geonamestab.append(line)\n \n return geonamestab\n\n def str_equals(self,str1,str2):\n \"\"\"\n performs case insensitive string comparison by first stripping trailing spaces \n \"\"\"\n return str1.strip().lower() == str2.strip().lower()\n\n def date2UTC(self,old_date):\n \"\"\"\n changes date to UTC format\n \"\"\"\n # UTC format = YYYY-MM-DDThh:mm:ssZ\n try:\n if type(old_date) is list:\n inlist=old_date\n else:\n inlist=[old_date]\n utc = re.compile(r'\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z')\n\n utc_day1 = re.compile(r'\\d{4}-\\d{2}-\\d{2}') # day (YYYY-MM-DD)\n utc_day = re.compile(r'\\d{8}') # day (YYYYMMDD)\n utc_year = re.compile(r'\\d{4}') # year (4-digit number)\n\n new_date=None\n for val in inlist:\n if utc.search(val):\n new_date = utc.search(val).group()\n elif utc_day1.search(val):\n day = utc_day1.search(val).group()\n new_date = day + 'T11:59:59Z'\n elif utc_day.search(val):\n rep=re.findall(utc_day, val)[0]\n new_date = rep[0:4]+'-'+rep[4:6]+'-'+rep[6:8] + 'T11:59:59Z'\n elif utc_year.search(val):\n year = utc_year.search(val).group()\n new_date = year + '-07-01T11:59:59Z'\n return new_date\n except Exception :\n logging.error('[ERROR] : %s - in date2UTC replace old date %s by new date %s' % (e,val,new_date))\n return None\n else:\n return new_date\n\n def replace(self,setname,dataset,facet,old_value,new_value):\n \"\"\"\n replaces old value - can be a regular expression - with new value for a given facet\n \"\"\"\n\n try:\n old_regex = re.compile(old_value)\n\n for key in dataset:\n if key == facet :\n if re.match(old_regex, dataset[key]):\n dataset[key] = new_value\n return dataset\n except Exception :\n logging.error('[ERROR] : %s - in replace of pattern %s in facet %s with new_value %s' % (e,old_value,facet,new_value))\n return dataset\n else:\n return dataset\n\n return dataset\n\n def check_url(self,url):\n ## check_url (MAPPER object, url) - method\n # Checks and validates a url via urllib module\n #\n # Parameters:\n # -----------\n # (url) url - Url to check\n #\n # Return Values:\n # --------------\n # 1. (boolean) result\n \n try:\n resp = urlopen(url, timeout=10).getcode()\n except HTTPError as err:\n if (err.code == 422):\n self.logger.error('%s in check_url of %s' % (err.code,url))\n return Warning\n else :\n return False\n except URLError as err: ## HEW : stupid workaraound for SSL: CERTIFICATE_VERIFY_FAILED]\n self.logger.warning('%s in check_url of %s' % (err,url))\n if str(e.reason).startswith('[SSL: CERTIFICATE_VERIFY_FAILED]') :\n return Warning\n else :\n return False\n else:\n return True\n \n def map_url(self, invalue):\n \"\"\"\n Convert identifiers to data access links, i.e. to 'Source' (ds['url']) or 'PID','DOI' etc. pp\n \n Copyright (C) 2015 by Heinrich Widmann.\n Licensed under AGPLv3.\n \"\"\"\n try:\n if type(invalue) is not list :\n invalue=invalue.split(\";\")\n iddict=dict()\n\n self.logger.debug('invalue %s' % invalue)\n for id in filter(None,invalue) :\n self.logger.debug(' id\\t%s' % id)\n if id.startswith('http://data.theeuropeanlibrary'):\n iddict['url']=id\n elif id.startswith('ivo:'):\n iddict['IVO']='http://registry.euro-vo.org/result.jsp?searchMethod=GetResource&identifier='+id\n elif id.startswith('10.'): ##HEW-??? or id.startswith('10.5286') or id.startswith('10.1007') :\n iddict['DOI'] = self.concat('http://dx.doi.org/doi:',id)\n elif 'doi.org/' in id:\n iddict['DOI'] = 'http://dx.doi.org/'+re.compile(\".*doi.org/(.*)\\s?.*\").match(id).groups()[0].strip(']')\n elif 'doi:' in id: ## and 'DOI' not in iddict :\n iddict['DOI'] = 'http://dx.doi.org/doi:'+re.compile(\".*doi:(.*)\\s?.*\").match(id).groups()[0].strip(']')\n elif 'hdl.handle.net' in id:\n reurl = re.search(\"(?Phttps?://[^\\s<>]+)\", id)\n if reurl :\n iddict['PID'] = reurl.group(\"url\")\n elif 'hdl:' in id:\n iddict['PID'] = id.replace('hdl:','http://hdl.handle.net/')\n elif 'http:' in id or 'https:' in id:\n reurl = re.search(\"(?Phttps?://[^\\s<>]+)\", id)\n if reurl :\n iddict['url'] = reurl.group(\"url\")##[0]\n \n except Exception as e :\n self.logger.critical('%s - in map_identifiers %s can not converted !' % (e,invalue))\n return {}\n else:\n if self.OUT.verbose > 3 :\n for id in iddict :\n self.logger.debug('iddict\\t(%s,%s)' % (id,iddict[id]))\n if self.check_url(iddict[id]):\n self.logger.debug('Identifier %s checked successfully' % iddict[id])\n else:\n self.logger.crtitical('Identifier %s failed in url checker' % iddict[id])\n\n return iddict\n\n def map_lang(self, invalue):\n \"\"\"\n Convert languages and language codes into ISO names\n \n Copyright (C) 2014 Mikael Karlsson.\n Adapted for B2FIND 2014 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n def mlang(language):\n if '_' in language:\n language = language.split('_')[0]\n if ':' in language:\n language = language.split(':')[1]\n if len(language) == 2:\n try: return iso639.languages.get(alpha2=language.lower())\n except KeyError: pass\n elif len(language) == 3:\n try: return iso639.languages.get(alpha3=language.lower())\n except KeyError: pass\n except AttributeError: pass\n try: return iso639.languages.get(terminology=language.lower())\n except KeyError: pass\n try: return iso639.languages.get(bibliographic=language.lower())\n except KeyError: pass\n else:\n try: return iso639.languages.get(name=language.title())\n except KeyError: pass\n for l in re.split('[,.;: ]+', language):\n try: return iso639.languages.get(name=l.title())\n except KeyError: pass\n\n newvalue=list()\n if type(invalue) == list :\n for lang in invalue:\n mcountry = mlang(lang)\n if mcountry:\n newvalue.append(mcountry.name)\n else:\n mcountry = mlang(invalue)\n if mcountry:\n newvalue.append(mcountry.name)\n\n return newvalue\n \n def map_geonames(self,invalue):\n \"\"\"\n Map geonames to coordinates\n \n Copyright (C) 2014 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n from geopy.geocoders import Nominatim\n from geopy.exc import GeocoderQuotaExceeded\n geolocator = Nominatim()\n try:\n location = geolocator.geocode(invalue.split(';')[0])\n if not location :\n return None ### (None,None)\n if location.raw['importance'] < 0.9 :\n return None\n except GeocoderQuotaExceeded:\n logging.error('%s can not converted !' % (invalue.split(';')[0]))\n sleep(5)\n return None\n except Exception :\n logging.error('[ERROR] : %s - in map_geonames %s can not converted !' % (e,invalue.split(';')[0]))\n return None ### (None,None)\n else:\n return location ### (location.latitude, location.longitude)\n\n def map_temporal(self,invalue):\n \"\"\"\n Map date-times to B2FIND start and end time\n \n Copyright (C) 2015 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n desc=''\n try:\n logging.info('Invalue\\t%s' % invalue)\n if type(invalue) is not list:\n invalue=invalue.split(';')\n if type(invalue[0]) is dict :\n invalue=invalue[0]\n if '@type' in invalue :\n if invalue['@type'] == 'single':\n if \"date\" in invalue : \n desc+=' %s : %s' % (invalue[\"@type\"],invalue[\"date\"])\n return (desc,self.date2UTC(invalue[\"date\"]),self.date2UTC(invalue[\"date\"]))\n else :\n desc+='%s' % invalue[\"@type\"]\n return (desc,None,None)\n elif invalue['@type'] == 'verbatim':\n if 'period' in invalue :\n desc+=' %s : %s' % (invalue[\"type\"],invalue[\"period\"])\n else:\n desc+='%s' % invalue[\"type\"]\n return (desc,None,None)\n elif invalue['@type'] == 'range':\n if 'start' in invalue and 'end' in invalue :\n desc+=' %s : ( %s - %s )' % (invalue['@type'],invalue[\"start\"],invalue[\"end\"])\n return (desc,self.date2UTC(invalue[\"start\"]),self.date2UTC(invalue[\"end\"]))\n else:\n desc+='%s' % invalue[\"@type\"]\n return (desc,None,None)\n elif 'start' in invalue and 'end' in invalue :\n desc+=' %s : ( %s - %s )' % ('range',invalue[\"start\"],invalue[\"end\"])\n return (desc,self.date2UTC(invalue[\"start\"]),self.date2UTC(invalue[\"end\"]))\n else:\n return (desc,None,None)\n else:\n outlist=list()\n if len(invalue) == 1 :\n try:\n desc+=' point in time : %s' % self.date2UTC(invalue[0]) \n return (desc,self.date2UTC(invalue[0]),self.date2UTC(invalue[0]))\n except ValueError:\n return (desc,None,None)\n## else:\n## desc+=': ( %s - %s ) ' % (self.date2UTC(invalue[0]),self.date2UTC(invalue[0])) \n## return (desc,self.date2UTC(invalue[0]),self.date2UTC(invalue[0]))\n elif len(invalue) == 2 :\n try:\n desc+=' period : ( %s - %s ) ' % (self.date2UTC(invalue[0]),self.date2UTC(invalue[1])) \n return (desc,self.date2UTC(invalue[0]),self.date2UTC(invalue[1]))\n except ValueError:\n return (desc,None,None)\n else:\n return (desc,None,None)\n except Exception :\n logging.debug('[ERROR] : %s - in map_temporal %s can not converted !' % (e,invalue))\n return (None,None,None)\n else:\n return (desc,None,None)\n\n def is_float_try(self,str):\n try:\n float(str)\n return True\n except ValueError:\n return False\n\n def flatten(self,l):\n for el in l:\n if isinstance(el, Iterable) and not isinstance(el, str):\n for sub in flatten(el):\n yield sub\n else:\n yield el\n\n def check_spatial(self,invalue,geotab):\n \"\"\"\n Check spatial coverage and map to representiable form\n Copyright (C) 2018 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n\n self.logger.debug('invalue %s' % (invalue,))\n\n if not any(invalue) :\n self.logger.warning('Coordinate list has only None entries : %s' % (invalue,))\n return (desc,None,None,None,None)\n desc=''\n ## check coordinates\n if len(invalue) > 1 :\n for lat in [invalue[1],invalue[3]]:\n if float(lat) < -90. or float(lat) > 90. :\n self.logger.error('Latitude %s is not in range [-90,90]' % lat)\n for lon in [invalue[2],invalue[4]]:\n if float(lon) < 0. or float(lon) > 360. :\n self.logger.warning('Longitude %s is not in range [0,360]' % lon)\n if float(lon) < -180. or float(lon) > 180 :\n self.logger.critical('Longitude %s is not in range [-180,180] nor in [0,360]' % lon)\n\n if invalue[1]==invalue[3] and invalue[2]==invalue[4] :\n self.logger.info('[%s,%s] seems to be a point' % (invalue[1],invalue[2]))\n if float(invalue[1]) > 0 : # northern latitude\n desc+='(%-2.0fN,' % float(invalue[1])\n else : # southern lat\n desc+='(%-2.0fS,' % (float(invalue[1]) * -1.0)\n if float(invalue[2]) >= 0 : # eastern longitude\n desc+='%-2.0fE)' % float(invalue[2]) ## (float(invalue[2]) -180.)\n else : # western longitude\n desc+='%-2.0fW)' % (float(invalue[2]) * -1.0)\n else:\n self.logger.info('[%s,%s,%s,%s] seems to be a box' % (invalue[1],invalue[2],invalue[3],invalue[4]))\n if float(invalue[1]) > 0 : # northern min latitude\n desc+='(%-2.0fN-' % float(invalue[1])\n else : # southern min lat\n desc+='(%-2.0fS-' % (float(invalue[1]) * -1.0)\n if float(invalue[3]) > 0 : # northern max latitude\n desc+='%-2.0fN,' % float(invalue[3])\n else : # southern max lat\n desc+='%-2.0fS,' % (float(invalue[3]) * -1.0)\n if float(invalue[2]) >= 0 : # eastern min longitude\n desc+='%-2.0fE-' % float(invalue[2])\n else : # western min longitude\n desc+='%-2.0fW-' % (float(invalue[2]) * -1.0)\n if float(invalue[4]) > 0 : # eastern max longitude\n desc+='%-2.0fE)' % float(invalue[4])\n else : # western max longitude\n desc+='%-2.0fW)' % (float(invalue[4]) * -1.0) \n\n self.logger.info('Spatial description %s' % desc)\n return (desc,invalue[1],invalue[2],invalue[3],invalue[4])\n \n def map_spatial(self,invalue,geotab):\n \"\"\"\n Map coordinates to spatial\n \n Copyright (C) 2014 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n desc=''\n pattern = re.compile(r\";|\\s+\")\n try:\n self.logger.info(' | Invalue:\\t%s' % invalue)\n if isinstance(invalue,list) :\n if len(invalue) == 1:\n valarr=invalue[0].split()\n else:\n valarr=' '.join(invalue).split()\n else:\n valarr=invalue.split() ##HEW??? [invalue]\n self.logger.info(' | Valarr:\\t%s' % valarr)\n coordarr=list()\n nc=0\n for val in valarr:\n if type(val) is dict : ## special dict case\n coordict=dict()\n if \"description\" in val :\n desc=val[\"description\"]\n if \"boundingBox\" in val :\n coordict=val[\"boundingBox\"]\n retValue = (desc,coordict[\"minLatitude\"],coordict[\"maxLongitude\"],coordict[\"maxLatitude\"],coordict[\"minLongitude\"])\n else :\n retValue = (desc)\n else:\n self.logger.debug('value %s' % val)\n if self.is_float_try(val) is True :\n coordarr.append(val)\n nc+=1\n self.logger.debug('coordarr %s' % coordarr)\n if nc==2 :\n retValue = (desc,coordarr[0],coordarr[1],coordarr[0],coordarr[1])\n elif nc>=4 :\n retValue = (desc,coordarr[0],coordarr[1],coordarr[2],coordarr[3])\n elif desc :\n retValue = (desc,None,None,None,None) \n else :\n retValue = (None,None,None,None,None) \n\n if len(coordarr)==2 :\n retValue = (desc,coordarr[0],coordarr[1],coordarr[0],coordarr[1])\n elif len(coordarr)==4 :\n retValue = (desc,coordarr[0],coordarr[1],coordarr[2],coordarr[3])\n\n except Exception as e :\n self.logger.error('%s : %s can not converted !' % (e,retValue))\n retValue = (None,None,None,None,None) \n ##print('KKKKKKKKKKKK %s' % (self.check_spatial(retValue,geotab)),)\n return self.check_spatial(retValue,geotab)\n\n def map_checksum(self,invalue):\n \"\"\"\n Filter out md checksum from value list\n \n Copyright (C) 2016 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n if type(invalue) is not list :\n inlist=re.split(r'[;&\\s]\\s*',invalue)\n inlist.append(invalue)\n else:\n inlist=invalue\n\n for inval in inlist: \n if re.match(\"[a-fA-F0-9]{32}\",inval) : ## checks for MD5 checksums !!! \n return inval \n\n return None\n\n def map_discipl(self,invalue,disctab):\n \"\"\"\n Convert disciplines along B2FIND disciplinary list\n \n Copyright (C) 2014 Heinrich Widmann\n Licensed under AGPLv3.\n \"\"\"\n \n retval=list()\n if type(invalue) is not list :\n inlist=re.split(r'[;&\\s]\\s*',invalue)\n inlist.append(invalue)\n else:\n seplist=[re.split(r\"[;&\\xe2]\",i) for i in invalue]\n swlist=[re.findall(r\"[\\w']+\",i) for i in invalue]\n inlist=swlist ## +seplist\n inlist=[item for sublist in inlist for item in sublist] ##???\n for indisc in inlist :\n self.logger.debug('\\t\\t Next input discipline value %s of type %s' % (indisc,type(indisc)))\n if PY2:\n indisc=indisc.encode('utf8').replace('\\n',' ').replace('\\r',' ').strip().title()\n else:\n indisc=indisc.replace('\\n',' ').replace('\\r',' ').strip().title()\n maxr=0.0\n maxdisc=''\n for line in disctab :\n line=re.split(r'#', line) \n try:\n if len(line) < 3:\n self.logger.critical('Missing base element in dicipline array %s' % line)\n sys.exit(-2)\n else:\n disc=line[2].strip()\n r=lvs.ratio(indisc,disc)\n except Exception as e :\n self.logger.error('%s : %s of type %s can not compared to %s of type %s' % (e,indisc,type(indisc),disc,type(disc)))\n continue\n if r > maxr :\n maxdisc=disc\n maxr=r\n self.logger.debug('--- %s \\n|%s|%s| %f | %f' % (line,indisc,disc,r,maxr))\n rethier=line\n if maxr == 1 and indisc == maxdisc :\n self.logger.info(' | Perfect match of >%s< : nothing to do, DiscHier %s' % (indisc,line))\n retval.append(indisc.strip())\n elif maxr > 0.90 :\n self.logger.info(' | Similarity ratio %f is > 0.90 : replace value >>%s<< with best match --> %s' % (maxr,indisc,maxdisc))\n ##return maxdisc\n retval.append(indisc.strip())\n else:\n self.logger.debug(' | Similarity ratio %f is < 0.90 compare value >>%s<< and discipline >>%s<<' % (maxr,indisc,maxdisc))\n continue\n\n if len(retval) > 0:\n retval=list(OrderedDict.fromkeys(retval)) ## this elemenates real duplicates\n return (';'.join(retval),rethier)\n else:\n return ('Not stated',list()) \n \n def cut(self,invalue,pattern,nfield=None):\n \"\"\"\n Invalue is expected as list (if is not, it is splitted). \n Loop over invalue and for each elem : \n - If pattern is None truncate characters specified by nfield (e.g. ':4' first 4 char, '-2:' last 2 char, ...)\n - else if pattern is in invalue, split according to pattern and return field nfield (if 0 return the first found pattern),\n - else return invalue.\n\n Copyright (C) 2015 Heinrich Widmann.\n Licensed under AGPLv3.\n \"\"\"\n\n outvalue=list()\n if not isinstance(invalue,list): invalue = invalue.split()\n for elem in invalue:\n logging.debug('elem:%s\\tpattern:%s\\tnfield:%s' % (elem,pattern,nfield))\n try:\n if pattern is None :\n if nfield :\n outvalue.append(elem[nfield])\n else:\n outvalue.append(elem)\n else:\n rep=''\n cpat=re.compile(pattern)\n if nfield == 0 :\n rep=re.findall(cpat,elem)[0]\n elif len(re.split(cpat,elem)) > nfield-1 :\n rep=re.split(cpat,elem)[nfield-1]\n logging.debug('rep\\t%s' % rep)\n if rep :\n outvalue.append(rep)\n else:\n outvalue.append(elem)\n except Exception :\n logging.error(\"%s in cut() with invalue %s\" % (e,invalue))\n\n return outvalue\n\n def list2dictlist(self,invalue,valuearrsep):\n \"\"\"\n transfer list of strings/dicts to list of dict's { \"name\" : \"substr1\" } and\n - eliminate duplicates, numbers and 1-character- strings, ... \n \"\"\"\n\n dictlist=[]\n valarr=[]\n rm_chars = '@(){}<>;|`\\'\\\"\\\\#' ## remove chars not allowed in CKAN tags\n repl_chars = ':,=/?' ## replace chars not allowed in CKAN tags\n # read in list of stopwords\n swfile='%s/mapfiles/stopwords.txt' % os.getcwd()\n with open(swfile) as sw:\n stopwords = sw.read().splitlines()\n if isinstance(invalue,dict):\n invalue=invalue.values()\n elif not isinstance(invalue,list):\n invalue=invalue.split(';')\n invalue=list(OrderedDict.fromkeys(invalue)) ## this eliminates real duplicates\n for lentry in invalue :\n self.logger.debug('lentry %s' % lentry)\n try:\n if type(lentry) is dict :\n if \"value\" in lentry:\n valarr.append(lentry[\"value\"])\n else:\n valarr=lentry.values()\n else:\n valarr=re.split(r\"[\\n&,;+]+\",lentry)\n self.logger.debug('valarr %s' % valarr)\n for entry in valarr:\n if len(entry.split()) > 8 :\n logging.debug('String has too many words : %s' % entry)\n continue\n entry=\"\". join(c for c in entry if c not in rm_chars and not c.isdigit())\n for c in repl_chars :\n if c in entry:\n entry = entry.replace(c,' ')\n entry=entry.strip()\n if isinstance(entry,int) or len(entry) < 2 : continue\n entrywords = entry.split()\n resultwords = [word for word in entrywords if word.lower() not in stopwords]\n self.logger.debug(\"resultwords %s\" % resultwords)\n if resultwords :\n entry=' '.join(resultwords).encode('ascii','ignore').strip()\n self.logger.debug(\"entry %s\" % entry)\n dictlist.append({ \"name\": entry })\n except (Exception,AttributeError) as err:\n self.logger.error('%s in list2dictlist of lentry %s , entry %s' % (err,lentry,entry))\n continue\n return dictlist[:12]\n\n def uniq(self,input):\n\n ## eleminates duplicates and removes words in blacklist from list\n\n blacklist=[\"Unspecified\"]\n for string in blacklist :\n if string in input : input.remove(string)\n uniqset = set(input)\n return list(uniqset)\n\n def concat(self,str1,str2):\n \"\"\"\n concatenete given strings\n\n Copyright (C) 2015 Heinrich Widmann.\n Licensed under AGPLv3.\n \"\"\"\n\n return str1+str2\n\n def utc2seconds(self,dt):\n \"\"\"\n converts datetime to seconds since year 0\n\n Copyright (C) 2015 Heinrich Widmann.\n Licensed under AGPLv3.\n \"\"\"\n year1epochsec=62135600400\n utc1900=datetime.datetime.strptime(\"1900-01-01T11:59:59Z\", \"%Y-%m-%dT%H:%M:%SZ\")\n utc=self.date2UTC(dt)\n try:\n utctime = datetime.datetime.strptime(utc, \"%Y-%m-%dT%H:%M:%SZ\")\n diff = utc1900 - utctime\n diffsec= int(diff.days) * 24 * 60 *60\n if diff > datetime.timedelta(0): ## date is before 1900\n sec=int(time.mktime((utc1900).timetuple()))-diffsec+year1epochsec\n else:\n sec=int(time.mktime(utctime.timetuple()))+year1epochsec\n except Exception as err :\n logging.error('[ERROR] : %s - in utc2seconds date-time %s can not converted !' % (err,utc))\n return None\n\n return sec\n\n def splitstring2dictlist(self,dataset,facetName,valuearrsep,entrysep):\n \"\"\"\n split string in list of string and transfer to list of dict's [ { \"name1\" : \"substr1\" }, ... ] \n \"\"\"\n\n # read in list of stopwords\n swfile='%s/stopwords' % os.getcwd()\n with open(swfile) as sw:\n stopwords = sw.read().splitlines()\n na_arr=['not applicable','Unspecified']\n for facet in dataset:\n if facet == facetName and len(dataset[facet]) == 1 :\n valarr=dataset[facet][0]['name'].split(valuearrsep)\n valarr=list(OrderedDict.fromkeys(valarr)) ## this elimintas real duplicates\n dicttagslist=[]\n for entry in valarr:\n if entry in na_arr : continue\n entrywords = entry.split()\n resultwords = [word for word in entrywords if word.lower() not in stopwords]\n print ('resultwords %s' % resultwords)\n entrydict={ \"name\": ' '.join(resultwords).replace('/','-') } \n dicttagslist.append(entrydict)\n \n dataset[facet]=dicttagslist\n return dataset \n\n\n def changeDateFormat(self,dataset,facetName,old_format,new_format):\n \"\"\"\n changes date format from old format to a new format\n current assumption is that the old format is anything (indicated in the \n config file by * ) and the new format is UTC\n \"\"\"\n for facet in dataset:\n if self.str_equals(facet,facetName) and old_format == '*':\n if self.str_equals(new_format,'UTC'):\n old_date = dataset[facet]\n new_date = date2UTC(old_date)\n dataset[facet] = new_date\n return dataset\n return dataset\n\n def normalize(self,x):\n \"\"\"normalize the path expression; outside jsonpath to allow testing\"\"\"\n subx = []\n \n # replace index/filter expressions with placeholders\n # Python anonymous functions (lambdas) are cryptic, hard to debug\n def f1(m):\n n = len(subx) # before append\n g1 = m.group(1)\n subx.append(g1)\n ret = \"[#%d]\" % n\n return ret\n x = re.sub(r\"[\\['](\\??\\(.*?\\))[\\]']\", f1, x)\n \n # added the negative lookbehind -krhodes\n x = re.sub(r\"'?(? 1: print (\"\\tf03\", key, loc, expr, path)\n trace(s(key, expr), obj, path)\n walk(loc, x, obj, path, f03)\n elif loc == \"..\":\n trace(x, obj, path)\n def f04(key, loc, expr, obj, path):\n if debug > 1: print (\"\\tf04\", key, loc, expr, path)\n if isinstance(obj, dict):\n if key in obj:\n trace(s('..', expr), obj[key], s(path, key))\n else:\n if key < len(obj):\n trace(s('..', expr), obj[key], s(path, key))\n walk(loc, x, obj, path, f04)\n elif loc == \"!\":\n # Perl jsonpath extension: return keys\n def f06(key, loc, expr, obj, path):\n if isinstance(obj, dict):\n trace(expr, key, path)\n walk(loc, x, obj, path, f06)\n elif isinstance(obj, dict) and loc in obj:\n trace(x, obj[loc], s(path, loc))\n elif isinstance(obj, list) and isint(loc):\n iloc = int(loc)\n if len(obj) >= iloc:\n trace(x, obj[iloc], s(path, loc))\n else:\n # [(index_expression)]\n if loc.startswith(\"(\") and loc.endswith(\")\"):\n if debug > 1: print (\"index\", loc)\n e = evalx(loc, obj)\n trace(s(e,x), obj, path)\n return\n \n # ?(filter_expression)\n if loc.startswith(\"?(\") and loc.endswith(\")\"):\n if debug > 1: print (\"filter\", loc)\n def f05(key, loc, expr, obj, path):\n if debug > 1: print (\"f05\", key, loc, expr, path)\n if isinstance(obj, dict):\n eval_result = evalx(loc, obj[key])\n else:\n eval_result = evalx(loc, obj[int(key)])\n if eval_result:\n trace(s(key, expr), obj, path)\n \n loc = loc[2:-1]\n walk(loc, x, obj, path, f05)\n return\n \n m = re.match(r'(-?[0-9]*):(-?[0-9]*):?(-?[0-9]*)$', loc)\n if m:\n if isinstance(obj, (dict, list)):\n def max(x,y):\n if x > y:\n return x\n return y\n \n def min(x,y):\n if x < y:\n return x\n return y\n \n objlen = len(obj)\n s0 = m.group(1)\n s1 = m.group(2)\n s2 = m.group(3)\n \n # XXX int(\"badstr\") raises exception\n start = int(s0) if s0 else 0\n end = int(s1) if s1 else objlen\n step = int(s2) if s2 else 1\n \n if start < 0:\n start = max(0, start+objlen)\n else:\n start = min(objlen, start)\n if end < 0:\n end = max(0, end+objlen)\n else:\n end = min(objlen, end)\n \n for i in xrange(start, end, step):\n trace(s(i, x), obj, path)\n return\n \n # after (expr) & ?(expr)\n if loc.find(\",\") >= 0:\n # [index,index....]\n for piece in re.split(r\"'?,'?\", loc):\n if debug > 1: print (\"piece\", piece)\n trace(s(piece, x), obj, path)\n else:\n store(path, obj)\n \n def walk(loc, expr, obj, path, funct):\n if isinstance(obj, list):\n for i in xrange(0, len(obj)):\n funct(i, loc, expr, obj, path)\n elif isinstance(obj, dict):\n for key in obj:\n funct(key, loc, expr, obj, path)\n \n def evalx(loc, obj):\n \"\"\"eval expression\"\"\"\n \n if debug: print (\"evalx\", loc)\n \n # a nod to JavaScript. doesn't work for @.name.name.length\n # Write len(@.name.name) instead!!!\n loc = loc.replace(\"@.length\", \"len(__obj)\")\n \n loc = loc.replace(\"&&\", \" and \").replace(\"||\", \" or \")\n \n # replace !@.name with 'name' not in obj\n # XXX handle !@.name.name.name....\n def notvar(m):\n return \"'%s' not in __obj\" % m.group(1)\n loc = re.sub(\"!@\\.([a-zA-Z@_]+)\", notvar, loc)\n \n # replace @.name.... with __obj['name']....\n # handle @.name[.name...].length\n def varmatch(m):\n def brackets(elts):\n ret = \"__obj\"\n for e in elts:\n if isint(e):\n ret += \"[%s]\" % e # ain't necessarily so\n else:\n ret += \"['%s']\" % e # XXX beware quotes!!!!\n return ret\n g1 = m.group(1)\n elts = g1.split('.')\n if elts[-1] == \"length\":\n return \"len(%s)\" % brackets(elts[1:-1])\n return brackets(elts[1:])\n \n loc = re.sub(r'(? == translation\n # causes problems if a string contains =\n \n # replace @ w/ \"__obj\", but \\@ means a literal @\n loc = re.sub(r'(?\", v)\n return v\n \n # body of jsonpath()\n\n # Get caller globals so eval can pick up user functions!!!\n caller_globals = sys._getframe(1).f_globals\n result = []\n if expr and obj:\n cleaned_expr = self.normalize(expr)\n if cleaned_expr.startswith(\"$;\"):\n cleaned_expr = cleaned_expr[2:]\n \n trace(cleaned_expr, obj, '$')\n\n if len(result) > 0:\n return result\n return False\n\n def add_unique_to_dict_list(self,dict_list, key, value):\n for d in dict_list:\n if d[\"key\"] == key:\n return d[\"value\"]\n\n dict_list.append({\"key\": key, \"value\": value})\n return value\n \n\n def jsonmdmapper(self,dataset,jrules):\n \"\"\"\n changes JSON dataset field values according to mapfile\n \"\"\" \n format = 'VALUE'\n newds=dict()\n \n for rule in jrules:\n if rule.startswith('#'):\n continue\n field=rule.strip('\\n').split(' ')[0]\n jpath=rule.strip('\\n').split(' ')[1]\n\n try:\n if not jpath.startswith('$') :\n value=[jpath]\n else:\n result=self.jsonpath(dataset, jpath, format)\n if isinstance(result, (list, tuple)): ## and (len(result)>0):\n if isinstance(result[0], (list, tuple)):\n value=result[0]\n else:\n value=result\n else:\n continue\n\n # add value to JSON key\n if field in newds:\n newds[field].extend(value)\n else:\n newds[field]=value\n\n##HEW-T if field == 'SpatialCoverage' :\n##HEW-T print('SpatialCoverage newds %s' % newds[field])\n\n except Exception as e:\n logging.debug(' %s:[ERROR] %s : processing rule %s : %s : %s' % (self.jsonmdmapper.__name__,e,field,jpath,value))\n continue\n return newds\n \n def postprocess(self,dataset,specrules):\n \"\"\"\n changes dataset field values according to configuration\n \"\"\" \n \n for rule in specrules:\n try: \n # jump over rule lines starting with #\n if rule.startswith('#'):\n continue\n # specrules can be checked for correctness\n assert(rule.count(',,') == 5),\"a double comma should be used to separate items in rule\"\n \n rule = rule.rstrip('\\n').split(',,') # splits each line of config file\n ## print ('rule %s' % rule\n groupName = rule[0]\n setName = rule[1]\n facetName = rule[2]\n old_value = rule[3]\n new_value = rule[4]\n action = rule[5]\n \n oai_set=dataset['oai_set']\n\n ## call action\n if action == \"replace\":\n dataset = self.replace(setName,dataset,facetName,old_value,new_value)\n## elif action == \"truncate\":\n## dataset = self.truncate(dataset,facetName,old_value,new_value)\n elif action == \"changeDateFormat\":\n dataset = self.changeDateFormat(dataset,facetName,old_value,new_value)\n elif action == 'splitstring2dictlist':\n dataset = self.splitstring2dictlist(dataset,facetName,old_value,new_value)\n elif action == \"another_action\":\n pass\n else:\n pass\n except Exception as e:\n self.logger.error(\" [ERROR] %s : perform %s for facet %s with invalue %s and new_value %s\" % (e,action,facetName,old_value,new_value))\n continue\n\n return dataset\n \n def evalxpath(self, obj, expr, ns):\n # returns list of selected entries from xml obj using xpath expr\n flist=re.split(r'[\\(\\),]',expr.strip()) ### r'[(]',expr.strip())\n retlist=list()\n for func in flist:\n func=func.strip()\n if func.startswith('//'):\n fxpath= '.'+re.sub(r'/text()','',func)\n self.logger.debug('xpath %s' % fxpath)\n try:\n for elem in obj.findall(fxpath,ns):\n self.logger.debug(' //elem %s' % elem)\n if elem.text :\n self.logger.debug(' |- elem.text %s' % elem.text)\n retlist.append(elem.text)\n except Exception as e:\n self.logger.error('%s : during xpath extraction of %s' % (e,fxpath))\n return []\n elif func == '/':\n try:\n for elem in obj.findall('.//',ns):\n self.logger.debug(' /elem %s' % elem)\n if elem.text :\n self.logger.debug(' |- elem.text %s' % elem.text)\n retlist.append(elem.text)\n except Exception as e:\n self.logger.error('%s : during xpath extraction of %s' % (e,'./'))\n return []\n\n return retlist\n\n def xpathmdmapper(self,xmldata,xrules,namespaces):\n # returns list or string, selected from xmldata by xpath rules (and namespaces)\n self.logger.debug(' XPATH rules %s' % xrules)\n self.logger.info(' | %-10s | %-10s | %-20s | \\n' % ('Field','XPATH','Value'))\n jsondata=dict()\n\n for line in xrules:\n self.logger.debug(' Next line of xpath rules : %-20s' % (line))\n try:\n retval=list()\n m = re.match(r'(\\s+)', line)\n if m:\n field=m.group(2)\n if field in ['Discipline','oai_set','Source']: ## set default for mandatory fields !!\n retval=['Not stated']\n self.logger.debug(' Field:xpathrule : %-10s:%-20s\\n' % (field,line))\n else:\n xpath=''\n m2 = re.compile('(\\s+)()(.*?)()').search(line)\n m3 = re.compile('(\\s+)()(.*?)()').search(line)\n if m3:\n xpath=m3.group(3)\n self.logger.info(' xpath %-10s' % xpath)\n retval=xpath\n elif m2:\n xpath=m2.group(3)\n self.logger.debug(' xpath %-10s' % xpath)\n retval=self.evalxpath(xmldata, xpath, namespaces)\n else:\n self.logger.debug(' Found no xpath expression')\n continue\n\n if retval and len(retval) > 0 :\n jsondata[field]=retval ### .extend(retval)\n self.logger.info(' | %-10s | %10s | %20s | \\n' % (field,xpath,';'.join(str[:40] for str in retval)))\n elif field in ['Discipline','oai_set']:\n jsondata[field]=['Not stated']\n except Exception as e:\n logging.error(' | [ERROR] : %s in xpathmdmapper processing\\n\\tfield\\t%s\\n\\txpath\\t%s\\n\\tretvalue\\t%s' % (e,field,xpath,retval))\n continue\n\n return jsondata\n\n def map(self,request): ### community,mdprefix,path,target_mdschema):\n ## map(MAPPER object, community, mdprefix, path) - method\n # Maps XML files formated in source specific MD schema/format (=mdprefix)\n # to JSON files formatted in target schema (by default B2FIND schema) \n # For each file two steps are performed\n # 1. select entries by Python XPATH converter according \n # the mapfile [-].xml . \n # 2. perform generic and semantic mapping \n # versus iso standards and closed vovabularies ...\n #\n # Parameters:\n # -----------\n # 1. (list) request - specifies the processing parameters as , etc. \n # 2. (string, optinal) target_mdschema - specifies the schema the inpted records are be mapped to\n #\n # Return Values:\n # --------------\n # 1. (dict) results statistics\n \n resKeys=['count','tcount','ecount','time']\n results = dict.fromkeys(resKeys,0)\n \n # set processing parameters\n community=request[0]\n mdprefix=request[3]\n mdsubset=request[4] if len(request)>4 else None\n target_mdschema=request[8] if len(request)>8 else None\n\n # settings according to md format (xml or json processing)\n if mdprefix == 'json' :\n mapext='conf'\n insubdir='/hjson'\n infformat='json'\n else:\n mapext='xml'\n insubdir='/xml'\n infformat='xml'\n\n # read target_mdschema (degfault : B2FIND_schema) and set mapfile\n if (target_mdschema and not target_mdschema.startswith('#')):\n mapfile='%s/mapfiles/%s-%s.%s' % (os.getcwd(),community,target_mdschema,mapext)\n else:\n mapfile='%s/mapfiles/%s-%s.%s' % (os.getcwd(),community,mdprefix,mapext)\n\n if not os.path.isfile(mapfile):\n self.logger.error(' Can not access community specific mapfile %s ' % mapfile )\n mapfile='%s/mapfiles/%s.%s' % (os.getcwd(),mdprefix,mapext)\n if not os.path.isfile(mapfile):\n self.logger.critical(' ... nor md schema specific mapfile %s ' % mapfile )\n return results\n else :\n self.logger.error(' ... but only generic mapfile %s ' % mapfile )\n\n print('\\t|- Mapfile\\t%s' % os.path.basename(mapfile))\n mf = codecs.open(mapfile, \"r\", \"utf-8\")\n maprules = mf.readlines()\n maprules = list(filter(lambda x:len(x) != 0,maprules)) # removes empty lines\n\n # check namespaces\n namespaces=dict()\n for line in maprules:\n ns = re.match(r'(\\s+)(', line)\n if ns:\n namespaces[ns.group(3)]=ns.group(5)\n continue\n self.logger.debug(' |- Namespaces\\t%s' % json.dumps(namespaces,sort_keys=True, indent=4))\n\n # instance of B2FIND discipline table\n disctab = self.cv_disciplines()\n # instance of B2FIND discipline table\n geotab = self.cv_geonames()\n # instance of British English dictionary\n\n # community-mdschema root path\n cmpath='%s/%s-%s' % (self.base_outdir,community,mdprefix)\n self.logger.info('\\t|- Input path:\\t%s' % cmpath)\n subdirs=next(os.walk(cmpath))[1] ### [x[0] for x in os.walk(cmpath)]\n totcount=0 # total counter of processed files\n subsettag=re.compile(r'_\\d+')\n # loop over all available subdirs\n for subdir in sorted(subdirs) :\n if mdsubset and not subdir.startswith(mdsubset) :\n self.logger.warning('\\t |- Subdirectory %s does not match %s - no processing required' % (subdir,mdsubset))\n continue\n elif self.fromdate :\n datematch = re.search(r'\\d{4}-\\d{2}-\\d{2}$', subdir[:-2])\n if datematch :\n subdirdate = datetime.datetime.strptime(datematch.group(), '%Y-%m-%d').date()\n fromdate = datetime.datetime.strptime(self.fromdate, '%Y-%m-%d').date()\n if (fromdate > subdirdate) :\n self.logger.warning('\\t |- Subdirectory %s has timestamp older than fromdate %s - no processing required' % (subdir,self.fromdate))\n continue\n else :\n self.logger.warning('\\t |- Subdirectory %s with timestamp newer than fromdate %s is processed' % (subdir,self.fromdate))\n else:\n self.logger.warning('\\t |- Subdirectory %s does not contain a timestamp %%Y-%%m-%%d - no processing required' % subdir)\n continue \n else:\n print('\\t |- Subdirectory %s is processed' % subdir)\n self.logger.debug('Processing of subdirectory %s' % subdir)\n\n # check input path\n inpath='%s/%s/%s' % (cmpath,subdir,insubdir)\n if not os.path.exists(inpath):\n self.logger.critical('Can not access directory %s' % inpath)\n return results \n\n # make output directory for mapped json's\n if (target_mdschema and not target_mdschema.startswith('#')):\n outpath='%s-%s/%s/%s/' % (cmpath,target_mdschema,subdir,'json')\n else:\n outpath='%s/%s/%s/' % (cmpath,subdir,'json')\n if (not os.path.isdir(outpath)): os.makedirs(outpath)\n self.logger.debug('Ouput path is %s' % outpath)\n\n files = list(filter(lambda x: x.endswith(infformat), os.listdir(inpath)))\n results['tcount'] += len(list(files))\n oldperc=0\n err = None\n self.logger.debug(' |- Processing of %s files in %s' % (infformat.upper(),inpath))\n \n ## start processing loop\n start = time.time()\n fcount=0 # counter per sub dir !\n for filename in files:\n ## counter and progress bar\n fcount+=1\n perc=int(fcount*100/int(len(list(files)))) ## int(results['tcount'])\n bartags=int(perc/5)\n if perc%10 == 0 and perc != oldperc:\n oldperc=perc\n print (\"\\r\\t [%-20s] %5d (%3d%%) in %d sec\" % ('='*bartags, fcount, perc, time.time()-start ))\n sys.stdout.flush()\n self.logger.debug(' | m | %-4d | %-45s |' % (fcount,filename))\n\n jsondata = dict()\n infilepath=inpath+'/'+filename \n if ( os.path.getsize(infilepath) > 0 ):\n ## load and parse raw xml rsp. json\n with open(infilepath, 'r') as f:\n try:\n if mdprefix == 'json':\n jsondata=json.loads(f.read())\n else:\n xmldata= ET.parse(infilepath)\n except Exception as e:\n self.logger.error(' | [ERROR] %s : Cannot load or parse %s-file %s' % (e,infformat,infilepath))\n results['ecount'] += 1\n continue\n else:\n self.logger.debug(' |- Read file %s ' % infilepath)\n \n ## XPATH rsp. JPATH converter\n if mdprefix == 'json':\n try:\n self.logger.debug(' |- %s INFO %s to JSON FileProcessor - Processing: %s/%s' % (time.strftime(\"%H:%M:%S\"),infformat,inpath,filename))\n jsondata=self.jsonmdmapper(jsondata,maprules)\n except Exception as e:\n self.logger.error(' | [ERROR] %s : during %s 2 json processing' % (infformat) )\n results['ecount'] += 1\n continue\n else:\n try:\n # Run Python XPATH converter\n self.logger.warning(' | xpathmapper | %-4d | %-45s |' % (fcount,os.path.basename(filename)))\n jsondata=self.xpathmdmapper(xmldata,maprules,namespaces)\n ##HEW-T print ('jsondata %s' % jsondata)\n except Exception as e:\n self.logger.error(' | [ERROR] %s : during XPATH processing' % e )\n results['ecount'] += 1\n continue\n\n iddict=dict()\n blist=list()\n spvalue=None\n stime=None\n etime=None\n publdate=None\n # loop over target schema (B2FIND)\n self.logger.info(' Mapping of ...')\n self.logger.info('\\t|-> %-10s : %-10s |' % ( 'InField','Invalue'))\n if 'url' not in jsondata:\n self.logger.error('|- No identifier for id %s' % filename)\n\n for facetdict in self.b2findfields.values() :\n facet=facetdict[\"ckanName\"]\n ##HEW-T print ('facet %s ' % facet)\n if facet in jsondata:\n if facet in ['fulltext']:\n self.logger.debug('\\t|-> %-10s : %-10s |' % (facet,jsondata[facet]))\n else:\n self.logger.debug('\\t|-> %-10s : %-10s |' % (facet,jsondata[facet]))\n try:\n if facet == 'author':\n jsondata[facet] = self.uniq(self.cut(jsondata[facet],'\\(\\d\\d\\d\\d\\)',1))\n elif facet == 'tags':\n jsondata[facet] = self.list2dictlist(jsondata[facet],\" \")\n elif facet == 'url':\n iddict = self.map_url(jsondata[facet])\n\n if 'DOI' in iddict :\n if not 'DOI' in jsondata :\n jsondata['DOI']=iddict['DOI']\n if 'PID' in iddict :\n if not ('DOI' in jsondata and jsondata['DOI']==iddict['PID']):\n jsondata['PID']=iddict['PID']\n if 'url' in iddict:\n ##HEW-D if not ('DOI' in jsondata and jsondata['DOI']==iddict['url']) and not ('PID' in jsondata and jsondata['PID']==iddict['url'] and iddict['url'].startswith('html')) :\n jsondata['url']=iddict['url']\n else:\n jsondata['url']=''\n\n elif facet == 'Checksum':\n jsondata[facet] = self.map_checksum(jsondata[facet])\n elif facet == 'Discipline':\n (jsondata[facet],jsondata['DiscHierarchy']) = self.map_discipl(jsondata[facet],disctab.discipl_list)\n self.logger.debug('DiscHierarchy %s' % jsondata['DiscHierarchy'])\n elif facet == 'Publisher':\n blist = self.cut(jsondata[facet],'=',2)\n jsondata[facet] = self.uniq(blist)\n elif facet == 'Contact':\n if all(x is None for x in jsondata[facet]):\n jsondata[facet] = ['Not stated']\n else:\n blist = self.cut(jsondata[facet],'=',2)\n jsondata[facet] = self.uniq(blist)\n elif facet == 'SpatialCoverage':\n spdesc,slat,wlon,nlat,elon = self.map_spatial(jsondata[facet],geotab.geonames_list)\n if wlon and slat and elon and nlat :\n spvalue=\"{\\\"type\\\":\\\"Polygon\\\",\\\"coordinates\\\":[[[%s,%s],[%s,%s],[%s,%s],[%s,%s],[%s,%s]]]}\" % (wlon,slat,wlon,nlat,elon,nlat,elon,slat,wlon,slat)\n if spdesc != None :\n jsondata[facet] = spdesc\n elif facet == 'TemporalCoverage':\n tempdesc,stime,etime=self.map_temporal(jsondata[facet])\n if tempdesc:\n jsondata[facet] = tempdesc\n elif facet == 'Language': \n jsondata[facet] = self.map_lang(jsondata[facet])\n elif facet in ['Format']: \n jsondata[facet] = self.uniq(jsondata[facet])\n elif facet == 'PublicationYear':\n publdate=self.date2UTC(jsondata[facet])\n if publdate:\n jsondata[facet] = self.cut([publdate],'\\d\\d\\d\\d',0)\n elif facet == 'fulltext':\n encoding='utf-8'\n jsondata[facet] = ';'.join([x for x in jsondata[facet] if x.strip()])[:32000]\n if self.OUT.verbose > 2 :\n jsondata[facet] = jsondata[facet][:100]\n elif facet == 'oai_set':\n if jsondata[facet]==['Not stated'] :\n jsondata[facet]=mdsubset\n except Exception as err :\n self.logger.error('%s during mapping of field\\t%s' % (err,facet))\n self.logger.debug('\\t\\tvalue%s' % (jsondata[facet]))\n continue\n else: # B2FIND facet not in jsondata\n if facet == 'title':\n if 'notes' in jsondata :\n jsondata[facet] = jsondata['notes'][:20]\n else:\n jsondata[facet] = 'Not stated'\n\n if spvalue :\n jsondata[\"spatial\"]=spvalue\n if stime and etime :\n jsondata[\"TemporalCoverage:BeginDate\"] = stime\n jsondata[\"TempCoverageBegin\"] = self.utc2seconds(stime) \n jsondata[\"TemporalCoverage:EndDate\"] = etime \n jsondata[\"TempCoverageEnd\"] = self.utc2seconds(etime)\n if publdate :\n jsondata[\"PublicationTimestamp\"] = publdate\n\n self.logger.info(' Results of Mapping :')\n self.logger.info('\\t|<- %-10s : %-10s |' % ( 'MappedFacet','Mappedvalue'))\n\n for key in jsondata :\n if key in ['fulltext','notes']:\n self.logger.debug('\\t|<- %-10s : %-10s |' % (key,jsondata[key]))\n else:\n self.logger.info('\\t|<- %-10s : %-10s |' % (key,jsondata[key]))\n\n ## write to JSON file\n jsonfilename=os.path.splitext(filename)[0]+'.json'\n \n with io.open(outpath+'/'+jsonfilename, 'w') as json_file:\n try:\n self.logger.debug('decode json data')\n if PY2 :\n data = json.dumps(jsondata,sort_keys = True, indent = 4).decode('utf-8') ## needed, else : Cannot write json file ... : must be unicode, not str\n else :\n data = json.dumps(jsondata,sort_keys = True, indent = 4) ## no decoding for PY3 !!\n\n except Exception as err:\n self.logger.error('%s : Cannot decode jsondata %s' % (err,jsondata))\n try:\n self.logger.debug('Write to json file %s/%s' % (outpath,jsonfilename))\n json_file.write(data)\n except TypeError as err:\n self.logger.error(' %s : Cannot write data in json file %s ' % (jsonfilename,err))\n except Exception as err:\n self.logger.error(' %s : Cannot write json file %s' % (err,outpath+'/'+filename))\n results['ecount'] += 1\n continue\n else:\n self.logger.debug(' Succesfully written to json file %s' % outpath+'/'+filename)\n\n results['count'] += 1\n continue\n else:\n self.logger.error('Can not access content of %s' % infilepath)\n results['ecount'] += 1\n continue\n\n out=' %s to json stdout\\nsome stuff\\nlast line ..' % infformat\n ##HEW-D if (err is not None ): self.logger.error('[ERROR] ' + err)\n\n totcount+=results['count'] # total # of sucessfully processed files\n print (' \\t|- %-10s |@ %-10s |\\n\\t| Provided | Mapped | Failed |\\n\\t| %8d | %6d | %6d |' % ( 'Finished',time.strftime(\"%H:%M:%S\"),\n results['tcount'],\n totcount,\n results['ecount']\n ))\n\n # search in output for result statistics\n last_line = out.split('\\n')[-2]\n if ('INFO Main - ' in last_line):\n string = last_line.split('INFO Main ')[1]\n [results['count'], results['ecount']] = re.findall(r\"\\d{1,}\", string)\n results['count'] = int(results['count']); results['ecount'] = int(results['ecount'])\n \n return results\n\n def is_valid_value(self,facet,valuelist):\n \"\"\"\n checks if value is the consitent for the given facet\n \"\"\"\n vall=list()\n if not isinstance(valuelist,list) : valuelist=[valuelist]\n\n for value in valuelist:\n errlist=''\n if facet in ['title','notes','author','Publisher']:\n cvalue=value\n try:\n if PY2 :\n if isinstance(value, unicode) :\n ## value=value.decode('utf-8')\n cvalue=value.encode(\"iso-8859-1\")\n else :\n if isinstance(value, str) :\n cvalue=value.encode(\"iso-8859-1\")\n except (Exception,UnicodeEncodeError) as e :\n self.logger.error(\"%s : { %s:%s }\" % (e,facet,value))\n else:\n vall.append(cvalue)\n finally:\n pass\n elif self.str_equals(facet,'Discipline'):\n if self.map_discipl(value,self.cv_disciplines().discipl_list)[0] is None :\n errlist+=' | %10s | %20s |' % (facet, value)\n else :\n vall.append(value)\n elif self.str_equals(facet,'PublicationYear'):\n try:\n datetime.datetime.strptime(value, '%Y')\n except ValueError:\n errlist+=' | %10s | %20s |' % (facet, value)\n else:\n vall.append(value)\n elif self.str_equals(facet,'PublicationTimestamp'):\n try:\n datetime.datetime.strptime(value, '%Y-%m-%d'+'T'+'%H:%M:%S'+'Z')\n except ValueError:\n errlist+=' | %10s | %20s |' % (facet, value)\n else:\n vall.append(value)\n elif self.str_equals(facet,'Language'):\n if self.map_lang(value) is None:\n errlist+=' | %10s | %20s |' % (facet, value)\n else:\n vall.append(value)\n elif self.str_equals(facet,'tags'):\n if isinstance(value,dict) and value[\"name\"]:\n vall.append(value[\"name\"])\n else:\n errlist+=' | %10s | %20s |' % (facet, value)\n else:\n vall.append(value)\n # to be continued for every other facet\n\n ##if errlist != '':\n ## print (' Following key-value errors fails validation:\\n' + errlist \n return vall\n \n def validate(self,request,target_mdschema):\n ## validate(MAPPER object, community, mdprefix, path) - method\n # validates the (mapped) JSON files in directory against the B2FIND md schema\n # Parameters:\n # -----------\n # 1. (string) community - B2FIND community the md are harvested from\n # 2. (string) mdprefix - metadata format of original harvested source (not needed her)\n # 3. (string) path - path to subset directory \n # (without (!) 'json' subdirectory)\n #\n # Return Values:\n # --------------\n # 1. (dict) statistic of validation \n \n resKeys=['count','tcount','ecount','time']\n results = dict.fromkeys(resKeys,0)\n \n # set processing parameters\n community=request[0]\n mdprefix=request[3]\n mdsubset=request[4] if len(request)>4 else None\n\n # set extension of mapfile according to md format (xml or json processing)\n if mdprefix == 'json' :\n mapext='conf' ##!!!HEW --> json\n else:\n mapext='xml'\n mapfile='%s/mapfiles/%s-%s.%s' % (os.getcwd(),community,mdprefix,mapext)\n if not os.path.isfile(mapfile):\n mapfile='%s/mapfiles/%s.%s' % (os.getcwd(),mdprefix,mapext)\n if not os.path.isfile(mapfile):\n self.logger.error('Mapfile %s does not exist !' % mapfile)\n return results\n mf=open(mapfile) \n\n # community-mdschema root path\n cmpath='%s/%s-%s' % (self.base_outdir,community,mdprefix)\n self.logger.info('\\t|- Input path:\\t%s' % cmpath)\n subdirs=next(os.walk(cmpath))[1] ### [x[0] for x in os.walk(cmpath)]\n # loop over all available subdirs\n fcount=0\n for subdir in sorted(subdirs) :\n if mdsubset and not subdir.startswith(mdsubset) :\n self.logger.warning('\\t |- Subdirectory %s does not match %s - no processing required' % (subdir,mdsubset))\n continue\n elif self.fromdate :\n datematch = re.search(r'\\d{4}-\\d{2}-\\d{2}$', subdir[:-2])\n if datematch :\n subdirdate = datetime.datetime.strptime(datematch.group(), '%Y-%m-%d').date()\n fromdate = datetime.datetime.strptime(self.fromdate, '%Y-%m-%d').date()\n if (fromdate > subdirdate) :\n self.logger.warning('\\t |- Subdirectory %s has timestamp older than fromdate %s - no processing required' % (subdir,self.fromdate))\n continue\n else :\n self.logger.warning('\\t |- Subdirectory %s with timestamp newer than fromdate %s is processed' % (subdir,self.fromdate))\n else:\n self.logger.warning('\\t |- Subdirectory %s does not contain a timestamp %%Y-%%m-%%d - no processing required' % subdir)\n continue \n else:\n print('\\t |- Subdirectory %s is processed' % subdir)\n self.logger.debug('Processing of subdirectory %s' % subdir)\n\n # check input path\n inpath='%s/%s/%s' % (cmpath,subdir,'json')\n if not os.path.exists(inpath):\n self.logger.critical('Can not access directory %s' % inpath)\n continue \n elif not os.path.exists(inpath) or not os.listdir(inpath):\n self.logger.critical('The directory %s does not exist or no json files to validate are found!' % (inpath))\n continue\n\n # find all .json files in inpath/json:\n files = list(filter(lambda x: x.endswith('.json'), os.listdir(inpath)))\n results['tcount'] = len(files)\n\n # sum of all .json files of all sub dirs\n results['count'] += results['tcount'] \n \n self.logger.info(' %s Validation of %d files in %s/json' % (time.strftime(\"%H:%M:%S\"),results['tcount'],inpath))\n if results['tcount'] == 0 :\n self.logger.error(' ERROR : Found no files to validate !')\n return results\n self.logger.info(' | | %-4s | %-45s |\\n |%s|' % ('#','infile',\"-\" * 53))\n\n totstats=dict()\n for facetdict in self.b2findfields.values() :\n facet=facetdict[\"ckanName\"]\n if facet.startswith('#') or facetdict[\"display\"] == \"hidden\" :\n continue\n totstats[facet]={\n 'xpath':'',\n 'mapped':0,\n 'valid':0,\n 'vstat':[]\n } \n\n mf.seek(0, 0)\n for line in mf:\n if '' in line:\n totstats[facet]['xpath']=re.sub(r\"(.*?)\", r\"\\1\", next(mf))\n break\n\n fcount = 0\n oldperc = 0\n start = time.time()\n for filename in files: ## loop over datasets\n fcount+=1\n perc=int(fcount*100/int(len(files)))\n bartags=int(perc/10)\n if perc%10 == 0 and perc != oldperc :\n oldperc=perc\n print (\"\\r\\t [%-20s] %d / %d%% in %d sec\" % ('='*bartags, fcount, perc, time.time()-start ))\n sys.stdout.flush()\n\n jsondata = dict()\n self.logger.info(' | v | %-4d | %-s/%s |' % (fcount,os.path.basename(inpath),filename))\n\n if ( os.path.getsize(inpath+'/'+filename) > 0 ):\n with open(inpath+'/'+filename, 'r') as f:\n try:\n jsondata=json.loads(f.read())\n except:\n self.logger.error(' | [ERROR] Cannot load the json file %s' % inpath+'/'+filename)\n results['ecount'] += 1\n continue\n else:\n results['ecount'] += 1\n continue\n \n try:\n valuearr=list()\n for facetdict in self.b2findfields.values() : ## loop over facets\n facet=facetdict[\"ckanName\"]\n if facet.startswith('#') or facetdict[\"display\"] == \"hidden\" :\n continue\n value = None\n if facet in jsondata:\n value = jsondata[facet]\n self.logger.warning('facet:value : %s:%s' % (facet,value))\n if value:\n totstats[facet]['mapped']+=1\n pvalue=self.is_valid_value(facet,value)\n self.logger.debug(' key %s\\n\\t|- value %s\\n\\t|- type %s\\n\\t|- pvalue %s' % (facet,value[:30],type(value),pvalue[:30]))\n if pvalue and len(pvalue) > 0:\n totstats[facet]['valid']+=1 \n if type(pvalue) is list :\n totstats[facet]['vstat'].extend(pvalue)\n else:\n totstats[facet]['vstat'].append(pvalue)\n else:\n totstats[facet]['vstat']=[] \n else:\n if facet == 'title':\n self.logger.debug(' | [ERROR] Facet %s is mandatory, but value is empty' % facet)\n except IOError :\n self.logger.error(\" %s in validation of facet '%s' and value '%s' \\n\" % (e,facet, value))\n exit()\n\n outfile='%s/%s' % (cmpath,'validation.stat')\n printstats='\\n Statistics of\\n\\tcommunity\\t%s\\n\\tsubset\\t\\t%s\\n\\t# of records\\t%d\\n see as well %s\\n\\n' % (community,subdir,fcount,outfile) \n printstats+=\" |-> {:<16} <-- {:<20} \\n |-- {:<12} | {:<9} | \\n\".format('Facet name','XPATH','Mapped','Validated')\n printstats+=\" |-- {:>5} | {:>4} | {:>5} | {:>4} |\\n\".format('#','%','#','%')\n printstats+=\" |- Value statistics:\\n |- {:<5} : {:<30} |\\n\".format('#','Value')\n printstats+=\" ----------------------------------------------------------\\n\"\n\n for key,facetdict in self.b2findfields.items() : ###.values() :\n facet=facetdict[\"ckanName\"]\n if facet.startswith('#') or facetdict[\"display\"] == \"hidden\" :\n continue\n\n if float(fcount) > 0 :\n printstats+=\"\\n |-> {:<16} <-- {:<20}\\n |-- {:>5} | {:>4.0f} | {:>5} | {:>4.0f}\\n\".format(key,totstats[facet]['xpath'],totstats[facet]['mapped'],totstats[facet]['mapped']*100/float(fcount),totstats[facet]['valid'],totstats[facet]['valid']*100/float(fcount))\n try:\n counter=Counter(totstats[facet]['vstat'])\n if totstats[facet]['vstat']:\n for tuple in counter.most_common(10):\n ucvalue=tuple[0]##HEW-D .encode('utf8')\n if len(ucvalue) > 80 :\n restchar=len(ucvalue)-80\n contt=' [...(%d chars follow)...]' % restchar \n else: \n contt=''\n ##HEW-D?? printstats+=\" |- {:<5d} : {:<30}{:<5} |\\n\".format(tuple[1],unicode(tuple[0])[:80],contt) ##HEW-D??? .encode(\"utf-8\")[:80],contt)\n printstats+=\" |- {:<5d} : {:<30s}{:<5s} |\\n\".format(tuple[1],ucvalue[:80],contt) ##HEW-D??? .encode(\"utf-8\")[:80],contt)\n except TypeError as e:\n self.logger.error('%s : facet %s' % (e,facet))\n continue\n except Exception as e:\n self.logger.error('%s : facet %s' % (e,facet))\n continue\n\n if self.OUT.verbose > 2:\n print (printstats)\n\n f = open(outfile, 'w')\n f.write(printstats)\n f.write(\"\\n\")\n f.close\n\n self.logger.debug('%s INFO B2FIND : %d records validated; %d records caused error(s).' % (time.strftime(\"%H:%M:%S\"),fcount,results['ecount']))\n\n\n print (' \\t|- %-10s |@ %-10s |\\n\\t| Provided | Validated | Failed |\\n\\t| %8d | %9d | %6d |' % ( 'Finished',time.strftime(\"%H:%M:%S\"),\n results['tcount'],\n fcount,\n results['ecount']\n ))\n\n return results\n\n def json2xml(self,json_obj, line_padding=\"\", mdftag=\"\", mapdict=\"b2findfields\"):\n\n result_list = list()\n json_obj_type = type(json_obj)\n\n\n if json_obj_type is list:\n for sub_elem in json_obj:\n result_list.append(json2xml(sub_elem, line_padding, mdftag, mapdict))\n\n return \"\\n\".join(result_list)\n\n if json_obj_type is dict:\n for tag_name in json_obj:\n sub_obj = json_obj[tag_name]\n if tag_name in mapdict : \n tag_name=mapdict[tag_name]\n if not isinstance(tag_name,list) : tag_name=[tag_name]\n for key in tag_name:\n result_list.append(\"%s<%s%s>\" % (line_padding, mdftag, key))\n if type(sub_obj) is list:\n for nv in sub_obj:\n if tag_name == 'tags' or tag_name == 'KEY_CONNECT.GENERAL_KEY':\n result_list.append(\"%s%s\" % (line_padding, nv[\"name\"].strip()))\n else:\n result_list.append(\"%s%s\" % (line_padding, nv.strip()))\n else:\n result_list.append(self.json2xml(sub_obj, \"\\t\" + line_padding, mdftag, mapdict))\n\n result_list.append(\"%s\" % (line_padding, mdftag, key))\n\n\n\n else:\n self.logger.debug ('[WARNING] : Field %s can not mapped to B2FIND schema' % tag_name)\n continue\n \n return \"\\n\".join(result_list)\n\n return \"%s%s\" % (line_padding, json_obj)\n\n def oaiconvert(self,request): ##HEW-D community,mdprefix,path,target_mdschema):\n ## oaiconvert(MAPPER object, request) - method\n # Converts B2FIND JSON files to XML files formatted in target format, e.g. 'CERA' (exp_) and ds2_ files\n \n results = {\n 'count':0,\n 'tcount':0,\n 'ecount':0,\n 'time':0\n }\n \n # set processing parameters\n community=request[0]\n mdprefix=request[3]\n mdsubset=request[4] if len(request)>4 else None\n target_mdschema=request[5] if len(request)>5 else None\n # set subset:\n if (not mdsubset):\n subset = 'SET_1' ## or 2,...\n elif mdsubset.endswith('_'): # no OAI subsets, but store in sub dirs\n subset = mdsubset+'1' ## or 2,...\n elif mdsubset[-1].isdigit() and mdsubset[-2] == '_' :\n subset = mdsubset\n else:\n subset = mdsubset+'_1'\n self.logger.info(' |- Subset: \\t%s' % subset )\n\n # check for target_mdschema and set subset and path\n if (target_mdschema):\n # data subset dir :\n outpath = '/'.join([self.base_outdir,community+'-'+mdprefix+'-'+target_mdschema,subset,'xml'])\n self.logger.info('\\t|- Data out path:\\t%s' % outpath)\n else:\n self.logger.critical('For OAI converter processing target metaschema must be given!')\n sys.exit()\n\n inpath = '/'.join([self.base_outdir,community+'-'+mdprefix,subset])\n # check data in and out path\n if not os.path.exists(inpath+'/json') or not os.listdir(inpath + '/json'):\n logging.error('[ERROR] Can not access input data path %s' % (inpath+'/json') )\n return results\n elif not os.path.exists(outpath) :\n logging.warning('[ERROR] Create not existing output data path %s' % (outpath) )\n os.makedirs(outpath)\n \n # run oai-converting\n # find all .json files in inpath/json:\n files = filter(lambda x: x.endswith('.json'), os.listdir(inpath+'/json'))\n \n results['tcount'] = len(files)\n\n ##oaiset=path.split(target_mdschema)[0].split('_')[0].strip('/')\n ##oaiset=os.path.basename(path)\n ## outpath=path.split(community)[0]+'/b2find-oai_b2find/'+community+'/'+mdprefix +'/'+path.split(mdprefix)[1].split('_')[0]+'/xml'\n ##HEW-D outpath=path.split(community)[0]+'b2find-oai_b2find/'+community+'/'+mdprefix +'/xml'\n\n logging.debug(' %s INFO OAI-Converter of files in %s' % (time.strftime(\"%H:%M:%S\"),inpath))\n logging.debug(' | | %-4s | %-40s | %-40s |\\n |%s|' % ('#','infile','outfile',\"-\" * 53))\n\n fcount = 0\n oldperc = 0\n start = time.time()\n\n # Read in B2FIND metadata schema and fields\n schemafile = '%s/mapfiles/b2find_schema.json' % (os.getcwd())\n with open(schemafile, 'r') as f:\n b2findfields=json.loads(f.read())\n\n for filename in files:\n ## counter and progress bar\n fcount+=1\n perc=int(fcount*100/int(len(files)))\n bartags=perc/10\n if perc%10 == 0 and perc != oldperc :\n oldperc=perc\n print (\"\\r\\t[%-20s] %d / %d%% in %d sec\" % ('='*bartags, fcount, perc, time.time()-start ))\n sys.stdout.flush()\n\n createdate = str(datetime.datetime.utcnow())\n jsondata = dict()\n logging.debug(' |- %s INFO JSON2XML - Processing: %s/%s' % (time.strftime(\"%H:%M:%S\"),os.path.basename(inpath),filename))\n\n if ( os.path.getsize(inpath+'/json/'+filename) > 0 ):\n with open(inpath+'/json/'+filename, 'r') as f:\n try:\n jsondata=json.loads(f.read())\n except:\n logging.error(' | [ERROR] Can not access json file %s' % inpath+'/json/'+filename)\n results['ecount'] += 1\n continue\n else:\n results['ecount'] += 1\n continue\n \n ### oai-convert !!\n if target_mdschema == 'cera':\n ##HEW-T print('JJJJJJJJ %s' % jsondata)\n if 'oai_identifier' in jsondata :\n identifier=jsondata['oai_identifier'][0]\n else:\n identifier=os.path.splitext(filename)[0]\n convertfile='%s/mapfiles/%s%s.%s' % (os.getcwd(),'json2',target_mdschema,'json')\n with open(convertfile, 'r') as f:\n try:\n mapdict=json.loads(f.read())\n except:\n logging.error(' | [ERROR] Cannot load the convert file %s' % convertfile)\n sys.exit()\n\n for filetype in ['ds2','exp']:\n outfile=outpath+'/'+filetype+'_'+community+'_'+identifier+'.xml' \n\t ### load xml template\n templatefile='%s/mapfiles/%s_%s_%s.%s' % (os.getcwd(),target_mdschema,filetype,'template','xml')\n with open(templatefile, 'r') as f:\n try:\n dsdata= f.read() ##HEW-D ET.parse(templatefile).getroot()\n except Exception :\n logging.error(' | Cannot load tempalte file %s' % (templatefile))\n\n data=dict()\n jsondata['community']=community\n ##HEW-D dsdata = Template(dsdata)\n for facetdict in b2findfields.values() :\n facet=facetdict[\"ckanName\"]\n ##HEW-T print ('facet %s ' % facet)\n if facet in jsondata:\n if isinstance(jsondata[facet],list) and len(jsondata[facet])>0 :\n if facet == 'tags':\n data[facet]=''\n for tagndict in jsondata[facet]:\n data[facet]+=tagndict['name']\n else:\n data[facet]=' '.join(jsondata[facet]).strip('\\n ')\n else :\n data[facet]=jsondata[facet]\n ## outdata = dsdata.substitute(key=data[key])\n ##HEW-T print('KKKK key %s\\t data %s' % (key,data[key]))\n else:\n data[facet]=''\n\n data['identifier']=identifier\n try:\n outdata=dsdata%data\n except KeyError as err :\n logging.error(\"[ERROR] %s\\n\" % err )\n pass\n\n outfile=outpath+'/'+filetype+'_'+identifier+'.xml'\n try :\n f = open(outfile, 'w')\n f.write(outdata.encode('utf-8'))\n f.write(\"\\n\")\n f.close\n except IOError :\n logging.error(\"[ERROR] Cannot write data in xml file '%s': %s\\n\" % (outfile))\n return(False, outfile , outpath, fcount)\n\t\n else:\n identifier=jsondata[\"oai_identifier\"]\n outfile=outpath+'/'+filetype+'/'+community+'_'+identifier+'.xml'\n mapdict=self.b2findfields ##HEW-D ??? ckanfields ???\n header=\"\"\"\n
    \n \"\"\"+identifier+\"\"\"\n \"\"\"+createdate+\"\"\"\n \"\"\"+oaiset+\"\"\"\n
    \n \n \n\"\"\"\n footer=\"\"\"\n \n \n
    \"\"\"\n xmlprefix='b2find'\n xmldata=header+self.json2xml(jsondata,'\\t',xmlprefix,mapdict)+footer\n try:\n f = open(outfile, 'w')\n f.write(xmldata.encode('utf-8'))\n f.write(\"\\n\")\n f.close\n except IOError :\n logging.error(\"[ERROR] Cannot write data in xml file '%s': %s\\n\" % (outfile))\n return(False, outfile , outpath, fcount)\n\n logging.debug(' | o | %-4d | %-45s | %-45s |' % (fcount,os.path.basename(filename),os.path.basename(outfile)))\n \n\n logging.info('%s INFO B2FIND : %d records converted; %d records caused error(s).' % (time.strftime(\"%H:%M:%S\"),fcount,results['ecount']))\n\n # count ... all .xml files in path/b2find\n results['count'] = len(filter(lambda x: x.endswith('.xml'), os.listdir(outpath)))\n print (' \\t|- %-10s |@ %-10s |\\n\\t| Provided | Converted | Failed |\\n\\t| %8d | %6d | %6d |' % ( 'Finished',time.strftime(\"%H:%M:%S\"),\n results['tcount'],\n fcount,\n results['ecount']\n ))\n \n return results\n","repo_name":"EUDAT-Training/B2FIND-Training","sub_path":"mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":90807,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"12077761463","text":"import Modules.parse as Parser\nfrom Modules.info import Info\nimport os\nimport argparse\nimport sys\nimport shutil\nimport csv\nfrom Modules.single_file_controller import SFC, NUMBER_OF_KBYTES\n\n\nclass Node:\n JSON_NAME = 'local_info.json'\n\n def __init__(self, dir_path):\n self.dir_path = dir_path\n info_file_name = os.path.join(dir_path, self.JSON_NAME)\n\n if not os.path.isdir(dir_path):\n raise ValueError('no such directory')\n\n if not os.path.isfile(info_file_name):\n shutil.rmtree(dir_path, ignore_errors=True)\n os.mkdir(dir_path)\n self.info = Info(info_file_name, True)\n else:\n self.info = Info(info_file_name)\n\n def __len__(self):\n self._fix_missing_files()\n return len(self.info.transitions)\n\n def __iter__(self):\n self._fix_missing_files()\n return iter(self.info.transitions)\n\n def __getitem__(self, key):\n return self.get_value(key)\n\n def __setitem__(self, key, value):\n if key in self:\n self.replace_data(key, value)\n else:\n self.write_data(key, value)\n\n def __contains__(self, key):\n return key in self.info.transitions\n\n def write_multiple(self, **kwargs):\n for key in kwargs:\n self[key] = kwargs[key]\n\n def del_multiple(self, case_sensitive, *keys):\n for key in keys:\n self.del_data(key, case_sensitive)\n\n def contains_key(self, key, case_sensitive=True):\n if case_sensitive:\n return key in self.info.transitions\n lower_key = key.casefold()\n return lower_key in self.info.alternatives\n\n def clear(self):\n shutil.rmtree(self.dir_path, ignore_errors=True)\n os.mkdir(self.dir_path)\n self.info.transitions = {}\n self.info.sizes = []\n self.info.alternatives = {}\n self.info.dump()\n\n def get_value(self, key, case_sensitive=True, boundary=(None, None)):\n if case_sensitive:\n value = self._get_value_by_key(key)\n if value is None:\n raise ValueError(\"key doesn't exists\")\n return [self._get_value_by_key(key, boundary)]\n\n data = []\n lower_key = key.casefold()\n if lower_key not in self.info.alternatives:\n raise ValueError(\"key doesn't exists\")\n\n for new_key in self.info.alternatives[lower_key]:\n data.append(self._get_value_by_key(new_key, boundary))\n\n return data\n\n def _get_value_by_key(self, key, boundary=(None, None)):\n if key not in self:\n return None\n\n pathes = self.info.transitions[key]\n all_bytes = bytearray()\n for index in pathes:\n path = self._path(index)\n try:\n file = SFC(path, self.info.sizes[int(index)])\n except FileNotFoundError:\n self._fix_missing_files()\n return None\n datas = file.get_data(Parser.get_index(key))\n\n for data in datas:\n new_key = Parser.get_key(data)\n if key == new_key:\n all_bytes.extend(Parser.get_value_bytes(data))\n break\n else:\n return None\n return Parser.get_value(all_bytes[boundary[0]:boundary[1]])\n\n def replace_data(self, key, value):\n if key not in self:\n raise ValueError(\"key doesn't exists\")\n\n self._fix_missing_files()\n self.del_data(key)\n self.write_data(key, value)\n\n def del_data(self, key, case_sensitive=True):\n lower_key = key.casefold()\n if case_sensitive:\n self._del_data_by_single_key(key)\n self.info.alternatives[lower_key].remove(key)\n if not self.info.alternatives[lower_key]:\n del self.info.alternatives[lower_key]\n self.info.dump()\n return\n\n if lower_key not in self.info.alternatives:\n raise ValueError(\"key doesn't exists\")\n\n for key in self.info.alternatives[lower_key]:\n self._del_data_by_single_key(key)\n del self.info.alternatives[lower_key]\n self.info.dump()\n\n def _del_data_by_single_key(self, key):\n if key not in self:\n raise ValueError(\"key doesn't exists\")\n\n index = Parser.get_index(key)\n pathses = self.info.transitions[key]\n for file_index in pathses:\n try:\n file = SFC(self._path(file_index), self.info.sizes[file_index])\n except FileNotFoundError:\n self._fix_missing_files()\n raise ValueError(\"key doesn't exists\")\n datas = file.get_data(index)\n\n for i in range(len(datas)):\n new_key, value = Parser.decode_pair(datas[i])\n\n if new_key == key:\n file.del_data(index, i)\n self.info.sizes[self.info.transitions[key][i]] = file.size\n break\n\n del self.info.transitions[key]\n self.info.dump()\n\n def write_data(self, key, value):\n if key in self:\n raise ValueError('key already in storage')\n\n all_data = Parser.encode_value(value)\n max_size = NUMBER_OF_KBYTES * 1024 - 2\n key_data = Parser.encode_key(key)\n self.info.transitions[key] = []\n\n available_size = max_size - len(key_data)\n num_of_divides = len(all_data) // available_size\n for i in range(num_of_divides):\n data_to_write = all_data[i*available_size:(i+1)*available_size]\n write_index = self._write_short(key, data_to_write)\n self.info.transitions[key].append(write_index)\n\n data_to_write = all_data[num_of_divides * available_size:]\n write_index = self._write_short(key, data_to_write)\n self.info.transitions[key].append(write_index)\n\n lower_key = key.casefold()\n if lower_key in self.info.alternatives:\n self.info.alternatives[lower_key].append(key)\n else:\n self.info.alternatives[lower_key] = [key]\n self.info.dump()\n\n def _write_short(self, key, data_bytes):\n key_data = Parser.encode_key(key)\n key_data.extend(data_bytes)\n\n size = len(key_data)\n if size > NUMBER_OF_KBYTES * 1024 - 2:\n raise ValueError('data size is to big')\n\n file_index = self._get_best_file_index(len(key_data))\n path = self._path(file_index)\n if file_index is None:\n file_index = self._create_new_file()\n path = self._path(file_index)\n\n index = Parser.get_index(key)\n try:\n file = SFC(path, self.info.sizes[int(file_index)])\n except FileNotFoundError:\n self._fix_missing_files()\n return self._write_short(key, data_bytes)\n file.write_data(data=key_data, index=index)\n self.info.sizes[int(file_index)] = file.size\n return file_index\n\n def _path(self, index):\n return os.path.join(self.dir_path, str(index))\n\n def _create_new_file(self):\n path = len(self.info.sizes)\n if os.path.isfile(self._path(path)):\n os.remove(self._path(path))\n file = SFC(self._path(path), create_new=True)\n self.info.sizes.append(file.size)\n self.info.dump()\n return path\n\n def _get_best_file_index(self, size):\n min_difference = NUMBER_OF_KBYTES * 1024\n index = -1\n\n for i in range(len(self.info.sizes)):\n difference = self.info.sizes[i] - size\n\n if 0 <= difference < min_difference:\n index = i\n min_difference = difference\n\n return None if index == - 1 else index\n\n def _fix_missing_files(self):\n missing_indexes = [i for i in range(len(self.info.sizes))\n if not os.path.isfile(self._path(i))]\n\n if not missing_indexes:\n return\n\n mis = set(missing_indexes)\n missing_keys = {}\n for key in self.info.transitions:\n key_set = set(map(int, self.info.transitions[key]))\n if mis & key_set:\n missing_keys[key] = list(key_set - mis)\n\n for key in missing_keys:\n index_in_file = Parser.get_index(key)\n for file_index in missing_keys[key]:\n file = SFC(self._path(file_index), self.info.sizes[file_index])\n datas = file.get_data(index_in_file)\n\n for i in range(len(datas)):\n new_key = Parser.get_key(datas[i])\n if new_key == key:\n file.del_data(index_in_file, i)\n self.info.sizes[file_index] = file.size\n break\n\n self.info.alternatives[key.casefold()].remove(key)\n if not self.info.alternatives[key.casefold()]:\n del self.info.alternatives[key.casefold()]\n del self.info.transitions[key]\n\n for file_index in missing_indexes:\n file = SFC(self._path(file_index), create_new=True)\n self.info.sizes[file_index] = file.size\n self.info.dump()\n\n def process_args(self, args):\n if args.empty:\n self.clear()\n\n elif args.contains is not None:\n contains = self.contains_key(args.contains, args.reg)\n if contains:\n return ['YES']\n return ['NO']\n\n elif args.write is not None:\n key, value = args.write\n try:\n self[key] = value\n except ValueError:\n return ['Error: key is to big']\n\n elif args.write_multiple is not None:\n if args.write_multiple:\n data = args.write_multiple\n else:\n data = sys.stdin.readlines()\n data = [a.rstrip() for a in data]\n try:\n\n def get_key_value(line):\n splits = list(csv.reader([line],\n delimiter='=',\n quotechar='\"'))[0]\n if len(splits) <= 1:\n raise TypeError()\n return splits[0], '='.join(splits[1:])\n\n data = dict(map(get_key_value, data))\n except TypeError:\n return ['Error: input is not in format KEY=VALUE']\n try:\n self.write_multiple(**data)\n except ValueError:\n return ['Error: key is to big']\n\n elif args.read is not None:\n\n def try_int(value):\n if value is None:\n return None\n return int(value)\n\n key = args.read\n r = list(map(try_int, args.range))\n return list(self.get_value(key, args.reg, r))\n\n elif args.delete is not None:\n key = args.delete\n self.del_data(key, args.reg)\n\n elif args.delete_multiple is not None:\n if args.delete_multiple:\n data = args.delete_multiple\n else:\n data = sys.stdin.readlines()\n data = list(set([a.rstrip() for a in data]))\n self.del_multiple(args.reg, *data)\n\n elif args.list:\n return list(self)\n\n else:\n return None\n\n @staticmethod\n def get_parser():\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-w', '--write', nargs=2, metavar=('KEY', 'VALUE'),\n help='writes VALUE to store by KEY and exit')\n\n parser.add_argument('-W', '--write_multiple', nargs='*',\n metavar=('KEY=VALUE', 'KEY=VALUE'),\n help='''writes multiple VALUEs by its KEY,\n if no pairs were given - reads data from\n stdin''')\n\n parser.add_argument('-r', '--read', metavar='KEY',\n help='read value by KEY in storage and exit')\n\n parser.add_argument('-g', '--range', metavar=('START', 'END'), nargs=2,\n default=(None, None),\n help='if used with -r, outs values with range')\n\n parser.add_argument('-d', '--delete', metavar='KEY',\n help='delete value in storage by KEY and exit')\n parser.add_argument('-D', '--delete_multiple', nargs='*',\n metavar=('KEY', 'KEY'),\n help='''deletes multiple KEYs from node, if no KEY\n were given - reads data from stdin''')\n\n parser.add_argument('-e', '--empty', action='store_true',\n default=False,\n help='clear the storage and exit')\n\n parser.add_argument('-c', '--contains', metavar='KEY',\n help='writes whether node contains KEY')\n\n parser.add_argument('-l', '--list', action='store_true', default=False,\n help='writes all keys in storage and exit')\n\n parser.add_argument('-i', '--ignore_register', action='store_false',\n dest='reg',\n default=True,\n help='allows to ignore register in key')\n\n return parser\n\n\ndef answer():\n usage = 'node.py DIRECTORY OPTIONS'\n parser = Node.get_parser()\n parser.usage = usage\n parser.description = 'local storage node'\n\n parser.add_argument('DIRECTORY', help='path to local storage')\n\n parser.add_argument('-h', '--help', action='help')\n\n parser.add_argument('-s', '--silent_mode', action='store_true',\n dest='silent',\n default=False,\n help='makes program write nothing to the output')\n\n args = parser.parse_args()\n node = Node(args.DIRECTORY)\n result = node.process_args(args)\n if not args.silent and result is not None:\n for line in result:\n print(line)\n\n\nif __name__ == '__main__':\n answer()\n","repo_name":"hevezolly/Split-storage","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":14144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28965132319","text":"\n\nimport os\nimport sys\n\n# use brew install pyspark, open pyspark from terminal, import sys, print sys.path, copy py4j part and paste below\n# os.environ['SPARK_HOME']=\"/usr/local/Cellar/apache-spark/1.4.1\"\n# sys.path.append(\"/usr/local/Cellar/apache-spark/1.4.1/libexec/python/\")\n# sys.path.append(\"/usr/local/Cellar/apache-spark/1.4.1/libexec/python/lib/py4j-0.8.2.1-src.zip\")\n\ntry:\n from pyspark import SparkContext\n from pyspark import SparkConf\n from operator import add\n print (\"Successfully imported Spark Modules\")\nexcept ImportError as e:\n print(\"Error importing Spark Modules\", e)\n sys.exit(1)\n\nimport datetime\n\n\ndef main(arglist):\n\n with open(\"log_file_x.txt\", \"a\") as f:\n f.write(\"Start time of sort...... %s\\n\" % datetime.datetime.now())\n\n print(\"Start time...... %s\" % datetime.datetime.now())\n\n # mapreduce params\n path = arglist[0]\n output = arglist[1]\n minPartitions = int(arglist[2])\n\n # initialize\n conf = SparkConf()\n conf = conf.setMaster('local').setAppName(\"PythonSort\").set(\"spark.driver.memory\", \"10g\").set(\"spark.driver-maxResultSize\", \"3g\")\n sc = SparkContext(conf=conf)\n\n sc = SparkContext(appName=\"PythonWordCount\")\n lines = sc.textFile(path)\n counts = lines.flatMap(lambda x: x.split('\\n')) \\\n .map(lambda x: (x, 1)) \\\n .sortByKey(lambda x: x)\n counts.saveAsTextFile(output)\n # # print(rdd)\n # f = open(output, 'w')\n # f.writelines('\\n'.join(rdd))\n # f.close()\n\n # # write to one single file\n # single_output = open('single_output', 'w')\n # for i in range(minPartitions):\n # file_name = 'part-000' + ('0'+str(i) if i < 10 else str(i))\n # file_path = os.path.join(output, file_name)\n # file = open(file_path, 'r')\n #\n # single_output.write(''.join(file))\n # single_output.close()\n sc.stop()\n\n print(\"End time of sort...... %s\" % datetime.datetime.now())\n with open(\"log_file_x.txt\", \"a\") as f:\n f.write(\"End time of sort...... %s\\n\" % datetime.datetime.now())\n\n\nif __name__ == \"__main__\":\n\n main(sys.argv[1:])\n","repo_name":"1enemyleft/Hadoop-Sort","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70148580858","text":"import math\nfrom PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import QtGui as qtg\n\nclass Position():\n \"\"\"\n I made this position for holding a position in 3D space (i.e., a point). I've given it some ability to do\n vector arithmitic and vector algebra (i.e., a dot product). I could have used a numpy array, but I wanted\n to create my own. This class uses operator overloading as explained in the class.\n \"\"\"\n def __init__(self, pos=None, x=None, y=None, z=None):\n \"\"\"\n x, y, and z have the expected meanings\n :param pos: a tuple (x,y,z)\n :param x: float\n :param y: float\n :param z: float\n \"\"\"\n #set default values\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n #unpack position from a tuple if given\n if pos is not None:\n self.x, self.y, self.z = pos\n #override the x,y,z defaults if they are given as arguments\n self.x=x if x is not None else self.x\n self.y=y if y is not None else self.y\n self.z=z if z is not None else self.z\n\n #region operator overloads $NEW$ 4/7/21\n def __eq__(self, other):\n if self.x != other.x:\n return False\n if self.y != other.y:\n return False\n if self.z != other.z:\n return False\n return True\n\n # this is overloading the addition operator. Allows me to add Position objects with simple math: c=a+b, where\n # a, b, and c are all position objects.\n def __add__(self, other):\n return Position((self.x+other.x, self.y+other.y,self.z+other.z))\n\n #this overloads the iterative add operator\n def __iadd__(self, other):\n if other in (float, int):\n self.x += other\n self.y += other\n self.z += other\n return self\n if type(other) == Position:\n self.x += other.x\n self.y += other.y\n self.z += other.z\n return self\n\n # this is overloading the subtraction operator. Allows me to subtract Positions. (i.e., c=b-a)\n def __sub__(self, other):\n return Position((self.x-other.x, self.y-other.y,self.z-other.z))\n\n #this overloads the iterative subtraction operator\n def __isub__(self, other):\n if other in (float, int):\n self.x -= other\n self.y -= other\n self.z -= other\n return self\n if type(other) == Position:\n self.x -= other.x\n self.y -= other.y\n self.z -= other.z\n return self\n\n # this is overloading the multiply operator. Allows me to multiply a scalar or do a dot product (i.e., b=s*a or c=b*a)\n def __mul__(self, other):\n if type(other) in (float, int):\n return Position((self.x*other, self.y*other, self.z*other))\n if type(other) is Position:\n return Position((self.x*other.x, self.y*other.y, self.z*other.z))\n\n # this is overloading the __rmul__ operator so that s*Pt works.\n def __rmul__(self,other):\n return self*other\n\n # this is overloading the *= operator. Same as a = Position((a.x*other, a.y*other, a.z*other))\n def __imul__(self, other):\n if type(other) in (float, int):\n self.x *= other\n self.y *= other\n self.z *= other\n return self\n\n # this is overloading the division operator. Allows me to divide by a scalar (i.e., b=a/s)\n def __truediv__(self, other):\n if type(other) in (float, int):\n return Position((self.x/other, self.y/other, self.z/other))\n\n # this is overloading the /= operator. Same as a = Position((a.x/other, a.y/other, a.z/other))\n def __idiv__(self, other):\n if type(other) in (float,int):\n self.x/=other\n self.y/=other\n self.z/=other\n return self\n #endregion\n\n def set(self,strXYZ=None, tupXYZ=None):\n #set position by string or tuple\n if strXYZ is not None:\n cells=strXYZ.replace('(','').replace(')','').strip().split(',')\n x, y, z = float(cells[0]), float(cells[1]), float(cells[2])\n self.x=float(x)\n self.y=float(y)\n self.z=float(z)\n elif tupXYZ is not None:\n x, y, z = tupXYZ #[0], strXYZ[1],strXYZ[2]\n self.x=float(x)\n self.y=float(y)\n self.z=float(z)\n\n def getTup(self): #return (x,y,z) as a tuple\n return (self.x, self.y, self.z)\n\n def getStr(self, nPlaces=3):\n return '{}, {}, {}'.format(round(self.x, nPlaces), round(self.y,nPlaces), round(self.z, nPlaces))\n\n def mag(self): # normal way to calculate magnitude of a vector\n return (self.x**2+self.y**2+self.z**2)**0.5\n\n def normalize(self): # typical way to normalize to a unit vector\n l=self.mag()\n if l<=0.0:\n return\n self.__idiv__(l)\n\n def getAngleRad(self):\n \"\"\"\n Gets angle of position relative to an origin (0,0) in the x-y plane\n :return: angle in x-y plane in radians\n \"\"\"\n l=self.mag()\n if l<=0.0:\n return 0\n if self.y>=0.0:\n return math.acos(self.x/l)\n return 2.0*math.pi-math.acos(self.x/l)\n\n def getAngleDeg(self):\n \"\"\"\n Gets angle of position relative to an origin (0,0) in the x-y plane\n :return: angle in x-y plane in degrees\n \"\"\"\n return 180.0/math.pi*self.getAngleRad()\n\nclass Material():\n def __init__(self, uts=None, ys=None, modulus=None, staticFactor=None):\n self.uts = uts\n self.ys = ys\n self.E=modulus\n self.staticFactor=staticFactor\n\nclass Node():\n def __init__(self, name=None, position=None):\n self.name = name\n self.position = position if position is not None else Position()\n\n def __eq__(self, other):\n \"\"\"\n This overloads the == operator such that I can compare two nodes to see if they are the same node. This is\n useful when reading in nodes to make sure I don't get duplicate nodes\n \"\"\"\n if self.name != other.name:\n return False\n if self.position != other.position:\n return False\n return True\n\nclass Link():\n def __init__(self,name=\"\", node1=\"1\", node2=\"2\", length=None, angleRad=None):\n \"\"\"\n Basic definition of a link contains a name and names of node1 and node2\n \"\"\"\n self.name=\"\"\n self.node1_Name=node1\n self.node2_Name=node2\n self.length=None\n self.angleRad=None\n\n def __eq__(self, other):\n \"\"\"\n This overloads the == operator for comparing equivalence of two links.\n \"\"\"\n if self.node1_Name != other.node1_Name: return False\n if self.node2_Name != other.node2_Name: return False\n if self.length != other.length: return False\n if self.angleRad != other.angleRad: return False\n return True\n\n def set(self, node1=None, node2=None, length=None, angleRad=None):\n self.node1_Name=node1\n self.node2_Name=node2\n self.length=length\n self.angleRad=angleRad\n\nclass TrussModel():\n def __init__(self):\n self.title=None\n self.links=[]\n self.nodes=[]\n self.material=Material()\n\n def getNode(self, name):\n for n in self.nodes:\n if n.name == name:\n return n\n\nclass TrussController():\n def __init__(self):\n self.truss=TrussModel()\n self.view=TrussView()\n\n def ImportFromFile(self, data):\n \"\"\"\n Data is the list of strings read from the data file.\n We need to parse this file and build the lists of nodes and links that make up the truss.\n Also, we need to parse the lines that give the truss title, material (and strength values).\n\n Reading Nodes:\n I create a new node object and the set its name and position.x and position.y values. Next, I check to see\n if the list of nodes in the truss model has this node with self.hasNode(n.name). If the trussModel does not\n contain the node, I append it to the list of nodes\n\n Reading Links:\n The links should come after the nodes. Each link has a name and two node names. See method addLink\n \"\"\"\n #$JES MISSING CODE HERE$\n\n self.calcLinkVals()\n self.displayReport()\n self.drawTruss()\n\n def hasNode(self, name):\n for n in self.truss.nodes:\n if n.name==name:\n return True\n return False\n\n def addNode(self, node):\n self.truss.nodes.append(node)\n\n def getNode(self, name):\n for n in self.truss.nodes:\n if n.name == name:\n return n\n\n def addLink(self, link):\n self.truss.links.append(link)\n\n def calcLinkVals(self):\n for l in self.truss.links:\n n1=None\n n2=None\n if self.hasNode(l.node1_Name):\n n1=self.getNode(l.node1_Name)\n if self.hasNode(l.node2_Name):\n n2=self.getNode(l.node2_Name)\n if n1 is not None and n2 is not None:\n r=n2.position-n1.position\n l.length=r.mag()\n l.angleRad=r.getAngleRad()\n\n def setDisplayWidgets(self, args):\n self.view.setDisplayWidgets(args)\n\n def displayReport(self):\n self.view.displayReport(truss=self.truss)\n\n def drawTruss(self):\n self.view.buildScene(truss=self.truss)\n\nclass TrussView():\n def __init__(self):\n #setup widgets for display. redefine these when you have a gui to work with using setDisplayWidgets\n self.scene=qtw.QGraphicsScene()\n self.le_LongLinkName=qtw.QLineEdit()\n self.le_LongLinkNode1=qtw.QLineEdit()\n self.le_LongLinkNode2=qtw.QLineEdit()\n self.le_LongLinkLength=qtw.QLineEdit()\n self.te_Report=qtw.QTextEdit()\n self.gv=qtw.QGraphicsView()\n\n #region setup pens and brushes and scene\n #make the pens first\n #a thick darkGray pen\n self.penLink = qtg.QPen(qtc.Qt.darkGray)\n self.penLink.setWidth(4)\n #a medium darkBlue pen\n self.penNode = qtg.QPen(qtc.Qt.darkBlue)\n self.penNode.setStyle(qtc.Qt.SolidLine)\n self.penNode.setWidth(1)\n #a pen for the grid lines\n self.penGridLines = qtg.QPen()\n self.penGridLines.setWidth(1)\n # I wanted to make the grid lines more subtle, so set alpha=25\n self.penGridLines.setColor(qtg.QColor.fromHsv(197, 144, 228, alpha=50))\n #now make some brushes\n #build a brush for filling with solid red\n self.brushFill = qtg.QBrush(qtc.Qt.darkRed)\n #a brush that makes a hatch pattern\n self.brushNode = qtg.QBrush(qtg.QColor.fromCmyk(0,0,255,0,alpha=100))\n #a brush for the background of my grid\n self.brushGrid = qtg.QBrush(qtg.QColor.fromHsv(87, 98, 245, alpha=128))\n #endregion\n \n def setDisplayWidgets(self, args):\n self.te_Report=args[0]\n self.le_LongLinkName=args[1]\n self.le_LongLinkNode1=args[2]\n self.le_LongLinkNode2=args[3]\n self.le_LongLinkLength=args[4]\n self.gv=args[5]\n self.gv.setScene(self.scene)\n\n def displayReport(self, truss=None):\n st='\\tTruss Design Report\\n'\n st+='Title: {}\\n'.format(truss.title)\n st+='Static Factor of Safety: {:0.2f}\\n'.format(truss.material.staticFactor)\n st+='Ultimate Strength: {:0.2f}\\n'.format(truss.material.uts)\n st+='Yield Strength: {:0.2f}\\n'.format(truss.material.ys)\n st+='Modulus of Elasticity: {:0.2f}\\n'.format(truss.material.E)\n st+='_____________Link Summary________________\\n'\n st+='Link\\t(1)\\t(2)\\tLength\\tAngle\\n'\n longest=None\n for l in truss.links:\n if longest is None or l.length>longest.length:\n longest=l\n st+='{}\\t{}\\t{}\\t{:0.2f}\\t{:0.2f}\\n'.format(l.name, l.node1_Name, l.node2_Name, l.length, l.angleRad)\n self.te_Report.setText(st)\n self.le_LongLinkName.setText(longest.name)\n self.le_LongLinkLength.setText(\"{:0.2f}\".format(longest.length))\n self.le_LongLinkNode1.setText(longest.node1_Name)\n self.le_LongLinkNode2.setText(longest.node2_Name)\n \n def buildScene(self, truss=None):\n #Create a QRect() object to help with drawing the background grid.\n rect=qtc.QRect()\n rect.setTop(truss.nodes[0].position.y)\n rect.setLeft(truss.nodes[0].position.x)\n rect.setHeight(0)\n rect.setWidth(0)\n for n in truss.nodes:\n if n.position.y>rect.top(): rect.setTop(n.position.y)\n if n.position.yrect.right(): rect.setRight(n.position.x)\n if n.position.x\n # By default, Flask server is only accessible from the localhost.\n # In case of starting server from docker, the container is the localhost (not your/my machine),\n # and browser requests are originating from outside the container.\n # setting host='0.0.0.0' or localhost parameter will not make the server accessible from external IPs.\n # So, you need to get the ip address (192.168.99.100) of the docker as explained in Instructions.txt\n # and enter on your browser e.g. http://192.168.99.100:12345/add_value\n # -------------------------------------------------------------------------------------------------->\n # However when running the server in local machine terminal (say Pycharm Terminal),\n # enter http://localhost:12345/add_value on your browser\n rest.app.run(debug=True, use_reloader=False, host=\"0.0.0.0\", port=12345)\n\n\ndef main():\n # Run REST API to enable client from Web\n thread_rest_api = threading.Thread(target=thread_rest_func)\n thread_rest_api.start()\n\n host = \"localhost\"\n # REST Server port = 12345\n # TCP Socket Server port = 6000\n port = 6000\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Avoid bind() exception: OSError: [WinError 10048] Address already in use\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n print('Server: Bind socket to host=', host, 'port=', port)\n sock.bind((host, port))\n\n\n # put the socket in listening mode\n sock.listen(5)\n print(\"Server: Socket is listening...\")\n\n except socket.error:\n print('Socket Creation Failed, Exiting...')\n sys.exit(1)\n\n # Forever loop - will accept multiple clients\n while True:\n\n try:\n # Accept connection from client\n connection, address = sock.accept()\n\n print('Server: New connection received to host :', address[0], ' and port :', address[1])\n\n # Start a new thread for each client connection\n start_new_thread(tcp_thread_func, (connection,))\n\n except socket.error:\n print('TCP Server: Socket connection request from client failed, Continuing to listen for clients...')\n\n except:\n print('TCP Server: Exception thrown while Socket connection request from client and starting thread')\n print('Continuing to listen for clients...')\n\n # socket will never be closed, server will run always\n # sock.close()\n\n\nif __name__ == '__main__':\n main()","repo_name":"sheikhazad/Python-ClientServer-RESTful-MultiThread-Mutex","sub_path":"CryptoServer.py","file_name":"CryptoServer.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"75101505334","text":"import time\nfrom io import BytesIO\n\nfrom dulwich import __version__ as dulwich_version\nfrom dulwich.objects import Blob\n\nfrom .. import __version__ as brz_version\nfrom .. import branch as _mod_branch\nfrom .. import diff as _mod_diff\nfrom .. import errors, osutils\nfrom .. import revision as _mod_revision\nfrom ..merge_directive import BaseMergeDirective\nfrom .mapping import object_mode\nfrom .object_store import get_object_store\n\nversion_tail = \"Breezy %s, dulwich %d.%d.%d\" % (\n (brz_version, ) + dulwich_version[:3])\n\n\nclass GitDiffTree(_mod_diff.DiffTree):\n \"\"\"Provides a text representation between two trees, formatted for svn.\"\"\"\n\n def _show_diff(self, specific_files, extra_trees):\n from dulwich.patch import write_blob_diff\n iterator = self.new_tree.iter_changes(\n self.old_tree, specific_files=specific_files,\n extra_trees=extra_trees, require_versioned=True)\n has_changes = 0\n\n def get_encoded_path(path):\n if path is not None:\n return path.encode(self.path_encoding, \"replace\")\n\n def get_file_mode(tree, path, kind, executable):\n if path is None:\n return 0\n return object_mode(kind, executable)\n\n def get_blob(present, tree, path):\n if present:\n with tree.get_file(path) as f:\n return Blob.from_string(f.read())\n else:\n return None\n trees = (self.old_tree, self.new_tree)\n for change in iterator:\n # The root does not get diffed, and items with no known kind (that\n # is, missing) in both trees are skipped as well.\n if change.parent_id == (None, None) or change.kind == (None, None):\n continue\n path_encoded = (get_encoded_path(change.path[0]),\n get_encoded_path(change.path[1]))\n present = ((change.kind[0] not in (None, 'directory')),\n (change.kind[1] not in (None, 'directory')))\n if not present[0] and not present[1]:\n continue\n contents = (get_blob(present[0], trees[0], change.path[0]),\n get_blob(present[1], trees[1], change.path[1]))\n renamed = (change.parent_id[0], change.name[0]) != (change.parent_id[1], change.name[1])\n mode = (get_file_mode(trees[0], path_encoded[0],\n change.kind[0], change.executable[0]),\n get_file_mode(trees[1], path_encoded[1],\n change.kind[1], change.executable[1]))\n write_blob_diff(self.to_file,\n (path_encoded[0], mode[0], contents[0]),\n (path_encoded[1], mode[1], contents[1]))\n has_changes |= (change.changed_content or renamed)\n return has_changes\n\n\ndef generate_patch_filename(num, summary):\n return \"%04d-%s.patch\" % (num, summary.replace(\"/\", \"_\").rstrip(\".\"))\n\n\nclass GitMergeDirective(BaseMergeDirective):\n\n multiple_output_files = True\n\n def __init__(self, revision_id, testament_sha1, time, timezone,\n target_branch, source_branch=None, message=None,\n patches=None, local_target_branch=None):\n super().__init__(\n revision_id=revision_id, testament_sha1=testament_sha1, time=time,\n timezone=timezone, target_branch=target_branch, patch=None,\n source_branch=source_branch, message=message, bundle=None)\n self.patches = patches\n\n def to_lines(self):\n return self.patch.splitlines(True)\n\n def to_files(self):\n return ((summary, patch.splitlines(True)) for (summary, patch) in self.patches)\n\n @classmethod\n def _generate_commit(cls, repository, revision_id, num, total,\n context=_mod_diff.DEFAULT_CONTEXT_AMOUNT):\n s = BytesIO()\n store = get_object_store(repository)\n with store.lock_read():\n commit = store[repository.lookup_bzr_revision_id(revision_id)[0]]\n from dulwich.patch import get_summary, write_commit_patch\n try:\n lhs_parent = repository.get_revision(revision_id).parent_ids[0]\n except IndexError:\n lhs_parent = _mod_revision.NULL_REVISION\n tree_1 = repository.revision_tree(lhs_parent)\n tree_2 = repository.revision_tree(revision_id)\n contents = BytesIO()\n differ = GitDiffTree.from_trees_options(\n tree_1, tree_2, contents, 'utf8', None, 'a/', 'b/', None,\n context_lines=context)\n differ.show_diff(None, None)\n write_commit_patch(s, commit, contents.getvalue(), (num, total),\n version_tail)\n summary = generate_patch_filename(num, get_summary(commit))\n return summary, s.getvalue()\n\n @classmethod\n def from_objects(cls, repository, revision_id, time, timezone,\n target_branch, local_target_branch=None,\n public_branch=None, message=None):\n patches = []\n submit_branch = _mod_branch.Branch.open(target_branch)\n with submit_branch.lock_read():\n submit_revision_id = submit_branch.last_revision()\n repository.fetch(submit_branch.repository, submit_revision_id)\n graph = repository.get_graph()\n todo = graph.find_difference(submit_revision_id, revision_id)[1]\n total = len(todo)\n for i, revid in enumerate(graph.iter_topo_order(todo)):\n patches.append(cls._generate_commit(repository, revid, i + 1,\n total))\n return cls(revision_id, None, time, timezone,\n target_branch=target_branch, source_branch=public_branch,\n message=message, patches=patches)\n\n\ndef send_git(branch, revision_id, submit_branch, public_branch, no_patch,\n no_bundle, message, base_revision_id, local_target_branch=None):\n if no_patch:\n raise errors.CommandError(\n \"no patch not supported for git-am style patches\")\n if no_bundle:\n raise errors.CommandError(\n \"no bundle not supported for git-am style patches\")\n return GitMergeDirective.from_objects(\n repository=branch.repository, revision_id=revision_id, time=time.time(),\n timezone=osutils.local_time_offset(), target_branch=submit_branch,\n public_branch=public_branch, message=message,\n local_target_branch=local_target_branch)\n","repo_name":"breezy-team/breezy","sub_path":"breezy/git/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":6553,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"22"} +{"seq_id":"73880120377","text":"from django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom karer.models import Order\nfrom tax_officer.models import Violation\n\n\n@receiver(pre_save, sender=Order)\n@receiver(pre_save, sender=Violation)\ndef delete_file_on_change(sender, instance, **kwargs):\n if instance.pk:\n try:\n old_file = sender.objects.get(pk=instance.pk).car_photo\n except sender.DoesNotExist:\n return\n new_file = instance.car_photo\n if not old_file == new_file:\n old_file.delete(save=False)\n","repo_name":"bekmuxtorov/project","sub_path":"core/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22253375105","text":"import json\nfrom datetime import datetime\nfrom django.http import JsonResponse, HttpResponseBadRequest, HttpResponse\nfrom voter.models import ChangeTracker\n\n\nclass Serializer(json.JSONEncoder):\n\n def __init__(self, *args, **kwargs):\n kwargs['indent'] = 2\n super().__init__(*args, **kwargs)\n\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.isoformat()\n raise TypeError(\"Cannot serialize type '%s': %r\" % (obj.__class__.__name__, obj))\n\n\ndef get_voter_basic(voter):\n d = voter.build_current()\n return {\n \"full_name\": ' '.join((\n d.get('first_name', ''),\n d.get('midl_name', ''),\n d.get('last_name', ''),\n )),\n \"age\": d.get('age', ''),\n }\n\n\ndef changes(request):\n \"\"\"API endpoint that allows querying 100s+ millions of voter records to find changes\n a requestor might care about.\n\n Querystring Parameteres:\n `changed` A data field to search for changes in between consecutive records for voters (required)\n `new` Only find results where the field's new value matches this parameter (optional)\n `limit` The number of voters to return, or fewer. (default: 10)\n \"\"\"\n\n start = datetime.now()\n if 'changed' not in request.GET:\n return HttpResponseBadRequest('{\"error\": \"`changed` is a required queryset parameter\"}')\n changed = request.GET['changed']\n new = request.GET.get('new')\n limit = int(request.GET.get('limit', '10')) or None\n\n # Find change records that include the given field, only showing the most recent record\n # for each voter, but prefetching the related voter and their entire change history.\n mod_records = ChangeTracker.objects.filter(\n op_code=ChangeTracker.OP_CODE_MODIFY,\n data__has_key=changed,\n )\\\n .prefetch_related('voter__changelog')\\\n .order_by('voter__pk', '-snapshot_dt')\\\n .distinct('voter__pk')\n\n # Don't include data that was just removed\n mod_records = mod_records.exclude(**{'data__' + changed: \"\"})\n\n # If requested, only include results where the field was changed to a specific new value\n # For example, changed=county_desc and new=DURHAM to find people who moved to Durham\n if new:\n mod_records = mod_records.filter(**{'data__' + changed: new})\n\n if request.GET.get('__debug'):\n return HttpResponse(mod_records.query)\n\n result = {}\n for c in mod_records[:limit]:\n r = {}\n prev = c.get_prev()\n\n r[\"new\"] = c.build_version()[changed]\n r[\"old\"] = prev.build_version().get(changed, '') if prev else ''\n r[\"when\"] = c.snapshot_dt\n r[\"voter\"] = get_voter_basic(c.voter)\n\n result[c.voter.ncid] = r\n\n result['_elapsed'] = (datetime.now() - start).total_seconds()\n\n return JsonResponse(result, encoder=Serializer)\n","repo_name":"NCVotes/voters-ingestor","sub_path":"voter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"11852234551","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Pylint doesn't play well with fixtures and dependency injection from pytest\n# pylint: disable=redefined-outer-name\n\nimport os\nimport pytest\n\nfrom buildstream.exceptions import ErrorDomain, LoadErrorReason\nfrom buildstream._testing import cli # pylint: disable=unused-import\n\nDATA_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\n@pytest.mark.parametrize(\n \"target,domain,reason,provenance\",\n [\n # When specifying a bad suffix on the command line we get a different error, we\n # catch this error earlier on in the load sequence while sorting out element and\n # artifact names and glob expressions.\n #\n (\"farm.pony\", ErrorDomain.STREAM, \"invalid-element-names\", None),\n ('The \"quoted\" pony.bst', ErrorDomain.LOAD, LoadErrorReason.BAD_CHARACTERS_IN_NAME, None),\n (\n \"bad-suffix-dep.bst\",\n ErrorDomain.LOAD,\n LoadErrorReason.BAD_ELEMENT_SUFFIX,\n \"bad-suffix-dep.bst [line 3 column 2]\",\n ),\n (\n \"bad-chars-dep.bst\",\n ErrorDomain.LOAD,\n LoadErrorReason.BAD_CHARACTERS_IN_NAME,\n \"bad-chars-dep.bst [line 3 column 2]\",\n ),\n ],\n ids=[\"toplevel-bad-suffix\", \"toplevel-bad-chars\", \"dependency-bad-suffix\", \"dependency-bad-chars\"],\n)\n@pytest.mark.datafiles(DATA_DIR)\ndef test_invalid_element_names(cli, datafiles, target, domain, reason, provenance):\n project = os.path.join(str(datafiles), \"elementnames\")\n result = cli.run(project=project, silent=True, args=[\"show\", target])\n result.assert_main_error(domain, reason)\n if provenance:\n assert provenance in result.stderr\n","repo_name":"ep-infosec/33_apache_buildstream","sub_path":"tests/format/elementnames.py","file_name":"elementnames.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3293692809","text":"#If you havent learnt functions yet\n\n#A dice game which is going to be HIGH or LOW\n#You are going to bet on HIGH or LOW or 7\n#You will be rolling 2 dice\n#You can quit if you have enough money or when you have lost all the money\n\n\nfrom random import randint\n\n\ncash = 100\nbet = 5\n\nwhile True:\n #The menu\n print(\"\\n---------------------------------\")\n print(\"Place your bet or enter X to exit\")\n print(\"1) High roll of 8-12 for bet x2\")\n print(\"2) Low roll of 2-6 for bet x2\")\n print(\"3) 7 roll for bet x3\")\n print(\"Cash on hand: \" + str(cash))\n print(\"------------------------------------\")\n choice = input(\"Make your choice: \")\n\n if choice == 'x' or choice == 'X':\n print(\"Thanks for playing and hope to see you back soon.\")\n break\n\n #the game logic\n elif choice in ['1', '2', '3']:\n #betting loop\n while True:\n try:\n bet = int(input(\"Place your bet: \"))\n except:\n print(\"Please enter a number\")\n continue\n if bet > 0 and bet <= cash:\n break\n else:\n print(\"That bet is not valid\")\n\n\n roll = randint(1, 6) + randint(1, 6)\n print(\"You rolled a: \" + str(roll))\n\n if roll > 7 and choice == '1':\n print(\"You win!\")\n cash = cash + bet #1 and 2 conditions can be combined\n elif roll < 7 and choice == '2':\n print(\"You win!\")\n cash = cash + bet\n elif roll == 7 and choice == '3':\n print(\"You win!\")\n cash = cash + (2 * bet)\n else:\n print(\"You lose!\")\n cash = cash - bet\n if cash == 0:\n print(\"You're out of money, see ya next time\")\n break\n else:\n print(\"Enter a valid choice!\")\n continue\n","repo_name":"idatenKamiya/python_learn","sub_path":"4HiLodice_game.py","file_name":"4HiLodice_game.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14988664176","text":"import chainer\nimport chainer.functions as F\nimport cupy as cp\nimport numpy as np\nimport skimage.color\nimport skimage.io\nimport os\nfrom PIL import Image\nfrom chainer import Variable\nfrom ganhacks import noise\nimport save_parameter\n\nclass DCGANUpdater(chainer.training.StandardUpdater):\n\n def __init__(self, *args, **kwargs):\n self.gen, self.dis = kwargs.pop('models')\n super(DCGANUpdater, self).__init__(*args, **kwargs)\n\n def loss_dis(self, dis, y_fake, y_real):\n batchsize = y_fake.data.shape[0]\n # L1 = F.sum(F.softplus(-y_real)) / batchsize\n L1 = F.mean_squared_error(y_real, noise.noisy_label(batchsize, mode='real').reshape(batchsize, 1))\n # L2 = F.sum(F.softplus(y_fake)) / batchsize\n L2 = F.mean_squared_error(y_fake, noise.noisy_label(batchsize, mode='fake').reshape(batchsize, 1))\n loss = L1 + L2\n chainer.report({'loss': loss, 'real_loss': L1, 'fake_loss': L2}, dis)\n return loss\n\n def loss_gen(self, gen, y_fake, x_real, x_fake):\n batchsize = y_fake.data.shape[0]\n # loss = F.sum(F.softplus(-y_fake)) / batchsize\n fake_loss = F.mean_squared_error(y_fake, cp.ones((batchsize, 1), cp.float32))\n distance = F.mean_absolute_error(x_real, x_fake)\n loss = fake_loss + distance\n chainer.report({'loss': loss, 'fake_loss': fake_loss, 'distance': distance}, gen)\n return loss\n\n def array_to_image(self, arr):\n return np.asarray((chainer.cuda.to_cpu(cp.transpose((cp.clip(arr, -1, 1) + 1) * 127.5, (1, 2, 0)))),\n dtype=np.uint8)\n\n def update_core(self):\n iteration = self.iteration+1\n gen_optimizer = self.get_optimizer('gen')\n dis_optimizer = self.get_optimizer('dis')\n batch = self.get_iterator('main').next()\n gen, dis = self.gen, self.dis\n z, x = self.converter(batch, self.device)\n # xp = chainer.cuda.get_array_module(x_real.data)\n # z = Variable(xp.asarray(gen.make_hidden(batchsize)))\n x_real = Variable(x)\n x_fake = gen(Variable(z), train=True)\n #print(\"[EXAMPLE_NO]:\",save_parameter.load_example_number.example_no)\n epoch = self.epoch\n no_frames = save_parameter.load_example_number.number_of_frames\n if iteration % no_frames == 0:\n print(\"[EPOCH]:\", self.epoch)\n else:\n print(\"[EPOCH]:\", self.epoch + 1)\n print(\"[ITERATION]:\", iteration)\n print(\"[FRAMES_NO]:\", no_frames)\n# print(\"[SAVE_ITERATION]:\",save_parameter.load_example_number.img_save_iteration)\n if iteration in save_parameter.load_example_number.img_save_iteration:\n# z_image = np.concatenate([self.array_to_image(z[0])] * 1, axis=2)\n# x_real_image = self.array_to_image(x[0])\n# x_fake_image = self.array_to_image(x_fake.data[0])\n# concatenated_images = np.concatenate([z_image, x_fake_image, x_real_image], axis=1)\n# skimage.io.imsave('./sample/{}.jpg'.format(self.epoch), concatenated_images)\n z_image = np.concatenate([self.array_to_image(z[0])] * 1, axis=2)\n x_real_image = self.array_to_image(x[0])\n x_fake_image = self.array_to_image(x_fake.data[0])\n #print(z.shape, x.shape, )\n concatenated_images = np.concatenate([z_image, x_fake_image, x_real_image], axis=1)\n if self.epoch % 10 == 9:\n skimage.io.imsave('./sample/{}_{:0>1}.jpg'.format(self.epoch+1, save_parameter.load_example_number.example_no), concatenated_images)\n else:\n skimage.io.imsave('./sample/{}_{:0>1}.jpg'.format(self.epoch, save_parameter.load_example_number.example_no), concatenated_images)\n skimage.io.imsave('./sample/current.jpg'.format(self.epoch), concatenated_images)\n\n y_real = dis(Variable(cp.concatenate((z, x), axis=1)), train=True)\n y_fake = dis(Variable(cp.concatenate((z, x_fake.data), axis=1)), train=True)\n\n gen_optimizer.update(self.loss_gen, gen, y_fake, x_real, x_fake)\n dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)\n","repo_name":"ridgei/ravea_colourise","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39240953756","text":"from nltk import FreqDist, bigrams, ngrams, pos_tag, word_tokenize, sent_tokenize\nfrom nltk.book import text6\n\ntokens = word_tokenize(\"Here is some not very interesting text. Let's get its tags!\")\nprint(pos_tag(tokens))\n\ntokens = word_tokenize(\"The dust was thick so he had to dust.\")\nprint(pos_tag(tokens))\n\nfdist = FreqDist(text6)\nprint(fdist.most_common(10))\n\nbigrams = bigrams(text6)\nbigramDist = FreqDist(bigrams)\nprint(bigramDist[(\"Sir\", \"Robin\")])\n\nfourgrams = ngrams(text6, 4)\nfourgramsDist = FreqDist(fourgrams)\nprint(fourgramsDist[(\"father\", \"smelt\", \"of\", \"elderberries\")])\n\nfor fourgram in fourgrams:\n if fourgram[0] == \"coconut\":\n print(fourgram)\n\nsentences = sent_tokenize(\"Google is one of the best companies in the world. People constantly google stuff.\")\nnouns = [\"NN\", \"NNS\", \"NNP\", \"NNPS\"]\n\nfor sentence in sentences:\n if \"google\" in sentence.lower():\n taggedWords = pos_tag(word_tokenize(sentence))\n for word in taggedWords:\n if word[0].lower() == \"google\" and word[1] in nouns:\n print(sentence)","repo_name":"iluxonchik/webscraping-with-python-book","sub_path":"Chapter_8/natlan.py","file_name":"natlan.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16582421932","text":"n = int(input())\narr = [[1e9]*n for _ in range(n)]\nwhile True:\n a, b = map(int, input().split())\n if a == -1 and b == -1 :\n break\n a -= 1\n b -= 1\n arr[a][b] = arr[b][a] = 1\n\nfor k in range(n):\n for i in range(n):\n for j in range(n):\n if i == j :\n arr[i][j] = 0\n elif arr[i][j] > arr[i][k] + arr[k][j]:\n arr[i][j] = arr[i][k] + arr[k][j]\n\nperson = [0] * n\n\nfor i in range(n):\n person[i] = max(arr[i])\n#print(person)\nprint(min(person), person.count(min(person)))\nfor i in range(len(person)):\n if person[i] == min(person):\n print(i+1, end=' ')","repo_name":"gumsu/BOJ","sub_path":"2660_회장뽑기.py","file_name":"2660_회장뽑기.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41408475049","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\r\n\r\ndef PolygonArea(corners):\r\n # Only for ordered vertices\r\n n = len(corners)\r\n area = 0.0\r\n for i in range(n):\r\n j = (i + 1) % n\r\n area += corners[i][0] * corners[j][1]\r\n area -= corners[j][0] * corners[i][1]\r\n area = abs(area) / 2.0\r\n return area\r\n\r\n\r\ndef computeAUC(scores_noise, scores_signal, gammas):\r\n \"\"\"Computes and plots AUC scores.\r\n\r\n Parameters\r\n ----------\r\n scores_noise : array\r\n Array of shape (n_samp, n_gammas) containing false positives of\r\n classifation.\r\n scores_noise : array\r\n Array of shape (n_samp, n_gammas) containing true positives of\r\n classifation.\r\n gammas : float or array\r\n Conducatance parameters used for optimisation.\r\n\r\n Returns\r\n -------\r\n aucs : list\r\n list of shape (n_gammas) containg the AUC score for each gamma.\r\n\r\n \"\"\"\r\n\r\n gamma_pts = 500\r\n aucs = np.zeros((gammas.shape[0]))\r\n n, m = scores_signal.shape\r\n\r\n for gind in range(gammas.shape[0]):\r\n th_range = np.linspace(scores_noise[:, gind].mean() / m,\r\n scores_signal[:, gind].mean() * m,\r\n gamma_pts)\r\n tps = np.zeros((gamma_pts))\r\n fps = np.zeros((gamma_pts))\r\n\r\n\r\n for thind in range(gamma_pts):\r\n th = th_range[thind]\r\n tps[thind] = (scores_signal[:, gind] > th).mean()\r\n fps[thind] = (scores_noise[:, gind] > th).mean()\r\n\r\n tps = np.concatenate((tps, np.array([0, 1, 0])),0)\r\n fps = np.concatenate((fps, np.array([0, 1, 1])),0)\r\n\r\n # Create 2-D ndarray of points\r\n points = np.stack((fps,tps),1)\r\n\r\n points = np.stack((np.concatenate((fps, np.array([1])), 0),\r\n np.concatenate((tps, np.array([0])), 0)),1)\r\n\r\n hull = ConvexHull(points)\r\n # Only vertices to get PolygonArea\r\n polygon_coords = [(x,y) for (x,y) in zip(points[hull.vertices][:,0], points[hull.vertices][:,1])]\r\n\r\n # Area of polygon = auc = Area enclosed by vertices\r\n auc = PolygonArea(polygon_coords)\r\n\r\n aucs[gind] = auc\r\n\r\n # Plot and save ROC-curve\r\n plt.figure()\r\n plt.plot(points[hull.vertices][:,0], points[hull.vertices][:,1], 'o')\r\n for simplex in hull.simplices:\r\n plt.plot(points[simplex, 0], points[simplex, 1], 'k-')\r\n\r\n return aucs\r\n","repo_name":"georgeyiasemis/Mirror-Descent-and-Interacting-Mirror-Descent","sub_path":"Connected Subgraph Detection on SDPs/startOpt/computation.py","file_name":"computation.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"9757633862","text":"# 프로그래머스\n# 2019 카카오 개발자 겨울 인턴십\n# 튜플\n\ndef solution(s):\n nums = eval(s[1:-1])\n if len(nums) > 1:\n nums = sorted(nums, key=lambda n: sum(n))\n else:\n return list(nums)\n\n result = []\n for num in nums:\n for n in num:\n if n not in result:\n result.append(n)\n return result","repo_name":"wesley-94/Algorithm_study","sub_path":"for_coding_test_python/programmers/튜플.py","file_name":"튜플.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70412746295","text":"# from AHPLib import *\nfrom AhpAnpLib import inputs_AHPLib as input\nfrom AhpAnpLib import structs_AHPLib as str\nfrom AhpAnpLib import calcs_AHPLib as calc\n\n\nshoesModel=str.Model(\"Choose Shoes\")\n\n# check\nprint(shoesModel)\n\n# making a goal node\ngoal_node=str.Node(\"GoalNode\", 1) # IDはあまり重要じゃないみたい?\nprint(goal_node)\n\n# making criteria\nquality=str.Node(\"Quality\", 2)\nstyle=str.Node(\"Style\", 3)\nweight=str.Node(\"Weight\", 4)\nprice=str.Node(\"Price\", 5)\n\n# check\nprint(quality, \"\\n\", style, \"\\n\", weight, \"\\n\", price)\n\n# making sub-criteria\neasy_of_putting_on=str.Node(\"EasyOfPuttingOn\", 6)\nslip_resistance=str.Node(\"SlipResistance\", 7)\ndesign=str.Node(\"Design\", 8)\ncolor=str.Node(\"Color\", 9)\neasy_of_running=str.Node(\"EasyOfRunning\", 10)\nmaterial=str.Node(\"Material\", 11)\n\n# check\nprint(easy_of_putting_on, \"\\n\", slip_resistance, \"\\n\", design, \"\\n\", color, \"\\n\", easy_of_running, \"\\n\", material)\n\n# making alternatives\nalt1=str.Node(\"Nike\", 12)\nalt2=str.Node(\"Brooks\", 13)\nalt3=str.Node(\"Asics\", 14)\n\nprint(alt1)\nprint(alt2)\nprint(alt3)\n\n# making clusters\ncluster0=str.Cluster(\"1_Goal\", 1) \ncluster1=str.Cluster(\"2_Criteria\", 2)\ncluster2a=str.Cluster(\"3a_SubCriteria\", 4)\ncluster2b=str.Cluster(\"3b_SubCriteria\", 5)\ncluster2c=str.Cluster(\"3c_SubCriteria\", 6)\ncluster3=str.Cluster(\"4_Alternatives\", 7)\n\nprint(cluster0, \"\\n\", cluster1, \"\\n\", cluster2a, \"\\n\", cluster2b, \"\\n\", cluster2c, \"\\n\", cluster3)\n\n# Add the goal node to Goal cluster\ncluster0.addNode2Cluster(goal_node)\n\n# Add 4 nodes to Criteria cluster\ncluster1.addNode2Cluster(quality)\ncluster1.addNode2Cluster(style)\ncluster1.addNode2Cluster(weight)\ncluster1.addNode2Cluster(price)\n\n# Add 6 nodes to Sub Criteria cluster\ncluster2a.addNode2Cluster(easy_of_putting_on)\ncluster2a.addNode2Cluster(slip_resistance)\ncluster2b.addNode2Cluster(design)\ncluster2b.addNode2Cluster(color)\ncluster2c.addNode2Cluster(easy_of_running)\ncluster2c.addNode2Cluster(material)\n\n# Add the alternative nodes to Alternatives cluster\ncluster3.addNode2Cluster(alt1)\ncluster3.addNode2Cluster(alt2)\ncluster3.addNode2Cluster(alt3)\n\n# check\ncluster0.printWithNodes()\ncluster1.printWithNodes()\ncluster2a.printWithNodes()\ncluster2b.printWithNodes()\ncluster2c.printWithNodes()\ncluster3.printWithNodes()\n\n# Add the clusters to my model\nshoesModel.addCluster2Model(cluster0)\nshoesModel.addCluster2Model(cluster1)\nshoesModel.addCluster2Model(cluster2a)\nshoesModel.addCluster2Model(cluster2b)\nshoesModel.addCluster2Model(cluster2c)\nshoesModel.addCluster2Model(cluster3)\n\n# connect the goal node and Criteria cluster\nshoesModel.addNodeConnectionFromNodeToAllNodesOfCluster(\"GoalNode\",\"2_Criteria\")\n\n\n# connect Quality node to 2 nodes\nshoesModel.addNodeConnectionFromTo(\"Quality\",\"EasyOfPuttingOn\")\nshoesModel.addNodeConnectionFromTo(\"Quality\",\"SlipResistance\")\n\n# connect Style node to 2 nodes\nshoesModel.addNodeConnectionFromTo(\"Style\",\"Design\")\nshoesModel.addNodeConnectionFromTo(\"Style\",\"Color\")\n\n# connect Weight node to 2 nodes\nshoesModel.addNodeConnectionFromTo(\"Weight\",\"EasyOfRunning\")\nshoesModel.addNodeConnectionFromTo(\"Weight\",\"Material\")\n\n# connect Sub Criteria cluster to Alternatives cluster\nshoesModel.addNodeConnectionFromAllNodesToAllNodesOfCluster(\"3a_SubCriteria\",\"4_Alternatives\")\nshoesModel.addNodeConnectionFromAllNodesToAllNodesOfCluster(\"3b_SubCriteria\",\"4_Alternatives\")\nshoesModel.addNodeConnectionFromAllNodesToAllNodesOfCluster(\"3c_SubCriteria\",\"4_Alternatives\")\nshoesModel.addNodeConnectionFromNodeToAllNodesOfCluster(\"Price\",\"4_Alternatives\")\n\n# connect Price node to 3 alternative nodes directly\nshoesModel.addNodeConnectionFromTo(\"Price\",\"Nike\")\nshoesModel.addNodeConnectionFromTo(\"Price\",\"Brooks\")\nshoesModel.addNodeConnectionFromTo(\"Price\",\"Asics\")\n\n# check all connections in the model\nshoesModel.showAllNodeConnections()\n\n# text questionnaires\ninput.genFullQuest(shoesModel,\"important\")\ninput.genFirstLineAboveDiagQuest(shoesModel,\"dominant\")\ninput.genFirstLineQuest(shoesModel,\"likelihood\")\n\n# export Excel questionnaires\ninput.export4ExcelQuestFull(shoesModel, \"AHP_shoesModel__DM2022_Excel_empty.xlsx\")\ninputFilePath=\"AHP_shoesModel__DM2022_Excel_filledin.xlsx\"\noutputFilepath = \"AHP_shoesModel__DM2022_Excel_Results.xlsx\"\n\ncalc.calcAHPMatricesSave2File(shoesModel,inputFilePath,outputFilepath,True,False,True,True)\ncalc.sensitivityCellSupermatrixPlot(shoesModel,\"4_Alternatives\",outputFilepath,\"Quality\",\"Weight\",\"Style\",\"Price\")","repo_name":"CreativeDecisions/AhpAnpLib","sub_path":"Examples/DM2022_std_Models/AHP_shoesModel_dev_byDM2022.py","file_name":"AHP_shoesModel_dev_byDM2022.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38983168455","text":"class DCMethod:\n\n\tdef __init__(self, parent, name, data):\n\t\tself.parent = parent\n\t\tself.name = name\n\t\tself.data = data\n\n\t\t# Our index within a dclass.\n\t\tself.methodIndex = None\n\n\t\t# Method keywords.\n\t\tself.keywords = []\n\n\t\t# Method arguments.\n\t\tself.args = []\n\n\t\t# Process all the data passed to us.\n\t\tself.loadMethod()\n\n\tdef loadMethod(self):\n\t\t# Load keywords.\n\t\tkeywords = self.data[\"keywords\"].split(', ')\n\t\tfor keyword in keywords:\n\t\t\tself.keywords.append(keyword)\n\n\t\t# Load args.\n\t\tfor arg in self.data[\"args\"]:\n\t\t\t# Append the method type to our args.\n\t\t\tself.args.append(self.data[\"args\"][arg])\n\n\t\t\t# Calculate the method name into the hash.\n\t\t\tself.parent.parent.hashGenerator.addString(arg)\n\n\t\t# Calculate the method data into the hash.\n\t\tfor arg in self.args:\n\t\t\tself.parent.parent.hashGenerator.addArg(arg.split(' = ')[0])\n\n\tdef hasKeyword(self, name):\n\t\treturn name in self.keywords\n\n\tdef getIndex(self):\n\t\treturn self.methodIndex","repo_name":"Ardos-Project/ardos.libpython","sub_path":"ardos/dc/DCMethod.py","file_name":"DCMethod.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21321586002","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\"\"\"\n@author: __Evin__\n@file : auth.py\n@time : 2017/08/{10}\n@email: 879531595@qq.com\n\"\"\"\nimport json\n\nimport sys\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname (os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nfrom core import logger\nfrom core import accounts\n\ndef user_auth(func):\n def wrapper(*args,**kwargs):\n u = input('请输入账户名:')\n data = accounts.getJsonData(u)\n if data:\n if data['congelation'] == 'yes':\n for i in range(3):\n p = input('请输入密码:')\n if p == data['password']:\n print('您的账户还款时间是:【%s】'%data['AlsoMoney_time'])\n logger.user_op_logger(u,'【%s】登陆成功' % u)\n func(*args,**kwargs)\n return u\n else:\n print('密码错误!!!')\n else:\n data['congelation'] = 'no'\n if accounts.setJsonData(u,data):\n print('账户【%s】被冻结')\n logger.user_op_logger(u,'【%s】账户密码错误三次被冻结' % u)\n else:\n print('账户【%s】已被冻结,无法登陆')\n else:\n print('该账户不存在')\n return wrapper\n\n\n","repo_name":"879531595/ATM_of_python","sub_path":"atm/core/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"19843839884","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom tours.models import Ticket, Match, Gametype\n\n\ndef cart_contents(request):\n\n cart_items = []\n total = 0\n product_count = 0\n cart = request.session.get('cart', {})\n\n for item_id, item_data in cart.items():\n if isinstance(item_data, int):\n ticket = get_object_or_404(Ticket, pk=item_id)\n total += item_data * ticket.price\n product_count += item_data\n cart_items.append({\n 'item_id': item_id,\n 'quantity': item_data,\n 'ticket': ticket,\n\n })\n else:\n ticket = get_object_or_404(Ticket, pk=item_id)\n for day, quantity in item_data['items_by_day'].items():\n total += quantity * ticket.price\n product_count += quantity\n cart_items.append({\n 'item_id': item_id,\n 'quantity': quantity,\n 'ticket': ticket,\n 'day': day\n })\n\n if total > 100:\n delivery = 5\n else:\n delivery = total * Decimal(settings.DELIVERY_CHARGE / 100)\n\n discount = total * Decimal(settings.MEMBER_DISCOUNT / 100)\n\n if request.user.is_authenticated:\n grand_total = total - discount + delivery\n\n else:\n grand_total = total + delivery\n\n gametype = Gametype.objects.all()\n match = Match.objects.all()\n\n context = {\n 'cart_items': cart_items,\n 'total': total,\n 'product_count': product_count,\n 'delivery': delivery,\n 'discount': discount,\n 'grand_total': grand_total,\n 'match': match,\n 'gametype': gametype\n }\n\n return context\n","repo_name":"adamdelancey/ms4-england-cricket-tickets","sub_path":"cart/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8758750696","text":"### [문제 분석]\n# 목적 : 9개의 서로 다른 자연수가 주어질 때, 이들 중 최댓값을 찾고 그 최댓값이 몇 번째 수인지를 구하는 프로그램 작성\n# -> 첫째 줄에 최댓값을 출력하고, 둘째 줄에 최댓값이 몇 번째 수인지를 출력한다.\n\n### [문제 풀이]\n# 1단계 : 먼저 A라는 빈 리스트를 생성한다.\n# 2단계 : 9번 반복하는 for문안에서 빈 리스트에 9개의 자연수를 추가한다.\n# 3단계 : max함수를 이용해서 최댓값을 출력하고. 몇번째 인덱스인지 찾아 +1하여 출력한다.\n\nimport sys\ninput = sys.stdin.readline\n\n# 1단계\nA = []\n\n# 2단계\nfor i in range(9):\n A.append(int(input()))\n\n# 3단계\nprint(max(A))\nprint((A.index(max(A))+1))\n \n# 주의할 점\n# 1. 원하는 index번호를 찾기 위해서는 꼭 list.index() 해야한다. index()함수 앞에 리스트 이름 넣는 것을 잊지말자.\n# 2. 인덱스는 0부터 시작하기 때문에 인덱스 7은 8번째자리 수이다. 따라서 index +1을 해줘야한다.","repo_name":"chaeni1105/CodingTest-Study","sub_path":"Baekjoon/Level 4/2562_최댓값.py","file_name":"2562_최댓값.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21940619483","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom math import pi, sqrt, sin, cos, radians, degrees\n\nfrom constants import PLOT_DATA_ROUND, DPI_VALIE, PLOT_DISPLAY_SIZE, PLOT_LEGEND_FONT_SIZE, PLOT_MARKET_SIZE,\\\n PLOT_LINE_WIDTH, PLOT_TITLE_FONT_SIZE, PLOT_ANOTATE_FONT_SIZE, PLOT_ASIX_FONT_SIZE, I, E\n\n\ndef get_request_data(request):\n return dict(request.json if request.is_json else (request.form.items() or {}))\n\n\ndef display_plot(arguments, labels, color_line, title, annotate_step, points_count, alpha=None, show=None, axis=None):\n show = show or range(len(arguments))\n alpha = alpha or [1 for i in range(len(arguments))]\n plt.figure(figsize=PLOT_DISPLAY_SIZE, dpi = DPI_VALIE)\n for i in show:\n if i == \"\":\n continue\n plt.plot(*np.round(arguments[i], PLOT_DATA_ROUND), color_line[i], label=labels[i], markersize=PLOT_MARKET_SIZE, linewidth=PLOT_LINE_WIDTH, alpha=alpha[i])\n plt.rc('legend', fontsize=PLOT_LEGEND_FONT_SIZE)\n if annotate_step[i]:\n for j in range(0, len(arguments[i][0])-(1 if len(arguments[i][0]) != points_count else 0), annotate_step[i]):\n mid_x, mid_y = (sum(arguments[i][0])/len(arguments[i][0])), (sum(arguments[i][1])/len(arguments[i][1]))\n scale_x, scale_y = (abs(max(arguments[i][0])) + abs(min(arguments[i][0])))/2, (abs(max(arguments[i][1])) + abs(min(arguments[i][1])))/2\n try:\n if arguments[i][0][j] != 0:\n x_add = abs(arguments[i][0][j])/arguments[i][0][j]/25*abs(mid_x-arguments[i][0][j])\n else:\n x_add = 0\n except:\n x_add = 0\n try:\n if arguments[i][1][j] != 0:\n y_add = abs(arguments[i][1][j])/arguments[i][1][j]/25*abs(mid_y-arguments[i][1][j])\n else:\n y_add = 0\n except:\n y_add = 0\n plt.annotate(j+1, (arguments[i][0][j] - 0.025 * scale_x + x_add, arguments[i][1][j] - 0.015 * scale_x + y_add), fontsize=PLOT_ANOTATE_FONT_SIZE)\n # plt.annotate(j+1, (arguments[i][0][j], arguments[i][1][j]), fontsize=plot_annotate_font_size)\n plt.suptitle(title, fontsize=PLOT_TITLE_FONT_SIZE)\n\n if axis:\n print(axis)\n if axis == 2:\n plt.xlim([-0.25, 2.25])\n plt.ylim([-1.25, 1.25])\n plt.grid()\n else:\n plt.xlim([-2.5, 2.5])\n plt.ylim([-1.5, 1.5])\n\n # plt.xticks([])\n # plt.yticks([])\n plt.tick_params(labelsize=PLOT_ASIX_FONT_SIZE)\n plt.legend()\n plt.grid()\n plt.tight_layout()\n\n return plt\n\ndef vector_cords(M, X, Y):\n d = np.array([])\n\n for i in range(M):\n d = np.append(d, np.array([X[(i+1) % M]-X[i % M], Y[(i+1) % M]-Y[i % M]]))\n d = np.append(d, [d[0], d[1]])\n d = d.reshape(M+1, 2)\n return d\n\ndef vector_cords_not_loop(M, X, Y):\n d = np.array([])\n\n for i in range(M):\n d = np.append(d, np.array([X[i+1]-X[i], Y[i+1]-Y[i]]))\n d = d.reshape(M, 2)\n return d\n\n\ndef to_angle(x_a, y_a, x_b, y_b):\n sin_phi = (x_a * -y_b - y_a * -x_b) / (np.sqrt(x_a ** 2 + y_a ** 2) * np.sqrt(x_b ** 2 + y_b ** 2))\n cos_phi = (x_a * x_b + y_a * y_b) / (np.sqrt(x_a ** 2 + y_a ** 2) * np.sqrt(x_b ** 2 + y_b ** 2))\n return sin_phi, cos_phi\n\n\ndef align_value_count(M, d, curve_type):\n psis_sin = np.array([])\n psis = np.array([])\n\n if curve_type == \"not_loop\":\n M -= 1\n\n for i in range(M):\n psi = to_angle(d[i][0], d[i][1], d[i+1][0], d[i+1][1])\n if psi[0] > 0 and psi[1] > 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n elif psi[0] > 0 and psi[1] < 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, pi - np.arcsin(psi[0]))\n elif psi[0] < 0 and psi[1] < 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, -pi - np.arcsin(psi[0]))\n elif psi[0] < 0 and psi[1] > 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n elif round(psi[0], 5) == 0 and round(psi[1], 5) == 1:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n elif round(psi[0], 5) == 0 and round(psi[1], 5) == -1:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n elif round(psi[0], 5) == 1 and round(psi[1], 5) == 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n elif round(psi[0], 5) == -1 and round(psi[1], 5) == 0:\n psis_sin = np.append(psis_sin, psi[0])\n psis = np.append(psis, np.arcsin(psi[0]))\n else:\n print(psi, \"\\nERROR!!!\")\n\n return psis_sin, psis\n\n\ndef len_value_count(M, d):\n S = np.array([])\n for i in range(M):\n S = np.append(S, np.sqrt(d[i][0]**2 + d[i][1]**2))\n return S\n\n\ndef C_coef_value_count(M, S, C_proportion_coef):\n return (6/((sum(S)/M)**3))*C_proportion_coef\n\n\ndef matrix_coefs(M, S, psis, C, point_type, equation_type, P_align_coef=None, extra_psis=None, aligns=None):\n dims = 8 * M\n matrix = np.zeros((dims, dims))\n coefs = np.zeros((dims))\n\n if equation_type == \"not_loop\":\n for i in range(M):\n # Рівняння зв'язку\n matrix[i*8+2, i*8], matrix[i*8+2, i*8+1], \\\n matrix[i*8+2, i*8+2], matrix[i*8+2, i*8+3] = \\\n (1, S[i], S[i]**2/(2*I*E), S[i]**3/(6*I*E))\n\n matrix[i*8+3, i*8], matrix[i*8+3, i*8+1], \\\n matrix[i*8+3, i*8+2], matrix[i*8+3, i*8+3] = \\\n (0, 1, S[i]/(I*E), S[i]**2/(2*I*E))\n\n matrix[i*8+4, i*8], matrix[i*8+4, i*8+1], \\\n matrix[i*8+4, i*8+2], matrix[i*8+4, i*8+3] = \\\n (0, 0, 1, S[i])\n\n matrix[i*8+5, i*8], matrix[i*8+5, i*8+1], \\\n matrix[i*8+5, i*8+2], matrix[i*8+5, i*8+3] = \\\n (0, 0, 0, 1)\n\n matrix[i*8+2, i*8+4], matrix[i*8+3, i*8+5], matrix[i*8+4, i*8+6], matrix[i*8+5, i*8+7] = -1, -1, -1, -1\n\n if i < M - 1:\n matrix[i*8+6, i*8+4], matrix[i*8+7, i*8+5], matrix[i*8+8, i*8+6], matrix[i*8+9, i*8+7] = 1, 1, 1, 1\n matrix[i*8+6, (i+1)*8], matrix[i*8+7, (i+1)*8+1], matrix[i*8+8, (i+1)*8+2], matrix[i*8+9, (i+1)*8+3] = -1, -1, -1, -1\n if point_type[i] == 0:\n matrix[i*8+9, i*8+8] = -C\n elif point_type[i] == 1:\n matrix[i*8+9, i*8+7] = 0\n matrix[i*8+9, (i+1)*8+3] = 0\n matrix[i*8+9, i*8+4] = 1\n coefs[i*8+7] = psis[i]\n\n if P_align_coef is not None:\n coefs[i*8+7] = -C*P_align_coef[i]\n\n coefs[1] = (radians(aligns[0]) + aligns[1] * extra_psis[0])\n coefs[-1] = (radians(aligns[2]) + aligns[3] * extra_psis[-1])\n # print(degrees(coefs[1]), degrees(coefs[-1]))\n\n matrix[0][0], matrix[1][1] = 1, 1\n matrix[-2][-4], matrix[-1][-3] = 1, 1\n else:\n for i in range(M):\n # Рівняння зв'язку\n matrix[i*8, i*8], matrix[i*8, i*8+1], \\\n matrix[i*8, i*8+2], matrix[i*8, i*8+3] = \\\n (1, S[i], S[i]**2/(2*I*E), S[i]**3/(6*I*E))\n\n matrix[i*8+1, i*8], matrix[i*8+1, i*8+1], \\\n matrix[i*8+1, i*8+2], matrix[i*8+1, i*8+3] = \\\n (0, 1, S[i]/(I*E), S[i]**2/(2*I*E))\n\n matrix[i*8+2, i*8], matrix[i*8+2, i*8+1], \\\n matrix[i*8+2, i*8+2], matrix[i*8+2, i*8+3] = \\\n (0, 0, 1, S[i])\n\n matrix[i*8+3, i*8], matrix[i*8+3, i*8+1], \\\n matrix[i*8+3, i*8+2], matrix[i*8+3, i*8+3] = \\\n (0, 0, 0, 1)\n\n matrix[i*8, i*8+4], matrix[i*8+1, i*8+5], matrix[i*8+2, i*8+6], matrix[i*8+3, i*8+7] = -1, -1, -1, -1\n\n if i < M - 1:\n matrix[i*8+4, i*8+4], matrix[i*8+5, i*8+5], matrix[i*8+6, i*8+6], matrix[i*8+7, i*8+7] = 1, 1, 1, 1\n matrix[i*8+4, (i+1)*8], matrix[i*8+5, (i+1)*8+1], matrix[i*8+6, (i+1)*8+2], matrix[i*8+7, (i+1)*8+3] = -1, -1, -1, -1\n if point_type[i] == 0:\n matrix[i*8+7, i*8+8] = -C\n elif point_type[i] == 1:\n matrix[i*8+7, i*8+7] = 0\n matrix[i*8+7, (i+1)*8+3] = 0\n matrix[i*8+7, i*8+4] = 1\n\n else:\n matrix[i*8+4, i*8+4], matrix[i*8+5, i*8+5], matrix[i*8+6, i*8+6], matrix[i*8+7, i*8+7] = 1, 1, 1, 1\n matrix[i*8+4, 0], matrix[i*8+5, 1], matrix[i*8+6, 2], matrix[i*8+7, 3] = -1, -1, -1, -1\n if point_type[i] == 0:\n matrix[i*8+7, 0] = -C\n elif point_type[i] == 1:\n matrix[i*8+7, i*8+7] = 0\n matrix[i*8+7, 3] = 0\n matrix[i*8+7, i*8+4] = 1\n\n\n coefs[i*8+5] = psis[i]\n if P_align_coef is not None:\n coefs[i*8+7] = -C*P_align_coef[i]\n\n # display_table(matrix, bad_data = False, revert=True)\n\n return matrix, coefs\n\n\ndef len_calc(k, X, Y, x, y):\n return abs((y-Y)/(sqrt(1+k**2))-(k*(x-X))/(sqrt(1+k**2)))\n\n\ndef P_coef_count(M, d, X, Y, X_n, Y_n):\n P_align_coef = []\n\n if M != 1:\n for i in range(M):\n psi_0 = np.sign(np.arcsin(to_angle(d[i][0], d[i][1], X[i+1]-X_n[i+1], Y[i+1]-Y_n[i+1])[0]))\n psi_1 = np.sign(np.arcsin(to_angle(d[i+1][0], d[i+1][1], X[i+1]-X_n[i+1], Y[i+1]-Y_n[i+1])[0]))\n k_0 = (Y_n[(i+1)%M]-Y_n[i%M])/(X_n[(i+1)%M]-X_n[i%M])\n k_1 = (Y_n[(i+2)%M]-Y_n[(i+1)%M])/(X_n[(i+2)%M]-X_n[(i+1)%M])\n len_0 = len_calc(k_0, X_n[i+1], Y_n[i+1], X[i+1], Y[i+1])\n len_1 = len_calc(k_1, X_n[i+1], Y_n[i+1], X[i+1], Y[i+1])\n P_align_coef.append(psi_0 * len_0 if len_0 < len_1 else psi_1 * len_1)\n else:\n P_align_coef = None\n return P_align_coef\n\n\ndef vector_normalization(d, S, solution, curve_type):\n a_norm = []\n b_norm = []\n c_l_norm = []\n d_l_norm = []\n c_n_norm = []\n d_n_norm = []\n\n for i in range(len(S)):\n a_norm.append(d[i][0]/S[i])\n b_norm.append(d[i][1]/S[i])\n\n if curve_type == \"loop\":\n d = d[:-1]\n\n for i in range(len(d)):\n matrix_rotate = [[cos(-pi/2), -sin(-pi/2)], [sin(-pi/2), cos(-pi/2)]]\n vektors = (np.dot(matrix_rotate, [a_norm[i], b_norm[i]]))\n c_l_norm.append(vektors[0]), d_l_norm.append(vektors[1])\n\n align = solution[8*i+1]\n matrix_rotate = [[cos(-pi/2-align), -sin(-pi/2-align)], [sin(-pi/2-align), cos(-pi/2-align)]]\n vektors = (np.dot(matrix_rotate, [a_norm[i], b_norm[i]]))\n c_n_norm.append(vektors[0]), d_n_norm.append(vektors[1])\n\n align = solution[-3]\n matrix_rotate = [[cos(-pi/2-align), -sin(-pi/2-align)], [sin(-pi/2-align), cos(-pi/2-align)]]\n vektors = (np.dot(matrix_rotate, [a_norm[-1], b_norm[-1]]))\n c_n_norm.append(vektors[0]), d_n_norm.append(vektors[1])\n\n return a_norm, b_norm, c_l_norm, d_l_norm, c_n_norm, d_n_norm\n\n\ndef midle_point_params_vector(M, S, solution, list_of_patrs):\n dims = 4\n matrix = np.zeros((dims, dims))\n vector = np.zeros((dims))\n sol_half = []\n\n for i in range(M):\n for k in list_of_patrs:\n s = S[i]*k\n\n matrix[0, 0], matrix[0, 1], \\\n matrix[0, 2], matrix[0, 3] = \\\n (1, s, s**2/(2*I*E), s**3/(6*I*E))\n\n matrix[1, 0], matrix[1, 1], \\\n matrix[1, 2], matrix[1, 3] = \\\n (0, 1, s/(I*E), s**2/(2*I*E))\n\n matrix[2, 0], matrix[2, 1], \\\n matrix[2, 2], matrix[2, 3] = \\\n (0, 0, 1, s)\n\n matrix[3, 0], matrix[3, 1], \\\n matrix[3, 2], matrix[3, 3] = \\\n (0, 0, 0, 1)\n\n vector[0], vector[1], vector[2], vector[3] = solution[i*8], solution[i*8+1], solution[i*8+2], solution[i*8+3]\n sol_half.append(np.dot(matrix, vector))\n return sol_half\n\n\ndef midle_point_count(M, list_of_patrs, X, Y, S, a_norm, b_norm, sol_half):\n B_j = []\n c_n_norm_B_j=[]\n d_n_norm_B_j=[]\n\n for i in range(M):\n for k in range(len(list_of_patrs)):\n B_j.append([X[i] + S[i] * list_of_patrs[k] * a_norm[i], Y[i] + S[i] * list_of_patrs[k] * b_norm[i]])\n\n index = len(list_of_patrs)*i+k\n matrix_rotate = [[cos(-pi/2-sol_half[index][1]), -sin(-pi/2-sol_half[index][1])],\n [sin(-pi/2-sol_half[index][1]), cos(-pi/2-sol_half[index][1])]]\n vektors = (np.dot(matrix_rotate, [a_norm[i], b_norm[i]]))\n c_n_norm_B_j.append(vektors[0]), d_n_norm_B_j.append(vektors[1])\n\n return B_j, c_n_norm_B_j, d_n_norm_B_j\n\n\ndef new_position_count(M, S, X, Y, solution, c_l_norm, c_n_norm, c_n_norm_j, d_l_norm, d_n_norm, d_n_norm_j, sol_half, list_of_patrs, B_j, curve_type):\n M_j = []\n M_j_coreg = []\n D_j = []\n D_j_coreg = []\n X_disp = []\n Y_disp = []\n X__disp = []\n Y__disp = []\n znam = []\n\n for i in range(M):\n M_j.append([sum(S[:i]), solution[8*i+2]])\n X_ = 1 + solution[8*i+1]*sin(solution[8*i+1]) + solution[8*i]*cos(solution[8*i+1])*solution[8*i+2]\n Y_ = solution[8*i+1]*cos(solution[8*i+1]) - solution[8*i]*sin(solution[8*i+1])*solution[8*i+2]\n X__ = (solution[8*i+2]*sin(solution[8*i+1]) + 2*solution[8*i+1]*cos(solution[8*i+1])*solution[8*i+2]\n - solution[8*i]*sin(solution[8*i+1])*(solution[8*i+2]**2) + solution[8*i]*cos(solution[8*i+1])*solution[8*i+3])\n Y__ = (solution[8*i+2]*cos(solution[8*i+1]) - 2*solution[8*i+1]*sin(solution[8*i+1])*solution[8*i+2]\n - solution[8*i]*cos(solution[8*i+1])*(solution[8*i+2]**2) - solution[8*i]*sin(solution[8*i+1])*solution[8*i+3])\n\n X_disp.append(X_)\n Y_disp.append(Y_)\n X__disp.append(X__)\n Y__disp.append(Y__)\n znam.append(((sqrt(X_**2 + Y_**2))**3))\n\n # print(\"X start\",\n # -solution[8*i+2]*sin(solution[8*i+1]),\n # - 2*solution[8*i+1]*cos(solution[8*i+1])*solution[8*i+2]\n # + solution[8*i]*sin(solution[8*i+1])*(solution[8*i+2]**2)\n # - solution[8*i]*cos(solution[8*i+1])*solution[8*i+3]\n # )\n # print(\"Y start\",\n # solution[8*i+2]*cos(solution[8*i+1])\n # - 2*solution[8*i+1]*sin(solution[8*i+1])*solution[8*i+2]\n # - solution[8*i]*cos(solution[8*i+1])*(solution[8*i+2]**2)\n # - solution[8*i]*sin(solution[8*i+1])*solution[8*i+3]\n # )\n\n # print((-(X__*Y_ - Y__*X_))/((sqrt(X_**2 + Y_**2))**3), solution[8*i+2], cos(solution[8*i+1]), 2*solution[8*i+1], sin(solution[8*i+1]), solution[8*i+2])\n M_j_coreg.append([sum(S[:i]), (-(X__*Y_ - Y__*X_))/((sqrt(X_**2 + Y_**2))**3), S[i]*list_of_patrs[0]])\n D_j.append([X[i] + solution[8*i] * c_l_norm[i], Y[i] + solution[8*i] * d_l_norm[i]])\n D_j_coreg.append([X[i] + solution[8*i] * c_n_norm[i], Y[i] + solution[8*i] * d_n_norm[i]])\n for k in range(len(list_of_patrs)):\n index = len(list_of_patrs)*i+k\n M_j.append([M_j[i*len(list_of_patrs)+k+i][0]+S[i]*list_of_patrs[0], sol_half[index][2]])\n X_ = 1 + sol_half[index][1]*sin(sol_half[index][1]) + sol_half[index][0]*cos(sol_half[index][1])*sol_half[index][2]\n Y_ = sol_half[index][1]*cos(sol_half[index][1]) - sol_half[index][0]*sin(sol_half[index][1])*sol_half[index][2]\n X__ = (sol_half[index][2]*sin(sol_half[index][1]) + 2*sol_half[index][1]*cos(sol_half[index][1])*sol_half[index][2]\n - sol_half[index][0]*sin(sol_half[index][1])*(sol_half[index][2]**2) + sol_half[index][0]*cos(sol_half[index][1])*sol_half[index][3])\n Y__ = (sol_half[index][2]*cos(sol_half[index][1]) - 2*sol_half[index][1]*sin(sol_half[index][1])*sol_half[index][2]\n - sol_half[index][0]*cos(sol_half[index][1])*(sol_half[index][2]**2) - sol_half[index][0]*sin(sol_half[index][1])*sol_half[index][3])\n\n X_disp.append(X_)\n Y_disp.append(Y_)\n X__disp.append(X__)\n Y__disp.append(Y__)\n znam.append(((sqrt(X_**2 + Y_**2))**3))\n\n # print(\"X midl\",\n # -sol_half[index][2]*sin(sol_half[index][1])\n # - 2*sol_half[index][1]*cos(sol_half[index][1])*sol_half[index][2]\n # + sol_half[index][0]*sin(sol_half[index][1])*(sol_half[index][2]**2)\n # - sol_half[index][0]*cos(sol_half[index][1])*sol_half[index][3])\n # print(\"Y midl\",\n # sol_half[index][2]*cos(sol_half[index][1])\n # - 2*sol_half[index][1]*sin(sol_half[index][1])*sol_half[index][2]\n # - sol_half[index][0]*cos(sol_half[index][1])*(sol_half[index][2]**2)\n # - sol_half[index][0]*sin(sol_half[index][1])*sol_half[index][3]\n # )\n\n M_j_coreg.append([M_j[i*len(list_of_patrs)+k+i][0]+S[i]*list_of_patrs[0], (-(X__*Y_ - Y__*X_))/((sqrt(X_**2 + Y_**2))**3), S[i]*list_of_patrs[0]])\n D_j.append([B_j[index][0] + sol_half[index][0] * c_l_norm[i], B_j[index][1] + sol_half[index][0] * d_l_norm[i]])\n D_j_coreg.append([B_j[index][0] + sol_half[index][0] * c_n_norm_j[index], B_j[index][1] + sol_half[index][0] * d_n_norm_j[index]])\n M_j.append([sum(S), solution[-2]])\n X_ = 1 + solution[-3]*sin(solution[-3]) + solution[-4]*cos(solution[-3])*solution[-2]\n Y_ = solution[-3]*cos(solution[-3]) - solution[-4]*sin(solution[-3])*solution[-2]\n X__ = (solution[-2]*sin(solution[-3]) + 2*solution[-3]*cos(solution[-3])*solution[-2]\n - solution[-4]*sin(solution[-3])*(solution[-2]**2) + solution[-4]*cos(solution[-3])*solution[-1])\n Y__ = (solution[-2]*cos(solution[-3]) - 2*solution[-3]*sin(solution[-3])*solution[-2]\n - solution[-4]*cos(solution[-3])*(solution[-2]**2) - solution[-4]*sin(solution[-3])*solution[-1])\n\n X_disp.append(X_)\n Y_disp.append(Y_)\n X__disp.append(X__)\n Y__disp.append(Y__)\n znam.append(((sqrt(X_**2 + Y_**2))**3))\n\n # print(\"X end\",\n # -solution[-2]*sin(solution[-3])\n # - 2*solution[-3]*cos(solution[-3])*solution[-2]\n # + solution[-4]*sin(solution[-3])*(solution[-2]**2)\n # - solution[-4]*cos(solution[-3])*solution[-1]\n # )\n # print(\"Y end\",\n # solution[-2]*cos(solution[-3])\n # - 2*solution[-3]*sin(solution[-3])*solution[-2]\n # - solution[-4]*cos(solution[-3])*(solution[-2]**2)\n # - solution[-4]*sin(solution[-3])*solution[-1],\n # \"\\n\\n\")\n\n\n M_j_coreg.append([sum(S), (-(X__*Y_ - Y__*X_))/((sqrt(X_**2 + Y_**2))**3), S[-1]*list_of_patrs[0]])\n D_j.append([X[-1] + solution[-4] * c_l_norm[-1], Y[-1] + solution[-4] * d_l_norm[-1]])\n D_j_coreg.append([X[-1] + solution[-4] * c_n_norm[-1], Y[-1] + solution[-4] * d_n_norm[-1]])\n\n # plt.figure(figsize=(10, 10))\n #\n # plt.plot(X_disp, \"oy\", markersize=8, linewidth=2, label=f\"X_\")\n #\n # plt.legend()\n # plt.grid()\n # plt.show()\n #\n # plt.figure(figsize=(10, 10))\n #\n # plt.plot(Y_disp, \"om\", markersize=8, linewidth=2, label=f\"Y_\")\n #\n # plt.legend()\n # plt.grid()\n # plt.show()\n #\n # plt.figure(figsize=(10, 10))\n #\n # plt.plot(X__disp, \"og\", markersize=8, linewidth=2, label=f\"X__\")\n #\n # plt.legend()\n # plt.grid()\n # plt.show()\n #\n # plt.figure(figsize=(10, 10))\n #\n # plt.plot(Y__disp, \"ob\", markersize=8, linewidth=2, label=f\"Y__\")\n #\n # plt.legend()\n # plt.grid()\n # plt.show()\n #\n # plt.figure(figsize=(10, 10))\n #\n # plt.plot(znam, \"oy\", markersize=8, linewidth=2, label=f\"X_\")\n #\n # plt.legend()\n # plt.grid()\n # plt.show()\n\n return map(np.transpose, map(np.array, [M_j, M_j_coreg, D_j, D_j_coreg]))\n","repo_name":"dimon19994/diploma_web","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19919404584","text":"from tkinter import *\nfrom Signup import *\nfrom Accounts import *\nfrom CustomerSignIn import *\nfrom AdminSignIn import *\n\ndef createButtons(root):\n Button1 = Button(root, text = \"Sign Up (New Customer)\", bg = \"white\", fg = \"red\", height=2, width=100, command=SignUp)\n Button2 = Button(root, text = \"Sign In (Existing Customer)\", bg = \"white\", fg = \"red\", height=2, width=100, command=CustomerSignIn)\n Button3 = Button(root, text = \"Admin Sign In\", bg = \"white\", fg = \"red\", height=2, width=100, command=adminSignIn)\n Button4 = Button(root, text = \"Quit\",bg = \"white\", fg = \"red\", command = quit, height=2, width=100)\n Button1.pack(fill = BOTH, expand=1)\n Button2.pack(fill = BOTH, expand=1)\n Button3.pack(fill = BOTH, expand=1)\n Button4.pack(fill = BOTH, expand=1)\n return\n\nroot = Tk()\nroot.title(\"Main Menu\")\nroot.geometry(\"400x163\")\ncreateButtons(root)\nroot.mainloop()","repo_name":"Shivank-thapa/Bank-Application","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35254551382","text":"#! /usr/bin/env python\n\nimport rospy\nfrom diff_drive_msgs.msg import Diff_drive\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry \nfrom math import atan2, fabs, sin, cos\n\n\nclass Diff_driver:\n\tdef odom_callback(self,msg):\n\t\tself.curr_or = msg.twist.twist.angular.z\n\n\tdef constrain_vel (self,inputs):\n\t\tif inputs > 0.55:\n\t\t\treturn 0.55\n\t\telif inputs < -0.55:\n\t\t\treturn -0.55\n\t\telse:\n\t\t\treturn inputs\n\n\n\tdef publish_diff(self,v,w):\n\t\tself.v_r = (2*v + w*self.l)/(2*self.r)\n\t\tself.v_l = (2*v - w*self.l)/(2*self.r) \n\t\t#self.v_r = self.constrain_vel(self.v_r)\n\t\t#self.v_l = self.constrain_vel(self.v_l)\n\t\tdiff = Diff_drive()\n\t\tdiff.time_stamp = rospy.get_time()\n\t\tdiff.left_wheel_vel = self.v_l\n\t\tdiff.right_wheel_vel = self.v_r\n\t\tself.diff_pub.publish(diff)\n\n\t\n\tdef pid(self,v,w):\n\t\tlast_time = 0.0\n\t\tprev_error = 0.0\n\t\twhile True:\n\t\t\trospy.Subscriber(\"/odom\", Odometry, self.odom_callback)\n\t\t\terror = w - self.curr_or\n\t\t\tself.p = self.k_p * error\n\t\t\tself.i = self.i + (self.k_i*error)\n\t\t\tdt = rospy.get_time() - last_time\n\t\t\tself.d = self.k_d * ( (error - prev_error) / dt)\n\t\t\tlast_time = rospy.get_time()\n\t\t\tprev_error = error\n\t\t\tself.PID = self.p + self.i + self.d\n\t\t\tself.PID = atan2(sin(self.PID), cos(self.PID))\n\t\t\tw_f = self.PID + self.curr_or\n\t\t\tw_f = atan2(sin(w_f), cos(w_f))\n\t\t\tself.publish_diff(v, w_f)\n\n\t\t\tif fabs(error) <= 0.05:\n\t\t\t\tbreak\n\n\tdef vel_callback(self,vel):\n\t\tv = vel.linear.x\n\t\tw = vel.angular.z\n\t\tself.publish_diff(v,w)\n\n\n\tdef __init__(self):\n\t\tself.p = 0.0\n\t\tself.i = 0.0\n\t\tself.d = 0.0\n\t\tself.curr_or = 0.0\n\t\tself.PID = 0.0\n\t\tself.k_p = 0.31\n\t\tself.k_i = 0.0\n\t\tself.k_d = 0.59\n\t\tself.v_l = 0.0\n\t\tself.v_r = 0.0\n\t\tself.l = 0.62\n\t\tself.r = 0.162\n\n\t\tself.diff_pub = rospy.Publisher('/diff_drive_vel', Diff_drive, queue_size=1)\n\t\tsub = rospy.Subscriber('/cmd_vel', Twist, self.vel_callback)\n\t\trospy.spin()\n\n\nif __name__ == \"__main__\":\n\trospy.init_node('cmd_vel_to_diff_drive')\n\ttry:\n\t\tdifferential = Diff_driver()\n\n\texcept rospy.ROSInterruptException: pass\n\n'''\nrospy.init_node('cmd_vel_to_diff_drive')\ndiff_pub = rospy.Publisher('/diff_drive_vel', Diff_drive, queue_size=100)\n\ndiff = Diff_drive()\nl = 0.62 #baseline length\nr = 0.162 #radius of wheels\nv_r = 0.0\nv_l = 0.0\nk_p = 0.31\nk_i = 0.0\nk_d = 0.59\n\ndef pid(w):\n\t\n\t\n\ndef callback(msg):\n\tv = msg.linear.x\n\tw = msg.angular.z\n\t\n\twhile True:\n\t\t\n\n\n\tv_r = (2*v + w*l)/(2*r)\n v_l = (2*v - w*l)/(2*r)\n\tdiff.time_stamp = rospy.get_time()\n\tdiff.left_wheel_vel = v_l\n\tdiff.right_wheel_vel = v_r\n\tdiff_pub.publish(diff)\n\n\nsub = rospy.Subscriber('/cmd_vel', Twist, callback)\nrospy.spin()\n'''\n","repo_name":"adubredu/rascapp_robot","sub_path":"bill_ws/src/bill_diff_drive/src/diff_drive.py","file_name":"diff_drive.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71241847097","text":"__author__ = 'Irwan Fathurrahman '\n__date__ = '22/08/16'\n__license__ = \"GPL\"\n__copyright__ = 'kartoza.com'\n\nfrom mezzanine.conf import register_setting\n\n# Number of recent post in front page\nregister_setting(\n name=\"FRONT_PAGE_RECENT_POST\",\n label=\"Recent posts in front page\",\n description=\"The number of recent posts to show in front page.\",\n editable=True,\n default=5,\n)\n\nregister_setting(\n name=\"TEMPLATE_ACCESSIBLE_SETTINGS\",\n default=(\"FRONT_PAGE_RECENT_POST\",),\n append=True,\n)\n","repo_name":"kartoza/django-channel-api","sub_path":"django_project/config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18938703304","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = str(input('Please enter the input for the city[no caps] : '))\n while(not(city in CITY_DATA)):\n city = str(input('The entered city data is not available , please try another city[no caps] :'))\n if city in CITY_DATA:\n break\n # TO DO: get user input for month (all, january, february, ... , june)\n months = ['all','january','february','march','april','may','june']\n month = str(input('Please input the month to be applied as filter[no caps] : '))\n while(not(month in months)):\n month = str(input('The entered month data is not available/invalid , please try another month between january and june[no caps] :'))\n if month in months:\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n day = str(input('Please input the day of the week :'))\n while(not(day in days)):\n day = str(input('The entered day is invalid , please enter correct day[no caps] :'))\n if day in days:\n break\n print('-'*40)\n return city, month, day\n\n\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n \n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month)+1\n \n # filter by month to create the new dataframe\n df = df[df['month']==month]\n \n # filter by day of week if applicable\n if day != 'all':\n days = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n day = days.index(day.title())\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day]\n \n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n months = ['january','february','march','april','may','june']\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n # TO DO: display the most common month\n cm = df['month'].mode()[0]\n print('\\n The most common month is {}\\n'.format(months[cm-1]))\n\n # TO DO: display the most common day of week\n cd = df['day_of_week'].mode()[0]\n print('\\n The most common day is {}\\n'.format(cd))\n\n # TO DO: display the most common start hour\n df['start hour']=df['Start Time'].dt.hour\n ch= df['start hour'].mode()[0]\n print('\\n The most common start hour is {}\\n'.format(ch))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n cuss = df['Start Station'].mode()[0]\n print('\\n most common starting station :{}\\n'.format(cuss))\n \n # TO DO: display most commonly used end station\n cues = df['End Station'].mode()[0]\n print('\\n most common Ending station :{}\\n'.format(cues))\n\n # TO DO: display most frequent combination of start station and end station trip\n df['start&end']= df[\"Start Station\"].astype(str) + \" to \" + df[\"End Station\"].astype(str)\n cuses=df['start&end'].mode()[0]\n print('\\n most frequent combination of start station and end station trip :{}\\n'.format(cuses))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n sum = df['Trip Duration'].sum()\n print('\\n calculated total travel time is {}\\n'.format(sum))\n\n # TO DO: display mean travel time\n mean = df['Trip Duration'].mean()\n print('\\n calculated mean travel time is {}\\n'.format(mean))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('\\ncount of all user types is shown below : \\n')\n print(df['User Type'].value_counts())\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('\\ncount of all Gender is shown below : \\n')\n print(df['Gender'].value_counts())\n else:\n print('\\n The city file does not have gender data\\n')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\n Earliest year of birth was found to be {}\\n'.format(int(df['Birth Year'].min())))\n print('\\n Most recent year of birth was found to be {}\\n'.format(int(df['Birth Year'].max())))\n print('\\n Most common year of birth was found to be {}\\n'.format(int(df['Birth Year'].mode()[0])))\n else:\n print('\\n The city file does not have year of birth data\\n')\n \n # Display individual data of five's\n choices = ['yes' , 'no']\n choice = str(input('\\n Would like to see the data for first five indiviual users[yes/no] : ')).lower()\n while(not(choice in choices)):\n choice = str(input('\\n The entered choice is invalid please enter the correct choice[yes/no] :')).lower()\n if choice in choices:\n break\n \n \n i=5\n x=0\n while(choice == 'yes'):\n \n print(df[x:i])\n choice = str(input('\\n Would like to see the individual data for next five indiviual users[yes/no] : ')).lower()\n while(not(choice in choices)):\n \n choice = str(input('\\n The entered choice is invalid please enter the correct choice[yes/no] :')).lower()\n if choice in choices:\n \n break\n x=i\n i += 5\n if choice == 'no':\n break\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n \n \n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"shivauchiha/Udacity-IOT-foundation-projects","sub_path":"BIKESHARE DATA/bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9282453263","text":"from turtle import Turtle\r\n\r\nclass Paddle(Turtle):\r\n \r\n def __init__(self, x_pos, y_pos, color):\r\n super().__init__()\r\n \r\n self.penup()\r\n self.shape(\"square\")\r\n self.color(color)\r\n self.goto(x_pos, y_pos)\r\n self.shapesize(2.5, 1)\r\n\r\n\r\n def Up(self):\r\n if self.ycor() < 215: # Paddle up with border limitation\r\n self.goto(self.xcor(), self.ycor() + 40)\r\n \r\n def down(self):\r\n if self.ycor() > -240: # Paddle down with border limitation\r\n self.goto(self.xcor(), self.ycor() - 40)","repo_name":"mdashik123456/Pong-Game","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69948479098","text":"import pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\nimport gc,os,sys\n\nimport operator \n\n\n\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\n\nfrom keras import backend as K\n\nfrom keras.engine import InputSpec, Layer\n\nfrom keras.callbacks import ModelCheckpoint, Callback, EarlyStopping, ReduceLROnPlateau\n\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalization\n\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten, Masking\n\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\n\nfrom keras.models import Model, load_model\n\nfrom keras.optimizers import Adam\n\n\n\nsns.set_style('darkgrid')\n\npd.options.display.float_format = '{:,.3f}'.format\n\ntrain = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')\n\ntest = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv')\n\n\n\nprint(train.shape, test.shape)\n# Memory saving function credit to https://www.kaggle.com/gemartin/load-data-reduce-memory-usage\n\ndef reduce_mem_usage(df):\n\n \"\"\" iterate through all the columns of a dataframe and modify the data type\n\n to reduce memory usage.\n\n \"\"\"\n\n start_mem = df.memory_usage().sum() / 1024**2\n\n \n\n for col in df.columns:\n\n col_type = df[col].dtype\n\n \n\n if col_type != object:\n\n c_min = df[col].min()\n\n c_max = df[col].max()\n\n if str(col_type)[:3] == 'int':\n\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n\n df[col] = df[col].astype(np.int8)\n\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n\n df[col] = df[col].astype(np.int16)\n\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n\n df[col] = df[col].astype(np.int32)\n\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n\n df[col] = df[col].astype(np.int64) \n\n else:\n\n #if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n\n # df[col] = df[col].astype(np.float16)\n\n #el\n\n if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n\n df[col] = df[col].astype(np.float32)\n\n else:\n\n df[col] = df[col].astype(np.float64)\n\n #else:\n\n #df[col] = df[col].astype('category')\n\n\n\n end_mem = df.memory_usage().sum() / 1024**2\n\n print('Memory usage of dataframe is {:.2f} MB --> {:.2f} MB (Decreased by {:.1f}%)'.format(\n\n start_mem, end_mem, 100 * (start_mem - end_mem) / start_mem))\n\n return df\ntrain = reduce_mem_usage(train)\n\ntest = reduce_mem_usage(test)\ntrain.head()\ntest.head()\ntrain['target'].describe()\n#train['target'].hist(bins=50, figsize=(10,3))\n\n#sns.distplot(train['target'], bins=50, kde=True)\n\n\n\ntarget_bin = pd.cut(train['target'], [0, 0.01, 0.2, 0.4, 0.6, 0.8, 0.99, 1], right=False).value_counts()\n\ntarget_bin = pd.Series(target_bin)\n\ntarget_bin.plot.bar(color='navy', figsize=(8,3), title='target histgram (by range)')\n\ntarget_bin.to_frame().T\ntrain.sort_values(['target'], ascending=False).head()\ntrain.sort_values(['target']).head()\n# word-count histgram\n\nword_counts = train['comment_text'].apply(lambda x: len(x.split()))\n\nword_counts.hist(bins=50, figsize=(10,3))\n\n\n\nprint('max words: ', max(word_counts))\n\nprint('sum words: ', sum(word_counts))\n\ndel word_counts\nprint('toxic comment:\\n', train[train['target'] == 1]['comment_text'].iloc[0])\n\nprint()\n\nprint('non-toxic comment:\\n', train[train['target'] == 0]['comment_text'].iloc[0])\nall_df = pd.concat([train, test], sort=False)\n\ndel (train, test)\n\ngc.collect()\nall_df['comment_text'].values[0]\nall_df['comment_text'] = all_df['comment_text'].apply(lambda x: x.lower())\ncontraction_mapping = {\"ain't\": \"is not\", \"aren't\": \"are not\",\"can't\": \"can not\", \"'cause\": \"because\",\n\n \"could've\": \"could have\", \"couldn't\": \"could not\", \"didn't\": \"did not\", \n\n \"doesn't\": \"does not\", \"don't\": \"do not\", \"hadn't\": \"had not\", \n\n \"hasn't\": \"has not\", \"haven't\": \"have not\", \"he'd\": \"he would\",\n\n \"he'll\": \"he will\", \"he's\": \"he is\", \"how'd\": \"how did\", \n\n \"how'd'y\": \"how do you\", \"how'll\": \"how will\", \"how's\": \"how is\", \n\n \"I'd\": \"I would\", \"I'd've\": \"I would have\", \"I'll\": \"I will\",\n\n \"I'll've\": \"I will have\",\"I'm\": \"I am\", \"I've\": \"I have\",\n\n \"i'd\": \"i would\", \"i'd've\": \"i would have\", \"i'll\": \"i will\",\n\n \"i'll've\": \"i will have\",\"i'm\": \"i am\", \"i've\": \"i have\", \"isn't\": \"is not\",\n\n \"it'd\": \"it would\", \"it'd've\": \"it would have\", \"it'll\": \"it will\", \n\n \"it'll've\": \"it will have\",\"it's\": \"it is\", \"let's\": \"let us\",\n\n \"ma'am\": \"madam\", \"mayn't\": \"may not\", \"might've\": \"might have\",\n\n \"mightn't\": \"might not\",\"mightn't've\": \"might not have\", \n\n \"must've\": \"must have\", \"mustn't\": \"must not\", \"mustn't've\": \"must not have\", \n\n \"needn't\": \"need not\", \"needn't've\": \"need not have\",\n\n \"o'clock\": \"of the clock\", \"oughtn't\": \"ought not\", \n\n \"oughtn't've\": \"ought not have\", \"shan't\": \"shall not\",\n\n \"sha'n't\": \"shall not\", \"shan't've\": \"shall not have\", \"she'd\": \"she would\", \n\n \"she'd've\": \"she would have\", \"she'll\": \"she will\",\n\n \"she'll've\": \"she will have\", \"she's\": \"she is\", \"should've\": \"should have\", \n\n \"shouldn't\": \"should not\", \"shouldn't've\": \"should not have\", \n\n \"so've\": \"so have\",\"so's\": \"so as\", \"this's\": \"this is\",\n\n \"that'd\": \"that would\", \"that'd've\": \"that would have\", \"that's\": \"that is\",\n\n \"there'd\": \"there would\", \"there'd've\": \"there would have\", \n\n \"there's\": \"there is\", \"here's\": \"here is\",\"they'd\": \"they would\", \n\n \"they'd've\": \"they would have\", \"they'll\": \"they will\", \n\n \"they'll've\": \"they will have\", \"they're\": \"they are\", \n\n \"they've\": \"they have\", \"to've\": \"to have\", \"wasn't\": \"was not\",\n\n \"we'd\": \"we would\", \"we'd've\": \"we would have\", \"we'll\": \n\n \"we will\", \"we'll've\": \"we will have\", \"we're\": \"we are\", \"we've\": \"we have\",\n\n \"weren't\": \"were not\", \"what'll\": \"what will\", \"what'll've\": \"what will have\",\n\n \"what're\": \"what are\", \"what's\": \"what is\", \"what've\": \"what have\", \n\n \"when's\": \"when is\", \"when've\": \"when have\", \"where'd\": \"where did\", \n\n \"where's\": \"where is\", \"where've\": \"where have\", \"who'll\": \"who will\", \n\n \"who'll've\": \"who will have\", \"who's\": \"who is\", \"who've\": \"who have\", \n\n \"why's\": \"why is\", \"why've\": \"why have\", \"will've\": \"will have\",\n\n \"won't\": \"will not\", \"won't've\": \"will not have\", \"would've\": \"would have\",\n\n \"wouldn't\": \"would not\", \"wouldn't've\": \"would not have\", \"y'all\": \"you all\", \n\n \"y'all'd\": \"you all would\",\"y'all'd've\": \"you all would have\",\n\n \"y'all're\": \"you all are\",\"y'all've\": \"you all have\",\"you'd\": \"you would\", \n\n \"you'd've\": \"you would have\", \"you'll\": \"you will\", \n\n \"you'll've\": \"you will have\", \"you're\": \"you are\", \"you've\": \"you have\" }\n\n\n\ndef clean_contractions(text, mapping):\n\n specials = [\"’\", \"‘\", \"´\", \"`\"]\n\n for s in specials:\n\n text = text.replace(s, \"'\")\n\n text = ' '.join([mapping[t] if t in mapping else t for t in text.split(\" \")])\n\n return text\n\n\n\nall_df['comment_text'] = all_df['comment_text'].apply(lambda x: clean_contractions(x, contraction_mapping))\ndef preprocess(data):\n\n def clean_special_chars(text):\n\n punct = \"/-'?!.,#$%\\'()*+-/:;<=>@[\\\\]^_`{|}~`\" + '\"\"“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\\×™√²—–&…'\n\n for p in punct:\n\n text = text.replace(p, ' ')\n\n for p in '0123456789':\n\n text = text.replace(p, ' ')\n\n #for p in \"?!.,\":\n\n # text = text.replace(p, ' ' + p)\n\n return text\n\n\n\n data = data.astype(str).apply(lambda x: clean_special_chars(x))\n\n return data\n\n\n\nall_df['comment_text'] = preprocess(all_df['comment_text'])\ntable = str.maketrans('ᴀʙᴄᴅᴇғɢʜɪᴊᴋʟᴍɴᴏᴘʀᴛᴜᴠᴡʏᴢ', 'abcdefghijklmnoprtuvwyx')\n\nall_df['comment_text'] = all_df['comment_text'].apply(lambda x: x.translate(table))\ntext_to_word_sequence(all_df['comment_text'].values[0])\ntrain = all_df[all_df['target'].notnull()]\n\ntest = all_df[all_df['target'].isnull()]\n\n\n\nX_train = train.drop(['id','target'], axis=1)\n\nY_train = (train['target'] >= 0.5).astype(int)\n\nX_test = test.drop(['id','target'], axis=1)\n\n#train_id = train['id']\n\n#test_id = test['id']\n\n\n\nprint(X_train.shape, X_test.shape)\ndel (all_df, train, test)\n\ngc.collect()\n\n\n\nprint(pd.DataFrame([[val for val in dir()], [sys.getsizeof(eval(val)) for val in dir()]],\n\n index=['name','size']).T.sort_values('size', ascending=False).reset_index(drop=True)[:5])\nTOXICITY_COLUMN = 'target'\n\nTEXT_COLUMN = 'comment_text'\n\nMAX_NUM_WORDS = 300000\n\nTOKENIZER_FILTER = '\\r\\t\\n'\n\n\n\n# Create a text tokenizer.\n\ntokenizer = Tokenizer(num_words=MAX_NUM_WORDS, filters=TOKENIZER_FILTER)\n\ntokenizer.fit_on_texts(list(X_train[TEXT_COLUMN]) + list(X_test[TEXT_COLUMN]))\ncounter = sorted(dict(tokenizer.word_docs).items(), key=lambda x:x[1], reverse=True)\n\nwordcount = pd.Series([x[1] for x in counter], [x[0] for x in counter])\n\ndel counter\n\n\n\nwordcount[:30].plot.bar(color='navy', width=0.7, figsize=(12,3))\ntokenizer_tx = Tokenizer(num_words=MAX_NUM_WORDS, filters=TOKENIZER_FILTER)\n\ntokenizer_tx.fit_on_texts(list(X_train.loc[Y_train == 1, TEXT_COLUMN]))\n\n\n\ncounter = sorted(dict(tokenizer_tx.word_docs).items(), key=lambda x:x[1], reverse=True)\n\nwordcount_tx = pd.Series([x[1] for x in counter], [x[0] for x in counter])\n\n\n\nwordcount_stats = pd.concat([wordcount, wordcount_tx], axis=1, keys=[0, 'toxic'], sort=False)\n\nwordcount_only_tx = wordcount_stats[wordcount_stats[0] * 0.8 <= wordcount_stats['toxic']].copy()\n\nwordcount_only_tx.drop('toxic', axis=1, inplace=True)\n\nwordcount_only_tx = wordcount_only_tx[wordcount_only_tx[0] > 1]\n\n\n\nprint(len(wordcount_only_tx))\n\nwordcount_only_tx[:10]\nwordcount = pd.concat([wordcount_only_tx, wordcount])[0]\n\ndel counter, wordcount_tx, wordcount_stats, wordcount_only_tx\nwordsum = wordcount.sum()\n\n\n\nn_words = len(wordcount)\n\ncumsum_rate = wordcount.cumsum() / wordsum\n\ncover_rate = {}\n\nfor i in range(100, 90, -1):\n\n p = i / 100\n\n cover_rate[str(i)+'%'] = n_words - len(cumsum_rate[cumsum_rate > p])\n\ndel cumsum_rate\n\n\n\npd.Series(cover_rate).plot.barh(color='navy', figsize=(12, 3), title='vocab-size by coverage-rate')\n\npd.Series(cover_rate).to_frame().T\nVOCAB_SIZE = 50000\n\n\n\nprint('covered', wordcount[VOCAB_SIZE], 'times word')\n\n\n\nEMBEDDINGS_DIMENSION = 300\n\nCRAWL_EMBEDDING_PATH = '../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec'\n\nGLOVE_EMBEDDING_PATH = '../input/glove840b300dtxt/glove.840B.300d.txt'\n\n\n\ndef get_coefs(word, *arr):\n\n return word, np.asarray(arr, dtype='float32')\n\n\n\ndef load_embeddings(path):\n\n with open(path) as f:\n\n return dict(get_coefs(*line.strip().split(' ')) for line in f)\n\n\n\ndef build_matrix(path):\n\n embedding_index = load_embeddings(path)\n\n embedding_matrix = np.zeros((VOCAB_SIZE + 1, EMBEDDINGS_DIMENSION))\n\n unknown_words = []\n\n for i in range(VOCAB_SIZE):\n\n try:\n\n word = wordcount.index[i]\n\n embedding_matrix[i] = embedding_index[word]\n\n except KeyError:\n\n unknown_words.append(word)\n\n return embedding_matrix, unknown_words\n\n\n\ncrawl_matrix, unknown_words_crawl = build_matrix(CRAWL_EMBEDDING_PATH)\n\nglove_matrix, unknown_words_glove = build_matrix(GLOVE_EMBEDDING_PATH)\n\n\n\nword2index = dict((wordcount.index[i], i) for i in range(VOCAB_SIZE))\n\n\n\nembedding_matrix = np.concatenate([crawl_matrix, glove_matrix], axis=-1)\n\n#embedding_matrix = glove_matrix\n\nembedding_matrix.shape\nwords_count = len(unknown_words_crawl)\n\nprint('n unknown words (crawl):', words_count, ', {:.3%} of all words'.format(words_count / n_words))\n\nprint('unknown words (crawl):', unknown_words_crawl)\n\nwords_count = len(unknown_words_glove)\n\nprint('n unknown words (glove):', words_count, ', {:.3%} of all words'.format(words_count / n_words))\n\nprint('unknown words (glove):', unknown_words_glove)\ndel crawl_matrix, unknown_words_crawl\n\ndel glove_matrix, unknown_words_glove\n\ndel wordcount\n\ngc.collect()\nMAX_SEQUENCE_LENGTH = 256\n\n\n\ndef word_index(word):\n\n try:\n\n return word2index[word]\n\n except KeyError:\n\n return VOCAB_SIZE\n\n\n\n# All comments must be truncated or padded to be the same length.\n\ndef pad_text(texts, tokenizer):\n\n matrix = [list(map(word_index, text_to_word_sequence(t, filters=TOKENIZER_FILTER))) for t in texts]\n\n return pad_sequences(matrix, maxlen=MAX_SEQUENCE_LENGTH)\n\n\n\ntrain_text = pad_text(X_train[TEXT_COLUMN], tokenizer)\n\ntest_text = pad_text(X_test[TEXT_COLUMN], tokenizer)\ndel (X_train, X_test)\n\ngc.collect()\n\n\n\nprint(pd.DataFrame([[val for val in dir()], [sys.getsizeof(eval(val)) for val in dir()]],\n\n index=['name','size']).T.sort_values('size', ascending=False).reset_index(drop=True)[:10])\ndef build_model(lr=0.0, lr_d=0.0, units=64, spatial_dr=0.0, \n\n dense_units=0, dr=0.1, conv_size=32, epochs=20):\n\n \n\n file_path = \"best_model.hdf5\"\n\n check_point = ModelCheckpoint(file_path, monitor=\"val_loss\", verbose=1, save_best_only=True, mode=\"min\")\n\n early_stop = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=3)\n\n\n\n sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n\n embedding_layer = Embedding(*embedding_matrix.shape,\n\n weights=[embedding_matrix],\n\n input_length=MAX_SEQUENCE_LENGTH,\n\n trainable=True)\n\n \n\n x = embedding_layer(sequence_input)\n\n x = SpatialDropout1D(spatial_dr)(x)\n\n x = Bidirectional(CuDNNGRU(units, return_sequences=True))(x) \n\n x = Conv1D(conv_size, 2, padding=\"valid\", kernel_initializer=\"he_uniform\")(x)\n\n \n\n avg_pool1 = GlobalAveragePooling1D()(x)\n\n max_pool1 = GlobalMaxPooling1D()(x) \n\n \n\n x = concatenate([avg_pool1, max_pool1])\n\n x = BatchNormalization()(x)\n\n x = Dense(int(dense_units / 2), activation='relu')(x)\n\n x = Dropout(dr)(x)\n\n \n\n preds = Dense(1, activation='sigmoid')(x)\n\n \n\n model = Model(inputs=sequence_input, outputs=preds)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=Adam(lr=lr, decay=lr_d), metrics=[\"accuracy\"])\n\n model.summary()\n\n history = model.fit(train_text, Y_train, batch_size=1024, epochs=epochs, validation_split=0.1, \n\n verbose=1, callbacks=[check_point, early_stop])\n\n \n\n model = load_model(file_path)\n\n return model\nmodel = build_model(lr=1e-3, lr_d=1e-7, units=64, spatial_dr=0.2, dense_units=64, dr=0, conv_size=64, epochs=20)\n\npred = model.predict(test_text)\nsubmission = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/sample_submission.csv', index_col='id')\n\nsubmission['prediction'] = pred\n\nsubmission.reset_index(drop=False, inplace=True)\n\nsubmission.to_csv('submission.csv', index=False)\n\nsubmission.head()\nsubmission['prediction'].describe()\ntarget_bin = pd.cut(submission['prediction'], [0, 0.01, 0.2, 0.4, 0.6, 0.8, 0.99, 1], right=False).value_counts()\n\ntarget_bin = pd.Series(target_bin)\n\ntarget_bin.plot.bar(color='navy', figsize=(10,3))\n\ntarget_bin.to_frame().T","repo_name":"aorursy/new-nb-5","sub_path":"plasticgrammer_jigsaw-toxicity-classification-playground.py","file_name":"plasticgrammer_jigsaw-toxicity-classification-playground.py","file_ext":"py","file_size_in_byte":16376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28587367374","text":"\"\"\"Compatibility fixes for older version of python, numpy and scipy\n\nIf you add content to this file, please give the version of the package\nat which the fixe is no longer needed.\n\"\"\"\n# Authors: Emmanuelle Gouillart \n# Gael Varoquaux \n# Fabian Pedregosa \n# Lars Buitinck\n#\n# License: BSD 3 clause\n\nfrom functools import update_wrapper\nfrom distutils.version import LooseVersion\nimport functools\n\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy\nimport scipy.stats\nfrom scipy.sparse.linalg import lsqr as sparse_lsqr # noqa\nfrom numpy.ma import MaskedArray as _MaskedArray # TODO: remove in 1.0\nfrom .._config import config_context, get_config\n\nfrom .deprecation import deprecated\n\ntry:\n from pkg_resources import parse_version # type: ignore\nexcept ImportError:\n # setuptools not installed\n parse_version = LooseVersion # type: ignore\n\n\nnp_version = parse_version(np.__version__)\nsp_version = parse_version(scipy.__version__)\n\n\nif sp_version >= parse_version('1.4'):\n from scipy.sparse.linalg import lobpcg\nelse:\n # Backport of lobpcg functionality from scipy 1.4.0, can be removed\n # once support for sp_version < parse_version('1.4') is dropped\n # mypy error: Name 'lobpcg' already defined (possibly by an import)\n from ..externals._lobpcg import lobpcg # type: ignore # noqa\n\n\ndef _object_dtype_isnan(X):\n return X != X\n\n\n# TODO: replace by copy=False, when only scipy > 1.1 is supported.\ndef _astype_copy_false(X):\n \"\"\"Returns the copy=False parameter for\n {ndarray, csr_matrix, csc_matrix}.astype when possible,\n otherwise don't specify\n \"\"\"\n if sp_version >= parse_version('1.1') or not sp.issparse(X):\n return {'copy': False}\n else:\n return {}\n\n\ndef _joblib_parallel_args(**kwargs):\n \"\"\"Set joblib.Parallel arguments in a compatible way for 0.11 and 0.12+\n\n For joblib 0.11 this maps both ``prefer`` and ``require`` parameters to\n a specific ``backend``.\n\n Parameters\n ----------\n\n prefer : str in {'processes', 'threads'} or None\n Soft hint to choose the default backend if no specific backend\n was selected with the parallel_backend context manager.\n\n require : 'sharedmem' or None\n Hard condstraint to select the backend. If set to 'sharedmem',\n the selected backend will be single-host and thread-based even\n if the user asked for a non-thread based backend with\n parallel_backend.\n\n See joblib.Parallel documentation for more details\n \"\"\"\n import joblib\n\n if parse_version(joblib.__version__) >= parse_version('0.12'):\n return kwargs\n\n extra_args = set(kwargs.keys()).difference({'prefer', 'require'})\n if extra_args:\n raise NotImplementedError('unhandled arguments %s with joblib %s'\n % (list(extra_args), joblib.__version__))\n args = {}\n if 'prefer' in kwargs:\n prefer = kwargs['prefer']\n if prefer not in ['threads', 'processes', None]:\n raise ValueError('prefer=%s is not supported' % prefer)\n args['backend'] = {'threads': 'threading',\n 'processes': 'multiprocessing',\n None: None}[prefer]\n\n if 'require' in kwargs:\n require = kwargs['require']\n if require not in [None, 'sharedmem']:\n raise ValueError('require=%s is not supported' % require)\n if require == 'sharedmem':\n args['backend'] = 'threading'\n return args\n\n\nclass loguniform(scipy.stats.reciprocal):\n \"\"\"A class supporting log-uniform random variables.\n\n Parameters\n ----------\n low : float\n The minimum value\n high : float\n The maximum value\n\n Methods\n -------\n rvs(self, size=None, random_state=None)\n Generate log-uniform random variables\n\n The most useful method for Scikit-learn usage is highlighted here.\n For a full list, see\n `scipy.stats.reciprocal\n `_.\n This list includes all functions of ``scipy.stats`` continuous\n distributions such as ``pdf``.\n\n Notes\n -----\n This class generates values between ``low`` and ``high`` or\n\n low <= loguniform(low, high).rvs() <= high\n\n The logarithmic probability density function (PDF) is uniform. When\n ``x`` is a uniformly distributed random variable between 0 and 1, ``10**x``\n are random variables that are equally likely to be returned.\n\n This class is an alias to ``scipy.stats.reciprocal``, which uses the\n reciprocal distribution:\n https://en.wikipedia.org/wiki/Reciprocal_distribution\n\n Examples\n --------\n\n >>> from sklearn.utils.fixes import loguniform\n >>> rv = loguniform(1e-3, 1e1)\n >>> rvs = rv.rvs(random_state=42, size=1000)\n >>> rvs.min() # doctest: +SKIP\n 0.0010435856341129003\n >>> rvs.max() # doctest: +SKIP\n 9.97403052786026\n \"\"\"\n\n\n@deprecated(\n 'MaskedArray is deprecated in version 0.23 and will be removed in version '\n '1.0 (renaming of 0.25). Use numpy.ma.MaskedArray instead.'\n)\nclass MaskedArray(_MaskedArray):\n pass # TODO: remove in 1.0\n\n\ndef _take_along_axis(arr, indices, axis):\n \"\"\"Implements a simplified version of np.take_along_axis if numpy\n version < 1.15\"\"\"\n if np_version >= parse_version('1.15'):\n return np.take_along_axis(arr=arr, indices=indices, axis=axis)\n else:\n if axis is None:\n arr = arr.flatten()\n\n if not np.issubdtype(indices.dtype, np.intp):\n raise IndexError('`indices` must be an integer array')\n if arr.ndim != indices.ndim:\n raise ValueError(\n \"`indices` and `arr` must have the same number of dimensions\")\n\n shape_ones = (1,) * indices.ndim\n dest_dims = (\n list(range(axis)) +\n [None] +\n list(range(axis+1, indices.ndim))\n )\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr.shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]\n fancy_index.append(np.arange(n).reshape(ind_shape))\n\n fancy_index = tuple(fancy_index)\n return arr[fancy_index]\n\n\n# remove when https://github.com/joblib/joblib/issues/1071 is fixed\ndef delayed(function):\n \"\"\"Decorator used to capture the arguments of a function.\"\"\"\n @functools.wraps(function)\n def delayed_function(*args, **kwargs):\n return _FuncWrapper(function), args, kwargs\n return delayed_function\n\n\nclass _FuncWrapper:\n \"\"\"\"Load the global configuration before calling the function.\"\"\"\n def __init__(self, function):\n self.function = function\n self.config = get_config()\n update_wrapper(self, self.function)\n\n def __call__(self, *args, **kwargs):\n with config_context(**self.config):\n return self.function(*args, **kwargs)\n","repo_name":"ryfeus/lambda-packs","sub_path":"Sklearn_arm/source/sklearn/utils/fixes.py","file_name":"fixes.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"16635003356","text":"import numpy as np # linear algebra\nimport pandas as pd \nimport seaborn as sns\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\ndataset=pd.read_csv('dataR2.csv')\n\nX = dataset.iloc[:, 0:9].values\ny = dataset.iloc[:, 9].values\n\ndataset.describe()\n\nclasses = dataset['Classification']\nsns.countplot(x=classes,data=dataset)\n\nfrom sklearn.preprocessing import LabelEncoder\nlabel_encoder = LabelEncoder()\ndataset['Classification']=label_encoder.fit_transform(dataset['Classification'])\n\nsns.heatmap(dataset.corr(),annot=True)\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.20,random_state=0)\n\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n'''\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=5)\nX_train = pca.fit_transform(X_train)\nX_test = pca.transform(X_test)\nexplained_variance = pca.explained_variance_ratio_\n'''\n'''\n\nclassifier = LinearDiscriminantAnalysis(solver='lsqr')\nclassifier.fit(X_train, y_train.ravel())\ny_pred = classifier.predict(X_test)\n'''\n'''\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(X_train,y_train)\n'''\n\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier=RandomForestClassifier(n_estimators=300,random_state=0)\nclassifier.fit(X_train,y_train)\n\n# Applying gird_search\nfrom sklearn.model_selection import GridSearchCV\n\nparameters= {\n 'n_estimators': [100, 300, 500, 800, 1000],\n 'criterion': ['gini', 'entropy'],\n 'bootstrap': [True, False]\n}\n\ngird_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = \"accuracy\",\n cv = 5,\n n_jobs =-1)\ngird_search = gird_search.fit(X_train,y_train)\n\nbest_accuracies = gird_search.best_score_\nbest_parameters = gird_search.best_params_\n\nfrom sklearn.model_selection import cross_val_score\naccuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 5)\naccuracies.mean()\n\n\n'''\nfrom sklearn.naive_bayes import MultinomialNB\nclassifier = MultinomialNB()\nclassifier.fit(X_train, y_train)\n'''\n'''\nfrom sklearn.svm import SVC\nclassifier = SVC(kernel = 'linear')\nclassifier.fit(X_train, y_train)\n\n# Applying gird_search\nfrom sklearn.model_selection import GridSearchCV\nparameters = [{'C':[1,10,100],'kernel':['linear']},\n {'C':[1,10,100,1000],'kernel':['rbf'],'gamma':[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]}]\ngird_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = \"accuracy\",\n cv = 10,\n n_jobs =-1)\ngird_search = gird_search.fit(X_train,y_train)\n\nbest_accuracies = gird_search.best_score_\nbest_parameters = gird_search.best_params_\n'''\n'''\nfrom sklearn.tree import DecisionTreeClassifier\nclassifier = DecisionTreeClassifier(criterion='entropy')\nclassifier.fit(X_train, y_train.ravel())\ndtc_pred = classifier.predict(X_test)\n\nfrom sklearn.model_selection import GridSearchCV\ncriterions = ['gini', 'entropy']\nparameters = dict(criterion=criterions)\ndtc = GridSearchCV(\n classifier, parameters, cv=5, scoring='accuracy'\n)\naaccuracy = 0.58\n'''\n\nclassifier.score(X_test,y_test)\n\ny_pred=classifier.predict(X_test)\n\nfrom sklearn.metrics import accuracy_score,confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\n\nacc = accuracy_score(y_test,y_pred)\n\n\n","repo_name":"arrony18/Kaggle-work","sub_path":"Breast_cancer_dataset/Breast_cancer.py","file_name":"Breast_cancer.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11130139024","text":"import os\nimport json\nimport random\nimport torch\nfrom ..char_utils import text2char_indices, NEGATIVE, POSITIVE, CHAR_LIST_KIND_0\nfrom . import DataIteratorAbstract, load_json\n\nDATA_LEVEL_0 = 0 # char level, no split title by space.\nDATA_LEVEL_1 = 1 # char level, split title by space\n\n\nclass CharacterIterator(DataIteratorAbstract):\n data_level = DATA_LEVEL_0\n\n def __init__(self, input_dir, date_version, data_kind, batch_size=64, is_train=False):\n super(CharacterIterator, self).__init__(batch_size=batch_size, is_train=is_train)\n\n f_id_maps = os.path.join(input_dir, f'{date_version}.{data_kind}.id_maps.json')\n f_core_product = os.path.join(input_dir, f'{date_version}.{data_kind}.core_product.json')\n f_products = os.path.join(input_dir, f'{date_version}.{data_kind}.products.json')\n\n self.id_maps = load_json(f_id_maps)\n self.core_product = self.preprocess_data(load_json(f_core_product))\n self.products = self.preprocess_data(load_json(f_products))\n self._data = self.data()\n\n def process_title(self, text):\n return text2char_indices(text, kind=self.data_level)\n\n def preprocess_data(self, origin_data):\n data = {}\n for key, value in origin_data.items():\n data[key] = self.process_title(value['title'])\n return data\n\n @staticmethod\n def create_pair(supplier_product_id: int, list_positive_id: str, list_negative_id: str):\n \"\"\"\n\n :param supplier_product_id:\n :param list_positive_id:\n :param list_negative_id:\n :return: Return examples containing supplier_product_id, positive_id or negative_id and the label.\n \"\"\"\n data = []\n for positive_id in list_positive_id:\n # print('(supplier_product_id, positive_id), POSITIVE)', (supplier_product_id, positive_id), POSITIVE)\n data.append(((supplier_product_id, positive_id), POSITIVE))\n\n for negative_id in list_negative_id:\n # print('(supplier_product_id, positive_id), POSITIVE)', (supplier_product_id, negative_id), NEGATIVE)\n data.append(((supplier_product_id, negative_id), NEGATIVE))\n return data\n\n def data(self):\n data = []\n for category_id in self.id_maps:\n list_supplier_product_id = list(self.id_maps[category_id].keys())\n list_chosen_supplier_product_id = random.choices(list_supplier_product_id,\n k=int(0.8 * len(list_supplier_product_id)))\n\n for supplier_product_id in list_chosen_supplier_product_id:\n list_core_product_id = list(\n self.id_maps[category_id][supplier_product_id])\n list_chosen_positive_id = random.choices(list_core_product_id,\n k=int(0.8 * len(list_core_product_id)))\n\n if len(list_supplier_product_id) > 1:\n list_negative_product_id = list_supplier_product_id.copy()\n list_negative_product_id.remove(supplier_product_id)\n list_negative_id = []\n for negative_product_id in list_negative_product_id:\n list_negative_id.extend(self.id_maps[category_id][negative_product_id])\n list_chosen_negative_id = random.choices(list_negative_id,\n k=int(min(0.8 * len(list_negative_id),\n # 3 * len(list_chosen_positive_id))))\n 1 * len(list_chosen_positive_id))))\n else:\n list_chosen_negative_id = []\n data.extend(self.create_pair(supplier_product_id, list_chosen_positive_id, list_chosen_negative_id))\n random.shuffle(data)\n return data\n\n def _ids2data(self, supplier_product_id, core_product_id):\n # print(supplier_product_id, core_product_id)\n return self.products[supplier_product_id], self.core_product[core_product_id]\n\n def minibatch2tensor(self, minibatch):\n max_len = None\n data_supplier = []\n data_core = []\n labels = []\n\n for (supplier_product_id, core_product_id), label in minibatch:\n supplier_product, core_product = self._ids2data(supplier_product_id, core_product_id)\n _current_max_len = max(len(supplier_product), len(core_product))\n if max_len is None or max_len < _current_max_len:\n max_len = _current_max_len\n\n data_supplier.append(supplier_product)\n data_core.append(core_product)\n labels.append(label)\n\n products = torch.zeros(2 * len(minibatch), max_len, dtype=torch.long)\n lens = []\n\n index = 0\n for product in data_supplier:\n products[index][:len(product)] = torch.LongTensor(product)\n index += 1\n lens.append(len(product))\n for product in data_core:\n # print(\"len(product)\", len(product))\n products[index][:len(product)] = torch.LongTensor(product)\n index += 1\n lens.append(len(product))\n\n # lengths = torch.IntTensor(lens)\n # labels = torch.IntTensor(labels)\n\n lengths = torch.LongTensor(lens)\n labels = torch.LongTensor(labels)\n\n return products, lengths, labels\n\n\ndef _check_iterator():\n data_iterator = CharacterIterator('datasets/190626', '190626', 'val', batch_size=1)\n # data_iter = iter(data_iterator)\n # print(next(data_iter))\n for index, _ in enumerate(data_iterator):\n if index == 0:\n a = _\n if index == 1:\n b = _\n # print(_)\n\n # for index, _ in enumerate(data_iterator):\n # if index == 0:\n # b = _\n\n # print('a', a)\n # print('b', b)\n\n for batch in (a, b):\n # print(batch)\n product_titles, len_, label = batch\n pv = ''.join([CHAR_LIST_KIND_0[c] for c in product_titles[0].tolist() if c != 0])\n print(pv)\n com = ''.join([CHAR_LIST_KIND_0[c] for c in product_titles[1].tolist() if c != 0])\n print(com)\n print(len_)\n print(label)\n\n\n\n\nif __name__ == '__main__':\n _check_iterator()\n","repo_name":"namnguyenxuan181/web_demo","sub_path":"product_matching/data_iterator/character_iterator.py","file_name":"character_iterator.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19412614144","text":"# map() = applies a function to each item in an iterable (list, tuple, etc)\n# map(function, iterable)\n\nstore = [(\"shirt\", 20.00),\n (\"pants\", 25.00),\n (\"jacket\", 50.00),\n (\"socks\", 10.00)]\n\nto_euros = lambda data: (data[0], data[1]*0.82)\nto_dollar = lambda data: (data[0], data[1]/0.82)\n\nstore_euros = map(to_euros, store)\nstore_dollar = map(to_dollar, store)\n\nfor i in store_dollar:\n print(i)\n","repo_name":"BsFelipe/PythonCrashCourse","sub_path":"maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2388190475","text":"import argparse\nimport os\nimport subprocess\n\n\ndef create_dir(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n print(\"Unable to create directory \", path)\n print(OSError)\n raise\n else:\n print(\"The directory \", path, \"already exists\")\n raise\n else:\n print(\"Directory \", path, \"created.\")\n\n\ndef foldx_repair(pdb, path, pdb_folder):\n foldx_command = \"foldx --command=RepairPDB --pdb-dir=%s --pdb=%s --output-dir=%s\"\n command = foldx_command % (pdb_folder, pdb, path)\n print(command)\n proc = subprocess.run(command,\n shell=True,\n universal_newlines=True,\n stdout=subprocess.PIPE)\n print(proc.stdout)\n foldx_output = proc.stdout.split(\"\\n\")\n ddG = []\n time = 0\n for line in foldx_output:\n if line.startswith(\"Total\"):\n if \"time\" in line:\n line = line.split()\n time = float(line[-2])\n else:\n line = line.split()\n ddG.append(float(line[-1]))\n print(ddG)\n print(time)\n return time, ddG[-1]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"pdb_folder\", help=\"Folder with pdb structures\")\n parser.add_argument(\"-directory\", help=\"Directory to save results, \\\n otherwise directory has name PDB_Repair\")\n parser.add_argument(\"-n\", help=\"Number of Repair_PDB runs. Default is 10\")\n args = parser.parse_args()\n if args.directory:\n path = args.directory\n else:\n path = \"PDB_Repair\"\n\n if args.n:\n n_rounds = args.n\n else:\n n_rounds = 10\n\n path = os.path.abspath(path)\n args.pdb_folder = os.path.abspath(args.pdb_folder)\n create_dir(path)\n print(path)\n os.chdir(path)\n try:\n os.symlink(\"/usr/local/share/FoldX/rotabase.txt\", \"rotabase.txt\")\n except OSError:\n if not os.path.exists(\"rotabase.txt\"):\n print(OSError)\n raise\n else:\n print(\"Symlink to rotabase.txt created\")\n\n pdbs = os.listdir(args.pdb_folder)\n pdbs = list(map(lambda x: x.lower(), pdbs))\n print(pdbs)\n print(n_rounds)\n energy_output = path + \"/energies.csv\"\n energy_output = open(energy_output, 'w')\n head = \"PDB,\"\n for i in range(1, n_rounds):\n head += \",time\" + str(i) + \",\" + str(i)\n head += \"\\n\"\n energy_output.write(head)\n print(head)\n for structure in pdbs:\n result = structure + \",\"\n for i in range(n_rounds):\n time, ddG = foldx_repair(structure, path, args.pdb_folder)\n result += ','.join([str(time), str(ddG)])\n result += \"\\n\"\n print(result)\n energy_output.write(result)\n energy_output.close()\nif __name__ == \"__main__\":\n main()\n","repo_name":"Aleva14/epistasis","sub_path":"run_repair_pdb.py","file_name":"run_repair_pdb.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42207445315","text":"severity = {0:'Emergency',1:'Alert',2:'Critical',3:'Error',4:'Warning',5:'Notice',6:'Informational',7:'Debug'}\nuiserver = {'ip':'10.1.1.8','port':80,'script':'/dhcpdstat/index.py'}\n\ndef updateIpblocks(conn,cur):\n sql = \"INSERT INTO ipblock(ipblock_net,ipblock_prefix,ipblock_maxleases) SELECT ipblock,ipblock_prefix,ipblock_maxleases FROM ipblock_stat WHERE tst>=(CURRENT_TIMESTAMP - interval '10 min') AND (ipblock,ipblock_prefix,ipblock_maxleases) NOT IN (SELECT ipblock_net,ipblock_prefix,ipblock_maxleases FROM ipblock) GROUP BY ipblock,ipblock_prefix,ipblock_maxleases;\"\n cur.execute(sql)\n conn.commit()\n return\n\ndef getIpblocks(conn,cur):\n ipblocks = None\n sql = 'SELECT id,ipblock_net,ipblock_prefix,ipblock_maxleases,usersegment,att,userlocation,routername,vlanname,others FROM ipblock ORDER BY ipblock_net;'\n cur.execute(sql)\n ipblocks = cur.fetchall()\n return ipblocks\n\ndef modIpblock(conn,cur,ipblocparams):\n sql = \"UPDATE ipblock SET (usersegment,routername,vlanname,others) = (%(usersegment)s,%(routername)s,%(vlanname)s,%(others)s) WHERE id=%(id)s\"\n cur.execute(sql,ipblocparams)\n conn.commit()\n return\n\ndef getServers(conn,cur):\n sql = \"SELECT * FROM servers ORDER by hostname\"\n cur.execute(sql)\n servers = cur.fetchall()\n return servers\n\ndef getServersNames(conn,cur):\n sql = \"SELECT hostname FROM servers ORDER by hostname\"\n cur.execute(sql)\n servers = cur.fetchall()\n return servers\n\ndef modServers(conn,cur,server):\n sql = \"UPDATE servers SET (active,report,alarms,emailfrom,emailrcp,eventrcp,smtpserveraddress,smtpserverport,remarks) = (%(active)s,%(report)s,%(alarms)s,%(emailfrom)s,%(emailrcp)s,%(eventrcp)s,%(smtpserveraddress)s,%(smtpserverport)s,%(remarks)s) WHERE hostname=%(hostname)s\"\n cur.execute(sql,server)\n conn.commit()\n return \n\ndef getServerEmailParams(conn,cur,server):\n sql = \"SELECT emailfrom,emailrcp,smtpserveraddress,smtpserverport FROM servers WHERE active='t' AND report='t' AND hostname='\"+server+\"'\"\n cur.execute(sql)\n servers = cur.fetchall()\n if cur.rowcount:\n return servers[0]\n else:\n return None\n\ndef getServerById(conn,cur,objId):\n sql = \"SELECT * FROM servers WHERE id=\"+str(objId)\n cur.execute(sql)\n server = None\n if cur.rowcount:\n server = cur.fetchone()\n return server\n\ndef getIpblockById(conn,cur,objId):\n sql = \"SELECT * FROM ipblock WHERE id=\"+str(objId)\n cur.execute(sql)\n ipblock = None\n if cur.rowcount:\n ipblock = cur.fetchone()\n return ipblock\n\ndef getObjById(conn,cur,objId):\n sql = \"SELECT * FROM objects WHERE id=\"+str(objId)\n cur.execute(sql)\n obj = None\n if cur.rowcount:\n obj = cur.fetchone()\n return obj\n\ndef getServerLastHourStat(conn,cur,dhcpdhn):\n lastData = []\n sql = \"SELECT * FROM active_stat WHERE dhcpdhn='\"+dhcpdhn+\"' AND tst >= (CURRENT_TIMESTAMP - interval '1 hour') ORDER BY tst DESC\"\n cur.execute(sql)\n lastData = cur.fetchall()\n return lastData\n\ndef getIPblockLastHourStat(conn,cur,ipblock):\n lastData = []\n sql = \"SELECT * FROM ipblock_stat WHERE ipblock='\"+ipblock+\"' AND tst >= (CURRENT_TIMESTAMP - interval '1 hour') ORDER BY tst DESC\"\n cur.execute(sql)\n lastData = cur.fetchall()\n return lastData\n\ndef getAlarms(conn,cur,count=20):\n alarms = []\n sql = \"SELECT event_log.*,event_log.id AS eventlog_id,event_log.active AS eventlog_active,events.*,objects.* FROM event_log JOIN events ON events.id=event_log.event_id JOIN objects ON objects.id=event_log.obj_id ORDER BY raise_time DESC LIMIT \"+str(count)\n cur.execute(sql)\n if cur.rowcount:\n alarms = cur.fetchall()\n return alarms\n\ndef ackEventlog(conn,cur,eventlog_id):\n sql = \"UPDATE event_log SET acknowledged='t' WHERE id=\"+str(eventlog_id)\n cur.execute(sql)\n conn.commit()\n return\n","repo_name":"norbertoisaac/DHCPDSTAT","sub_path":"www/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32232804357","text":"# -*- coding: utf-8 -*-\n\n#from Tripadvisor_Hotel_Reviews import Reviews\n#from Tripadvisor_Hotel_Information import Hotel_Info\n\nstart_csv_file = r'C:\\Users\\pasch\\Documents\\TripAdvisor_Scrapping\\Data'\npr = r'\\Unprocessed_Data'\ncountries =[r'\\Greece', r'\\Cyprus', r'\\France', r'\\Italy', r'\\Portugal', r'\\Spain', r'\\Turkey']\ncountry = r'\\Spain'\nc = r'\\Hotels_Name_Link.csv'\n\n#\n# for country in countries[6:7]:\n\ncsv_file = start_csv_file+country+c\nHotel_info_csv_file = start_csv_file+country+ r'\\Hotels_Info.csv'\nReviews_csv_file = start_csv_file + country + r'\\Reviews_Total.csv'\n\nimport csv\nimport time\n\ncollected_data = list(csv.DictReader(open(csv_file, encoding='utf-8')))\n\n\n# processed_data = []\n# for dt in collected_data:\n# \tif dt['Price'] != None and dt['Price'] != 'N/A' and dt['Price'] != '':\n# \t\tprocessed_data.append(dt)\n\n\n\t#######################################################################################################################\n\n\t# info = []\n\t# unused_info = []\n\t# size = len(processed_data)\n\t# for item in range(size):\n\t# \tprint('\\nProgramm Completed: %.2f'%((item/size)*100) + '%')\n\t# \tprint('\\nItem = ',item)\n\t# \tdata = processed_data[item]\n\t#\n\t# \td = {'Name': data['Name'],\n\t# \t\t 'Link': data['Link'],\n\t# \t\t 'Price': data['Price']\n\t# \t\t }\n\t#\n\t# \ttry:\n\t# \t\th_i = Hotel_Info(data['Link'])\n\t# \t\ttime.sleep(1)\n\t# \t\tif h_i == 'No reviews':\n\t# \t\t\tunused_info.append(d)\n\t# \t\telse:\n\t# \t\t\td.update(h_i)\n\t# \t\t\tinfo.append(d)\n\t#\n\t# \texcept:\n\t# \t\tunused_info.append(d)\n\t#\n\t#\n\t#\n\t#\n\t# with open(Hotel_info_csv_file, 'w', encoding='utf-8', newline='') as hotels_file:\n\t# \tfieldnames = info[0].keys()\n\t#\n\t# \twriter = csv.DictWriter(hotels_file, fieldnames=fieldnames)\n\t#\n\t# \twriter.writeheader()\n\t# \tfor data in info:\n\t# \t\ttry:\n\t#\n\t# \t\t\twriter.writerow(data)\n\t#\n\t# \t\texcept:\n\t# \t\t\tprint(data)\n\n\n#######################################################################################################################\ndef append_dict_as_row(file_name, dict_of_elem, field_names):\n # Open file in append mode\n with open(file_name, 'a+',encoding='utf-8', newline='') as write_obj:\n # Create a writer object from csv module\n dict_writer = csv.DictWriter(write_obj, fieldnames=field_names)\n # Add dictionary as wor in the csv\n dict_writer.writerow(dict_of_elem)\n\n\nhotelswithreviews = []\nfor dt in collected_data:\n\tif int(dt['Reviews']) > 5:\n\t\thotelswithreviews.append(dt)\n\n\n\nreviews_info = []\n\nunused_info_revs = []\nsize = len(hotelswithreviews)\nprint('size = ',size)\ntrial = range(size)\n\nfor item in trial[170:]:\n\tprint('\\nProgramm Completed: %.2f'%(((item-trial[0])/len(trial))*100) + '%')\n\tprint('\\nHotel Item = ', item)\n\tdata = hotelswithreviews[item]\n\n\td = {'Hotel Name': data['Name'],\n\t\t'Link': data['Link'],\n\t 'Hotel Reviews': data['Reviews']\n\t\t}\n\n\t# try:\n\thotel_reviews = Reviews(d['Link'])\n\tfor rev in hotel_reviews:\n\t\trev.update(d)\n\t\treviews_info.append(rev)\n\t\t#\n\t\tfieldnames = rev.keys()\n\t\ttry:\n\t\t\tappend_dict_as_row(Reviews_csv_file, rev, fieldnames)\n\t\texcept:\n\t\t\tunused_info_revs.append(rev)\n\n#\n# #\n# # # #Create csv file for reviews FIELDNAMES only\n# reviews_fieldnames = ['Reviewer Username', 'Reviewer Link', 'Country', 'Rating', 'Date of stay', 'Date of review', 'Title', 'Comment', 'Helpful Votes', 'Contribution Votes', 'Trip Type', 'Value Stars', 'Rooms Stars', 'Service Stars', 'Cleanliness Stars', 'Location Stars', 'Sleep Quality Stars', 'Hotel Name', 'Link', 'Hotel Reviews']\n# with open(Reviews_csv_file, 'w', encoding='utf-8', newline='') as reviews_file:\n#\n#\n# writer = csv.DictWriter(reviews_file, fieldnames=reviews_fieldnames)\n#\n# writer.writeheader()\n# #\n\n\n\n\n\n########################################################################################################################\n\n\n\n\n\n\n","repo_name":"MariusPasch/Thesis-Python-Code","sub_path":"TripAdvisor_Scrapping/Scrapping/Tripadvisor.py","file_name":"Tripadvisor.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8612915038","text":"class Proprietario:\n proprietarios = []\n\n def __init__(self, nome, cpf, data_nasc):\n self.nome = nome\n self.cpf = cpf\n self.data_nasc = data_nasc\n if Proprietario.find(cpf) == None:\n Proprietario.proprietarios.append(self)\n\n @staticmethod\n def find(cpf):\n for p in Proprietario.proprietarios:\n if p.cpf == cpf:\n return p\n return None\n\nclass Inquilino:\n inquilinos = []\n\n def __init__(self, nome, cpf, data_nasc):\n self.nome = nome\n self.cpf = cpf\n self.data_nasc = data_nasc\n if Inquilino.find(cpf) == None:\n Inquilino.inquilinos.append(self)\n\n @staticmethod\n def find(cpf):\n for p in Inquilino.inquilinos:\n if p.cpf == cpf:\n return p\n return None\n\nclass Imovel:\n imoveis = []\n\n def __init__(self, cod, cpf_prop, tipo, endereco, valor_aluguel, status_alugado):\n self.cod = cod\n self.cpf_prop = cpf_prop\n self.tipo = tipo\n self.endereco = endereco\n self.valor_aluguel = valor_aluguel\n self.status_alugado = status_alugado\n if Imovel.find(cod) == None:\n Imovel.imoveis.append(self)\n\n @staticmethod\n def find(cod):\n for p in Imovel.imoveis:\n if p.cod == cod:\n return p\n return None\n\nclass Aluguel:\n alugueis = []\n\n def __init__(self, cpf_inquilino, cod_imovel, data_inicio, data_fim):\n self.cpf_inquilino = cpf_inquilino\n self.cod_imovel = cod_imovel\n self.data_inicio = data_inicio\n self.data_fim = data_fim\n if Aluguel.find(cod_imovel) == None:\n Aluguel.alugueis.append(self)\n\n @staticmethod\n def find(cod):\n for p in Aluguel.alugueis:\n if p.cod_imovel == cod:\n return p\n return None","repo_name":"KevennyJS/PooPython","sub_path":"Classes_prova.py","file_name":"Classes_prova.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16205405063","text":"class Solution:\n def minimumReplacement(self, nums: List[int]) -> int:\n n = len(nums) # Get the length of the array\n operations = 0 # Initialize the operations counter\n\n prevValue = nums[n - 1] # Initialize prevValue with the last element of the array\n\n # Iterate through the array in reverse order\n for i in range(n - 2, -1, -1):\n if nums[i] > prevValue:\n # Calculate how many times prevValue should be added to get nums[i]\n k = (nums[i] + prevValue - 1) // prevValue\n # Increment operations by k - 1\n operations += k - 1\n # Update prevValue to be nums[i] divided by k\n prevValue = nums[i] // k\n else:\n # If nums[i] is not greater than prevValue, update prevValue to nums[i]\n prevValue = nums[i]\n\n return operations # Return the total number of operations\n","repo_name":"Stealeristaken/leetcodeSolutions","sub_path":"leetcodeInterviewQuestions/Question2366/Question2366.py","file_name":"Question2366.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"6098555985","text":"import LoadLocationData\r\nimport RandomizeFunctions\r\nimport os\r\nimport yaml\r\n\r\ndef testModifiers():\r\n modList = []\r\n\r\n modifierFiles = os.listdir(\"Modifiers\")\r\n for i in modifierFiles:\r\n if os.path.isfile(\"Modifiers/\"+i):\r\n yamlfile = open(\"Modifiers/\"+i)\r\n yamltext = yamlfile.read()\r\n modList.append(yaml.load(yamltext, Loader=yaml.FullLoader))\r\n modList[-1]['fileName'] = i\r\n\r\n # Do not load from modifier dict when modifying data\r\n # As we are checking the functionality of all modifiers manually\r\n\r\n fullLocationData = LoadLocationData.LoadDataFromFolder(\".\", None, None , {}, [])\r\n all_locs = fullLocationData[0]\r\n locationList = LoadLocationData.FlattenLocationTree(all_locs)\r\n\r\n warpFileLocation = \"Warp Data/WarpFriendlyNames.tsv\"\r\n warpGroupData = LoadLocationData.readTSVFile(warpFileLocation)\r\n\r\n for mod in modList:\r\n if 'Changes' in mod:\r\n for change in mod['Changes']:\r\n\r\n isWarpGroupChange = False\r\n\r\n location = change[\"Location\"]\r\n if location.endswith(LoadLocationData.WARP_OPTION):\r\n isWarpGroupChange = True\r\n\r\n if isWarpGroupChange:\r\n possibilities = list(filter(lambda x: x[\"Group\"] ==\r\n location.replace(LoadLocationData.WARP_OPTION, \"\"), warpGroupData))\r\n else:\r\n possibilities = list(filter(lambda x: x.Name == location, locationList))\r\n\r\n if len(possibilities) == 0:\r\n print(\"Error with mod, location not found!\", isWarpGroupChange)\r\n print(mod[\"Name\"], location)\r\n continue\r\n\r\n if not isWarpGroupChange:\r\n if \"RemoveFlagReqs\" in change:\r\n for flagR in change[\"RemoveFlagReqs\"]:\r\n anyContains = False\r\n for poss in possibilities:\r\n if flagR in poss.FlagReqs:\r\n anyContains = True\r\n if not anyContains:\r\n print(\"No flag found for:\", flagR, location)\r\n\r\n if \"RemoveItemReqs\" in change:\r\n for itemR in change[\"RemoveItemReqs\"]:\r\n anyContains = False\r\n for poss in possibilities:\r\n if itemR in poss.ItemReqs:\r\n anyContains = True\r\n if not anyContains:\r\n print(\"No item found for:\", itemR, location)\r\n\r\n if \"RemoveLocationReqs\" in change:\r\n for locR in change[\"RemoveLocationReqs\"]:\r\n anyContains = False\r\n for poss in possibilities:\r\n if locR in poss.LocationReqs:\r\n anyContains = True\r\n if not anyContains:\r\n print(\"No item found for:\", locR, location)\r\n else:\r\n if \"RemoveLocationReqs\" in change:\r\n print(\"Cannot remove Location Requirement for warp group data\")\r\n print(mod[\"Name\"], location)\r\n if \"RemoveItemReqs\" in change:\r\n print(\"Cannot remove Item Requirement for warp group data\")\r\n print(mod[\"Name\"], location)\r\n if \"RemoveFlagReqs\" in change:\r\n print(\"Cannot remove Flag Requirement for warp group data\")\r\n print(mod[\"Name\"], location)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ntestModifiers()","repo_name":"erudnick-cohen/Pokemon-Crystal-Item-Randomizer","sub_path":"Tests/TestModifiers.py","file_name":"TestModifiers.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"22"} +{"seq_id":"47623644649","text":"from typing import Tuple\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom .training import ModelTrainer\nfrom .training_types import FinalSummary, TQDMState, Summary\nfrom .validating import ModelValidator\n\n# fmt: off\ntry:\n import horovod.torch as hvd\n HAVE_HOROVOD = True\nexcept ImportError:\n HAVE_HOROVOD = False\n# fmt: on\n\n\nclass SegmentationMetric(object):\n def __init__(self, numClass):\n self.numClass = numClass\n self.confusionMatrix = np.zeros((self.numClass,) * 2) # 混淆矩阵(空)\n self.iou = np.zeros((self.numClass), float)\n self.acc = 0.\n self.loss = np.array([0.])\n self.count = 0\n self.miou = 0.\n\n def pixelAccuracy(self):\n # return all class overall pixel accuracy 正确的像素占总像素的比例\n # PA = acc = (TP + TN) / (TP + TN + FP + TN)\n acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n return acc\n\n def classPixelAccuracy(self):\n # return each category pixel accuracy(A more accurate way to call it precision)\n # acc = (TP) / TP + FP\n classAcc = np.diag(self.confusionMatrix) / self.confusionMatrix.sum(axis=1)\n return classAcc # 返回的是一个列表值,如:[0.90, 0.80, 0.96],表示类别1 2 3各类别的预测准确率\n\n def meanPixelAccuracy(self):\n \"\"\"\n Mean Pixel Accuracy(MPA,均像素精度):是PA的一种简单提升,计���每个类内被正确分类像素数的比例,之后求所有类的平均。\n :return:\n \"\"\"\n classAcc = self.classPixelAccuracy()\n meanAcc = np.nanmean(classAcc) # np.nanmean 求平均值,nan表示遇到Nan类型,其值取为0\n return meanAcc # 返回单个值,如:np.nanmean([0.90, 0.80, 0.96, nan, nan]) = (0.90 + 0.80 + 0.96) / 3 = 0.89\n\n def IntersectionOverUnion(self):\n # Intersection = TP Union = TP + FP + FN\n # IoU = TP / (TP + FP + FN)\n intersection = np.diag(self.confusionMatrix) # 取对角元素的值,返回列表\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(\n self.confusionMatrix) # axis = 1表示混淆矩阵行的值,返回列表; axis = 0表示取混淆矩阵列的值,返回列表\n IoU = intersection / union # 返回列表,其值为各个类别的IoU\n return IoU\n\n def meanIntersectionOverUnion(self):\n mIoU = np.nanmean(self.IntersectionOverUnion()) # 求各类别IoU的平均\n return mIoU\n\n def genConfusionMatrix(self, imgPredict, imgLabel): #\n \"\"\"\n 同FCN中score.py的fast_hist()函数,计算混淆矩阵\n :param imgPredict:\n :param imgLabel:\n :return: 混淆矩阵\n \"\"\"\n # remove classes from unlabeled pixels in gt image and predict\n mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n label = self.numClass * imgLabel[mask] + imgPredict[mask]\n count = np.bincount(label, minlength=self.numClass ** 2)\n confusionMatrix = count.reshape(self.numClass, self.numClass)\n # print(confusionMatrix)\n return confusionMatrix\n\n def Frequency_Weighted_Intersection_over_Union(self):\n \"\"\"\n FWIoU,频权交并比:为MIoU的一种提升,这种方法根据每个类出现的频率为其设置权重。\n FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]\n \"\"\"\n freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)\n iu = np.diag(self.confusionMatrix) / (\n np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -\n np.diag(self.confusionMatrix))\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n\n def addBatch(self, imgPredict, imgLabel, loss):\n assert imgPredict.shape == imgLabel.shape\n predicted = torch.sigmoid(imgPredict)\n predicted = (predicted>0.5).float()\n preds, y = predicted.cpu().numpy(), imgLabel.cpu().numpy()\n preds, y = preds.astype(np.int32), y.astype(np.int32)\n self.confusionMatrix += self.genConfusionMatrix(preds, y) # 得到混淆矩阵\n iou = self.IntersectionOverUnion()\n acc = self.pixelAccuracy()\n\n self.iou += iou\n self.miou += np.nanmean(iou).item()\n self.count += 1\n self.loss += np.array(loss.item())\n self.acc += acc\n\n self._latest_state = {\n \"loss\": loss.item(),\n \"acc\": acc,\n \"iou\": iou,\n \"miou\": np.nanmean(iou).item()\n }\n return self.confusionMatrix\n\n def get_latest_state(self):\n return self._latest_state\n\n def get_average_state(self):\n return {\n \"loss\": (self.loss/self.count).item(),\n \"acc\": self.acc/self.count,\n \"iou\": self.iou/self.count,\n \"miou\": self.miou/self.count\n }\n\n def reset(self):\n self.confusionMatrix = np.zeros((self.numClass, self.numClass))\n self.iou = np.zeros((self.numClass), float)\n self.acc = 0.\n self.loss = np.array([0.])\n self.count = 0\n self.miou = 0.\n\n\nclass ImagenetAccumulator(object):\n \"\"\"Horovod-aware accumulator that keeps track of accuracy so far\"\"\"\n\n def __init__(self):\n self._n_total = 0\n self._n_correct = 0\n self._total_loss = 0.\n self._count = 0\n\n def reset(self):\n self._n_total = 0\n self._n_correct = 0\n self._total_loss = 0.\n self._count = 0\n\n def accumulate(self, targets: torch.Tensor, outputs: torch.Tensor, loss: torch.Tensor):\n \"\"\"Updates the number of correct predictions and average loss so far.\n\n Parameters\n targets: The expected classes of the inputs to the model\n outputs: The classes that the model predicted\n loss: The loss that the model incurred on making its predictions\n \"\"\"\n\n targets = targets.detach()\n outputs = outputs.detach()\n loss = loss.detach().cpu()\n\n _, predicted = outputs.max(dim=1)\n n_total = torch.tensor(targets.size(0), dtype=torch.float)\n n_correct = predicted.eq(targets).cpu().float().sum()\n\n if HAVE_HOROVOD:\n n_total = hvd.allreduce(n_total, average=False, name=\"accum_n_total\")\n n_correct = hvd.allreduce(n_correct, average=False, name=\"accum_n_correct\")\n loss = hvd.allreduce(loss, average=True, name=\"accum_loss\")\n\n n_total = round(n_total.item())\n n_correct = round(n_correct.item())\n\n self._n_total += n_total\n self._n_correct += n_correct\n self._total_loss += loss.item()\n self._count += 1\n\n self._latest_state = {\n \"loss\": loss.item(),\n \"acc\": n_correct / n_total * 100,\n \"correct\": n_correct,\n \"total\": n_total,\n }\n\n def get_latest_state(self):\n return self._latest_state\n\n def get_average_state(self):\n return {\n \"loss\": self._total_loss / self._count,\n \"acc\": self._n_correct / self._n_total * 100,\n \"correct\": self._n_correct,\n \"total\": self._n_total,\n }\n\n\nclass ImagenetValidator(ModelValidator):\n def __init__(self, model, criterion):\n super().__init__()\n # self._accumulator = ImagenetAccumulator()\n self._accumulator = SegmentationMetric(2)\n\n self._model = model\n self._criterion = criterion\n\n def reset(self):\n self._accumulator.reset()\n\n def update(self, data: Tuple[torch.Tensor, torch.Tensor], device: torch.device):\n \"\"\"Computes loss between what the model produces and the ground truth, and updates the accumulator\n\n Parameters:\n data: 2-tuple with inputs and targets for the model\n device: CPU or GPU\n \"\"\"\n\n # inputs = data[0].cuda(non_blocking=True)\n # targets = data[1].cuda(non_blocking=True)\n inputs = data[0].to(device,non_blocking=True)\n targets = data[1].float().unsqueeze(1).to(device,non_blocking=True)\n\n outputs = self._model(inputs)\n loss = self._criterion(outputs, targets)\n\n self._accumulator.addBatch(outputs, targets, loss)\n\n def get_tqdm_state(self):\n state = self._accumulator.get_average_state()\n return TQDMState({\n \"loss\": f'{state[\"loss\"]:.2f}',\n \"accuracy\": f'{state[\"acc\"]:.2f}',\n \"iou(0,1)\": f'({state[\"iou\"].item(0):.2f},{state[\"iou\"].item(1):.2f})',\n \"miou\": f'{state[\"miou\"]:.2f}',\n })\n\n def get_final_summary(self):\n state = self._accumulator.get_average_state()\n return FinalSummary(Summary({\"loss\": state[\"loss\"],\n \"accuracy\": state[\"acc\"],\n \"iou-1\": state[\"iou\"].item(0),\n \"iou-2\": state[\"iou\"].item(1),\n \"miou\": state[\"miou\"]}))\n\n def get_final_metric(self):\n state = self._accumulator.get_average_state()\n return state[\"miou\"]\n\n\nclass ImagenetTrainer(ModelTrainer):\n def __init__(self, model, optimizer, lr_scheduler, criterion):\n super().__init__(model, optimizer, lr_scheduler)\n self.accumulator = SegmentationMetric(2)\n self.criterion = criterion\n\n def reset(self):\n self.accumulator.reset()\n\n def pass_to_model(self, inputs, targets):\n outputs = self.model(inputs)\n loss = self.criterion(outputs, targets)\n\n return outputs, loss\n\n def update_state(self, targets: torch.Tensor, outputs: torch.Tensor, loss: torch.Tensor):\n self.accumulator.addBatch(outputs, targets, loss)\n state = self.accumulator.get_latest_state()\n # self.latest_state = {\"loss\": state[\"loss\"], \"accuracy\": state[\"acc\"], \"miou\": state[\"miou\"]}\n self.latest_state = state\n\n def get_final_summary(self):\n state = self.accumulator.get_average_state()\n return FinalSummary(Summary({\"loss\": state[\"loss\"],\n \"accuracy\": state[\"acc\"],\n \"iou-0\": state[\"iou\"].item(0),\n \"iou-1\": state[\"iou\"].item(1),\n \"miou\": state[\"miou\"]}))\n\n\n# Dice损失函数\nclass DiceLoss(nn.Module):\n def __init__(self):\n super(DiceLoss, self).__init__()\n self.epsilon = 1e-5\n\n def forward(self, predict, target):\n assert predict.size() == target.size(), \"The size of predict and target must be equal.\"\n num = predict.size(0)\n\n pre = torch.sigmoid(predict).view(num, -1)\n tar = target.view(num, -1)\n\n intersection = (pre * tar).sum(-1).sum() # 利用预测值与标签相乘当作交集\n union = (pre + tar).sum(-1).sum()\n\n score = 1 - 2 * (intersection + self.epsilon) / (union + self.epsilon)\n return score\n\n\ndef get_imagenet_criterion():\n \"\"\"Gets the typical training loss for Imagenet classification\"\"\"\n # return torch.nn.CrossEntropyLoss()\n return DiceLoss()\n","repo_name":"lewin4/model_compressing","sub_path":"src/training/imagenet_utils.py","file_name":"imagenet_utils.py","file_ext":"py","file_size_in_byte":11164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3283519313","text":"from PyPDF2 import PdfReader\r\nfrom text2vec import SentenceModel\r\n\r\nmodel = SentenceModel('shibing624/text2vec-base-chinese')\r\n\r\n\r\ndef text(path):\r\n reader = PdfReader(path)\r\n number_of_pages = len(reader.pages)\r\n tex = []\r\n\r\n for i in range(number_of_pages):\r\n xxx = reader.pages[i].extract_text()\r\n xxxx = xxx.split(\"\\n\")\r\n\r\n for k in xxxx:\r\n tex.append(k)\r\n\r\n return tex\r\n\r\ndef text_embedding(query):\r\n\r\n text_embeddings = model.encode(query)\r\n\r\n return text_embeddings\r\n\r\n","repo_name":"WADreaming/ChatLaw","sub_path":"crate.py","file_name":"crate.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"24649035218","text":"import cv2\r\nimport numpy as np\r\nfrom Webcamera import camera\r\nfrom scipy.spatial import distance as dist\r\nimport pickle\r\nfrom foregroundextract import *\r\nfrom compareblackandwhite import compareblackandwhite\r\nfrom compareradiusfinder import radiusfinder\r\nfrom comparemoments import zernikemoments\r\nfrom comparer import compare\r\nfrom imutils.paths import list_images\r\n\r\ncamera() #these two functions are out of the main because if they are not then the img variable will read a previous sessions phot\r\nforegroundextrpart1()\r\n\r\nwindowName = \"BackgroundRemover\"\r\nimg = cv2.imread(\"foregroundextrpart1.jpg\")\r\n(ix,iy) = (-1,-1) #default mouse coord\r\ndrawing = False #if mouse isn't pressed then drawing isn't happening\r\n\r\n\r\ndef draw_circle(event, x,y,flags,param):\r\n global ix, iy,drawing #makes sure to update variables\r\n\r\n \r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n drawing = True #we are now drawing. mouse has been clicked\r\n (ix,iy) = x,y #draws a circle\r\n\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n if drawing == True:\r\n cv2.circle(img, (x,y), 10,(255,255,255),-1)\r\n\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n drawing = False\r\n\r\n\r\n\r\ndef main():\r\n \r\n cv2.namedWindow(windowName)\r\n \r\n #cv2.setMouseCallback(windowName, draw_circle)\r\n\r\n while(True): #refreshes image everytime\r\n cv2.imshow(windowName,img)\r\n cv2.setMouseCallback(windowName, draw_circle)\r\n\r\n if cv2.waitKey(20) == 27:\r\n break\r\n cv2.imwrite(\"foregroundextrpart2.jpg\", img)\r\n\r\n cv2.destroyAllWindows()\r\n\r\n foregroundextrpart2()\r\n compareblackandwhite(\"foregroundextrpart3.jpg\") \r\n radius = radiusfinder()\r\n features = zernikemoments(radius)\r\n\r\n\r\n infile = open(\"moment_dict.p\",\"rb\") #opens file filled with jojo image moments\r\n new_dict = pickle.load(infile)\r\n\r\n finalanswer = compare(new_dict, features)\r\n\r\n finalanswer = finalanswer \r\n\r\n infile.close()\r\n comparedimg = cv2.imread(\"compare.jpg\")\r\n\r\n for names in list_images(\"poses\"):\r\n if finalanswer in names and \"and\" not in names:\r\n jojoimg = cv2.imread(names)\r\n cv2.imshow(\"jojo\", jojoimg)\r\n\r\n cv2.imshow(\"takenphoto\",comparedimg)\r\n\r\n print(finalanswer)\r\n \r\n \r\n \r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Eeister/Jojoposes","sub_path":"jojo poses/jojoposefinder.py","file_name":"jojoposefinder.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43763381840","text":"# Problem Link: https://www.algoexpert.io/questions/Three%20Number%20Sum\n# Time Complexity: O(N^2)\n# Space Complexity: O(1)\n\n\ndef threeNumberSum(array, target):\n sum_of_pairs = {} # int : list(tuples)\n\n already_taken_pairs = {} # string : boolean\n\n result = []\n\n for i in range(len(array) - 1):\n for j in range(i + 1, len(array)):\n if array[i] + array[j] in sum_of_pairs:\n sum_of_pairs[array[i] + array[j]].append((i, j))\n else:\n sum_of_pairs[array[i] + array[j]] = [(i, j)]\n\n for i in range(len(array)):\n if target - array[i] in sum_of_pairs:\n for pair in sum_of_pairs[target - array[i]]:\n if pair[0] == i or pair[1] == i:\n continue\n possible_triplet = sorted(\n [array[i], array[pair[0]], array[pair[1]]])\n if str(possible_triplet) in already_taken_pairs:\n continue\n result.append(possible_triplet)\n already_taken_pairs[str(possible_triplet)] = True\n\n result.sort()\n\n return result\n","repo_name":"ShowmickKar/Algoexpert-Solutions","sub_path":"three_number_sum.py","file_name":"three_number_sum.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"15340613023","text":"import os, errno\n\nimport window as win\n\nmaxStackSize = 100\navailableAddresses = list(range(2*maxStackSize))\n\n# manages the avaiable addresses\ndef getNextNumber():\n return availableAddresses.pop(0)\n\nclass UndoManager:\n \"\"\"\n UndoManager Class\n\n Description:\n Tracks changes done on the canvas\n \"\"\"\n \n def __init__(self):\n try:\n os.makedirs(\"lowpolypainter/resources/temp/\")\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n self.undoStack = MeshStack()\n self.redoStack = MeshStack()\n \n # pushes the actual state of the window on the undoStack and clears the redoStack \n def do(self, window):\n self.undoStack.push(window)\n self.redoStack.clear()\n \n # loads the last state of the window \n def undo(self, window):\n oldVersionNumber = self.undoStack.pop()\n if oldVersionNumber >= 0:\n self.redoStack.push(window)\n window.canvasFrame.mesh.clear()\n window.loadMeshDataPath('lowpolypainter/resources/temp/temp' + str(oldVersionNumber) + '.py')\n \n # load the previous state of the window \n def redo(self, window):\n oldVersionNumber = self.redoStack.pop()\n if oldVersionNumber >= 0:\n self.undoStack.push(window)\n window.canvasFrame.mesh.clear()\n window.loadMeshDataPath('lowpolypainter/resources/temp/temp' + str(oldVersionNumber) + '.py')\n \n def clear(self):\n self.undoStack.clear()\n self.redoStack.clear()\n \n \n \n\nclass MeshStack:\n \"\"\"\n MeshStack Class\n\n Description:\n Datastructure to save previous states\n \"\"\"\n\n def __init__(self):\n self.stack = []\n \n # gets the last state and saves current state \n def push(self, window):\n saveNumber = getNextNumber()\n self.stack.append(saveNumber)\n window.saveMeshDataPath('lowpolypainter/resources/temp/temp' + str(saveNumber) + '.py')\n if len(self.stack) > maxStackSize:\n os.remove('lowpolypainter/resources/temp/temp' + str(self.stack[0]) + '.py')\n removedAddress = self.stack.pop(0)\n availableAddresses.append(removedAddress)\n \n # pops last state from stack\n def pop(self):\n if len(self.stack) == 0:\n return -1\n else:\n topOfStack = self.stack.pop()\n global availableAddresses\n availableAddresses.append(topOfStack)\n return topOfStack\n \n # clears stack and returns unused addresses \n def clear(self):\n global availableAddresses\n availableAddresses += self.stack\n self.stack = []\n \n \n \n ","repo_name":"cgtuebingen/low-poly-painter","sub_path":"lowpolypainter/undoManager.py","file_name":"undoManager.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"69969229817","text":"def perfection_check(some_num):\n divisors_sum = 0\n for divisor in range(1, some_num // 2 + 1):\n if some_num % divisor == 0:\n divisors_sum += divisor\n if divisors_sum == some_num:\n return \"We have a perfect number!\"\n else:\n return \"It's not so perfect.\"\n\n\nnumber = int(input())\nprint(perfection_check(number))","repo_name":"Funcrow1349/SoftUni-Python-Fundamentals---september-2022","sub_path":"Functions/ex_10_perfect_number.py","file_name":"ex_10_perfect_number.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"37657993575","text":"import esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome.components import fan\nfrom esphome.const import (\n CONF_ID, \n CONF_OUTPUT_ID, \n)\nfrom esphome import pins\nfrom .. import vornado_660_ns\n\n\nAUTO_LOAD = [\"fan\"]\n\n\nVornado_660 = vornado_660_ns.class_('Vornado_660', cg.Component, fan.Fan)\n\nCONF_POWER_PIN = 'power_pin'\nCONF_LL_PIN = 'll_pin'\nCONF_L_PIN = 'l_pin'\nCONF_H_PIN = 'h_pin'\nCONF_HH_PIN = 'hh_pin'\nCONF_LL_FB_PIN = 'll_fb_pin'\nCONF_L_FB_PIN = 'l_fb_pin'\nCONF_H_FB_PIN = 'h_fb_pin'\nCONF_HH_FB_PIN = 'hh_fb_pin'\n# CONF_RELAY_PIN = 'relay_pin'\n\n\nCONFIG_SCHEMA = (\n cv.Schema(\n {\n cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(Vornado_660),\n cv.Required(CONF_POWER_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_LL_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_L_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_H_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_HH_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_LL_FB_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_L_FB_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_H_FB_PIN): pins.gpio_output_pin_schema,\n cv.Required(CONF_HH_FB_PIN): pins.gpio_output_pin_schema,\n # cv.Optional(CONF_RELAY_PIN): pins.gpio_output_pin_schema,\n }\n\n )\n .extend(cv.COMPONENT_SCHEMA)\n .extend(fan.FAN_SCHEMA)\n)\n\n\nasync def to_code(config):\n var = cg.new_Pvariable(config[CONF_OUTPUT_ID])\n await cg.register_component(var, config)\n await fan.register_fan(var, config)\n\n power_pin = await cg.gpio_pin_expression(config[CONF_POWER_PIN])\n cg.add(var.set_power_pin(power_pin))\n \n ll_pin = await cg.gpio_pin_expression(config[CONF_LL_PIN])\n cg.add(var.set_ll_pin(ll_pin))\n l_pin = await cg.gpio_pin_expression(config[CONF_L_PIN])\n cg.add(var.set_l_pin(l_pin))\n h_pin = await cg.gpio_pin_expression(config[CONF_H_PIN])\n cg.add(var.set_h_pin(h_pin))\n hh_pin = await cg.gpio_pin_expression(config[CONF_HH_PIN])\n cg.add(var.set_hh_pin(hh_pin))\n\n ll_fb_pin = await cg.gpio_pin_expression(config[CONF_LL_FB_PIN])\n cg.add(var.set_ll_fb_pin(ll_fb_pin))\n l_fb_pin = await cg.gpio_pin_expression(config[CONF_L_FB_PIN])\n cg.add(var.set_l_fb_pin(l_fb_pin))\n h_fb_pin = await cg.gpio_pin_expression(config[CONF_H_FB_PIN])\n cg.add(var.set_h_fb_pin(h_fb_pin))\n hh_fb_pin = await cg.gpio_pin_expression(config[CONF_HH_FB_PIN])\n cg.add(var.set_hh_fb_pin(hh_fb_pin))\n\n # if CONF_RELAY_PIN in config:\n # relay_pin = await cg.gpio_pin_expression(config[CONF_RELAY_PIN])\n # cg.add(var.set_relay_pin(relay_pin))\n","repo_name":"deltafish32/esphome-components","sub_path":"components/vornado_660/fan/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73180171895","text":"import logging\nimport os\nimport random\nimport subprocess\nimport threading\nfrom time import sleep\n\nlogger = logging.getLogger(__name__)\n_airconditioner_thread = None\n\n\ndef aircondition(sphero):\n \"\"\"Simulate air conditioner on/off\"\"\"\n logger.info(\"Play a sound putting the air conditioner on and off\")\n global _airconditioner_thread\n if not _airconditioner_thread:\n _airconditioner_thread = AirConditioner()\n _airconditioner_thread.start()\n else:\n _airconditioner_thread.stop()\n _airconditioner_thread = None\n sleep(3)\n\n\ndef garagedoor(sphero):\n \"\"\"Simulating opening the garage door\"\"\"\n logger.info(\"Open gare door\")\n current_angle = 0\n num_steps = 40\n # make 2 loops\n rotate_by = 360 * 2 / num_steps\n for _ in range(num_steps + 1):\n current_angle = (current_angle + rotate_by) % 360\n _make_a_step(sphero, current_angle, 0, 0.3)\n\n\ndef switchlight(sphero):\n \"\"\"simulate light switch on\"\"\"\n logger.info(\"Light switch on (on sphero)\")\n for i in range(255):\n sphero.set_rgb(i, i, i)\n sleep(0.005)\n sleep(3)\n from sphero import Sphero\n Sphero().reset_default_color()\n\n\ndef welcome(sphero):\n \"\"\"Let sphero welcome you\"\"\"\n logger.info(\"Sphero welcome!\")\n for _ in range(4):\n sphero.roll(0, 20)\n sphero.set_rgb(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n sleep(0.2)\n sphero.roll(0, -20 % 360)\n sphero.set_rgb(0, 0, 0)\n sleep(0.2)\n sphero.roll(0, 0)\n from sphero import Sphero\n Sphero().reset_default_color()\n\nclass AirConditioner(threading.Thread):\n \"\"\"Play an air conditioner sound\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AirConditioner, self).__init__(*args, **kwargs)\n self.stopped = False\n\n def run(self):\n \"\"\"Start playing the wav file in a separate thread\"\"\"\n conditioner_sound_file = os.path.join(os.path.dirname(__file__), \"airconditioner.wav\")\n while(not self.stopped):\n subprocess.call([\"aplay\", conditioner_sound_file])\n\n def stop(self):\n \"\"\"Stop the running air conditioner\"\"\"\n self.stopped = True\n subprocess.call([\"pkill\", \"aplay\"])\n\n\ndef _make_a_step(sphero, current_angle, speed, step_time):\n sphero.roll(speed, current_angle)\n sleep(step_time)\n sphero.roll(0, current_angle)\n","repo_name":"didrocks/ubuntu-core-exp","sub_path":"sdc-demo/homedemo/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25050835135","text":"import statistics\r\nimport random \r\nimport csv\r\nimport plotly.figure_factory as ff\r\nimport pandas as pd\r\n\r\n\r\ndata=pd.read_csv('newdata.csv')\r\ndata1=data[\"average\"].tolist()\r\n\r\nmean=statistics.mean(data1)\r\nstd_dev=statistics.stdev(data1)\r\nprint('mean={},standard deviation{}'.format(mean,std_dev))\r\n\r\n# fig=ff.create_distplot([data1],[\"Average\"],show_hist=False)\r\n# fig.show()\r\n\r\ndef randomSetOfMean():\r\n dataset=[]\r\n\r\n for i in range(0,100):\r\n index1=random.randint(0,len(data1)-1)\r\n value=data1[index1]\r\n dataset.append(value)\r\n\r\n dataset_mean=statistics.mean(dataset)\r\n #dataset_stdDev=statistics.stdev(dataset)\r\n return dataset_mean\r\n\r\n#print('mean={},standard deviation{}'.format(dataset_mean,dataset_stdDev))\r\n\r\n\r\n# figure=ff.create_distplot([dataset],[\"Random Average Data\"],show_hist=False)\r\n# figure.show()\r\n\r\ndef setup():\r\n sampleMeanDistribution=[]\r\n for i in range(0,1000):\r\n x=randomSetOfMean()\r\n sampleMeanDistribution.append(x)\r\n sampleMean=statistics.mean(sampleMeanDistribution)\r\n print(\"Sample Mean =\",sampleMean)\r\n\r\n sample_std=statistics.stdev(sampleMeanDistribution)\r\n print(\"Std dev =\",sample_std)\r\n\r\n figure=ff.create_distplot([sampleMeanDistribution],[\"Sample Mean\"],show_hist=False)\r\n figure.show()\r\n\r\nsetup()\r\n\r\n\r\n\r\n\r\n","repo_name":"Sasmitha3001/Sample-Mean","sub_path":"C-110/sampleMeanDeviation.py","file_name":"sampleMeanDeviation.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7249725171","text":"def read_file(file_name):\n f = open(file_name,'r')\n while True:\n line = f.readline()\n if line == '':\n break\n else:\n student_info = line.strip('\\n').split('\\t')\n student = {'name':student_info[0],'age':student_info[1],'qq':student_info[2]}\n studentinfo.append(student)\n \ndef write_file(file_name):\n f = open(file_name,'w')\n for item in studentinfo:\n student = '%s\\t%s\\t%s\\n'%(item['name'] ,item['age'],item['qq'])\n f.write(student)\n f.close()\n \ndef print_menu():\n print('='*30)\n print('Student Management System'.center(30))\n print('Enter1:add student')\n print('Enter2:search student')\n print('Enter3:modify student')\n print('Enter4:delete student')\n print('Enter5:check all students')\n print('Enter6:exit')\n\ndef add_student():\n name = input('Please enter the name of student:')\n age = int(input('Please enter the age of student:'))\n qq = input('Please enter the qq number of student:')\n stu = {}\n stu['name'] = name\n stu['age'] = age\n stu['qq'] = qq\n studentinfo.append(stu)\n print('Successfully add the student information.')\n\ndef search_student():\n name = input('Please enter the name of the student you want to search:')\n for item in studentinfo:\n if item['name'] == name.strip():\n print('%s exist, age: %d, qq: %s'%(item['name'],item['age'],item['qq']))\n else:\n print('%s is not exist, please add the information'%name)\n\ndef delete_student():\n name = input('Please enter the name of the student you want to remove:')\n for item in studentinfo:\n if item['name'] == name.strip():\n studentinfo.remove(item)\n print('Sucessfully remove %s from the database'% (name))\n break\n else:\n print('%s is not exist, please try again.'%name)\n\ndef modify_student():\n name = input('Please enter the name of the student you want to edit:')\n for item in studentinfo:\n if item['name'] == name.strip():\n item['age'] = int(input('Please enter the age of student:'))\n item['qq'] = input('Please enter the qq number of student:')\n else:\n print('%s is not exist, please try again.'%name)\n\ndef print_all():\n print('nope\\tname\\tage\\tqq\\t')\n for i,item in enumerate(studentinfo,1):\n print('%s\\t'%i, end='')\n print('%s\\t%s\\t%s\\t'%(item['name'],item['age'],item['qq']))\n\ndef main():\n read_file(file_name)\n while True:\n print_menu()\n choice = int(input())\n if choice == 1:\n add_student()\n elif choice == 2:\n search_student()\n elif choice == 3:\n modify_student()\n elif choice == 4:\n delete_student()\n elif choice == 5:\n print_all()\n else:\n break\nstudentinfo = [] \nfile_name = '../data/info.txt'\nmain()\nwrite_file(file_name)\n","repo_name":"zpypiao/Study_py","sub_path":"program/system of student infomation.py","file_name":"system of student infomation.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22572998128","text":"from collections import defaultdict\nfrom graph import graph\n\n\ndef BFS(graph, root):\n \"\"\"\n Breadth first search algorithm.\n Time complexity is O(V+E).\n \"\"\"\n visited = defaultdict(dict)\n depth = defaultdict(dict)\n\n # initialization\n for v in graph:\n visited[v] = False\n depth[v] = -1\n queue = []\n # push root\n queue.append(root)\n visited[root] = True\n depth[root] = 0\n # search\n while queue: # all vertices will be pushed to queue once O(V)\n s = queue.pop(0)\n print('vertex: ' + s + ' depth: %d, ' % depth[s], end=\" \")\n for v in graph[s]: # all adjancent vertices will be scanned O(E)\n if visited[v['ID']] is not True:\n visited[v['ID']] = True\n depth[v['ID']] = depth[s] + 1\n queue.append(v['ID'])\n print('\\0')\n\n\nif __name__ == '__main__':\n g = graph()\n g.addEdge('a', 'b')\n g.addEdge('b', 'c')\n g.addEdge('a', 'c')\n g.addEdge('c', 'd')\n g.addEdge('d', 'e')\n g.addEdge('d', 'f')\n g.addEdge('g', 'h')\n BFS(g.graph, 'a')\n","repo_name":"Jyun-Neng/graph_algorithm","sub_path":"breadth-first-search.py","file_name":"breadth-first-search.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35457433780","text":"import os\nimport pytest\n\nTEST_APP_PATH = 'tests/myapp.py'\n\n\n@pytest.fixture(scope='session')\ndef app():\n def make_test_app():\n with open(TEST_APP_PATH, 'w') as f:\n for line in [\n 'from flask import Flask\\n',\n 'from flask_request_logger import RequestLogger\\n',\n 'test_app = Flask(__name__)\\n',\n 'req_logger = RequestLogger()\\n',\n 'req_logger.init_app(test_app)\\n',\n ]:\n f.write(line)\n\n make_test_app()\n\n from myapp import test_app, req_logger\n yield test_app\n\n if req_logger.db_info.drivername == 'sqlite':\n os.unlink(req_logger.db_info.database)\n if os.path.exists(TEST_APP_PATH):\n os.unlink(TEST_APP_PATH)\n\n\n@pytest.fixture\ndef client(app):\n \"\"\"A test client for the app.\"\"\"\n app.testing = True\n return app.test_client()\n","repo_name":"BbsonLin/flask-request-logger","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"19026459264","text":"from Produto import Produto\nfrom vetorProduto import vetorProduto\n\nclass MenuProduto:\n def menu(vp):\n x = int(input(\"Digite: \\n0 para sair \\n1 para cadastrar produto \\n2 busca produto \\n3 para listar produtos \\n4 remover produto\"))\n\n while x!=0 and x!=1 and x!=2 and x!=3 and x!=4:\n print(\"Digite um valor válido\\n\")\n x = int(input(\"Digite: \\n0 para sair \\n1 para cadastrar produto \\n2 busca produto \\n3 para listar produtos \\n4 remover produto\"))\n\n while x==1 or x==2 or x==3 or x==4:\n if x==0:\n break\n elif x==1:\n nome = input(\"Digite o nome do produto: \")\n codigo = input(\"Digite o código do produto: \")\n precounitario = double(input(\"Digite o preço unitário do produto: \"))\n quantidade = int(input(\"Digite a quantidade do produto: \"))\n p = Produto(nome, codigo, precounitario, quantidade)\n vp.addProduto(p)\n elif x==2:\n x = int(input(\"\\nSe você quiser buscar: \\nPor nome digite 1 \\nPor código digite 2\"))\n while x!=1 and x!=2:\n print(\"Digite um valor válido\\n\")\n x = int(input(\"\\nSe você quiser buscar: \\nPor nome digite 1 \\nPor código digite 2\"))\n if x==1:\n nome = input(\"Digite o nome do produto\")\n vp.buscarProdutoNome(nome)\n elif x==2:\n codigo = input(\"Digite o código do produto\\n\")\n vp.buscarProdutoCodigo(codigo)\n #elif x==3:\n #função de listar produto\n\n #elif x==4:\n #função de remover produto\n\n x = int(input(\"Digite: \\n0 para sair \\n1 para cadastrar produto \\n2 busca produto \\n3 para listar produtos \\n4 remover produto\")) ","repo_name":"PedroOtv/tp2_aeds3","sub_path":"menuProduto.py","file_name":"menuProduto.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3932420030","text":"from configurations.config import Config\nfrom utils.customLogger import LogGen\nimport pytest\n\n\nclass Test_Login_Page_Title:\n\n baseUrl = Config.appUrl\n \n logger = LogGen.logGen()\n \n @pytest.mark.smoke\n def test_login_page_title(self, setUp):\n \n self.logger.info(\"************ Test_Login **************\")\n self.logger.info(\"************ Verifying login page title **************\")\n self.driver = setUp\n self.driver.maximize_window()\n self.driver.get(self.baseUrl)\n \n login_title = self.driver.title\n \n if login_title == \"Your store. Login\":\n self.driver.quit()\n self.logger.info(\"************ Login page title is verified successfully **************\")\n assert True\n else:\n self.driver.save_screenshot(\"../screenshots/\"+\"test_loginPageTitle\"+\".png\")\n self.driver.quit()\n self.logger.error(\"************ Login page title verification failed **************\")\n assert False","repo_name":"appan-roy/Selenium-Python-Hybrid-Framework","sub_path":"testcases/test_login_page_title.py","file_name":"test_login_page_title.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37610224728","text":"#python -m grpc_tools.protoc -I ./Server/Protos --python_out=./Server --grpc_python_out=./Server ./Server/Protos/Chat.proto ; python -m grpc_tools.protoc -I ./Server/Protos --python_out=./Client --grpc_python_out=./Client ./Server/Protos/Chat.proto\n\n#imports de rpc y protos\nimport grpc\nimport Chat_pb2\nimport Chat_pb2_grpc\n\n#imports Funcionales\nimport threading\nfrom datetime import datetime\nfrom concurrent import futures\n\n\n#IP = \"[::]\"\nIP = \"0.0.0.0\"\n#Windows port\n#PORT = \"50051\"\n#Docker port\nPORT = \"8080\"\nFILE = \"./Server/log.txt\"\nEVENTS = []\n\n\nclass ChatDB ():\n def __init__(self):\n self.Clients = {}\n try:\n file = open(FILE,\"r\")\n print(\"Ya existe un archivo log\")\n file.close()\n except IOError:\n print(\"No se encontro un archivo log\")\n try: \n file = open(FILE, \"w\")\n print(\"Archivo log creado exitosamente\")\n file.close()\n except IOError:\n print(\"Ha ocurrido un error inesperado al crear el archivo log\")\n\n\n def AddClient(self, ClientId):\n\n if ClientId in self.Clients.keys():\n print(\"El cliente {0} ya existe\".format(ClientId))\n return False\n else:\n self.Clients[ClientId] = []\n print(\"Cliente {0} agregado exitosamente!\".format(ClientId))\n return True\n\n def AddMessage(self, ClientId, SecondId, Message):\n TimeStamp = datetime.now().strftime(\"%d-%b-%Y|%H:%M:%S\")\n #IdMensaje = IDEMISOR-IDRECEPTOR-TIMESTAMP\n IdMessage = str(ClientId)+\"_\"+str(SecondId)+\"_\"+str(TimeStamp)\n try:\n log = open(FILE,\"a\")\n except IOError:\n print(\"[ERROR] Algo salio mal, al intentar registrar el mensaje: \"+ str(Message)+\" entre los clientes :\"+str(ClientId)+\" -- \"+str(SecondId))\n return\n\n Data = \"#\".join([str(IdMessage), str(Message), \"\\n\"])\n\n if ClientId in self.Clients.keys():\n if SecondId in self.Clients.keys():\n self.Clients[SecondId].append(Data) \n log.write(Data)\n print(\"[EXITO] El mensaje: \"+ str(Message)+\" entre los clientes :\"+str(ClientId)+\" -- \"+str(SecondId)+ \" Se registro correctame\")\n return Chat_pb2.Confirmacion(Tipo = 1, IdPropietario=ClientId, IdMensaje = IdMessage, Error = \"\" )\n else:\n print(\"El receptor {0} no se ecnuentra registrado\".format(SecondId))\n return Chat_pb2.Confirmacion(Tipo = 0, IdPropietario=ClientId, IdMensaje = IdMessage, Error = \"El receptor {0} no se ecnuentra registrado\".format(SecondId) )\n else:\n print(\"El emisor {0} no se encuentra registrado\".format(ClientId))\n return Chat_pb2.Confirmacion(Tipo = 0, IdPropietario=ClientId, IdMensaje = IdMessage, Error = \"El emisor {0} no se encuentra registrado\".format(ClientId))\n log.close()\n\n #Mensajes que he recibido por ClienteId\n def GetMessages(self,ClientId):\n temp = []\n try:\n for mensaje in self.Clients[ClientId]:\n temp.append(mensaje)\n print(\"se obtubieron los mensajes del usuario {0}\".format(ClientId))\n \n except Exception as error:\n print(error)\n print(\"Error al acceder al buffer de mensajes del cliente {0}\".format(ClientId))\n \n self.Clients[ClientId].clear()\n for m in temp:\n IdMensaje, Mensaje = m.split(sep=\"#\", maxsplit=1)\n IdEmisor,IdReceptor, TimeStamp = IdMensaje.split(sep=\"_\", maxsplit = 2)\n mensaje = Chat_pb2.MensajeCliente(IdPropietario = IdEmisor, IdDestinatario = IdReceptor, IdMensaje = IdMensaje,\n TimeStamp = TimeStamp, Mensaje = Mensaje, Error = \"\" )\n yield mensaje\n #Mensajes enviados por ClietntID\n def GetRecord(self, ClientId):\n temp = []\n try:\n log = open(FILE,\"r\")\n except IOError:\n print(\"[ERROR] Algo salio mal, al intentar abrir el archivo log.txt\")\n return\n for linea in log:\n IdMensaje, Mensaje = linea.split(sep=\"#\", maxsplit=1)\n IdEmisor,IdReceptor, TimeStamp = IdMensaje.split(sep=\"_\", maxsplit = 2)\n if IdEmisor == ClientId:\n temp.append((IdEmisor,IdReceptor,IdMensaje, TimeStamp, Mensaje))\n log.close()\n for tupla in temp:\n IdEmisor,IdReceptor,IdMensaje, TimeStamp, Mensaje = tupla\n mensaje = Chat_pb2.MensajeCliente(IdPropietario = IdEmisor, IdDestinatario = IdReceptor, IdMensaje = IdMensaje,\n TimeStamp = TimeStamp, Mensaje = Mensaje, Error = \"\" )\n yield mensaje\n \n def GetAllMessages(self):\n temp = []\n try:\n log = open(FILE,\"r\")\n except IOError:\n print(\"[ERROR] Algo salio mal, al intentar abrir el archivo log.txt\")\n return\n for linea in log:\n IdMensaje, Mensaje = linea.split(sep=\"#\", maxsplit=1)\n IdEmisor,IdReceptor, TimeStamp = IdMensaje.split(sep=\"_\", maxsplit = 2)\n temp.append((IdEmisor,IdReceptor,IdMensaje, TimeStamp, Mensaje)) \n return temp\n\n def GetClients(self, ClientId):\n for client in self.Clients.keys():\n if client != ClientId:\n mensaje = Chat_pb2.MensajeCliente(IdPropietario = \"\", IdDestinatario = \"\", IdMensaje = \"\", TimeStamp = \"\", Mensaje = client, Error = \"\" )\n yield mensaje\n\n\n \nclass ChatServicer (Chat_pb2_grpc.ChatServicer):\n\n #Directorio de clientes\n\n def __init__(self):\n self.Directorio = ChatDB()\n self.ServerId = \"S01\"\n self.ClientNumber = 0\n #self.Events = []\n print(\"iniciando servicios\")\n #threading.Thread(target= self.Menu()).start()\n \n def Saludo(self, request, context):\n\n NewId = \"Cliente-\"+str(self.ClientNumber+1)\n if (request.Tipo == 0):\n if (self.Directorio.AddClient(NewId)):\n self.ClientNumber = self.ClientNumber +1\n print(\"Se ha agregado a {0} a la lista de clientes\".format(NewId))\n return Chat_pb2.Saludos(Tipo = 1, IdCliente = NewId, IdServidor = str(self.ServerId), Error = \"\")\n else:\n return Chat_pb2.Saludos(Tipo = 1, IdCliente = \"\", IdServidor = \"\", Error = \"Error al ser agregado al servidor, El cliente ya existe\")\n elif (request.Tipo == 1):\n print(\"USTED NO DEBERIA ESTAR AQUI! ¬¬\")\n\n def EnvioSolicitud(self, request, context):\n return self.Directorio.AddMessage(request.IdPropietario, request.IdDestinatario, request.Mensaje)\n\n def DespachoMensajes(self, request, context):\n if (request.Tipo == \"r\"):\n while True:\n return self.Directorio.GetMessages(request.IdCliente)\n elif (request.Tipo == \"e\"):\n while True:\n return self.Directorio.GetRecord(request.IdCliente)\n elif (request.Tipo == \"c\"):\n while True:\n return self.Directorio.GetClients(request.IdCliente)\n else:\n print(\"Usted NO DEBERIA ESTAR AQUI! ¬¬\")\n\"\"\" \n def Menu(self):\n option = 0\n while option != 4:\n print(\"--------------------------------------------\\nServidor {0}\\n\\nSeleccione una opcion:\\n1) Revisar eventos del servidor\\n2) Ver clientes activos.\\n3) Ver mensajes almacenados\\n4) Salir.\\n--------------------------------------------\\n\".format(self.ServerId))\n option = str(input(\"Opcion: \"))\n \n if option == str(1):\n for event in self.Events:\n print(event)\n elif option == str(2):\n i = 1\n for client in self.Directorio.GetClients():\n print(\"{0}) {1}\".format(i, client))\n i=i+1\n elif option == str(3):\n AllMessages = self.Directorio.GetAllMessages()\n print(\"--------------------------------------------\\nMensajes almacenados en log.txt:\\n----fecha y hora----|-Emisor-|-Receptor-|----Mensaje----\\n\")\n for IdEmisor,IdReceptor,IdMensaje, TimeStamp, Mensaje in AllMessages:\n print(\"{0} {1} {2} {3}\".format(TimeStamp, IdEmisor, IdReceptor, Mensaje))\n print(\"--------------------------------------------\\n\")\n elif option == str(4):\n break\n else:\n print(\"Ha seleccionado una opcion no valida, intentelo nuevamente\")\ndef serve():\n \n ChatServer = grpc.server(futures.ThreadPoolExecutor(max_workers = 10))\n Chat_pb2_grpc.add_ChatServicer_to_server(ChatServicer(), ChatServer)\n ChatServer.add_insecure_port(IP+\":\"+PORT)\n ChatServer.start()\n ChatServer.wait_for_termination()\n\"\"\"\nif __name__ == '__main__':\n #serve()\n ChatServer = grpc.server(futures.ThreadPoolExecutor(max_workers = 10))\n Chat_pb2_grpc.add_ChatServicer_to_server(ChatServicer(), ChatServer)\n ChatServer.add_insecure_port(IP+\":\"+PORT)\n ChatServer.start()\n ChatServer.wait_for_termination()","repo_name":"diegomontecinos/SistemasDistribuidos","sub_path":"Tarea2/Pregunta-1-gRPC/Server/app/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":9163,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44381745590","text":"import gym\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras.optimizers import Adam\n\nfrom PolicyGradients.Algos.Reinforce import Reinforce, Model, compute_discounted_rewards\n\nhuber_loss = tf.keras.losses.Huber(reduction=tf.keras.losses.Reduction.SUM)\n\ndef _compute_policy_loss(action_probs, rewards, state_values):\n log_action_probs = tf.math.log(action_probs)\n advs = tf.stop_gradient(rewards - state_values)\n return -tf.math.reduce_sum(log_action_probs * advs)\n\n\nclass ReinforceBaseLine(Reinforce):\n def __init__(self, env, actor_lr, critic_lr, policy, critic, gamma, max_episodes, max_eps_steps):\n super().__init__(env, actor_lr, policy, gamma, max_episodes, max_eps_steps)\n self.critic = critic\n self.critic_optimizer = Adam(critic_lr)\n\n def _run_episode(self):\n state = tf.constant(self.env.reset(), dtype=tf.float32)\n rewards = tf.TensorArray(tf.float32, 0, True)\n action_probs = tf.TensorArray(tf.float32, 0, True)\n state_values = tf.TensorArray(tf.float32, 0, True)\n\n state_shape = state.shape\n\n for step in tf.range(self.max_eps_steps):\n action, action_logits_step = self.get_action(state)\n action_probs_step = tf.nn.softmax(action_logits_step)[0, action]\n state, reward, done = self.tf_env_step(action)\n value = self.critic(tf.expand_dims(state, 0))\n\n self.steps_taken += 1\n\n action_probs = action_probs.write(step, action_probs_step)\n rewards = rewards.write(step, reward)\n state_values = state_values.write(step, value)\n\n state.set_shape(state_shape)\n\n if tf.cast(done, tf.bool):\n break\n return action_probs.stack(), rewards.stack(), state_values.stack()\n\n def train(self):\n with tf.GradientTape() as tape, tf.GradientTape() as tape2:\n action_probs, rewards, values = self._run_episode()\n discounted_rewards = compute_discounted_rewards(rewards, self.gamma)\n policy_loss = _compute_policy_loss(action_probs, discounted_rewards, values)\n critic_loss = huber_loss(values,discounted_rewards)\n policy_grads = tape.gradient(policy_loss, self.policy.trainable_variables)\n critic_grads = tape2.gradient(critic_loss, self.critic.trainable_variables)\n self.policy_optimizer.apply_gradients(zip(policy_grads, self.policy.trainable_variables))\n self.critic_optimizer.apply_gradients(zip(critic_grads, self.critic.trainable_variables))\n\n return rewards\n\n\nif __name__ == '__main__':\n env = gym.make('CartPole-v0')\n policy = Model(env.action_space.n, hidden_units=12)\n critic = Model(1, hidden_units=12)\n\n reinforce_baseline = ReinforceBaseLine(env, actor_lr=.0025, critic_lr=.02, critic=critic, policy=policy, gamma=.99,\n max_episodes=10000, max_eps_steps=250)\n\n running_rewards, episode_rewards = reinforce_baseline()\n episodes = np.arange(len(running_rewards))\n plt.plot(episodes, running_rewards, label='running_reward')\n plt.plot(episodes, episode_rewards, label='episode_reward')\n plt.legend()\n plt.show()\n\n reinforce_baseline.demo()\n","repo_name":"ChrisBenka/DQN-and-PolicyGradients","sub_path":"PolicyGradients/Algos/ReinforceBaseline.py","file_name":"ReinforceBaseline.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73815480056","text":"from helpers import *\nfrom uninformed import *\nfrom informed import *\nfrom local import *\n\n'''\n Perform Breadth First Search\n'''\n#read grid\ngrid = readGrid(\"grid.txt\")\n#Set the starting positon and goal position \nstart_pos = [0, 0]\ngoal_pos = [len(grid)-1, len(grid[0])-1]\n#Set up the initial closed and open lists \nopen_list = []\nclosed_list = []\n#setup uninformed breadth search search\nstart_node = Node(value=start_pos)\ngoal_node = Node(value=goal_pos)\n\nprint(\"Perform Breadth First Search\")\n\nif (start_node.value != goal_node.value):\n closed_list.append(start_node)\n breadth_path = uninformed_search(open_list, closed_list, start_node, goal_node, grid, True)\n#visualizeGrid(grid, breadth_path, True)\noutputGrid(grid, start_pos, goal_pos, breadth_path, 'breadth_output.txt')\n\n'''\n Perform Depth First Search\n'''\n#read grid\ngrid = readGrid(\"grid.txt\")\n#Set the starting positon and goal position \nstart_pos = [0, 0]\ngoal_pos = [len(grid)-1, len(grid[0])-1]\n#Set up the initial closed and open lists \nopen_list = []\nclosed_list = []\n#setup uninformed depth search\nstart_node = Node(value=start_pos)\ngoal_node = Node(value=goal_pos)\n\nprint(\"Perform Depth First Search\")\n\nif (start_node.value != goal_node.value):\n closed_list.append(start_node)\n depth_path = uninformed_search(open_list, closed_list, start_node, goal_node, grid, False)\n#visualizeGrid(grid, depth_path, True)\noutputGrid(grid, start_pos, goal_pos, depth_path, 'depth_output.txt')\n\n'''\n Perform A* Search\n'''\n#read grid\ngrid = readGrid(\"grid.txt\")\n#Set the starting positon and goal position \nstart_pos = [0, 0]\ngoal_pos = [len(grid)-1, len(grid[0])-1]\n#Set up the initial closed and open lists \nopen_list = []\nclosed_list = []\n#setup informed search\nheur = heuristic(start_pos, goal_pos)\nstart_node = InformedNode(start_pos, g=grid[start_pos[0]][start_pos[1]], h=heur)\ngoal_node = InformedNode(value=goal_pos)\n\nprint(\"Perform A* First Search\")\n\nif (start_node.value != goal_node.value):\n closed_list.append(start_node)\n path = a_star(open_list, closed_list, start_node, goal_node, grid)\noutputGrid(grid, start_pos, goal_pos, path, 'a_star_output.txt')\n#visualizeGrid(grid, path, True)\n\n'''\n Perform Simulated Annealing (Local Search)\n'''\nprint(\"Perform Simulated Annealing\")\nboard_sizes = [4, 8, 16]\ndecay_rates = [.9, .75, .5]\nthresholds = [0.000001, 0.0000001, 0.00000001]\n\n#trying different pairs \nfor i in range(0, 3):\n print(\"***************************\")\n print(f\"Board size: {board_sizes[1]}\")\n print(\"***************************\")\n print(f\"Decay rate: {decay_rates[i]}\")\n print(\"***************************\")\n print(f\"Threshold: {thresholds[i]}\")\n print(\"***************************\")\n board = Board(board_sizes[1])\n board.rand()\n result = simulated_annealing(board, decay_rates[i], thresholds[i]) \n\n#10 runs of each board size\n#10 runs of each board size\ntotal = 0\nfor j in board_sizes:\n print(\"***************************\")\n print(f\"Board size: {j}\")\n print(\"***************************\")\n print(f\"Decay rate: {decay_rates[0]}\")\n print(\"***************************\")\n print(f\"Threshold: {thresholds[0]}\")\n for k in range(0, 10):\n board = Board(j)\n board.rand() \n print(\"---------------------------\")\n print(f\"Run: {k+1}\")\n print()\n result = simulated_annealing(board, decay_rates[0], thresholds[0])\n total = total + result\n print(\"---------------------------\") \n print(f\"Average h-value of board size {j}: {total/10}\")\n print(\"---------------------------\")","repo_name":"calvinhathcock/ai_searching","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8694970880","text":"class Cup:\n color = \"No color\"\n _material = \"No infor\"\n __price = 0.0 # private variable\n def __init__(self):\n self.color = None\n self._material = None # protected variable\n self.__price = 0.0 # private variable\n\n def fill(self, material):\n self._material = material\n\n def empty(self):\n self._content = None\n\nprint(Cup.color)\nprint(Cup._material)\nprint(Cup.__price)\n","repo_name":"sacki123/django_training","sub_path":"Python test/DAY_10/scope_access.py","file_name":"scope_access.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20535005795","text":"import os\n\nfrom flask import Blueprint, render_template, request, url_for\nfrom werkzeug.utils import redirect\n\nfrom config.default import BASE_DIR\nfrom .auth_views import login_required\nfrom .. import db\nfrom ..forms import CategoryForm, StatisticForm\nfrom ..functions import get_total_visit_count, get_yesterday_visit_count, get_total_posts_count, get_today_visit_count\nfrom ..models import Category, Statistic\nfrom collections import Counter\nimport csv, requests\n\nbp = Blueprint('statistic', __name__, url_prefix='/statistic')\n\n\n@bp.route('/s_list/')\n@login_required\ndef s_list():\n total_visit_count = get_total_visit_count()\n yesterday_visit_count = get_yesterday_visit_count()\n total_posts_count = get_total_posts_count()\n today_visit_user_count = get_today_visit_count()\n grp = request.args.get('grp', type=str, default=1)\n form_for_new_category = CategoryForm()\n category_list = []\n for selected_category in Category.query.all():\n category_list.append(selected_category.category)\n\n # Statistic 테이블에서 데이터 가져오기\n title_list = []\n statistic_list = Statistic.query.all()\n for name in statistic_list:\n title_list.append(name.title)\n\n if grp:\n statistic_query = Statistic.query.filter_by(id=grp).first()\n title = statistic_query.title\n labels = statistic_query.labels.strip('[]')\n data = statistic_query.data_value.strip('[]')\n stepsize = statistic_query.stepsize\n border_color = statistic_query.border_color\n type = statistic_query.type\n background_color = statistic_query.background_color\n border_width = statistic_query.border_width\n else:\n statistic_query = Statistic.query.filter_by(id=1).first()\n title = statistic_query.title\n labels = statistic_query.labels.strip('[]')\n data = statistic_query.data_value.strip('[]')\n stepsize = statistic_query.stepsize\n border_color = statistic_query.border_color\n type = statistic_query.type\n background_color = statistic_query.background_color\n border_width = statistic_query.border_width\n\n return render_template('statistic/statistic_list.html', form_for_new_category=form_for_new_category,\n category_list=category_list, title=title, labels=labels, data=data, stepsize=stepsize,\n border_color=border_color, type=type, title_list=title_list,\n view_name='statistic.s_list', background_color=background_color, border_width=border_width,\n total_posts_count=total_posts_count, total_visit_count=total_visit_count,\n today_visit_user_count=today_visit_user_count, yesterday_visit_count=yesterday_visit_count)\n\n\n@bp.route('/create/', methods=['GET', 'POST'])\n@login_required\ndef create():\n total_visit_count = get_total_visit_count()\n yesterday_visit_count = get_yesterday_visit_count()\n total_posts_count = get_total_posts_count()\n today_visit_user_count = get_today_visit_count()\n form = StatisticForm()\n form_for_new_category = CategoryForm()\n category_list = []\n for selected_category in Category.query.all():\n category_list.append(selected_category.category)\n\n if request.method == 'POST':\n new_graph = Statistic(\n title=form.title.data,\n labels=form.labels.data,\n data_value=form.data_value.data,\n stepsize=form.stepsize.data,\n border_color=form.border_color.data,\n type=form.type.data,\n background_color=form.background_color.data,\n border_width=form.border_width.data\n )\n db.session.add(new_graph)\n db.session.commit()\n return redirect(url_for('statistic.s_list'))\n\n return render_template('statistic/statistic_form.html', category_list=category_list,\n form_for_new_category=form_for_new_category, form=form, view_name='statistic.create',\n total_visit_count=total_visit_count, today_visit_user_count=today_visit_user_count,\n total_posts_count=total_posts_count, yesterday_visit_count=yesterday_visit_count)\n\n\n@bp.route('/live_chart')\n@login_required\ndef live_chart():\n total_visit_count = get_total_visit_count()\n yesterday_visit_count = get_yesterday_visit_count()\n total_posts_count = get_total_posts_count()\n today_visit_user_count = get_today_visit_count()\n form_for_new_category = CategoryForm()\n seoul_time_list, seoul_temperature_list, seoul_humidity_list, seoul_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Seoul.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n seoul_time_list.append(current_time)\n seoul_temperature_list.append(temperature)\n seoul_humidity_list.append(humidity)\n seoul_description_list.append(weather_description)\n\n busan_time_list, busan_temperature_list, busan_humidity_list, busan_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Busan.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n busan_time_list.append(current_time)\n busan_temperature_list.append(temperature)\n busan_humidity_list.append(humidity)\n busan_description_list.append(weather_description)\n\n daegu_time_list, daegu_temperature_list, daegu_humidity_list, daegu_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Daegu.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n daegu_time_list.append(current_time)\n daegu_temperature_list.append(temperature)\n daegu_humidity_list.append(humidity)\n daegu_description_list.append(weather_description)\n\n daejeon_time_list, daejeon_temperature_list, daejeon_humidity_list, daejeon_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Daejeon.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n daejeon_time_list.append(current_time)\n daejeon_temperature_list.append(temperature)\n daejeon_humidity_list.append(humidity)\n daejeon_description_list.append(weather_description)\n\n ulsan_time_list, ulsan_temperature_list, ulsan_humidity_list, ulsan_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Ulsan.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n ulsan_time_list.append(current_time)\n ulsan_temperature_list.append(temperature)\n ulsan_humidity_list.append(humidity)\n ulsan_description_list.append(weather_description)\n\n incheon_time_list, incheon_temperature_list, incheon_humidity_list, incheon_description_list = [], [], [], []\n url_path = os.path.join(BASE_DIR, \"pybo/static/statistic_data/weather_data_Incheon.txt\")\n with open(url_path, 'r') as f:\n weather_data = f.read()\n weather_data = weather_data.split('\\n')\n for data in weather_data[-11:-1]:\n data = data.split(',')\n current_time = data[0]\n temperature = data[1]\n humidity = data[2]\n weather_description = data[3]\n incheon_time_list.append(current_time)\n incheon_temperature_list.append(temperature)\n incheon_humidity_list.append(humidity)\n incheon_description_list.append(weather_description)\n\n return render_template('statistic/live_chart_list.html', form_for_new_category=form_for_new_category,\n seoul_temperature_list=str(seoul_temperature_list).strip('[]'),\n seoul_humidity_list=str(seoul_humidity_list).strip('[]'),\n seoul_description_list=str(seoul_description_list).strip('[]'),\n seoul_time_list=str(seoul_time_list).strip('[]'),\n busan_temperature_list=str(busan_temperature_list).strip('[]'),\n busan_humidity_list=str(busan_humidity_list).strip('[]'),\n busan_description_list=str(busan_description_list).strip('[]'),\n busan_time_list=str(busan_time_list).strip('[]'),\n daegu_temperature_list=str(daegu_temperature_list).strip('[]'),\n daegu_humidity_list=str(daegu_humidity_list).strip('[]'),\n daegu_description_list=str(daegu_description_list).strip('[]'),\n daegu_time_list=str(daegu_time_list).strip('[]'),\n daejeon_temperature_list=str(daejeon_temperature_list).strip('[]'),\n daejeon_humidity_list=str(daejeon_humidity_list).strip('[]'),\n daejeon_description_list=str(daejeon_description_list).strip('[]'),\n daejeon_time_list=str(daejeon_time_list).strip('[]'),\n ulsan_temperature_list=str(ulsan_temperature_list).strip('[]'),\n ulsan_humidity_list=str(ulsan_humidity_list).strip('[]'),\n ulsan_description_list=str(ulsan_description_list).strip('[]'),\n ulsan_time_list=str(ulsan_time_list).strip('[]'),\n incheon_temperature_list=str(incheon_temperature_list).strip('[]'),\n incheon_humidity_list=str(incheon_humidity_list).strip('[]'),\n incheon_description_list=str(incheon_description_list).strip('[]'),\n incheon_time_list=str(incheon_time_list).strip('[]'),\n total_visit_count=total_visit_count, today_visit_user_count=today_visit_user_count,\n total_posts_count=total_posts_count, yesterday_visit_count=yesterday_visit_count)\n","repo_name":"Kang-Minseokk/minseokcom","sub_path":"pybo/views/statistic_views.py","file_name":"statistic_views.py","file_ext":"py","file_size_in_byte":11244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13122638163","text":"\"\"\"\nquery protein classes.\n\"\"\"\n\nimport argparse\nfrom glob import glob\nfrom collections import defaultdict\nimport os\nimport pickle\n\n\nDATABASE = \"proteinclasses.pkl\"\nPROTEIN_LIST = \"proteinclasses.tsv\"\n\n\ndef makedb():\n protein_catalogs = defaultdict(list)\n for tsvfile in glob(\"tsv/*.tsv\"):\n catalog = os.path.basename(os.path.splitext(tsvfile)[0])\n n = 0\n indexes = []\n for line in open(tsvfile):\n dic = {}\n if n == 0:\n indexes = line.strip().split(\"\\t\")\n n = 1\n else:\n tmp = line.strip().split(\"\\t\")\n for ind, value in zip(indexes[1:], tmp[1:]):\n dic[ind] = value\n protein_catalogs[tmp[0]].append((catalog, dic))\n with open(DATABASE, 'wb') as f_pkl:\n pickle.dump(protein_catalogs, f_pkl)\n\n\ndef query(gene, item):\n global DATABASE, PROTEIN_LIST\n classes = []\n for line in open(PROTEIN_LIST):\n classes.append(line.split(\"\\t\")[0])\n with open(DATABASE, 'rb') as f_pkl:\n protein_catalogs = pickle.load(f_pkl)\n sorted_res = sorted(protein_catalogs[gene], key=lambda i: classes.index(i[0]))\n res = set()\n for (catalog, dic) in sorted_res:\n print(\"{}\".format(catalog))\n # in case of multi-result in different catalogs.\n res.add(dic[item])\n print(\"#########################################################\")\n for i in res:\n print(i)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='''query protein catalogs.''')\n parser.add_argument('--makedb', action='store_true',\n help='make catalogs db if specified.')\n parser.add_argument('-g', '--gene',\n help='input gene name.')\n parser.add_argument('-i', dest='item', default='Protein class',\n choices=['Gene synonym', 'Ensembl', 'Gene description',\n 'Chromosome', 'Position', 'Protein class',\n 'Evidence', 'Antibody', 'Reliability (IH)',\n 'Reliability (Mouse Brain)', 'Reliability (IF)',\n 'Subcellular location', 'Prognostic p-value',\n 'RNA cancer category', 'RNA tissue category',\n 'RNA TS', 'RNA TS TPM', 'TPM max in non-specific',\n 'RNA cell line category'],\n help='specify query item.')\n args = parser.parse_args()\n if args.makedb:\n makedb()\n else:\n if args.gene:\n query(args.gene, args.item)\n else:\n parser.print_help()\n","repo_name":"zzploveyou/proteinclasses","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"9734047857","text":"#!/usr/bin/env python\n\nimport math\n\nfrom position import Position\nfrom route import Route\n\nTEST_ACTUAL_POSITION = Position(1.4, 2.0)\nTEST_ACTUAL_POSITION_2 = Position(1.9, 1.0)\nTEST_ROUTE = [Position(0.5, 1.0), Position(1.0, 1.0), Position(2.0, 1.0), Position(3.0, 1.0)]\nEXPECTED_ROUTE = [Position(2.0, 1.0), Position(3.0, 1.0), Position(0.5, 1.0), Position(1.0, 1.0), Position(1.4, 2.0)]\nEXPECTED_ROUTE_2 = [Position(2.0, 1.0), Position(3.0, 1.0), Position(0.5, 1.0), Position(1.0, 1.0), Position(1.9, 1.0)]\n\nclass TestRoute:\n\tdef test_argmin(self):\n\t\ttestArray = [3.2, 1.0, 2.5, 1.1]\n\t\tminIndex = Route._argmin(testArray)\n\t\tassert 1 == minIndex\n\n\tdef test_initWithWideAngle(self):\n\t\t# when\n\t\tactual = Route(TEST_ACTUAL_POSITION, TEST_ROUTE)\n\t\t\n\t\t# then\n\t\tfor i in range(0, len(actual.ordered_coordinates)):\n\t\t\tassert actual.ordered_coordinates[i] == EXPECTED_ROUTE[i]\n\n\tdef test_initWithNarrowAngle(self):\n\t\t# when\n\t\tactual = Route(TEST_ACTUAL_POSITION_2, TEST_ROUTE)\n\t\t\n\t\t# then\n\t\tfor i in range(0, len(actual.ordered_coordinates)):\n\t\t\tassert actual.ordered_coordinates[i] == EXPECTED_ROUTE_2[i]\n\n\tdef test_getCurrentGoalNotEmpty(self):\n\t\t# given\n\t\troute = Route(TEST_ACTUAL_POSITION, TEST_ROUTE)\n\t\t\n\t\t# when\n\t\tactualGoal = route.getCurrentGoal()\n\t\t\n\t\t# then\n\t\tassert EXPECTED_ROUTE[0] == actualGoal\n\n\tdef test_getCurrentGoalEmpty(self):\n\t\t# given\n\t\troute = Route(TEST_ACTUAL_POSITION, TEST_ROUTE)\n\t\twhile len(route.ordered_coordinates) > 0:\n\t\t\troute.markCurrentGoalVisited()\n\t\t\n\t\t\n\t\t# when\n\t\tactualGoal = route.getCurrentGoal()\n\t\t\n\t\t# then\n\t\tassert actualGoal is None\n\n\tdef test_markCurrentGoalVisited(self):\n\t\t# given\n\t\troute = Route(TEST_ACTUAL_POSITION, TEST_ROUTE)\n\t\texpectedNumberOfPositions = len(route.ordered_coordinates) - 1\n\t\t\n\t\t# when\n\t\troute.markCurrentGoalVisited()\n\t\t\n\t\t# then\n\t\tassert len(route.ordered_coordinates) == expectedNumberOfPositions\n\n\tdef test_done(self):\n\t\t# given\n\t\troute = Route(TEST_ACTUAL_POSITION, TEST_ROUTE)\n\t\t\n\t\t# when\n\t\tfor i in range(0, len(EXPECTED_ROUTE)):\n\t\t\troute.markCurrentGoalVisited()\n\t\t\n\t\t# then\n\t\tassert route.done()\n","repo_name":"sattila83/Robocar","sub_path":"test_route.py","file_name":"test_route.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"34943372182","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db import models\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\n\nimport commands\nimport glob\nimport os\nimport re\nimport rrdtool\nimport time\n\nfrom models import Dashboard\n\nRRDPATH = '/var/lib/collectd/rrd'\n\n\ndef get_rrd_plugininstances_for_host(host):\n hostdir = os.path.join(RRDPATH, host)\n if os.path.isdir(hostdir):\n plugininstance_list = os.listdir(hostdir)\n plugininstance_list.append('cpu-*')\n plugininstance_list.sort()\n else:\n plugininstance_list = []\n return plugininstance_list\n\n\ndef graph(request, label, host, plugininstance, rrdfile, datasource,\n endtime=0, showtime=86400, debug=False):\n if rrdfile == 'memory-ram_limit':\n return memorygraph(request, label, host, plugininstance,\n endtime, showtime)\n rrdfiles = glob.glob('%s/%s/%s/%s.rrd'%(\n RRDPATH, host, plugininstance, rrdfile))\n options = [\n '-', '--imgformat', 'PNG', '--height', '100',\n '--start', 'end-%ss'%showtime, '--end', 'now-%ss'%endtime,\n ]\n i=0\n rpn='CDEF:total=0'\n for rrdfile in rrdfiles:\n options.append('DEF:i%d=%s:%s:AVERAGE'%(i,rrdfile,datasource))\n options.append('LINE:i%d#cccccc:'%i)\n rpn+=',i%d,+'%i\n i+=1\n options.append(rpn)\n options.append('LINE:total#000000:%s'%label)\n options = \" \".join(map(str,options))\n if debug:\n return HttpResponse(options)\n data_image = commands.getoutput(\"rrdtool graph \" + options)\n return HttpResponse(data_image, mimetype=\"image/png\")\n\n\ndef browse(request, host = None, plugininstance = None):\n hosts = sorted(os.listdir(RRDPATH))\n if host != None :\n plugininstances = get_rrd_plugininstances_for_host(host)\n if plugininstance != None :\n all_ds_list = []\n realplugininstance = plugininstance.replace('*','0')\n if os.path.isdir('%s/%s/%s'%(RRDPATH,host,realplugininstance)):\n rrdfiles = os.listdir('%s/%s/%s/' %(\n RRDPATH, host, realplugininstance))\n for rrdfile in rrdfiles:\n if not rrdfile.endswith('.rrd'):\n continue\n rrdname = rrdfile[:-4]\n rrdpath = '%s/%s/%s/%s' %(\n RRDPATH, host, realplugininstance, rrdfile)\n info = rrdtool.info(str(rrdpath))\n ds_list = set(k.split('[')[1].split(']')[0]\n for k in info\n if k.startswith('ds'))\n for ds in ds_list:\n all_ds_list.append(dict(rrdname=rrdname,\n dsname=ds))\n return render_to_response('browse.html',\n {'user' : request.user,\n 'hosts' : hosts,\n 'host': host,\n 'plugininstance' : plugininstance,\n 'plugininstances' : plugininstances,\n 'ds_list': all_ds_list,\n })\n return render_to_response('browse.html', {'user' : request.user,\n 'hosts' : hosts,\n 'host' : host,\n 'plugininstances' : plugininstances,\n })\n return render_to_response('browse.html',\n {'user': request.user,\n 'hosts': hosts})\n\n\ndef dashboard(request):\n metrics = Dashboard.objects.filter(user=request.user.id)\n return render_to_response('dashboard.html', {'user' : request.user, 'metrics' : metrics})\n\n\ndef add_to_dashboard(request, host, plugininstance, rrdfile, datasource):\n obj, created = Dashboard.objects.get_or_create(user=request.user.id, host=host, plugininstance=plugininstance, rrdfile=rrdfile, datasource=datasource)\n return dashboard(request)\n\n\ndef remove_from_dashboard(request, host, plugininstance, rrdfile, datasource):\n Dashboard.objects.filter(user=request.user.id, host=host, plugininstance=plugininstance, rrdfile=rrdfile, datasource=datasource).delete()\n return dashboard(request)\n\n","repo_name":"jpetazzo/django-r2d2","sub_path":"r2d2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"3521350369","text":"import unittest\nfrom hero import Hero\nfrom orc import Orc\nfrom weapon import Weapon\nfrom entity import Entity\nfrom fight import Fight\n\n\nclass TestFight(unittest.TestCase):\n\n def setUp(self):\n self.my_hero = Hero(\"Hero\", 100, \"Hero\")\n self.my_orc = Orc(\"Orc\", 100, 1.5)\n self.the_fight = Fight(self.my_hero, self.my_orc)\n\n def test_init(self):\n self.assertEqual(self.my_hero.name, \"Hero\")\n self.assertEqual(self.my_hero.health, 100)\n self.assertEqual(self.my_hero.nickname, \"Hero\")\n self.assertEqual(self.my_orc.name, \"Orc\")\n self.assertEqual(self.my_orc.health, 100)\n self.assertEqual(self.my_orc.berserk_factor, 1.5)\n\n def test_attacks_first(self):\n my_arr = []\n for i in range(0, 100):\n if self.the_fight.attacks_first() == True:\n my_arr.append(1)\n my_arr.append(0)\n self.assertIn(1, my_arr)\n self.assertIn(0, my_arr)\n\n def test_simulate_fight(self):\n proba1 = Weapon(\"axe\", 1, 0.1)\n proba2 = Weapon(\"sword\", 40, 0.9)\n self.my_hero.equip_weapon(proba1)\n self.my_orc.equip_weapon(proba2)\n the_fight = Fight(self.my_hero, self.my_orc)\n self.assertFalse(the_fight.simulate_fight())\n\n \n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"skdls-/Python","sub_path":"Week2/Week2-1 (TDD)/test_fight.py","file_name":"test_fight.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8039086842","text":"import random\n\n\nclass Letter(object):\n\n def __init__(self, char):\n self.discovered = False\n self.value = char\n\n def __str__(self):\n if self.discovered:\n return self.value +\" \"\n else:\n return \"_ \"\n\n def get_value(self):\n return self.value\n\n def get_discovered(self):\n return self.discovered\n\n def discover(self):\n self.discovered = True\n\n\nclass Hangman(object):\n\n def __init__(self, solution):\n\n self.words = open(\"words.txt\", \"r\").read().split(\"\\n\")\n self.errors = 10\n\n self.solution = solution\n self.display = []\n self.attempts = []\n self.correct = []\n for char in solution:\n self.display.append(Letter(char))\n\n self.gameFinished = False\n\n def get_penalty(self):\n return len(self.attempts)\n\n def get_attempts(self):\n return self.attempts\n\n def get_display(self):\n result = \"\"\n for letter in self.display:\n result += str(letter)\n return result\n\n def get_game_finished(self):\n return self.gameFinished\n\n def check(self, char):\n\n if char == self.solution:\n self.gameFinished = True\n return True\n\n if char < \"a\" or char > \"z\" or len(char) != 1:\n raise Exception(\"That is not a valid input.\")\n\n if char in self.attempts:\n print(\"You already tried \", char, \"!. Try with another letter\")\n\n boolean = False\n for letter in self.display:\n if char is letter.get_value():\n letter.discover()\n boolean = True\n\n if boolean:\n if char not in self.correct:\n self.correct.append(char)\n if not boolean:\n if char not in self.attempts:\n self.attempts.append(char)\n\n for letter in self.display:\n if not letter.get_discovered():\n break\n else:\n self.gameFinished = True\n\n return boolean\n\n\n\n def play(self):\n\n game = Hangman(random.choice(self.words))\n\n # while not game.get_game_finished():\n #\n # for char in game.get_attempts():\n # print(char, end=\", \")\n # print()\n # print(game.get_display())\n # char = input(\"Guess a letter (or the word): \")\n # try:\n # game.check(char)\n # except Exception as error:\n # print(error)\n #\n # if game.get_penalty() > self.errors:\n # return \"You lost\"\n #\n # return \"YOU WIN!!\"\n\n\nclass Game(object):\n\n def __init__(self):\n\n self.words = open(\"words.txt\", \"r\").read().split(\"\\n\")\n self.errors = 10\n\n def play(self):\n\n game = Hangman(random.choice(self.words))\n\n while not game.get_game_finished():\n\n for char in game.get_attempts():\n print(char, end=\", \")\n print()\n print(game.get_display())\n char = input(\"Guess a letter (or the word): \")\n try:\n game.check(char)\n except Exception as error:\n print(error)\n\n if game.get_penalty() > self.errors:\n return \"You lost\"\n\n return \"YOU WIN!!\"\n\n","repo_name":"GonzaloCalleja/Hangman","sub_path":"HangmanTest.py","file_name":"HangmanTest.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9713216493","text":"#!/usr/bin/python\n#\n# An implementation of a Loop Erased Random Walk (LERW)\n# from a cylinder with reflecting boundaries on the left\n# and open boundaries on the right.\n# PNG output of a single trajectory.\n# Habib Rehmann and Gunnar Pruessner\n#\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nseed = 10 # random seed\nLength = 20 # length of the cyclinder\nCirc = 20 # circumference of cyclinder\ns = 0 # Step number.\n\ntrajectory = [] # List of the x coordinates of all points visited.\n# (Length x Circ) 2D array of zeros\nlattice = np.zeros((Length, Circ), dtype=int)\nrandom.seed(seed)\n\nxStart = random.randint(0, Length)\t# x coordinate of starting location\nyStart = random.randint(0, Length)\t# y coordinate of starting location\nx, y = xStart, yStart\n\n# Generate a randomwalk\nwhile True:\n\ts += 1\n\tif (bool(random.getrandbits(1))):\n\t\tif (bool(random.getrandbits(1))):\n\t\t\tx += 1\n\t\telse:\n\t\t\tx -= 1\n\telse:\n\t\tif (bool(random.getrandbits(1))):\n\t\t\ty += 1\n\t\telse:\n\t\t\ty -= 1\n\n\t# Periodic boundaries\n\tif (x >= Length):\n\t\tx -= Length\n\telif (x < 0):\n\t\tx += Length\n\tif (y >= Circ):\n\t\ty -= Circ\n\telif (y < 0):\n\t\ty += Circ\n\n\tif (x == xStart and y == yStart):\n\t\tbreak\n\n\tlattice[x][y] += 1\n\ttrajectory.append((x, y))\n\nx0 = None\ny0 = None\npos = 0\n\n# Loop erasure\nwhile pos < len(trajectory):\n\tx, y = trajectory[pos]\n\tif lattice[x][y] > 1 and (not x0):\n\t\tx0, y0 = x, y\n\t\tpos0 = pos\n\telif (x == x0) and (y == y0) and (lattice[x][y] == 1):\n\t\tdel trajectory[pos0:pos]\n\t\tx0, y0 = None, None\n\t\tpos = pos0\n\tlattice[x][y] -= 1\n\tpos += 1\n\n# Plot random walk\ndpi = 300\nfig, ax = plt.subplots()\nfig.set_size_inches(3, Circ * 3. / Length)\nax.set_xlim(0, Length - 1)\nax.set_ylim(0, Circ - 1)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.plot(*zip(*trajectory), marker=\".\",linewidth=0.3)\nplt.savefig(\"plots/\"+__file__[:-3]+\".png\", bbox_inches=\"tight\", dpi=dpi)\n","repo_name":"HR/RandomWalk","sub_path":"src/LERW_fromCoord.py","file_name":"LERW_fromCoord.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"37043439043","text":"from pathlib import Path\r\nimport typing\r\nimport click\r\nimport urllib3\r\n\r\nfrom loading import users\r\nfrom conversion import users_to_contacts\r\nfrom saving import digital_contact_list\r\n\r\nUSERS_URL = \"https://radioid.net/static/users.json\"\r\n\r\n\r\n@click.group()\r\ndef cli():\r\n pass\r\n\r\n\r\n@cli.command()\r\n@click.option(\r\n \"--country\",\r\n \"countries\",\r\n multiple=True,\r\n help=\"One or more countries by which the result should be filtered.\")\r\n@click.option(\r\n \"--input\",\r\n type=Path,\r\n required=False,\r\n help=\"Optional input file. If set, this JSON file will be used as input. If not set, RadioID.net will be queried.\")\r\n@click.option(\r\n \"--output\",\r\n type=Path,\r\n default=\"digital_contact_list.csv\",\r\n help=\"Output CSV file location.\")\r\ndef convert_users(countries: typing.List[str], input: typing.Optional[Path], output: Path):\r\n \"\"\"Convert RadioID.net users to digital contacts\"\"\"\r\n\r\n if input:\r\n user_list = users.load_from_file(input)\r\n else:\r\n http = urllib3.PoolManager()\r\n response = http.request(\"GET\", USERS_URL)\r\n user_list = users.load_from_data(response.data.decode())\r\n\r\n print(f\"Loaded {len(user_list)} users\")\r\n\r\n if len(countries) > 0:\r\n user_list = users.filter_by_countries(user_list, countries)\r\n print(f\"Users list has been filtered to {len(user_list)} users\")\r\n\r\n contact_list = users_to_contacts.convert(user_list)\r\n digital_contact_list.write_to_file(output, contact_list)\r\n\r\n print(\"Finished\")\r\n","repo_name":"Norman0406/radioid-to-djmd5","sub_path":"convert_users.py","file_name":"convert_users.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10863664932","text":"from gensim.models.fasttext import FastText\nfrom gensim.models import Word2Vec\nimport argparse\nfrom helper import get_sentences\nimport os\nfrom multiprocessing import cpu_count\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='''\n Tool for training different kind of embeddings on email corpus\n ''')\n\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional arguments')\n\n required.add_argument(\n '--input', help=\"Input directory that contains the emails\", required=True)\n required.add_argument(\n '--size', help=\"Dimensionality of word vectors\", required=True, type=int)\n required.add_argument('--type', help=\"Choose between FastText and Word2Vec\",\n choices=['fasttext', 'word2vec'], required=True)\n\n optional.add_argument(\n '--algorithm', help=\"Training algorithm to be used when choosing fasttext embeddings\", choices=['skipgram', 'cbow'], default='skipgram')\n required.add_argument(\n '--output', help=\"Output directory\", required=True)\n\n args = parser.parse_args()\n input = args.input\n vec_size = args.size\n type = args.type\n algorithm = args.algorithm\n output = args.output\n\n if not input.endswith('/'):\n input = input + '/'\n if not output.endswith('/'):\n output = output + '/'\n\n if not os.path.exists(output):\n os.makedirs(output)\n sentences = get_sentences(input)\n\n sent_token = []\n for sent in sentences:\n sent_token.append(sent.strip('\\n').split(' '))\n\n if type == \"fasttext\":\n if algorithm == \"skipgram\":\n sg = 1\n else:\n sg = 0\n model = FastText(sent_token, sg=sg, hs=1, size=vec_size,\n workers=cpu_count(), iter=100)\n model.save(os.path.join(output, algorithm + '.model'))\n else:\n model = Word2Vec(sent_token, size=vec_size,\n window=3, workers=cpu_count(), iter=100)\n model.save(os.path.join(output, type + '.model'))\n","repo_name":"eellak/gsoc2019-sphinx","sub_path":"email_clustering/train_vec.py","file_name":"train_vec.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"22"} +{"seq_id":"7803978237","text":"#!/usr/bin/env python3\n\nimport requests, json\n\nclass Spotify:\n # Constructor\n def __init__(self, key, token):\n self.key = key\n self.token = token\n\n ## Private methods\n\n # Get the playlists\n def _getPlaylists(self, idPlaylist):\n url = 'https://api.spotify.com/1/playlists'.format(idPlaylist)\n\n params = {\n 'key': self.key,\n 'token': self.token\n }\n\n return requests.request(method='GET', url=url, params=params)\n\n # Get the id for a Playlist\n def _getPlaylistId(self, playlist):\n for playlist in self._getPlaylists():\n if (playlist['name'] == playlist):\n return playlist['id']\n\n return ''\n\n ## Public methods\n\n # Create a card\n def addCard(self, playlist, name, description):\n\n url = 'https://api.spotify.com/1/cards'\n\n params = {\n 'key': self.key,\n 'token': self.token,\n 'idPlaylist': playlist,\n 'name': name,\n 'desc': description\n }\n\n return requests.request(\n 'POST',\n url,\n params=params\n )\n\n # Get the cards in a playlist\n def getCards(self, idPlaylist):\n url = 'https://api.spotify.com/1/playlists/{}/cards'.format(idPlaylist)\n\n params = {\n 'key': self.key,\n 'token': self.token\n }\n\n return requests.request(\n method='GET',\n url=url,\n params=params\n )\n\n # Get a card\n def getCard(self, idCard):\n url = 'https://api.spotify.com/1/cards/{}'.format(idCard)\n\n params = {\n 'key': self.key,\n 'token': self.token\n }\n\n headers = {\n 'Accept': 'application/json'\n }\n\n return requests.request(\n 'GET',\n url,\n params=params,\n headers=headers\n )\n\n # Delete a card\n def deleteCard(self, idPlaylist, cardName):\n for card in self.getCards(idPlaylist).json():\n if (card['name'] == cardName):\n print('Deleting the card {} ({})'.format(cardName, card['id']))\n url = 'https://api.spotify.com/1/cards/{}'.format(card['id'])\n params = {\n 'key': self.key,\n 'token': self.token\n }\n\n response = requests.request(\n method=\"DELETE\",\n url=url,\n params=params\n )\n\n return\n\n print('Not found the card {} in the playlist {}'.format(cardName, idPlaylist))\n","repo_name":"txixco/scripts","sub_path":"Python/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19293035570","text":"# This code was copied from https://github.com/znxlwm/UGATIT-pytorch and adapted for our purpose accordingly.\n# From: https://github.com/znxlwm/UGATIT-pytorch\n# Author: Junho Kim\n# Paper: U-GAT-IT: Unsupervised Generative Attentional Networks with Adaptive Layer-Instance Normalization for Image-to-Image Translation\n# License: MIT License\n\nimport torch\nimport itertools\nfrom image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\nfrom .radam import RAdam\nfrom .model import Custom\nfrom .nutils import WeightedL1Loss\n\n\nclass UGATIT2Model(BaseModel):\n \"\"\"\n This class implements the UGATIT model, for learning image-to-image translation without paired data.\n U-GAT-IT paper: https://arxiv.org/abs/1907.10830\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n parser.set_defaults(no_dropout=True) # default U_GAT_IT did not use dropout\n parser.set_defaults(norm='instance') # default U_GAT_IT did use instance norm\n if is_train:\n parser.add_argument('--weight_decay', type=float, default=0.0001)\n\n parser.add_argument('--lambda_A', type=float, default=1.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_Seg', type=float, default=.5, help='weight for cycle loss (Seg(A), A -> B -> A, Seg(A))')\n parser.add_argument('--lambda_id', type=float, default=1.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n parser.add_argument('--lambda_cam', type=int, default=1000, help='Weight for CAM')\n\n parser.add_argument('--n_layers_D_G', type=int, default=7, help='depth of global discriminators')\n parser.add_argument('--n_layers_D_L', type=int, default=5, help='depth of local discriminators')\n\n parser.add_argument('--lightFC', action='store_true', help='use light FC version which is input resolution independent')\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the U_GAT_IT class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n\n self.model_names = ['genA2B', 'genB2A', 'disGA', 'disGB', 'disLA', 'disLB']\n self.model_names_save = ['genA2B', 'genB2A']\n\n if opt.netG.startswith('unet'):\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n self.genA2B = UnetGenerator(input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf, n_downsampling=int(opt.netG.split('_')[-1]), norm_layer=norm_layer, use_dropout=False).to(self.device)\n self.genB2A = UnetGenerator(input_nc=opt.output_nc, output_nc=opt.input_nc, ngf=opt.ngf, n_downsampling=int(opt.netG.split('_')[-1]), norm_layer=norm_layer, use_dropout=False).to(self.device)\n else: #ResnetGenerator\n self.genA2B = ResnetGenerator(input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf, n_blocks=int(opt.netG.split('_')[-1][:-6]),n_downsampling=int(opt.netG.split('_')[1]), img_size=opt.crop_size, lightFC=opt.lightFC).to(self.device)\n self.genB2A = ResnetGenerator(input_nc=opt.output_nc, output_nc=opt.input_nc, ngf=opt.ngf, n_blocks=int(opt.netG.split('_')[-1][:-6]),n_downsampling=int(opt.netG.split('_')[1]), img_size=opt.crop_size, lightFC=opt.lightFC).to(self.device)\n\n if self.isTrain:\n self.disGA = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=7).to(self.device)\n self.disGB = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=7).to(self.device)\n self.disLA = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=5).to(self.device)\n self.disLB = Discriminator(input_nc=3, ndf=opt.ndf, n_layers=5).to(self.device)\n\n \"\"\" Define Loss \"\"\"\n self.L1_loss = torch.nn.L1Loss().to(self.device)\n self.MSE_loss = torch.nn.MSELoss().to(self.device)\n self.BCE_loss = torch.nn.BCEWithLogitsLoss().to(self.device)\n\n \"\"\" Trainer \"\"\"\n self.G_optim = torch.optim.Adam(itertools.chain(self.genA2B.parameters(), self.genB2A.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)\n self.D_optim = torch.optim.Adam(itertools.chain(self.disGA.parameters(), self.disGB.parameters(), self.disLA.parameters(), self.disLB.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)\n\n self.optimizers.append(self.D_optim)\n self.optimizers.append(self.G_optim)\n\n \"\"\" Define Rho clipper to constraint the value of rho in AdaILN and ILN\"\"\"\n self.Rho_clipper = RhoClipper(0, 1)\n\n if self.opt.use_segm_model:\n self.segm_model = Custom(input_ch=3, output_ch=8, modelDim=2)\n self.segm_model.load_state_dict(torch.load(opt.segm_model_path))\n opt.logger.info('### Segmentation Model Loaded ###')\n\n self.segm_model.to(self.device)\n self.set_requires_grad(self.segm_model, False)\n self.segm_model.train(False)\n\n self.segmentationloss = WeightedL1Loss(weights = torch.FloatTensor([1., 1., 1., 1., 1., 10., 1., 10.]).to(self.device))\n\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n if self.opt.use_segm_model:\n if self.opt.dataset_mode == 'seg_map':\n self.segm_B = input['segm_B'].to(self.device)\n else:\n with torch.no_grad():\n self.segm_B = self.segm_model(self.real_B)\n\n\n def optimize_parameters(self):\n identity_weight = self.opt.lambda_id\n cycle_weight_A = self.opt.lambda_A\n cycle_weight_B = self.opt.lambda_B\n cam_weight = self.opt.lambda_cam\n seg_weight = self.opt.lambda_Seg\n\n self.D_optim.zero_grad()\n\n fake_A2B, _, _ = self.genA2B(self.real_A)\n fake_B2A, _, _ = self.genB2A(self.real_B)\n\n real_GA_logit, real_GA_cam_logit, _ = self.disGA(self.real_A)\n real_LA_logit, real_LA_cam_logit, _ = self.disLA(self.real_A)\n real_GB_logit, real_GB_cam_logit, _ = self.disGB(self.real_B)\n real_LB_logit, real_LB_cam_logit, _ = self.disLB(self.real_B)\n\n fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(fake_B2A)\n fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(fake_B2A)\n fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(fake_A2B)\n fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(fake_A2B)\n\n D_ad_loss_GA = self.MSE_loss(real_GA_logit, torch.ones_like(real_GA_logit).to(self.device)) + self.MSE_loss(fake_GA_logit, torch.zeros_like(fake_GA_logit).to(self.device))\n D_ad_cam_loss_GA = self.MSE_loss(real_GA_cam_logit,torch.ones_like(real_GA_cam_logit).to(self.device)) + self.MSE_loss(fake_GA_cam_logit, torch.zeros_like(fake_GA_cam_logit).to(self.device))\n D_ad_loss_LA = self.MSE_loss(real_LA_logit, torch.ones_like(real_LA_logit).to(self.device)) + self.MSE_loss(fake_LA_logit, torch.zeros_like(fake_LA_logit).to(self.device))\n D_ad_cam_loss_LA = self.MSE_loss(real_LA_cam_logit,torch.ones_like(real_LA_cam_logit).to(self.device)) + self.MSE_loss(fake_LA_cam_logit, torch.zeros_like(fake_LA_cam_logit).to(self.device))\n D_ad_loss_GB = self.MSE_loss(real_GB_logit, torch.ones_like(real_GB_logit).to(self.device)) + self.MSE_loss(fake_GB_logit, torch.zeros_like(fake_GB_logit).to(self.device))\n D_ad_cam_loss_GB = self.MSE_loss(real_GB_cam_logit,torch.ones_like(real_GB_cam_logit).to(self.device)) + self.MSE_loss(fake_GB_cam_logit, torch.zeros_like(fake_GB_cam_logit).to(self.device))\n D_ad_loss_LB = self.MSE_loss(real_LB_logit, torch.ones_like(real_LB_logit).to(self.device)) + self.MSE_loss(fake_LB_logit, torch.zeros_like(fake_LB_logit).to(self.device))\n D_ad_cam_loss_LB = self.MSE_loss(real_LB_cam_logit,torch.ones_like(real_LB_cam_logit).to(self.device)) + self.MSE_loss(fake_LB_cam_logit, torch.zeros_like(fake_LB_cam_logit).to(self.device))\n\n self.D_loss_A = D_ad_loss_GA + D_ad_cam_loss_GA + D_ad_loss_LA + D_ad_cam_loss_LA\n self.D_loss_B = D_ad_loss_GB + D_ad_cam_loss_GB + D_ad_loss_LB + D_ad_cam_loss_LB\n\n Discriminator_loss = self.D_loss_A + self.D_loss_B\n Discriminator_loss.backward()\n self.D_optim.step()\n\n # Update G\n self.G_optim.zero_grad()\n\n self.fake_A2B, fake_A2B_cam_logit, fake_A2B_heatmap = self.genA2B(self.real_A)\n self.fake_B2A, fake_B2A_cam_logit, fake_B2A_heatmap = self.genB2A(self.real_B)\n\n self.fake_A2B2A, _, fake_A2B2A_heatmap = self.genB2A(self.fake_A2B)\n self.fake_B2A2B, _, fake_B2A2B_heatmap = self.genA2B(self.fake_B2A)\n\n fake_A2A, fake_A2A_cam_logit, _ = self.genB2A(self.real_A)\n fake_B2B, fake_B2B_cam_logit, _ = self.genA2B(self.real_B)\n\n fake_GA_logit, fake_GA_cam_logit, _ = self.disGA(self.fake_B2A)\n fake_LA_logit, fake_LA_cam_logit, _ = self.disLA(self.fake_B2A)\n fake_GB_logit, fake_GB_cam_logit, _ = self.disGB(self.fake_A2B)\n fake_LB_logit, fake_LB_cam_logit, _ = self.disLB(self.fake_A2B)\n\n G_ad_loss_GA = self.MSE_loss(fake_GA_logit, torch.ones_like(fake_GA_logit).to(self.device))\n G_ad_loss_LA = self.MSE_loss(fake_LA_logit, torch.ones_like(fake_LA_logit).to(self.device))\n G_ad_loss_GB = self.MSE_loss(fake_GB_logit, torch.ones_like(fake_GB_logit).to(self.device))\n G_ad_loss_LB = self.MSE_loss(fake_LB_logit, torch.ones_like(fake_LB_logit).to(self.device))\n G_ad_cam_loss_GA = self.MSE_loss(fake_GA_cam_logit, torch.ones_like(fake_GA_cam_logit).to(self.device))\n G_ad_cam_loss_LA = self.MSE_loss(fake_LA_cam_logit, torch.ones_like(fake_LA_cam_logit).to(self.device))\n G_ad_cam_loss_GB = self.MSE_loss(fake_GB_cam_logit, torch.ones_like(fake_GB_cam_logit).to(self.device))\n G_ad_cam_loss_LB = self.MSE_loss(fake_LB_cam_logit, torch.ones_like(fake_LB_cam_logit).to(self.device))\n\n G_recon_loss_A = self.L1_loss(self.fake_A2B2A, self.real_A)\n G_recon_loss_B = self.L1_loss(self.fake_B2A2B, self.real_B)\n\n G_identity_loss_A = self.L1_loss(fake_A2A, self.real_A)\n G_identity_loss_B = self.L1_loss(fake_B2B, self.real_B)\n\n G_cam_loss_A = self.BCE_loss(fake_B2A_cam_logit,torch.ones_like(fake_B2A_cam_logit).to(self.device)) + self.BCE_loss(fake_A2A_cam_logit, torch.zeros_like(fake_A2A_cam_logit).to(self.device))\n G_cam_loss_B = self.BCE_loss(fake_A2B_cam_logit,torch.ones_like(fake_A2B_cam_logit).to(self.device)) + self.BCE_loss(fake_B2B_cam_logit, torch.zeros_like(fake_B2B_cam_logit).to(self.device))\n\n self.G_loss_A = G_ad_loss_GA + G_ad_cam_loss_GA + G_ad_loss_LA + G_ad_cam_loss_LA + cycle_weight_A * G_recon_loss_A + identity_weight * G_identity_loss_A + cam_weight * G_cam_loss_A\n self.G_loss_B = G_ad_loss_GB + G_ad_cam_loss_GB + G_ad_loss_LB + G_ad_cam_loss_LB + cycle_weight_B * G_recon_loss_B + identity_weight * G_identity_loss_B + cam_weight * G_cam_loss_B\n\n if self.opt.use_segm_model:\n self.rec_B_Segm = self.segm_model(self.fake_B2A2B) #Segm(G_B(G_A(A)))\n self.idt_B_Segm = self.segm_model(fake_B2B)\n self.G_loss_B += seg_weight * (self.segmentationloss(self.rec_B_Segm, self.segm_B) + self.segmentationloss(self.idt_B_Segm, self.segm_B))\n\n Generator_loss = self.G_loss_A + self.G_loss_B # + G_heatmap_loss * 10\n\n Generator_loss.backward()\n self.G_optim.step()\n\n # clip parameter of AdaILN and ILN, applied after optimizer step\n self.genA2B.apply(self.Rho_clipper)\n self.genB2A.apply(self.Rho_clipper)\n\n def forward(self):\n return 0\n\n def computeLosses(self):\n return 0\n\n def perform_test_conversion(self, input):\n if self.opt.direction == 'AtoB':\n res, _, _ = self.genA2B(input)\n return res\n else:\n res, _, _ = self.genB2A(input)\n return res\n\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nfrom torch.utils.checkpoint import checkpoint, checkpoint_sequential\nimport functools\n\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, use_bias):\n super(ResnetBlock, self).__init__()\n conv_block = []\n conv_block += [nn.ReflectionPad2d(1),\n nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),\n nn.InstanceNorm2d(dim),\n nn.ReLU(True)]\n\n conv_block += [nn.ReflectionPad2d(1),\n nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias),\n nn.InstanceNorm2d(dim)]\n\n self.conv_block = nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\n\nclass ResnetAdaILNBlock(nn.Module):\n def __init__(self, dim, use_bias):\n super(ResnetAdaILNBlock, self).__init__()\n self.pad1 = nn.ReflectionPad2d(1)\n self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)\n self.norm1 = adaILN(dim)\n self.relu1 = nn.ReLU(True)\n\n self.pad2 = nn.ReflectionPad2d(1)\n self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)\n self.norm2 = adaILN(dim)\n\n def forward(self, x, gamma, beta):\n out = self.pad1(x)\n out = self.conv1(out)\n out = self.norm1(out, gamma, beta)\n out = self.relu1(out)\n out = self.pad2(out)\n out = self.conv2(out)\n out = self.norm2(out, gamma, beta)\n\n return out + x\n\n\nclass adaILN(nn.Module):\n def __init__(self, num_features, eps=1e-5):\n super(adaILN, self).__init__()\n self.eps = eps\n self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))\n self.rho.data.fill_(0.9)\n\n def forward(self, input, gamma, beta):\n in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)\n out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)\n ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)\n out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)\n out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln\n out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)\n\n return out\n\n\nclass ILN(nn.Module):\n def __init__(self, num_features, eps=1e-5):\n super(ILN, self).__init__()\n self.eps = eps\n self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))\n self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))\n self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))\n self.rho.data.fill_(0.0)\n self.gamma.data.fill_(1.0)\n self.beta.data.fill_(0.0)\n\n def forward(self, input):\n in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)\n out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)\n ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)\n out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)\n out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln\n out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1)\n\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=5):\n super(Discriminator, self).__init__()\n model = [nn.ReflectionPad2d(1),\n nn.utils.spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),\n nn.LeakyReLU(0.2, True)]\n\n for i in range(1, n_layers - 2):\n mult = 2 ** (i - 1)\n model += [nn.ReflectionPad2d(1),\n nn.utils.spectral_norm(nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),\n nn.LeakyReLU(0.2, True)]\n\n mult = 2 ** (n_layers - 2 - 1)\n model += [nn.ReflectionPad2d(1),\n nn.utils.spectral_norm(nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),\n nn.LeakyReLU(0.2, True)]\n\n # Class Activation Map\n mult = 2 ** (n_layers - 2)\n self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))\n self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))\n self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)\n self.leaky_relu = nn.LeakyReLU(0.2, True)\n\n self.pad = nn.ReflectionPad2d(1)\n self.conv = nn.utils.spectral_norm(nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n x = self.model(input)\n\n gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)\n gap_logit = self.gap_fc(gap.view(x.shape[0], -1))\n gap_weight = list(self.gap_fc.parameters())[0]\n gap = x * gap_weight.unsqueeze(2).unsqueeze(3)\n\n gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)\n gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))\n gmp_weight = list(self.gmp_fc.parameters())[0]\n gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)\n\n cam_logit = torch.cat([gap_logit, gmp_logit], 1)\n x = torch.cat([gap, gmp], 1)\n x = self.leaky_relu(self.conv1x1(x))\n\n heatmap = torch.sum(x, dim=1, keepdim=True)\n\n x = self.pad(x)\n out = self.conv(x)\n\n return out, cam_logit, heatmap\n\n\nclass RhoClipper(object):\n def __init__(self, min, max):\n self.clip_min = min\n self.clip_max = max\n assert min < max\n\n def __call__(self, module):\n if hasattr(module, 'rho'):\n w = module.rho.data\n w = w.clamp(self.clip_min, self.clip_max)\n module.rho.data = w\n\n\n\nclass ResnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, n_downsampling = 2, img_size=640, lightFC=True):\n assert (n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.input_nc = input_nc\n self.output_nc = output_nc\n self.ngf = ngf\n self.n_blocks = n_blocks\n self.img_size = img_size\n self.lightFC = lightFC\n\n DownBlock = []\n DownBlock += [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, stride=1, padding=0, bias=False),\n nn.InstanceNorm2d(ngf),\n nn.ReLU(True)]\n\n # Down-Sampling\n for i in range(n_downsampling):\n mult = 2 ** i\n DownBlock += [nn.ReflectionPad2d(1),\n nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0, bias=False),\n nn.InstanceNorm2d(ngf * mult * 2),\n nn.ReLU(True)]\n\n # Down-Sampling Bottleneck\n mult = 2 ** n_downsampling\n for i in range(n_blocks):\n DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]\n\n # Class Activation Map\n self.gap_fc = nn.Linear(ngf * mult, 1, bias=False)\n self.gmp_fc = nn.Linear(ngf * mult, 1, bias=False)\n self.conv1x1 = nn.Conv2d(ngf * mult * 2, ngf * mult, kernel_size=1, stride=1, bias=True)\n self.relu = nn.ReLU(True)\n\n # Gamma, Beta block\n if self.lightFC:\n FC = [nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True),\n nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True)]\n else:\n FC = [nn.Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias=False), nn.ReLU(True),\n nn.Linear(ngf * mult, ngf * mult, bias=False), nn.ReLU(True)]\n self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False)\n self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False)\n\n # Up-Sampling Bottleneck\n for i in range(n_blocks):\n setattr(self, 'UpBlock1_' + str(i + 1), ResnetAdaILNBlock(ngf * mult, use_bias=False))\n\n # Up-Sampling\n UpBlock2 = []\n for i in range(n_downsampling):\n mult = 2 ** (n_downsampling - i)\n UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'),\n nn.ReflectionPad2d(1),\n nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),\n ILN(int(ngf * mult / 2)),\n nn.ReLU(True)]\n\n UpBlock2 += [nn.ReflectionPad2d(3),\n nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False),\n nn.Tanh()]\n\n self.DownBlock = nn.Sequential(*DownBlock)\n self.FC = nn.Sequential(*FC)\n self.UpBlock2 = nn.Sequential(*UpBlock2)\n\n def forward(self, input):\n x = self.DownBlock(input)\n\n gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)\n gap_logit = self.gap_fc(gap.view(x.shape[0], -1))\n gap_weight = list(self.gap_fc.parameters())[0]\n gap = x * gap_weight.unsqueeze(2).unsqueeze(3)\n\n gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)\n gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))\n gmp_weight = list(self.gmp_fc.parameters())[0]\n gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)\n\n cam_logit = torch.cat([gap_logit, gmp_logit], 1)\n x = torch.cat([gap, gmp], 1)\n x = self.relu(self.conv1x1(x))\n\n heatmap = torch.sum(x, dim=1, keepdim=True)\n\n if self.lightFC:\n x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1)\n x_ = self.FC(x_.view(x_.shape[0], -1))\n else:\n x_ = self.FC(x.view(x.shape[0], -1))\n gamma, beta = self.gamma(x_), self.beta(x_)\n\n for i in range(self.n_blocks):\n x = getattr(self, 'UpBlock1_' + str(i + 1))(x, gamma, beta)\n out = self.UpBlock2(x)\n\n return out, cam_logit, heatmap\n\n\nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=6, norm_layer=None, use_dropout=False):\n super(UnetGenerator, self).__init__()\n # construct unet structure\n self.unet_block_First = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=self.unet_block_First, norm_layer=norm_layer, use_dropout=use_dropout)\n for i in range(n_downsampling - 6): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer\n\n def forward(self, input):\n res = self.model(input)\n return res, self.unet_block_First.cam_logit, self.unet_block_First.heatmap\n\n\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n self.innermost = innermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)\n #conv = nn.Conv2d(inner_nc,outer_nc,kernel_size=5,stride=1,padding=2 )\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up #+ [conv]\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)\n down = [downrelu, downconv]\n # Class Activation Map\n self.gap_fc = nn.Linear(inner_nc, 1, bias=False)\n self.gmp_fc = nn.Linear(inner_nc, 1, bias=False)\n self.conv1x1 = nn.Conv2d(inner_nc * 2, inner_nc, kernel_size=1, stride=1, bias=True)\n self.relu = nn.ReLU(False)\n\n self.up = nn.Sequential(*[uprelu, upconv, upnorm])\n model = down\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.innermost:\n bottomX = self.model(x)\n gap = torch.nn.functional.adaptive_avg_pool2d(bottomX, 1)\n gap_logit = self.gap_fc(gap.view(bottomX.shape[0], -1))\n gap_weight = list(self.gap_fc.parameters())[0]\n gap = bottomX * gap_weight.unsqueeze(2).unsqueeze(3)\n\n gmp = torch.nn.functional.adaptive_max_pool2d(bottomX, 1)\n gmp_logit = self.gmp_fc(gmp.view(bottomX.shape[0], -1))\n gmp_weight = list(self.gmp_fc.parameters())[0]\n gmp = bottomX * gmp_weight.unsqueeze(2).unsqueeze(3)\n\n self.cam_logit = torch.cat([gap_logit, gmp_logit], 1)\n bottomX = torch.cat([gap, gmp], 1)\n bottomX = self.relu(self.conv1x1(bottomX))\n\n self.heatmap = torch.sum(bottomX, dim=1, keepdim=True)\n\n return torch.cat([x, self.up(bottomX)], 1)\n elif self.outermost:\n return self.model(x)\n else: # add skip connections\n return torch.cat([x, self.model(x)], 1)","repo_name":"NBouteldja/KidneyStainTranslation","sub_path":"models/U_GAT_IT_2_model.py","file_name":"U_GAT_IT_2_model.py","file_ext":"py","file_size_in_byte":27945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"2041172315","text":"# Class to calculate mortgage payments/amortization:\n\nimport numpy as np\nimport pandas as pd\n\n\ndef calc_pmt(loan_amount, r_monthly, months, fv=0):\n \"\"\"\n Finds the monthly payment on a loan using the\n monthly coupon rate and number of months.\n\n Parameters\n ----------\n loan_amount: float\n Current loan amount.\n r_monthly: float\n Monthly coupon interest rate.\n months: int\n Number of months remaining on the loan.\n fv: float\n Outstanding loan balance in the final period.\n Assumes the loan will be fully paid off.\n\n Returns\n -------\n pmt: float\n Monthly payment.\n \"\"\"\n\n # Calculate total present value of the loan:\n pv = loan_amount + (fv/(1.0+r_monthly)**months)\n\n # Calculate the monthly payment on the loan:\n pmt = r_monthly * pv / (1.0-(1.0+r_monthly)**-months)\n\n return pmt\n\n\ndef calc_market_value(cash_flow, market_rates, month_i=1):\n \"\"\"\n Calculates the market value of the mortgage using current risk-free\n interest rates.\n Formula uses the present value of the remaining cash flows from\n month_i to the terminal month.\n\n Parameters\n ----------\n cash_flow: array_like\n Array of future cash flows.\n First element is typically of value 0.\n market_rates: array_like \n Current annual risk-free interest rate.\n Must be same length as cash_flow.\n month_i: int\n Current month.\n Assumes the individual has made (month_i - 1) payments.\n Must be between 1 and terminal month.\n Defaults to first payment month.\n\n Returns\n -------\n market_value: float\n Market value of future cash flows.\n \"\"\"\n\n # Make sure length of cash_flow is equal to length of market_rates:\n if len(cash_flow) != len(market_rates):\n raise ValueError(\"cash_flow and market_rates are not the same \"\n \"length.\")\n\n # Subset cash_flow:\n remaining_payments = cash_flow[month_i:]\n\n # Get remaining market rates:\n remaining_rates = 1.0 + (np.array(market_rates)[month_i:]/12.0)\n\n # Calculate discount rates:\n discount_rates = np.cumprod(1.0/remaining_rates)\n\n # Calculate sum of the present value of the payments from month_i\n # to terminal month:\n market_value = sum(remaining_payments*discount_rates)\n\n return market_value\n\n\ndef calc_wal(cash_flow, original_balance):\n \"\"\"\n Finds the weighted average life of the loan.\n\n Parameters\n ----------\n cash_flow: array_like\n Array of principal payments.\n original_balance: float\n Original loan balance.\n\n Returns\n -------\n wal: float\n Weighted average life of the loan.\n \"\"\"\n\n # Change cash_flow to array:\n cash_flow = np.ones(1) * cash_flow\n\n # Find the length of cash_flow:\n n = len(cash_flow)\n\n # Calculate WAL:\n wal = sum(cash_flow*list(range(n))) / original_balance\n\n return wal\n\n\nclass Mortgage(object):\n \"\"\"\n Class for different kinds of mortgages.\n\n Parameters\n ----------\n loan_amount: float \n Current loan amount\n r_annual: float\n Annual coupon interest rate.\n years: int\n Number of years remaining on the loan.\n fv: float\n Outstanding loan balance in the final period.\n Assumes the loan will be fully paid off.\n pts: float\n Discount points paid directly to the lender.\n \"\"\"\n\n def __init__(self, loan_amount, r_annual, years, fv=0.0, pts=0.0):\n self.loan_amount = loan_amount\n self.r_monthly = r_annual / 12.0\n self.months = years * 12\n self.fv = fv\n self.pts = pts\n self.vec_pmt = [0.0]\n self.vec_int = [0.0]\n self.vec_principal = [0.0]\n self.vec_balance = [loan_amount]\n self.pmt = calc_pmt(loan_amount, self.r_monthly, self.months,\n self.fv)\n self.amortization = self.create_amortization_schedule()\n self.upfront = loan_amount * (pts/100.0)\n\n def update_loan(self, month_i):\n \"\"\"\n Updates the current loan amount by the monthly principal paid.\n \n Parameters\n ----------\n month_i: int \n Current month.\n \"\"\"\n\n # Check to make sure length of payments is less than total\n # months:\n if len(self.vec_pmt) <= (self.months+1):\n # Find the interest and principal amount paid for the month:\n interest = self.vec_balance[-1] * self.r_monthly\n principal = self.pmt - interest\n\n # Update monthly vectors:\n self.vec_pmt.append(self.pmt)\n self.vec_int.append(interest)\n self.vec_principal.append(principal)\n self.vec_balance.append(self.vec_balance[-1] - principal)\n else:\n print(self.months, \"payments were already made.\")\n\n def create_amortization_schedule(self):\n \"\"\"\n Creates full amortization schedule and sets it in pandas\n DataFrame.\n \"\"\"\n\n # Loop through all payments:\n for m in range(self.months):\n self.update_loan(m)\n\n # Create pandas DataFrame:\n column_names = [\"balance\", \"payment\", \"interest\", \"principal\"]\n amortization = pd.DataFrame(list(zip(self.vec_balance,\n self.vec_pmt,\n self.vec_int,\n self.vec_principal)),\n columns=column_names)\n\n return amortization\n\n\nclass Fixed(Mortgage):\n \"\"\"\n Class for fixed rate mortgages.\n\n Parameters\n ----------\n loan_amount: float \n Current loan amount.\n r_annual: float\n Annual coupon interest rate.\n years: int\n Number of years remaining on the loan.\n fv: float\n Outstanding loan balance in the final period.\n Assumes the loan will be fully paid off.\n pts: float\n Discount points paid directly to the lender.\n \"\"\"\n\n def __init__(self, loan_amount, r_annual, years, fv=0.0, pts=0.0):\n Mortgage.__init__(self, loan_amount, r_annual, years, fv, pts)\n\n\nclass Adjustable(Mortgage):\n \"\"\"\n Class for adjustable rate mortgages.\n\n Parameters\n ----------\n loan_amount: float \n Current loan amount\n r_annual: array_like\n Annual coupon interest rate after the initial teaser rate.\n Must have either length 1 or length (months - months_teaser).\n years: int\n Number of years remaining on the loan.\n r_teaser: float\n Initial annual coupon interest rate for a certain period.\n years_teaser: int\n Number of years for the teaser rate.\n fv: float\n Outstanding loan balance in the final period.\n Assumes the loan will be fully paid off.\n pts: float\n Discount points paid directly to the lender.\n \"\"\"\n\n def __init__(self, loan_amount, r_annual, years, r_teaser,\n years_teaser, fv=0.0, pts=0.0):\n self.months_teaser = years_teaser * 12\n self.next_r = self.check_r_annual(r_annual, years) / 12.0\n Mortgage.__init__(self, loan_amount, r_teaser, years, fv, pts)\n\n def check_r_annual(self, r_annual, years):\n \"\"\"\n Check whether r_annual is either length 1 or length (\n months - months_teaser).\n If r_annual is length 1, then make it length (months -\n months_teaser).\n \"\"\"\n\n # Get length of r_annual:\n r_annual_length = len(np.ones(1)*r_annual)\n\n # Set up new length:\n new_length = (years*12) - self.months_teaser\n\n if r_annual_length == 1:\n r_annual = np.ones(new_length)*r_annual\n\n if (r_annual_length != 1) and (r_annual_length != new_length):\n raise ValueError(\"r_annual must be either length 1 or \"\n \"length {}\".format(new_length))\n\n return r_annual\n\n def update_loan(self, month_i):\n \"\"\"\n Updates the r_monthly to the new value, recalculates payment,\n and updates the loan amount by the monthly principal paid.\n\n Parameters\n ----------\n month_i: int \n Current month\n \"\"\"\n\n if len(self.vec_pmt) >= (self.months_teaser + 1):\n self.r_monthly = self.next_r[month_i - self.months_teaser]\n self.pmt = calc_pmt(loan_amount=self.vec_balance[-1],\n r_monthly=self.r_monthly,\n months=self.months - month_i)\n\n Mortgage.update_loan(self, month_i)\n","repo_name":"SeanBrunson/mortgages","sub_path":"mortgages/mortgages.py","file_name":"mortgages.py","file_ext":"py","file_size_in_byte":8562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9006306016","text":"def solution(record):\n answer = []\n result = []\n dic = {}\n for i in record:\n result.append(list(i.split()))\n for i in result:\n if i[0] == \"Enter\" or i[0] == \"Change\":\n dic[i[1]] = i[2]\n for i in result:\n if i[0] == \"Enter\":\n answer.append(dic[i[1]]+\"님이 들어왔습니다.\")\n elif i[0] == \"Leave\":\n answer.append(dic[i[1]]+\"님이 나갔습니다.\")\n return answer\n","repo_name":"jojaeng2/algorithm","sub_path":"python/Programmers/오픈채팅방.py","file_name":"오픈채팅방.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19421937598","text":"from tensorflow.python.keras.models import load_model\nfrom load_data import load_testing_data, get_labels\nfrom clean_data import threshold, remove_dots\nimport scipy.misc, scipy.ndimage\nimport numpy as np\nimport sys\n\nif len(sys.argv) < 2:\n model_path = 'data/temp_model.hdf5'\nelse:\n model_path = sys.argv[1]\n\nif len(sys.argv) < 3:\n output_path = 'data/test_y.csv'\nelse:\n output_path = sys.argv[2]\n\nprint(\"Loading model from \" + model_path + \"...\")\nmodel = load_model(model_path)\n\nprint(\"Loading test data...\")\nx_test = load_testing_data()\nx_test = x_test.reshape(-1, 64, 64)\n\nprint(\"Removing background...\")\nx_test = threshold(x_test)\n\nprint(\"Removing dots...\")\n#x_test = remove_dots(x_test)\n\n#new_im = np.zeros((256, 256))\n#r = 0\n#for i in range(0, 256,64):\n# for j in range(0, 256, 64):\n# # paste the image at location i,j\n# new_im[i:i+64, j:j+64] = x_test[r]\n# r += 1\n#\n#new_im = scipy.ndimage.zoom(new_im, 4, order=0) \n#scipy.misc.imsave('sample_clean.jpg', new_im)\n\n#scipy.misc.imshow(new_im) # to visualize only \n#quit()\n\nprint(\"Generating predictions...\")\nx_test = x_test.reshape(-1, 64, 64, 1)\npredictions = model.predict(x_test)\n\nlabels, _, _ = get_labels()\nlabels = list(labels)\n\nprint(\"Writing predictions to \" + output_path + \"...\")\nwith open(output_path, 'w+') as output:\n print(\"Id,Label\", file=output)\n for i, prediction in enumerate(predictions):\n print(str(i + 1) + \",\" + str(labels[np.argmax(prediction)]), file=output)\n","repo_name":"breandan/comp551-p3","sub_path":"classify_data.py","file_name":"classify_data.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74918813497","text":"# Finding whether given tree is symmetric (mirror image at vertical line passing through root)\n# Link : https://www.geeksforgeeks.org/symmetric-tree-tree-which-is-mirror-image-of-itself/\n\nclass Node:\n def __init__(self, key):\n self.data = key\n self.left = None\n self.right = None\n\n\ndef isSymmetric(n1, n2):\n if not n1 and not n2:\n return True\n\n if (n1 and n2) and (n1.data == n2.data):\n return (isSymmetric(n1.left, n2.right) and isSymmetric(n1.right, n2.left))\n \n return False\n\n\nroot = Node(1) \nroot.left = Node(2) \nroot.right = Node(2) \nroot.left.left = Node(3) \nroot.left.right = Node(4) \nroot.right.left = Node(4) \nroot.right.right = Node(3) \n\nif isSymmetric(root, root):\n print('Yes')\nelse:\n print('No')","repo_name":"GuptaAman08/CompetitiveCoding","sub_path":"Other Company Coding Problem/Tree Practice/symmetricTree.py","file_name":"symmetricTree.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23741924174","text":"from setuptools import find_packages, setup\n\nPACKAGE = \"zc_flightplan_toolkit\"\n\nsetup(\n name=\"zc-flightplan-toolkit\",\n version=\"0.0.1\",\n packages=find_packages(include=PACKAGE),\n install_requires=[\n \"pandas\",\n \"numpy\",\n \"python-dotenv\",\n \"requests\",\n \"pyside6\",\n \"loguru\",\n \"frozendict\",\n ],\n extras_require={\n \"dev\": [\n \"black\",\n \"isort\",\n \"pycln\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-mock\",\n \"radon\",\n \"codespell\",\n \"pre-commit\",\n \"pyright\",\n \"pylint\",\n \"pyinstaller\",\n \"pytest-qt; platform_system=='Windows'\",\n \"pandas-stubs\",\n ]\n },\n)\n","repo_name":"zeecm/zc-flightplan-toolkit","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5223208460","text":"from tkinter import *\n\ngif_dir=\"/home/pi/GUI-Project-in-tk/gifs/\" # path of GIFs directory\n\nimagePaths = {\n \"editorImg\" : (gif_dir + \"pencil.gif\"),\n \"videoList\" : (gif_dir + \"images.gif\"),\n \"videoSearchImg\" : (gif_dir + \"pogi.gif\"),\n \"browserImg\" : (gif_dir + \"index.gif\"),\n \"referenceImg\" : (gif_dir + \"mine.gif\"),\n \"sendFilesImg\" : (gif_dir + \"send.gif\"),\n \"drawingImg\" : (gif_dir + \"brush.gif\"),\n \"gradesImg\" : (gif_dir + \"grades.gif\")\n}\n\ntableNames = {\n \"studentsTable\" : \"students\"\n}\n\ndef addButtonToFrame(currentFrame, buttonName, place, bindedFunction = None, imagePath = None, imgLen = None, imgWid = None, butwidth = 0, butHeight = 0):\n # Create Button.\n b = Button(currentFrame, text= buttonName, fg=\"black\",height = butHeight, width = butwidth, borderwidth=3);\n # Add Image if it's send.\n if(imagePath and imgLen and imgWid):\n # Read Image.\n img = PhotoImage(file= imagePath)\n img = img.subsample(imgWid, imgLen)\n # Add image to the button.\n b.config(image= img, compound=RIGHT)\n b.image = img\n # Show Button.\n if(place != -1):\n b.pack(fill=place)\n # Bind Button if found.\n if(bindedFunction):\n b.bind(\"\", bindedFunction)\n # Return Button\n return b\n\ndef addEntryTextToFrame(currentFrame, addFocus = False, placeHolderText = None):\n e = Entry(currentFrame)\n if(placeHolderText):\n e.insert(0, placeHolderText);\n e.pack()\n if(addFocus):\n e.focus()\n return e","repo_name":"hadirsamir/GUI-Project-in-tk","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21523193347","text":"# For20. n butun son berilgan(n>0). bitta for dan foydalanib 1!+2!+...n! topuvchi dastur tuzing.\nn = int(input(\"N=\"))\ns=0\nx=1\nif n<=0:\n print(\"Kiritilgan son nolga teng yoki manfiy\")\nelse:\n for i in range(1, n+1):\n x*=i\n s+=x\n print(x, end=\" \")\n print(\"\\nYig'indi:\", s)\n","repo_name":"UzSenor/Pythonda_1000_ta_misol","sub_path":"For/For20.py","file_name":"For20.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"25925074549","text":"import os\nimport sys\n\nfrom flask import Flask, jsonify, request, abort, send_file\nfrom dotenv import load_dotenv\nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.exceptions import InvalidSignatureError\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageSendMessage\n\nfrom fsm import TocMachine\nfrom utils import send_text_message\n# from draw import draw\n\nstate = {}\n\nload_dotenv()\n\nmachine = TocMachine(\n states=[\"new game\", \"playing\", \"win\", \"lose\", \"quit\"],\n transitions=[\n {\n \"trigger\": \"new game\",\n \"source\": \"new game\",\n \"dest\": \"new game\",\n },\n {\n \"trigger\": \"new game\",\n \"source\": \"playing\",\n \"dest\": \"new game\",\n },\n {\n \"trigger\": \"new game\",\n \"source\": \"win\",\n \"dest\": \"new game\",\n },\n {\n \"trigger\": \"new game\",\n \"source\": \"lose\",\n \"dest\": \"new game\",\n },\n {\n \"trigger\": \"new game\",\n \"source\": \"quit\",\n \"dest\": \"new game\",\n },\n {\n \"trigger\": \"move\",\n \"source\": \"new game\",\n \"dest\": \"playing\",\n },\n {\n \"trigger\": \"move\",\n \"source\": \"playing\",\n \"dest\": \"playing\",\n },\n {\n \"trigger\": \"move\",\n \"source\": \"playing\",\n \"dest\": \"win\",\n },\n {\n \"trigger\": \"move\",\n \"source\": \"playing\",\n \"dest\": \"lose\",\n },\n {\n \"trigger\": \"quit\",\n \"source\": \"new game\",\n \"dest\": \"quit\",\n },\n {\n \"trigger\": \"quit\",\n \"source\": \"playing\",\n \"dest\": \"quit\",\n },\n {\n \"trigger\": \"quit\",\n \"source\": \"win\",\n \"dest\": \"quit\",\n },\n {\n \"trigger\": \"quit\",\n \"source\": \"lose\",\n \"dest\": \"quit\",\n },\n ],\n initial=\"quit\",\n auto_transitions=False,\n show_conditions=True,\n)\n\n\napp = Flask(__name__, static_url_path=\"\")\n\n\n# get channel_secret and channel_access_token from your environment variable\nchannel_secret = os.getenv(\"LINE_CHANNEL_SECRET\", None)\nchannel_access_token = os.getenv(\"LINE_CHANNEL_ACCESS_TOKEN\", None)\nif channel_secret is None:\n print(\"Specify LINE_CHANNEL_SECRET as environment variable.\")\n sys.exit(1)\nif channel_access_token is None:\n print(\"Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.\")\n sys.exit(1)\n\nline_bot_api = LineBotApi(channel_access_token)\nparser = WebhookParser(channel_secret)\n\n\n@app.route(\"/callback\", methods=[\"POST\"])\ndef callback():\n signature = request.headers[\"X-Line-Signature\"]\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # parse webhook body\n try:\n events = parser.parse(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n # if event is MessageEvent and message is TextMessage, then echo text\n for event in events:\n if not isinstance(event, MessageEvent):\n continue\n if not isinstance(event.message, TextMessage):\n continue\n message = event.message.text\n userID = str(event.source.user_id)\n f = open('input.txt', 'w')\n if message == 'quit' or message == 'q':\n message = 'Thanks for playing!'\n state[userID] = 'quit'\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text=message)\n )\n continue \n if len(message) == 2:\n state[userID] = 'playing'\n if message != 'new game':\n f.write(message + '\\n')\n else:\n state[userID] = 'new game'\n open(userID + '.txt', 'w').close()\n f.write('q\\n')\n f.close()\n os.system('python gobang.py ' + userID + '.txt < input.txt > output.txt')\n f = open('output.txt', 'r')\n result = f.readlines()\n f.close()\n message = ''\n data = ''\n for i in range(len(result)):\n s = result[i]\n if len(s) >= 4 and s[:4] == 'DUMP':\n filename = userID + '.txt'\n if not os.path.exists(filename):\n open(filename, 'w').close()\n f = open(filename,'w')\n f.write(s[4:])\n data = s[4:]\n f.close()\n continue\n if len(s) >= 9 and s[:9] == 'Your move' and i <= len(result) - 10:\n message = ''\n continue\n if len(s) >= 8 and s[:8] == 'YOU LOSE':\n state[userID] = 'lose'\n if len(s) >= 7 and s[:7] == 'YOU WIN':\n state[userID] = 'win'\n message += s\n # pic = draw()\n # for i in range(0, len(data), 5):\n # if data[i] == '1':\n # pic.black.append(pic.trans(s[i+2], s[i+3]))\n # else:\n # pic.white.append(pic.trans(s[i+2], s[i+3]))\n # pic.draw(userID + '.png')\n # message = ImageSendMessage(\n # original_content_url='https://tocfinalproject.herokuapp.com/getpic/' + userID,\n # preview_image_url='https://tocfinalproject.herokuapp.com/getpic/' + userID\n # )\n # line_bot_api.reply_message(event.reply_token, message)\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(text=message)\n ) \n print('state: ', state[userID])\n return \"OK\"\n\n\n@app.route(\"/webhook\", methods=[\"POST\"])\ndef webhook_handler():\n signature = request.headers[\"X-Line-Signature\"]\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(f\"Request body: {body}\")\n\n # parse webhook body\n try:\n events = parser.parse(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n # if event is MessageEvent and message is TextMessage, then echo text\n for event in events:\n if not isinstance(event, MessageEvent):\n continue\n if not isinstance(event.message, TextMessage):\n continue\n if not isinstance(event.message.text, str):\n continue\n print(f\"\\nFSM STATE: {machine.state}\")\n print(f\"REQUEST BODY: \\n{body}\")\n response = machine.advance(event)\n if response == False:\n send_text_message(event.reply_token, \"Not Entering any State\")\n\n return \"OK\"\n\n\n@app.route(\"/show-fsm\", methods=[\"GET\"])\ndef show_fsm():\n machine.get_graph().draw(\"fsm.png\", prog=\"dot\", format=\"png\")\n return send_file(\"fsm.png\", mimetype=\"image/png\")\n@app.route(\"/getpic/\")\ndef getpic(filename):\n return send_file(filename, mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n port = os.environ.get(\"PORT\", 8000)\n app.run(host=\"0.0.0.0\", port=port, debug=True) \n","repo_name":"visitorckw/TOC-FianlProject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39887917495","text":"# -*- coding: utf-8 -*-\nimport pytesseract\nimport scrapy\nimport re\nimport requests\nfrom PIL import Image\n\n\n\nclass ZiroomSpider(scrapy.Spider):\n name = 'ziroom'\n allowed_domains = ['ziroom.com']\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36\"}\n start_urls = ['http://www.ziroom.com/z/s100013-r2-p{}/?sort=2'.format(num) for num in range(1, 10)]\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"pic-box\"]/a/@href').extract()\n for url in urls:\n yield scrapy.Request(\"http:\" + url, callback=self.parse_info)\n\n def parse_info(self, response):\n payurls = response.xpath('//div[@class=\"Z_price\"]/i/@style').extract()\n Z_name = response.xpath('/html/body/section/aside/h1/text()').extract_first()\n Z_home_b = response.xpath('/html/body/section/aside/div[3]/div[1]/dl[1]/dd/text()').extract_first()\n chaoxiang = response.xpath('/html/body/section/aside/div[3]/div[1]/dl[2]/dd/text()').extract_first()\n huxing = response.xpath('/html/body/section/aside/div[3]/div[1]/dl[3]/dd/text()').extract_first()\n weizhi = response.xpath('/html/body/section/aside/div[3]/ul/li[1]/span[2]/span/text()').extract_first()\n louceng = response.xpath('//ul[@class=\"Z_home_o\"]/li[2]/span[@class=\"va\"]/text()').extract_first()\n dianti = response.xpath('//ul[@class=\"Z_home_o\"]/li[3]/span[@class=\"va\"]/text()').extract_first()\n gongnuan = response.xpath('//ul[@class=\"Z_home_o\"]/li[5]/span[@class=\"va\"]/text()').extract_first()\n qianyue = response.xpath('//*[@id=\"live-tempbox\"]/ul/li[2]/span[2]/text()').extract_first()\n if payurls:\n yield {\n \"标题\": Z_name[5:],\n \"价格\": self.NumUrl(payurls),\n \"面积\": Z_home_b,\n \"朝向\": chaoxiang,\n \"户型\": huxing,\n \"位置\": weizhi,\n \"楼层\": louceng,\n \"电梯\": dianti,\n \"供暖\": gongnuan,\n \"签约\": qianyue,\n \"链接\": response.url,\n }\n\n def NumUrl(self, datas):\n Nnum = []\n url=re.findall(r\"url\\((.+)\\)\", datas[0])[0]\n with open(\"Img.jpg\", \"wb\")as file:\n file.write(requests.get(\"http:\"+url, headers=self.headers).content)\n for data in datas:\n dataNumUrl = re.findall(r\"background-position:-(.+)px;\", data)[0]\n NumList = self.ImgNum(dataNumUrl) # 获得图片的对应数字\n Nnum.append(NumList)\n # print(\"-------------------------------------------------------------------------\",\"\".join(Nnum))\n return \"\".join(Nnum)\n\n def ImgNum(self, num):\n im = Image.open(\"Img.jpg\")\n # 图片的宽度和高度\n img_size = im.size\n print(\"图片宽度和高度分别是{}\".format(img_size))\n '''\n #4190\n 裁剪:传入一个元组作为参数\n 元组里的元素分别是:\n (距离图片左边界距离x, \n 距离图片上边界距离y,\n 距离图片左边界距离+裁剪框宽度x+w,距离图片上边界距离+裁剪框高度y+h)\n '''\n # 截取4290\n x = float(num) * 1.2\n y = 0\n w = img_size[1]\n h = img_size[1]\n region = im.crop((x, y, x + w, y + h))\n # region.show()\n region.save(\"IMG.png\")\n ast = pytesseract.image_to_string(Image.open(\"IMG.png\"), lang=\"eng\",\n config='--psm 6 --oem 3 -c tessedit_char_whitelist=0123456789')\n return ast\n","repo_name":"wangds1988/room","sub_path":"room/spiders/ziroom.py","file_name":"ziroom.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9511515000","text":"# Paolo Takagi-Atilano, October 17, 2017\n\nfrom ConstraintSatisfactionProblem import ConstraintSatisfactionProblem\n\n\nclass CircuitBoardCSP:\n def __init__(self, length, height, pieces_list, square):\n self.length = length # length of grid\n self.height = height # height of grid\n self.pieces_list = pieces_list # list of pieces\n\n self.constraints = self.set_constraints(square) # constraints\n self.CSP = ConstraintSatisfactionProblem(len(pieces_list), self.length * self.height, self.constraints) # CSP\n\n # set constraints for CSP\n def set_constraints(self, square): # square format: (letter, length, height)\n constraints = {}\n\n pieces_location_list = [] # list of sets of all legal left-hand corners of each piece, for each piece\n for piece in self.pieces_list:\n # get set of all legal left-hand corners of each piece\n piece_location_set = set()\n for y in range(self.height):\n for x in range(self.length):\n if x + piece[1] - 1 < self.length and y + piece[2] - 1 < self.height:\n piece_location_set.add((piece, x, y)) # potential location of piece, and that piece\n #print(piece_location_set)\n pieces_location_list.append(piece_location_set)\n\n for i in range(len(pieces_location_list)):\n for j in range(len(pieces_location_list)):\n if i != j: # make sure we aren't comparing the same piece\n common_location_list1 = set() # set of all legal left-hand corners of pieces i and j\n common_location_list2 = set()\n\n for i_loc in pieces_location_list[i]:\n for j_loc in pieces_location_list[j]:\n #print(\"i_loc\", i_loc, \"j_loc:\", j_loc)\n #print(collision(i_loc, j_loc))\n if not collision(i_loc, j_loc):\n #print(\"adding:\", (self.coord_to_int(i_loc[1], i_loc[2]),\n # self.coord_to_int(j_loc[1], j_loc[2]) ))\n common_location_list1.add( (self.coord_to_int(i_loc[1], i_loc[2]),\n self.coord_to_int(j_loc[1], j_loc[2]) ))\n common_location_list2.add( (self.coord_to_int(j_loc[1], j_loc[2]),\n self.coord_to_int(i_loc[1], i_loc[2]) ))\n constraints[(i, j)] = common_location_list1\n constraints[(j, i)] = common_location_list2\n\n return constraints\n\n # given x and y values, return corresponding single int for some grid\n def coord_to_int(self, x, y):\n return y * self.length + x\n\n # given single int for some grid, return corresponding x and y values\n def int_to_coord(self, var):\n return var % self.length, int(var / self.length)\n\n # calls backtrack search object from CSP, and returns output plus some syntax\n def backtrack_search(self, mrv, lcv, inference):\n self.CSP.backtrack_search(mrv, lcv, inference)\n return self.solution_to_str(self.CSP.assignment) + \"\\n\" + str(self.CSP.fails) + \" fails\" + \"\\n\"\n\n # returns solution with nice syntax, rather than integers\n def solution_to_str(self, solution):\n sol_dict = {}\n sol_str = \"\"\n\n # iterate through pieces:\n for i in range(len(self.pieces_list)):\n # iterate through x and y values in each piece\n for x in range(self.pieces_list[i][1]):\n for y in range(self.pieces_list[i][2]):\n # set that key to corresponding character symbol representing piece\n pos = self.int_to_coord(solution[i])\n ins = self.coord_to_int(x + pos[0], y + pos[1])\n sol_dict[ins] = self.pieces_list[i][0]\n\n # setting blank spaces to periods\n for i in range(self.length * self.height):\n if i not in sol_dict.keys():\n sol_dict[i] = \".\"\n\n # turning dictionary into string\n for i in range(len(sol_dict.keys())):\n if i != 0 and i % self.length == 0:\n sol_str += \"\\n\"\n sol_str += sol_dict[i]\n\n return sol_str\n\n\n# resorts to brute force because some objects might not be rectangular\ndef collision(i_loc, j_loc): # piece location: (piece, x, y), piece: (letter, length, height)\n for i_x in range(i_loc[1], i_loc[1] + i_loc[0][1]): # iterate through x in first piece\n for i_y in range(i_loc[2], i_loc[2] + i_loc[0][2]): # iterate through y in first piece\n for j_x in range(j_loc[1], j_loc[1] + j_loc[0][1]): # iterate through x in second piece\n for j_y in range(j_loc[2], j_loc[2] + j_loc[0][2]): # iterate through y in second piece\n if i_x == j_x and i_y == j_y:\n #print(\"collision\", str(i_loc[0][0]), str(j_loc[0][0]), str(i_x), str(i_y))\n return True\n # no collisions found\n return False\n","repo_name":"p-takagi-atilano/CSP","sub_path":"CircuitBoardCSP.py","file_name":"CircuitBoardCSP.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27287607797","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\nglob = pd.read_csv(\"global.csv\", index_col='Time', parse_dates=[0])\r\ndata = glob.sort_values(by=\"Time\")\r\ntrain_mag = glob[\"Major Magnitude\"]\r\ntrain_temp = glob[\"Global Temperature Anomaly\"]\r\n\r\nfig,ax1 = plt.subplots()\r\n\r\n# First Plot\r\norder = 1\r\ncoef_fst = np.polyfit(np.arange(len(train_mag)),\r\n train_mag.values.ravel(),order)\r\n\r\npoly_mdl = np.poly1d(coef_fst)\r\ntrend_global_mag = pd.Series(data = poly_mdl(np.arange(len(train_mag))),\r\n index = train_mag.index)\r\n\r\nprint(\"The slope of the Magnitude curve is {:.4f}\".format(coef_fst[0]))\r\n\r\n\r\n# Second Plot\r\norder = 1\r\ncoef_sec = np.polyfit(np.arange(len(train_temp)),\r\n train_temp.values.ravel(),order)\r\n\r\npoly_mdl = np.poly1d(coef_sec)\r\ntrend_global_temp = pd.Series(data = poly_mdl(np.arange(len(train_temp))),\r\n index = train_temp.index)\r\n\r\nprint(\"The slope of the Temperature curve is {:.4f}\".format(coef_sec[0]))\r\n\r\n\r\n# plt.locator_params('y',nbins=30)\r\n\r\n# plt.ylim(ymin=6,ymax=8)\r\n\r\n# in case plot looking messy , uncomment below line\r\nchoice = int(input(\"Enter the Choice : 1 - Default , 2 - For User Scale\"))\r\n\r\nif choice == 2:\r\n scale = int(input(\"Enter the Scale Range\"))\r\n plt.ylim(0,scale)\r\n plt.plot(trend_global_mag,color=\"red\")\r\n plt.plot(train_mag,color='blue')\r\nelse:\r\n plt.plot(trend_global_mag, color=\"red\")\r\n plt.plot(train_mag, color='blue')\r\n\r\n\r\nplt.xlabel(\"Time\",fontweight='bold',color='black',size=16)\r\nplt.title(\"Major Magnitude\",color=\"black\",fontweight='bold',size=16)\r\n\r\n\r\nax = ax1.twinx()\r\n\r\n\r\nax.plot(trend_global_temp,color='black')\r\nax.plot(train_temp,color='green')\r\n\r\n\r\nax1.set_ylabel(\"Temporal Variation of the Total number of \\n earthquakes Globally\",fontweight='bold',color='blue',size=16)\r\n\r\nplt.ylabel(\"Global Temperature Anomaly (°C)\",fontweight='bold',color='green',size=16)\r\n\r\nplt.grid()\r\nplt.show()\r\n","repo_name":"Dwaipayan001/HoD-sir-Plots","sub_path":"linear trend 3 axis plots_global.py","file_name":"linear trend 3 axis plots_global.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70800866937","text":"# Name: Jennifer Um\n# Date: 2021-08-10\n# Description: Create a Quoridor class\n\nclass QuoridorGame:\n \"\"\"\n Class for a Quoridor game.\n - Interacts with Fence class: Fences will be placed on the Quoridor game board.\n - Interacts with Player class: Players will have a number of fences, and each will have a pawn on the game board.\n \"\"\"\n\n def __init__(self):\n # initialize the board\n self._status = \"IN_PROGRESS\" # set to GAME_WON when game is won by a player\n self._winner = None # will be set to either Player object\n self._player1 = Player(1)\n self._player2 = Player(2)\n self._players = [self._player1, self._player2]\n self._current_turn = self._player1\n self._board = self._generate_board()\n\n def _has_game_been_won(self):\n \"\"\"\n Checks if game has been won.\n :return: True if game has been won. False if game has not been won.\n \"\"\"\n if self._status == \"GAME_WON\":\n return True\n return False\n\n def _get_status(self):\n \"\"\"\n Get the status of the game.\n :return: status of the game; either \"IN_PROGRESS\" or \"GAME_WON\"\n \"\"\"\n return self._status\n\n def _get_winner(self):\n \"\"\"\n Get the winner of the game.\n :return: None if no player has won. Otherwise, return the winning Player object.\n \"\"\"\n return self._winner\n\n def _set_status_to_game_won_update_winner(self, player):\n \"\"\"\n Set status to \"GAME_WON\" and update the winner to the winning Player object\n :param player: winning player object\n :return: none\n \"\"\"\n self._status = \"GAME_WON\"\n self._winner = player\n\n def _get_player_from_num(self, number):\n \"\"\"\n Get Player object from a number\n :param number: player's number\n :return: Player object with that number\n \"\"\"\n for player in self._players:\n if player.get_player_num() == number:\n return player\n return None\n\n def _generate_board(self):\n \"\"\"\n Generate the Quoridor board and starting positions of each player's pawn\n - the board has 10 rows and 10 columns\n - 100 coordinates with 81 playable coordinates\n - coordinates in row 9 and column 9 are not playable coordinates, but were created to act as edge fences\n - each playable coordinate can have\n - a pawn\n - a vertical (v) fence and/or a horizontal (h) fence\n - column 0 and row 0 are playable coordinates that have v and/or h fences acting as edge fences\n - each non-playable coordinate has\n - a v fence and/or an h fence acting as edge fences\n :return:\n - a dictionary called \"board\" with\n key: coordinate\n value: a dictionary of\n - key=\"pawn\", value=None or Pawn object\n - key=\"fences\", value=empty list or list of Fence objects\n - sample board:\n {(column, row): {\n \"pawn\": Pawn object (or None if no pawn at that coordinate),\n \"fence\" = [Fence object(s) (if there are fences at that coordinate)]\n }\n }\n \"\"\"\n board = {}\n for row in range(0, 10):\n for col in range(0, 10):\n board[(col, row)] = {\n \"pawn\": None,\n \"fence\": []\n }\n\n # place player1 and player2 pawn in their starting positions\n for player in self._players:\n board[player.get_pawn_coordinate()][\"pawn\"] = player.get_player_num()\n # pawns are denoted by the player number\n\n # edges are fences\n for i in range(0, 10):\n board[(0, i)][\"fence\"].append(Fence(\"v\"))\n board[(9, i)][\"fence\"].append(Fence(\"v\"))\n board[(i, 0)][\"fence\"].append(Fence(\"h\"))\n board[(i, 9)][\"fence\"].append(Fence(\"h\"))\n\n return board\n\n def print_board(self):\n \"\"\"\n Print the Quoridor board\n :return: none\n \"\"\"\n for i in range(0, 10):\n for j in range(0, 10):\n this_coord = (j, i)\n\n # create pawn string\n if self._board[this_coord][\"pawn\"] is None:\n pawn_string = \"None\"\n else:\n pawn_string = str(self._board[this_coord][\"pawn\"])\n\n # create fence string\n if len(self._board[this_coord][\"fence\"]) == 0:\n fence_string = \"0\" # denoting no fences at that coordinate\n else:\n fence_string = \"\"\n for fence in self._board[this_coord][\"fence\"]:\n fence_string += str(fence.get_fence_direction()) # append \"v\" and/or \"h\"\n\n location_string = str(this_coord) + \",pawn=\" + pawn_string + \",fence=\" + fence_string\n\n print(location_string.ljust(50), end=\"\")\n print()\n\n def _get_current_turn(self):\n \"\"\"\n Get the player whose turn it currently is\n :return: player whose turn it currently is\n \"\"\"\n return self._current_turn\n\n def _change_current_turn(self):\n \"\"\"\n Change the player whose turn it currently is\n :return: none\n \"\"\"\n if self._current_turn == self._player1:\n self._current_turn = self._player2\n else:\n self._current_turn = self._player1\n\n def _is_valid_current_turn(self, inputted_player):\n \"\"\"\n Validate whose turn it currently is\n :param inputted_player: player that was entered in main\n :return: True if the current_turn is the inputted_player, False otherwise\n \"\"\"\n if inputted_player == self._current_turn:\n return True\n return False\n\n def _get_list_fences_at_coordinate(self, coordinate):\n \"\"\"\n Get fences at a coordinate as a list\n :param coordinate: inputted coordinate\n :return: list of Fences object at that coordinate\n \"\"\"\n return self._board[coordinate][\"fence\"]\n\n def _get_pawn_at_coordinate(self, coordinate):\n \"\"\"\n Get pawn at a coordinate\n :param coordinate: inputted coordinate\n :return: Pawn at that coordinate\n \"\"\"\n return self._board[coordinate][\"pawn\"]\n\n def move_pawn(self, player_num, desired_coord):\n \"\"\"\n Validate if a pawn can be moved to the desired coordinate; if so, move it there\n :param player_num: player's number\n :param desired_coord: coordinate player would like to move pawn to\n :return:\n False if\n - desired coordinate is forbidden by the rules\n - or desired coordinate is blocked by the fence\n - or game has already been won\n True if desired move makes the player win\n \"\"\"\n # check if the game has already been won\n if self._has_game_been_won() is True:\n return False\n\n # get player from player_num\n this_player = self._get_player_from_num(player_num)\n\n # check if player entered is the correct current player\n if not self._is_valid_current_turn(this_player):\n return False # return False if invalid move\n\n # check if coordinate is forbidden by the rules (diagonal, move in orthogonal directions)\n # check if desired_coord is within valid pawn coordinate boundaries\n if not self._pawn_coordinate_within_boundary(desired_coord):\n return False\n\n # get a list of valid moves/coordinates the pawn can move to with its current location\n valid_pawn_moves = self._get_valid_pawn_moves(this_player)\n\n if desired_coord in valid_pawn_moves:\n\n # this_player.get_pawn_coordinate() will get player's current pawn coordinate on board\n # on board, update that current coordinate's Pawn key to None\n self._board[this_player.get_pawn_coordinate()][\"pawn\"] = None\n\n # update player object's current coordinate to desired coordinate\n this_player.set_pawn_coordinate(desired_coord)\n\n # update board to that player's new pawn coordinate\n self._board[this_player.get_pawn_coordinate()][\"pawn\"] = this_player.get_player_num()\n\n # determine if player has won with that new pawn coordinate\n if this_player.is_pawn_in_winning_coordinate() is True:\n self._set_status_to_game_won_update_winner(this_player) # if so, change game status\n return True\n\n # update current turn to the other player for the next round\n self._change_current_turn()\n return True\n\n else: # if desired_coord is not in valid_pawn_moves\n return False\n\n def place_fence(self, player_num, direction, desired_coord):\n \"\"\"\n Validate and place fence at desired coordinate.\n :param player_num: number of the player\n :param direction: direction of the fence (horizontal, vertical)\n :param desired_coord: coordinate player would like to move pawn to\n :return:\n False if\n - player has no fence left\n - fence is out of the boundaries of the board\n - if there is already a fence there and desired coordinate will overlap/ intersect with existing fence\n - if the game has already been won\n True if\n - fence can be placed there\n \"\"\"\n # check if the game has already been won\n if self._has_game_been_won() is True:\n return False\n\n # get player from player_num\n this_player = self._get_player_from_num(player_num)\n\n # check if player entered is the correct current player\n if not self._is_valid_current_turn(this_player):\n return False # return False if invalid move\n\n # check if player has fences left\n if not this_player.has_fences_remaining():\n return False\n\n # check if desired_coord is within valid fence coordinate boundaries\n if not self._fence_coordinate_within_boundary(desired_coord):\n return False\n\n # check if another fence is already there and new fence will overlap or intersect with the fence\n fences_at_desired_coord = self._get_list_fences_at_coordinate(desired_coord)\n if len(fences_at_desired_coord) > 0:\n # check if there's already a fence with that direction\n for fence in fences_at_desired_coord:\n if fence.get_fence_direction() == direction:\n return False\n\n # extra credit: check fair play rule\n is_break_fair_play_rule = self._is_fair_play(direction, desired_coord)\n if is_break_fair_play_rule == \"breaks the fair play rule\":\n return is_break_fair_play_rule\n\n # place fence\n self._board[desired_coord][\"fence\"].append(Fence(direction))\n this_player.decrement_num_fences_available()\n\n # update current turn to the other player for the next round\n self._change_current_turn()\n\n return True\n\n def _is_fair_play(self, direction, desired_coord):\n \"\"\"\n Checks if fence placement would violate the fair play rule\n :param direction: direction of fence being placed\n :param desired_coord: desired coordinate of fence being placed\n :return:\n - \"breaks the fair play rule\" if the fair play rule is broken\n \"\"\"\n test_fence = Fence(direction) # temporarily add fence to desired_coord\n self._board[desired_coord][\"fence\"].append(test_fence)\n\n player_has_path_to_winning_coordinate = {self._player1: None, self._player2: None} # value will be True or False\n\n for player in self._players:\n coordinates_visited = set()\n original_pawn_coordinate = player.get_pawn_coordinate()\n\n result = self._is_fair_play_helper(player, original_pawn_coordinate, coordinates_visited)\n player_has_path_to_winning_coordinate[player] = result\n\n coord_with_pawn_to_remove = player.get_pawn_coordinate()\n self._board[coord_with_pawn_to_remove][\"pawn\"] = None\n player.set_pawn_coordinate(original_pawn_coordinate) # revert player's pawn to original pawn coordinate\n self._board[original_pawn_coordinate][\"pawn\"] = player.get_player_num()\n\n self._board[desired_coord][\"fence\"].remove(test_fence) # revert board to original (remove fence that was added\n\n complies_with_fair_play_rule_count = 0\n for value in player_has_path_to_winning_coordinate.values():\n if value is True:\n complies_with_fair_play_rule_count += 1\n\n if complies_with_fair_play_rule_count != len(player_has_path_to_winning_coordinate):\n return \"breaks the fair play rule\"\n\n def _is_fair_play_helper(self, player, curr_coord, coordinates_visited):\n \"\"\"\n\n :param player:\n :param curr_coord:\n :param coordinates_visited:\n :return:\n \"\"\"\n if player.is_pawn_in_winning_coordinate() is True:\n return True # is part of fair play rule\n valid_moves = self._get_valid_pawn_moves(player)\n for move in valid_moves:\n if move not in coordinates_visited:\n coordinates_visited.add(move)\n self._board[curr_coord][\"pawn\"] = None\n player.set_pawn_coordinate(move)\n self._board[move][\"pawn\"] = player.get_player_num()\n\n if self._is_fair_play_helper(player, move, coordinates_visited):\n return True\n return False\n\n def _fence_coordinate_within_boundary(self, coordinate):\n \"\"\"\n Check if coordinate of a fence is within the valid boundary\n :param coordinate: coordinate of the fence\n :return: False if it is within the valid boundary, True if it is not within the valid boundary\n \"\"\"\n col_num = coordinate[0]\n row_num = coordinate[1]\n\n if col_num < 1 or col_num > 8:\n return False\n if row_num < 1 or row_num > 8:\n return False\n return True\n\n def _pawn_coordinate_within_boundary(self, coordinate):\n \"\"\"\n Check if coordinate of a pawn is within the valid boundary\n :param coordinate: coordinate of the pawn\n :return: False if it is within the valid boundary, True if it is not within the valid boundary\n \"\"\"\n col_num = coordinate[0]\n row_num = coordinate[1]\n\n if col_num < 0 or col_num > 8:\n return False\n if row_num < 0 or row_num > 8:\n return False\n return True\n\n def _get_valid_pawn_moves(self, player):\n \"\"\"\n Get a list of valid moves a player's pawn can move to with its current coordinate\n :param player: player that is moving their pawn\n :return: list of valid moves a player's pawn can move to with its current coordinate\n \"\"\"\n current_pawn_coordinate = player.get_pawn_coordinate()\n\n # check valid moves in each direction\n valid_north = self._get_valid_north_pawn_moves(current_pawn_coordinate)\n valid_south = self._get_valid_south_pawn_moves(current_pawn_coordinate)\n valid_west = self._get_valid_west_pawn_moves(current_pawn_coordinate)\n valid_east = self._get_valid_east_pawn_moves(current_pawn_coordinate)\n valid_moves = valid_north + valid_south + valid_west + valid_east\n\n return valid_moves\n\n def _get_valid_south_pawn_moves(self, current_pawn_coordinate):\n \"\"\"\n Get list of southern, southeastern (se), and southwestern (sw) moves a pawn can move to.\n :param current_pawn_coordinate: current pawn coordinate\n :return: list of southern, southeastern (se), and southwestern (sw) moves a pawn can move to\n \"\"\"\n curr_col = current_pawn_coordinate[0]\n curr_row = current_pawn_coordinate[1]\n valid_south_moves = []\n south_row = curr_row + 1\n\n s_coord = (curr_col, south_row)\n s_coord_is_valid = self._pawn_coordinate_within_boundary(s_coord)\n s_coord_has_h_fence = self._coordinate_has_h_fence(s_coord)\n\n se_coord = (curr_col + 1, curr_row + 1)\n se_coord_is_valid = self._pawn_coordinate_within_boundary(se_coord)\n\n sw_coord = (curr_col - 1, curr_row + 1)\n sw_coord_is_valid = self._pawn_coordinate_within_boundary(se_coord)\n\n s_of_s_coord = (curr_col, south_row+1)\n s_of_s_coord_is_valid = self._pawn_coordinate_within_boundary(s_of_s_coord)\n\n if s_coord_is_valid is False:\n return valid_south_moves # empty list\n\n # check h fence directions in south coordinate\n if s_coord_has_h_fence:\n return valid_south_moves # empty list because pawn can't go south\n\n # at this point, s_coord does exist and there's no immediate south fence blocking\n south_pawn = self._get_pawn_at_coordinate(s_coord)\n if south_pawn is not None: # check if there's a pawn in the south coordinate\n\n # if no fence behind adjacent pawn\n if s_of_s_coord_is_valid is True and self._coordinate_has_h_fence(s_of_s_coord) is False:\n valid_south_moves.append(s_of_s_coord)\n return valid_south_moves\n\n # fence behind adjacent pawn\n if s_of_s_coord_is_valid is True and self._coordinate_has_h_fence(s_of_s_coord) is True:\n if se_coord_is_valid is True: # check if we can move se\n if self._coordinate_has_v_fence(se_coord) is False:\n valid_south_moves.append(se_coord)\n\n if sw_coord_is_valid is True: # check if we can move sw\n if self._coordinate_has_v_fence(sw_coord) is False:\n valid_south_moves.append(sw_coord)\n\n else: # if there's no pawn in south coordinate blocking\n valid_south_moves.append(s_coord)\n\n return valid_south_moves\n\n def _coordinate_has_v_fence(self, coordinate):\n \"\"\"\n Check if coordinate has a vertical fence\n :param coordinate: inputted coordinate\n :return: True if the coordinate has a vertical fence. False otherwise.\n \"\"\"\n for fence in self._get_list_fences_at_coordinate(coordinate):\n if fence.get_fence_direction() == \"v\":\n return True\n return False\n\n def _coordinate_has_h_fence(self, coordinate):\n \"\"\"\n Check if coordinate has a horizontal fence\n :param coordinate: inputted coordinate\n :return: True if the coordinate has a horizontal fence. False otherwise.\n \"\"\"\n for fence in self._get_list_fences_at_coordinate(coordinate):\n if fence.get_fence_direction() == \"h\":\n return True\n return False\n\n def _get_valid_north_pawn_moves(self, current_pawn_coordinate):\n \"\"\"\n Get list of northern, northeastern (ne), and northwestern (nw) moves a pawn can move to.\n :param current_pawn_coordinate: current pawn coordinate\n :return: list of northern, northeastern (ne), and northwestern (nw) moves a pawn can move to\n \"\"\"\n curr_col = current_pawn_coordinate[0]\n curr_row = current_pawn_coordinate[1]\n valid_north_moves = []\n north_row = curr_row - 1\n\n n_coord = (curr_col, north_row)\n n_coord_is_valid = self._pawn_coordinate_within_boundary(n_coord)\n\n ne_coord = (curr_col + 1, north_row)\n ne_coord_is_valid = self._pawn_coordinate_within_boundary(ne_coord)\n\n nw_coord = (curr_col - 1, north_row)\n nw_coord_is_valid = self._pawn_coordinate_within_boundary(nw_coord)\n\n n_of_n_coord = (curr_col, north_row+1)\n n_of_n_coord_is_valid = self._pawn_coordinate_within_boundary(n_of_n_coord)\n\n if n_coord_is_valid is False:\n return valid_north_moves # empty list\n\n current_coord_has_h_fence = self._coordinate_has_h_fence(current_pawn_coordinate)\n if current_coord_has_h_fence is True:\n return valid_north_moves # empty list\n\n # at this point, n_coord does exist and there's no immediate north fence blocking\n north_pawn = self._get_pawn_at_coordinate(n_coord)\n if north_pawn is not None: # check if there's a pawn in the north coordinate\n\n if n_of_n_coord_is_valid and self._coordinate_has_h_fence(n_coord) is False:\n valid_north_moves.append(n_of_n_coord)\n return valid_north_moves\n\n if n_of_n_coord_is_valid is True and self._coordinate_has_h_fence(n_coord) is True:\n if nw_coord_is_valid is True:\n if self._coordinate_has_v_fence(n_coord) is False: # check if we can move nw\n valid_north_moves.append(nw_coord)\n if ne_coord_is_valid is True:\n if self._coordinate_has_v_fence(ne_coord) is False:\n valid_north_moves.append(ne_coord)\n else: # if there's no pawn in n_coord blocking\n valid_north_moves.append(n_coord)\n return valid_north_moves\n\n def _get_valid_west_pawn_moves(self, current_pawn_coordinate):\n \"\"\"\n Get list of western, northwestern (nw), and southwestern (se) moves a pawn can move to.\n :param current_pawn_coordinate: current pawn coordinate\n :return: list of western, northwestern (nw), and southwestern (se) moves a pawn can move to.\n \"\"\"\n curr_col = current_pawn_coordinate[0]\n curr_row = current_pawn_coordinate[1]\n valid_west_moves = []\n west_col = curr_col - 1\n\n w_coord = (west_col, curr_row)\n w_coord_is_valid = self._pawn_coordinate_within_boundary(w_coord)\n\n w_of_w_coord = (west_col - 1, curr_row)\n w_of_w_coord_is_valid = self._pawn_coordinate_within_boundary(w_of_w_coord)\n\n nw_coord = (west_col, curr_row-1)\n nw_coord_is_valid = self._pawn_coordinate_within_boundary(nw_coord)\n\n sw_coord = (west_col, curr_row+1)\n sw_coord_is_valid = self._pawn_coordinate_within_boundary(sw_coord)\n\n if w_coord_is_valid is False:\n return valid_west_moves # empty list\n\n # check if immediate western fence is blocking\n if self._coordinate_has_v_fence(current_pawn_coordinate):\n return valid_west_moves # empty list\n\n # at this point, w_coord does exist and there's no immediate western fence blocking\n west_pawn = self._get_pawn_at_coordinate(w_coord)\n if west_pawn is not None:\n\n # no fence behind adjacent pawn\n if w_of_w_coord_is_valid is True and self._coordinate_has_v_fence(w_coord) is False:\n valid_west_moves.append(w_of_w_coord)\n return valid_west_moves\n\n # fence behind adjacent pawn\n if w_of_w_coord_is_valid is True and self._coordinate_has_v_fence(w_coord) is True:\n if nw_coord_is_valid is True:\n if self._coordinate_has_h_fence(w_coord) is False:\n valid_west_moves.append(nw_coord)\n\n if sw_coord_is_valid is True:\n if self._coordinate_has_h_fence(sw_coord) is False:\n valid_west_moves.append(sw_coord)\n\n else: # if there's no pawn in west coordinate blocking\n valid_west_moves.append(w_coord)\n\n return valid_west_moves\n\n def _get_valid_east_pawn_moves(self, current_pawn_coordinate):\n \"\"\"\n Get list of eastern, northeastern (ne), and southeastern (se) moves a pawn can move to.\n :param current_pawn_coordinate: current pawn coordinate\n :return: list of eastern, northeastern (ne), and southeastern (se) moves a pawn can move to.\n \"\"\"\n curr_col = current_pawn_coordinate[0]\n curr_row = current_pawn_coordinate[1]\n valid_east_moves = []\n east_col = curr_col + 1\n\n e_coord = (east_col, curr_row)\n e_coord_is_valid = self._pawn_coordinate_within_boundary(e_coord)\n\n e_of_e_coord = (east_col+1, curr_row)\n e_of_e_coord_is_valid = self._pawn_coordinate_within_boundary(e_of_e_coord)\n\n ne_coord = (east_col, curr_row-1)\n ne_coord_is_valid = self._pawn_coordinate_within_boundary(ne_coord)\n\n se_coord = (east_col, curr_row+1)\n se_coord_is_valid = self._pawn_coordinate_within_boundary(se_coord)\n\n if e_coord_is_valid is False:\n return valid_east_moves # empty list\n\n # check if immediate eastern fence is blocking\n if self._coordinate_has_v_fence(e_coord) is True:\n return valid_east_moves # empty list\n\n # at this point, e_coord does exist and there's no immediate western fence blocking\n east_pawn = self._get_pawn_at_coordinate(e_coord)\n if east_pawn is not None:\n # no fence behind adjacent pawn\n if e_of_e_coord_is_valid is True and self._coordinate_has_v_fence(e_of_e_coord) is False:\n valid_east_moves.append(e_of_e_coord)\n return valid_east_moves\n\n # fence behind adjacent pawn\n if e_of_e_coord_is_valid is True and self._coordinate_has_v_fence(e_of_e_coord) is True:\n if ne_coord_is_valid is True:\n if self._coordinate_has_h_fence(e_coord) is False:\n valid_east_moves.append(ne_coord)\n if se_coord_is_valid is True:\n if self._coordinate_has_h_fence(se_coord) is False:\n valid_east_moves.append(se_coord)\n\n else: # if there's no pawn in east coordinate blocking\n valid_east_moves.append(e_coord)\n\n return valid_east_moves\n\n def is_winner(self, player_num):\n \"\"\"\n Get if player has won\n :param player_num: player's number\n :return: True if that player has won, False if that player has not won\n \"\"\"\n this_player = self._get_player_from_num(player_num)\n if self._get_winner() == this_player:\n return True\n return False\n\n\nclass Fence:\n \"\"\"\n Class for a fence piece.\n Interacts with QuoridorGame class: fence objects with directions are placed on the board.\n \"\"\"\n def __init__(self, direction):\n self._v = False\n self._h = False\n self._edge = False\n\n if direction == \"edge\":\n self._edge = True\n if direction == \"h\":\n self._h = True\n if direction == \"v\":\n self._v = True\n\n def get_fence_direction(self):\n \"\"\"\n Get a fence object's direction\n :return: fence object's direction as a string\n \"\"\"\n if self._edge:\n return \"edge\"\n if self._h:\n return \"h\"\n if self._v:\n return \"v\"\n\n\nclass Player:\n \"\"\"\n Class for a Player.\n Interacts with QuoridorGame class: Players will have a number of fences, and each will have a pawn on the game board\n \"\"\"\n def __init__(self, player_num):\n self._player_num = player_num\n self._num_fences_available = 10\n\n if player_num == 1:\n self._pawn_coordinate = (4, 0)\n self._winning_coordinates = [(0, 8), (1, 8), (2, 8), (3, 8), (4, 8), (5, 8), (6, 8), (7, 8), (8, 8)]\n else: # if player 2\n self._pawn_coordinate = (4, 8)\n self._winning_coordinates = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]\n\n def is_pawn_in_winning_coordinate(self):\n \"\"\"\n Check if player's pawn is in a winning coordinate.\n :return: True if player's pawn is in a winning coordinate, False otherwise\n \"\"\"\n if self._pawn_coordinate in self._winning_coordinates:\n return True\n return False\n\n def set_pawn_coordinate(self, coordinate):\n \"\"\"\n Set the coordinate of a player's pawn\n :param coordinate: coordinate of a player's pawn\n :return: none\n \"\"\"\n self._pawn_coordinate = coordinate\n\n def get_pawn_coordinate(self):\n \"\"\"\n Get a player's pawn coordinate\n :return: player's pawn coordinate\n \"\"\"\n return self._pawn_coordinate\n\n def get_player_num(self):\n \"\"\"\n Get the player's number\n :return: player's number as an integer\n \"\"\"\n return self._player_num\n\n def get_num_fences_available(self):\n \"\"\"\n Get the number of fences a player has left\n :return: number of fences a player has left\n \"\"\"\n return self._num_fences_available\n\n def has_fences_remaining(self):\n \"\"\"\n Checks if a player has fences remaining\n :return: True if a player has fences remaining, False otherwise\n \"\"\"\n if self._num_fences_available == 0:\n return False\n return True\n\n def decrement_num_fences_available(self):\n \"\"\"\n Subtract one from the number of fences a player has left\n :return: none\n \"\"\"\n self._num_fences_available -= 1\n","repo_name":"umjennifer/Quoridor","sub_path":"Quoridor.py","file_name":"Quoridor.py","file_ext":"py","file_size_in_byte":29334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"33088555224","text":"\"\"\"\n 2606번: 바이러스\n Create by Kim Gayoun on 2022-06-21\n\"\"\"\nimport sys\ninput = sys.stdin.readline\n\n\ndef doBFS(G, V):\n result, queue = list(), list()\n p = 0 # pointer of queue\n cnode = V\n\n queue.append(cnode)\n while len(queue) > p:\n cnode = queue[p]; p += 1\n\n if cnode not in result:\n result.append(cnode)\n G[0][cnode] = 1\n\n for i in range(1, N + 1):\n if G[0][i] == 0 and G[cnode][i] == 1: queue.append(i)\n\n\n # print(*result)\n print(len(result) - 1)\n\n\nif __name__ == \"__main__\":\n N = int(input())\n M = int(input())\n G = [ [ 0 ] * (N + 1) for _ in range(N + 1) ]\n\n\n for _ in range(M):\n A, B = map(int, input().split())\n G[A][B] = 1\n G[B][A] = 1\n\n\n doBFS(G, 1)\n\n","repo_name":"makeAppsGreat/Baekjoon","sub_path":"python/2606.py","file_name":"2606.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4242902519","text":"#Aircraft Mach speeds, vertical velocity, and g-loads (mach, VVI, G-load)\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mlt\n\nf=open('Data.txt','r')\nlines=f.readlines()\nMach_ratio=[]\nVVI_fpm=[]\nGload_norml=[]\nGload_axial=[]\nGload_side=[]\nf.close()\n\nfig=plt.figure(figsize=(10,10))\nfig\n\nfor line in lines[1:]:\n Mach_ratio.append(line[117:125])\nP_Mach_ratio=pd.Series(Mach_ratio)\nP_Mach_ratio=P_Mach_ratio.astype(float)\nplt.subplot(321)\nplt.plot(P_Mach_ratio)\nplt.xticks(fontsize=8)\nplt.title('Mach_ratio',fontsize=8)\n\nfor line in lines[1:]:\n VVI_fpm.append(line[133:141])\nP_VVI_fpm=pd.Series(VVI_fpm)\nP_VVI_fpm=P_VVI_fpm.astype(float)\nplt.subplot(322)\nplt.plot(P_VVI_fpm)\nplt.xticks(fontsize=8)\nplt.title('VVI_fpm',fontsize=8)\n\nfor line in lines[1:]:\n Gload_norml.append(line[149:157])\nP_Gload_norml=pd.Series(Gload_norml)\nP_Gload_norml=P_Gload_norml.astype(float)\nplt.subplot(323)\nplt.plot(P_Gload_norml)\nplt.xticks(fontsize=8)\nplt.title('Gload_norml',fontsize=8)\n\nfor line in lines[1:]:\n Gload_axial.append(line[165:173])\nP_Gload_axial=pd.Series(Gload_axial)\nP_Gload_axial=P_Gload_axial.astype(float)\nplt.subplot(324)\nplt.plot(P_Gload_axial)\nplt.xticks(fontsize=8)\nplt.title('Gload_axial',fontsize=8)\n\nfor line in lines[1:]:\n Gload_side.append(line[181:189])\nP_Gload_side=pd.Series(Gload_side)\nP_Gload_side=P_Gload_side.astype(float)\nplt.subplot(325)\nplt.plot(P_Gload_side)\nplt.xticks(fontsize=8)\nplt.title('Gload_side',fontsize=8)\n\nplt.show()","repo_name":"dnguszz/ETC","sub_path":"planedata/Mach_speed.py","file_name":"Mach_speed.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18746669502","text":"import facebook\nimport requests\nfrom . import secrets\nURL = \"https://graph.facebook.com/oauth/access_token\"\n\ndef connect(version=\"2.5\"):\n\n r = requests.get(URL, {\n 'client_id': secrets.APP_ID,\n 'client_secret': secrets.APP_SECRET,\n 'grant_type': 'client_credentials',\n })\n\n r.raise_for_status()\n key, value = r.text.split(\"=\")\n\n print(r.text)\n\n\n # r = requests.get(URL, {\n # 'client_id': secrets.APP_ID,\n # 'client_secret': secrets.APP_SECRET,\n # 'grant_type': 'fb_exchange_token',\n # 'fb_exchange_token':value\n # })\n #\n # r.raise_for_status()\n\n # key, value = r.text.split(\"=\")\n # assert key == \"access_token\"\n\n # graph = facebook.GraphAPI(access_token=value,version='2.5')\n #\n # token = graph.extend_access_token(secrets.APP_ID, secrets.APP_SECRET)\n\n # with open(\"TOKEN.txt\", \"w\") as f:\n # f.write(value)\n\n assert key == \"access_token\"\n\n # print(\"Connected to FB graph API\")\n\n return facebook.GraphAPI(access_token=value, version=version)\n\n\ndef connect_with_token(token, version=\"2.5\"):\n print('connecting using ' + token)\n return facebook.GraphAPI(access_token=token, version=version)\n","repo_name":"miacaplan/FBFilter","sub_path":"FB/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25003160264","text":"from simulator import get_simulator\n\nneurons = [\"pyramidal_1\", \"pyramidal_2\"]\n\nsimm = LFPy_util.SimulatorManager()\n# Number of LFPy_util.Simulator objects that will run in parallel.\nsimm.concurrent_neurons = 8\nsimm.set_neuron_names(neurons)\nsimm.set_sim_load_func(get_simulator)\n\nsimm.simulate()\nsimm.plot()\n","repo_name":"lastis/master","sub_path":"documents/thesis/examples/multiple_neurons.py","file_name":"multiple_neurons.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5982627572","text":"from zstacklib.utils import log\n\nlogger = log.get_logger(__name__)\n\n\ndef _load(stdout, sep=None):\n # type: (str, str) -> list[dict]\n ret = []\n lines = stdout.splitlines()\n if len(lines) < 2:\n return ret\n\n heads = lines[0].split(sep)\n for l in lines[1:]:\n o = {}\n # init\n for h in heads:\n o[h] = None\n\n for i, v in enumerate(l.split(sep)):\n o[heads[i]] = v\n ret.append(o)\n\n return ret\n\n\ndef load(stdout, sep=None):\n try:\n return _load(stdout, sep)\n except Exception as e:\n logger.debug(\"not a standard form:%s\" % e.message)\n return []\n","repo_name":"zstackio/zstack-utility","sub_path":"zstacklib/zstacklib/utils/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"22"} +{"seq_id":"28918901980","text":"import os\nimport sys\nimport json\nimport time\nimport logging\nimport collections\nimport re\n\nfrom oeqa.core.loader import OETestLoader\nfrom oeqa.core.runner import OETestRunner, OEStreamLogger, xmlEnabled\n\nclass OETestContext(object):\n loaderClass = OETestLoader\n runnerClass = OETestRunner\n streamLoggerClass = OEStreamLogger\n\n files_dir = os.path.abspath(os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"../files\"))\n\n def __init__(self, td=None, logger=None):\n if not type(td) is dict:\n raise TypeError(\"td isn't dictionary type\")\n\n self.td = td\n self.logger = logger\n self._registry = {}\n self._registry['cases'] = collections.OrderedDict()\n self._results = {}\n\n def _read_modules_from_manifest(self, manifest):\n if not os.path.exists(manifest):\n raise\n\n modules = []\n for line in open(manifest).readlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n modules.append(line)\n\n return modules\n\n def loadTests(self, module_paths, modules=[], tests=[],\n modules_manifest=\"\", modules_required=[], filters={}):\n if modules_manifest:\n modules = self._read_modules_from_manifest(modules_manifest)\n\n self.loader = self.loaderClass(self, module_paths, modules, tests,\n modules_required, filters)\n self.suites = self.loader.discover()\n\n def runTests(self):\n streamLogger = self.streamLoggerClass(self.logger)\n self.runner = self.runnerClass(self, stream=streamLogger, verbosity=2)\n\n self._run_start_time = time.time()\n result = self.runner.run(self.suites)\n self._run_end_time = time.time()\n\n return result\n\n def logSummary(self, result, component, context_msg=''):\n self.logger.info(\"SUMMARY:\")\n self.logger.info(\"%s (%s) - Ran %d test%s in %.3fs\" % (component,\n context_msg, result.testsRun, result.testsRun != 1 and \"s\" or \"\",\n (self._run_end_time - self._run_start_time)))\n\n if result.wasSuccessful():\n msg = \"%s - OK - All required tests passed\" % component\n else:\n msg = \"%s - FAIL - Required tests failed\" % component\n skipped = len(self._results['skipped'])\n if skipped: \n msg += \" (skipped=%d)\" % skipped\n self.logger.info(msg)\n\n def _getDetailsNotPassed(self, case, type, desc):\n found = False\n\n for (scase, msg) in self._results[type]:\n # XXX: When XML reporting is enabled scase is\n # xmlrunner.result._TestInfo instance instead of\n # string.\n if xmlEnabled:\n if case.id() == scase.test_id:\n found = True\n break\n scase_str = scase.test_id\n else:\n if case == scase:\n found = True\n break\n scase_str = str(scase)\n\n # When fails at module or class level the class name is passed as string\n # so figure out to see if match\n m = re.search(\"^setUpModule \\((?P.*)\\)$\", scase_str)\n if m:\n if case.__class__.__module__ == m.group('module_name'):\n found = True\n break\n\n m = re.search(\"^setUpClass \\((?P.*)\\)$\", scase_str)\n if m:\n class_name = \"%s.%s\" % (case.__class__.__module__,\n case.__class__.__name__)\n\n if class_name == m.group('class_name'):\n found = True\n break\n\n if found:\n return (found, msg)\n\n return (found, None)\n\n def logDetails(self):\n self.logger.info(\"RESULTS:\")\n for case_name in self._registry['cases']:\n case = self._registry['cases'][case_name]\n\n result_types = ['failures', 'errors', 'skipped', 'expectedFailures']\n result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL']\n\n fail = False\n desc = None\n for idx, name in enumerate(result_types):\n (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],\n result_desc[idx])\n if fail:\n desc = result_desc[idx]\n break\n\n oeid = -1\n for d in case.decorators:\n if hasattr(d, 'oeid'):\n oeid = d.oeid\n \n if fail:\n self.logger.info(\"RESULTS - %s - Testcase %s: %s\" % (case.id(),\n oeid, desc))\n if msg:\n self.logger.info(msg)\n else:\n self.logger.info(\"RESULTS - %s - Testcase %s: %s\" % (case.id(),\n oeid, 'PASSED'))\n\nclass OETestContextExecutor(object):\n _context_class = OETestContext\n\n name = 'core'\n help = 'core test component example'\n description = 'executes core test suite example'\n\n default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),\n 'cases/example')]\n default_test_data = os.path.join(default_cases[0], 'data.json')\n default_tests = None\n\n def register_commands(self, logger, subparsers):\n self.parser = subparsers.add_parser(self.name, help=self.help,\n description=self.description, group='components')\n\n self.default_output_log = '%s-results-%s.log' % (self.name,\n time.strftime(\"%Y%m%d%H%M%S\"))\n self.parser.add_argument('--output-log', action='store',\n default=self.default_output_log,\n help=\"results output log, default: %s\" % self.default_output_log)\n self.parser.add_argument('--run-tests', action='store',\n default=self.default_tests,\n help=\"tests to run in [.[.]] format. Just works for modules now\")\n\n if self.default_test_data:\n self.parser.add_argument('--test-data-file', action='store',\n default=self.default_test_data,\n help=\"data file to load, default: %s\" % self.default_test_data)\n else:\n self.parser.add_argument('--test-data-file', action='store',\n help=\"data file to load\")\n\n if self.default_cases:\n self.parser.add_argument('CASES_PATHS', action='store',\n default=self.default_cases, nargs='*',\n help=\"paths to directories with test cases, default: %s\"\\\n % self.default_cases)\n else:\n self.parser.add_argument('CASES_PATHS', action='store',\n nargs='+', help=\"paths to directories with test cases\")\n\n self.parser.set_defaults(func=self.run)\n\n def _setup_logger(self, logger, args):\n formatter = logging.Formatter('%(asctime)s - ' + self.name + \\\n ' - %(levelname)s - %(message)s')\n sh = logger.handlers[0]\n sh.setFormatter(formatter)\n fh = logging.FileHandler(args.output_log)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n def _process_args(self, logger, args):\n self.tc_kwargs = {}\n self.tc_kwargs['init'] = {}\n self.tc_kwargs['load'] = {}\n self.tc_kwargs['run'] = {}\n\n self.tc_kwargs['init']['logger'] = self._setup_logger(logger, args)\n if args.test_data_file:\n self.tc_kwargs['init']['td'] = json.load(\n open(args.test_data_file, \"r\"))\n else:\n self.tc_kwargs['init']['td'] = {}\n\n\n if args.run_tests:\n self.tc_kwargs['load']['modules'] = args.run_tests.split()\n else:\n self.tc_kwargs['load']['modules'] = None\n\n self.module_paths = args.CASES_PATHS\n\n def run(self, logger, args):\n self._process_args(logger, args)\n\n self.tc = self._context_class(**self.tc_kwargs['init'])\n self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])\n rc = self.tc.runTests(**self.tc_kwargs['run'])\n self.tc.logSummary(rc, self.name)\n self.tc.logDetails()\n\n output_link = os.path.join(os.path.dirname(args.output_log),\n \"%s-results.log\" % self.name)\n if os.path.exists(output_link):\n os.remove(output_link)\n os.symlink(args.output_log, output_link)\n\n return rc\n\n_executor_class = OETestContextExecutor\n","repo_name":"klihub/poky-flatpak","sub_path":"meta/lib/oeqa/core/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":8554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"3123461715","text":"import requests\nfrom requests_html import HTMLSession\n# 文件保存目录\npath = 'C:/Users/推广部/Desktop/表情包/'\n# 保存为.jpg格式\ndef save(respone, name):\n with open(path + name + '.jpg', 'wb') as f:\n f.write(respone)\n# 保存为.gif格式\ndef savegif(respone, name):\n with open(path + name + '.gif', 'wb') as f:\n f.write(respone)\ndef main():\n # 爬取表情包图片\n for i in range(2, 201):\n b = '/lists/page/' + str(i) + '.html'\n url = \"https://fabiaoqing.com/biaoqing\"+b\n session = HTMLSession()\n r = session.get(url)\n # print(r.html.html)\n # 直接定位到img标签,具体分析,获取相应的数据\n # print(r.html.find('img'))\n result = r.html.xpath('//*[@class=\"tagbqppdiv\"]/a/img')\n # 下载图片\n for idx in range(len(result)):\n try:\n temp_result = result[idx].attrs\n print(temp_result)\n image_name = temp_result['title']\n img_url = temp_result['data-original']\n print('第%d个,url:%s' % (idx + 1, img_url))\n connet = requests.get(img_url, timeout=15)\n # 判断文件格式\n if (img_url[-3:] == 'jpg'):\n save(connet.content, image_name)\n else:\n savegif(connet.content, image_name)\n except Exception as e:\n print(e)\n print('第:', i, '页表情包下载完成')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xwk134/myspider","sub_path":"ui_spider/crawler_case/biaoqingbao.py","file_name":"biaoqingbao.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28661016884","text":"\"\"\" Run Length Encoding \"\"\"\r\ndef main():\r\n \"\"\" encode \"\"\"\r\n txt = input()\r\n txtout = ''\r\n count = 0\r\n numtxt = 0\r\n while numtxt < len(txt):\r\n if count == 0:\r\n start = txt[numtxt]\r\n if txt[numtxt] == start:\r\n count += 1\r\n numtxt += 1\r\n if numtxt == len(txt) or txt[numtxt] != start:\r\n txtout += '%d' % count + start\r\n count = 0\r\n print(txtout)\r\nmain()\r\n","repo_name":"bonnibelz13/pscp-lab","sub_path":"CODE/Run Length Encoding.py","file_name":"Run Length Encoding.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25816043381","text":"import os\nimport hashlib\nimport requests\nimport json\nimport jwt\nfrom datetime import datetime, timedelta\nfrom ..services import util, sqlite\n\ndef get_details(id):\n query = \"SELECT * FROM users WHERE id = ?\"\n params = (id,)\n return sqlite.read(query, params, one=True)\n\ndef lookup(oauth):\n query = \"SELECT * FROM users WHERE oauth = ?\"\n params = (oauth,)\n return sqlite.read(query, params, one=True)\n\ndef create_login_token(sub):\n return jwt.encode({\n 'sub': sub,\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(minutes=60*24*30)\n }, get_secret_token())\n\ndef get_user_data_from_token(token):\n token_dict = verify_token(token)\n if not token_dict:\n util.logger.error(f'Could not verify token: {token}')\n return False\n return lookup(token_dict.get('sub'))\n\ndef get_secret_token():\n return os.environ.get('JWT_SIGNING_TOKEN')\n\ndef verify_token(token): \n try: \n response = jwt.decode(token, get_secret_token())\n except:\n util.logger.error(f'Bad token: {token}')\n return False\n return response\n\ndef find_or_create_user(oauth):\n user_hash = hashlib.sha224(oauth.encode('ascii')).hexdigest()\n query = \"INSERT INTO users (oauth, last_login) VALUES (?, strftime('%Y-%m-%dT%H:%M:%fZ', 'now')) ON CONFLICT (oauth) DO UPDATE SET last_login = strftime('%Y-%m-%dT%H:%M:%fZ', 'now')\"\n params = (user_hash,)\n if not sqlite.write(query, params):\n return False\n return user_hash\n\ndef github_login(token):\n auth_response = github_token_to_access_code(token)\n if not auth_response:\n return False\n user_profile = github_get_user_profile(auth_response.get('access_token'))\n if not user_profile:\n return False\n return user_profile\n\ndef github_token_to_access_code(token):\n payload = {\n 'client_id': os.environ.get('GITHUB_CLIENT_ID'),\n 'client_secret': os.environ.get('GITHUB_CLIENT_SECRET'),\n 'code': token\n }\n response = requests.post(\"https://github.com/login/oauth/access_token\", data=payload, headers={'Accept': 'application/json'})\n if response.status_code != 200:\n return False\n return response.json()\n\ndef github_get_user_profile(oauth_token):\n response = requests.get(\"https://api.github.com/user\", headers={'Authorization': f\"token {oauth_token}\"})\n if response.status_code != 200:\n return False\n return response.json()\n\n\ndef google_verify_access_token(id_token):\n # We're doing it the lazy way here. What we get from the client side is JWT, we can just verify that instead of calling Google\n # Reason for that is to reduce the amount of dependencies for this, a demo app\n # For production, we should do it the right way by using google-auth\n\n response = requests.get(f'https://oauth2.googleapis.com/tokeninfo?id_token={id_token}').json()\n if response.get('error'):\n errmsg = response.get('error_description')\n util.logger.error(f\"[USER|google_verify_access_token] {errmsg}\")\n return False\n # Here, you should check that your domain name is in hd\n # if jwt['hd'] == 'example.com':\n # return jwt\n # For now, we're just going to accept all\n return response\n\n\nFACEBOOK_URL_APP_TOKEN = f'https://graph.facebook.com/oauth/access_token?client_id={os.environ.get(\"FACEBOOK_CLIENT_ID\")}&client_secret={os.environ.get(\"FACEBOOK_CLIENT_SECRET\")}&grant_type=client_credentials'\ndef facebook_get_app_token():\n return requests.get(FACEBOOK_URL_APP_TOKEN).json()['access_token']\n\ndef facebook_verify_access_token(access_token):\n app_token = facebook_get_app_token()\n access_token_url = f'https://graph.facebook.com/debug_token?input_token={access_token}&access_token={app_token}'\n try:\n debug_token = requests.get(access_token_url).json()['data']\n except (ValueError, KeyError, TypeError) as error:\n util.logger.error(f\"[USER|facebook_verify_access_token] {error}\")\n return error\n user_data_url = f\"https://graph.facebook.com/{debug_token['user_id']}/?&access_token={app_token}\"\n user_data = requests.get(user_data_url).json()\n return user_data\n\n'''\ndef find_or_create_user(oauth_source, user_id, oauth_payload):\n user_plaintext = f\"{oauth_source}|{user_id}\"\n user_hash = hashlib.sha224(user_plaintext.encode('ascii')).hexdigest()\n query = \"INSERT OR IGNORE INTO users (userhash, source, payload) VALUES (?,?,?)\"\n params = (user_hash, oauth_source, json.dumps(oauth_payload))\n if sqlite.write(query, params):\n return user_hash\n else:\n return False\n'''\n\n\n","repo_name":"willfong/docker-fastapi-vue","sub_path":"app/services/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"22"} +{"seq_id":"6563592246","text":"from flask import Flask, request, g\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask import jsonify\nimport json\nimport eth_account\nimport algosdk\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import load_only\nfrom datetime import datetime\nimport math\nimport sys\nimport traceback\n\nfrom algosdk import mnemonic\nimport web3\nfrom web3 import Web3, HTTPProvider\n\n# TODO: make sure you implement connect_to_algo, send_tokens_algo, and send_tokens_eth\nfrom send_tokens import connect_to_algo, connect_to_eth, send_tokens_algo, send_tokens_eth\n\nfrom models import Base, Order, TX, Log\n\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\napp = Flask(__name__)\n\n\"\"\" Pre-defined methods (do not need to change) \"\"\"\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n@app.teardown_appcontext\ndef shutdown_session(response_or_exc):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\ndef connect_to_blockchains():\n try:\n # If g.acl has not been defined yet, then trying to query it fails\n acl_flag = False\n g.acl\n except AttributeError as ae:\n acl_flag = True\n\n try:\n if acl_flag or not g.acl.status():\n # Define Algorand client for the application\n g.acl = connect_to_algo()\n print(\"connect_to_algo()-1\")\n except Exception as e:\n print(\"Trying to connect to algorand client again\")\n print(traceback.format_exc())\n g.acl = connect_to_algo()\n print(\"connect_to_algo()-1-e\")\n\n try:\n icl_flag = False\n g.icl\n except AttributeError as ae:\n icl_flag = True\n\n try:\n if icl_flag or not g.icl.health():\n # Define the index client\n g.icl = connect_to_algo(connection_type='indexer')\n print(\"connect_to_algo()-2\")\n except Exception as e:\n print(\"Trying to connect to algorand indexer client again\")\n print(traceback.format_exc())\n g.icl = connect_to_algo(connection_type='indexer')\n print(\"connect_to_algo()-2-e\")\n\n try:\n w3_flag = False\n g.w3\n except AttributeError as ae:\n w3_flag = True\n\n try:\n if w3_flag or not g.w3.isConnected():\n g.w3 = connect_to_eth()\n print(\"connect_to_eth()\")\n except Exception as e:\n print(\"Trying to connect to web3 again\")\n print(traceback.format_exc())\n g.w3 = connect_to_eth()\n print(\"connect_to_eth()-e\")\n\n\n\"\"\" End of pre-defined methods \"\"\"\n\n\"\"\" Helper Methods (skeleton code for you to implement) \"\"\"\n\n\ndef log_message(d):\n # Takes input dictionary d and writes it to the Log table\n return Log(message=json.dumps(d['payload']))\n\n\ndef get_algo_keys():\n # TODO: Generate or read (using the mnemonic secret)\n secret = \"typical permit hurdle hat song detail cattle merge oxygen crowd arctic cargo smooth fly rice vacuum lounge yard frown predict west wife latin absent cup\"\n return mnemonic.to_private_key(secret), mnemonic.to_public_key(secret)\n\n\ndef get_eth_keys(filename=\"eth_mnemonic.txt\"):\n w3 = Web3()\n\n # TODO: Generate or read (using the mnemonic secret)\n w3.eth.account.enable_unaudited_hdwallet_features()\n secret = \"double double teach potato comic hope problem enough stem upper behave brick\"\n\n acc = w3.eth.account.from_mnemonic(secret)\n return acc._private_key, acc._address\n\n\ndef fill_order(order, txes=[]):\n cur_order = order\n\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n\n for ord in orders:\n if ((ord.buy_currency == cur_order.sell_currency)\n and (ord.sell_currency == cur_order.buy_currency)\n and (ord.sell_amount / ord.buy_amount >= cur_order.buy_amount / cur_order.sell_amount)\n and (ord.counterparty_id == None)):\n order_list.append(ord)\n\n if len(order_list) > 0:\n ord_get = order_list[0]\n\n ord_get.filled = datetime.now()\n ord_get.counterparty_id = cur_order.id\n\n cur_order.filled = datetime.now()\n cur_order.counterparty_id = ord_get.id\n\n g.session.commit()\n\n compare_order(cur_order, ord_get, txes)\n\n\ndef compare_order(order_a, order_b, txes):\n if order_a.sell_amount < order_b.buy_amount:\n order_c = Order(sender_pk=order_b.sender_pk,\n receiver_pk=order_b.receiver_pk,\n buy_currency=order_b.buy_currency,\n sell_currency=order_b.sell_currency,\n buy_amount=order_b.buy_amount - order_a.sell_amount,\n sell_amount=(order_b.sell_amount / order_b.buy_amount) * (\n order_b.buy_amount - order_a.sell_amount),\n creator_id=order_b.id,\n tx_id=order_b.tx_id)\n g.session.add(order_c)\n g.session.commit()\n txes.append({'platform': order_a.buy_currency, 'receiver_pk': order_a.receiver_pk,\n 'order_id': order_a.id, 'amount': order_a.buy_amount})\n txes.append({'platform': order_b.buy_currency, 'receiver_pk': order_b.receiver_pk,\n 'order_id': order_b.id, 'amount': order_a.sell_amount})\n\n sub_order = g.session.query(Order).order_by(Order.id.desc()).first()\n fill_order(sub_order, txes)\n\n\n elif order_a.buy_amount > order_b.sell_amount:\n order_c = Order(sender_pk=order_a.sender_pk,\n receiver_pk=order_a.receiver_pk,\n buy_currency=order_a.buy_currency,\n sell_currency=order_a.sell_currency,\n buy_amount=order_a.buy_amount - order_b.sell_amount,\n sell_amount=(order_a.buy_amount - order_b.sell_amount) / (\n order_a.buy_amount / order_a.sell_amount),\n creator_id=order_a.id,\n tx_id=order_a.tx_id)\n g.session.add(order_c)\n g.session.commit()\n\n txes.append({'platform': order_a.buy_currency, 'receiver_pk': order_a.receiver_pk,\n 'order_id': order_a.id, 'amount': order_b.sell_amount})\n txes.append({'platform': order_b.buy_currency, 'receiver_pk': order_b.receiver_pk,\n 'order_id': order_b.id, 'amount': order_b.buy_amount})\n\n sub_order = g.session.query(Order).order_by(Order.id.desc()).first()\n fill_order(sub_order, txes)\n\n else:\n txes.append({'platform': order_a.buy_currency, 'receiver_pk': order_a.receiver_pk,\n 'order_id': order_a.id, 'amount': order_a.buy_amount})\n txes.append({'platform': order_b.buy_currency, 'receiver_pk': order_b.receiver_pk,\n 'order_id': order_b.id, 'amount': order_b.buy_amount})\n\n\ndef execute_txes(txes):\n if txes is None:\n return True\n if len(txes) == 0:\n return True\n print(f\"Trying to execute {len(txes)} transactions\")\n print(f\"IDs = {[tx['order_id'] for tx in txes]}\")\n eth_sk, eth_pk = get_eth_keys()\n algo_sk, algo_pk = get_algo_keys()\n\n if not all(tx['platform'] in [\"Algorand\", \"Ethereum\"] for tx in txes):\n print(\"Error: execute_txes got an invalid platform!\")\n print(tx['platform'] for tx in txes)\n\n algo_txes = [tx for tx in txes if tx['platform'] == \"Algorand\"]\n eth_txes = [tx for tx in txes if tx['platform'] == \"Ethereum\"]\n\n # TODO:\n # 1. Send tokens on the Algorand and eth testnets, appropriately\n # We've provided the send_tokens_algo and send_tokens_eth skeleton methods in send_tokens.py\n # 2. Add all transactions to the TX table\n algo_id = send_tokens_algo(g.acl, algo_sk, algo_txes)\n modify(algo_id, algo_txes)\n\n eth_id = send_tokens_eth(g.w3, eth_sk.hex(), eth_txes)\n modify(eth_id, eth_txes)\n\n\ndef modify(tx_id, txes):\n for i, tx_dict in enumerate(txes):\n tx = TX(platform=tx_dict['platform'],\n receiver_pk=tx_dict['receiver_pk'],\n order_id=tx_dict['order_id'],\n tx_id=tx_id[i])\n g.session.add(tx)\n g.session.commit()\n\n\ndef get_list(order):\n return {\n field.name: getattr(order, field.name)\n for field in order.__table__.columns\n }\n\n\n\"\"\" End of Helper methods\"\"\"\n\n\n@app.route('/address', methods=['POST'])\ndef address():\n if request.method == \"POST\":\n print(\"--------- address ---------\")\n content = request.get_json(silent=True)\n\n # check whether the input content contains a 'platform'\n if 'platform' not in content.keys():\n print(f\"Error: no platform provided\")\n return jsonify(\"Error: no platform provided\")\n\n # check whether the input platform is \"Ethereum\" or \"Algorand\"\n if not content['platform'] in [\"Ethereum\", \"Algorand\"]:\n print(f\"Error: {content['platform']} is an invalid platform\")\n return jsonify(f\"Error: invalid platform provided: {content['platform']}\")\n\n if content['platform'] == \"Ethereum\":\n return jsonify(get_eth_keys()[1])\n\n if content['platform'] == \"Algorand\":\n return jsonify(get_algo_keys()[1])\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print(\"In trade\", file=sys.stderr)\n connect_to_blockchains()\n get_eth_keys()\n get_algo_keys()\n if request.method == \"POST\":\n print(\"--------- trade ---------\")\n content = request.get_json(silent=True)\n columns = [\"buy_currency\", \"sell_currency\", \"buy_amount\", \"sell_amount\", \"platform\", \"tx_id\", \"receiver_pk\"]\n fields = [\"sig\", \"payload\"]\n\n # Orders should have two fields “payload” and \"sig\".\n error = False\n for field in fields:\n if not field in content.keys():\n print(f\"{field} not received by Trade\")\n error = True\n if error:\n print(json.dumps(content))\n return jsonify(False)\n\n error = False\n for column in columns:\n if not column in content['payload'].keys():\n print(f\"{column} not received by Trade\")\n error = True\n if error:\n print(json.dumps(content))\n return jsonify(False)\n\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n\n platforms = [\"Algorand\", \"Ethereum\"]\n if not platform in platforms:\n return jsonify(False)\n\n verified = False\n sender_pk = payload['sender_pk']\n platform = payload['platform']\n msg = json.dumps(payload)\n\n if platform == \"Algorand\":\n if algosdk.util.verify_bytes(msg.encode('utf-8'), sig, sender_pk):\n verified = True\n\n elif platform == \"Ethereum\":\n eth_encoded_msg = eth_account.messages.encode_defunct(text=msg)\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == sender_pk:\n verified = True\n\n if verified is False:\n log_message(msg)\n return jsonify(False)\n\n else:\n ord = Order(sender_pk=payload['sender_pk'],\n receiver_pk=payload['receiver_pk'],\n buy_currency=payload['buy_currency'],\n sell_currency=payload['sell_currency'],\n buy_amount=payload['buy_amount'],\n sell_amount=payload['sell_amount'],\n tx_id=payload['tx_id'],\n signature=sig)\n\n g.session.add(ord)\n\n txes = []\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n fill_order(current_order, txes)\n\n execute_txes(txes)\n return jsonify(True)\n\n\n@app.route('/order_book')\ndef order_book():\n orders = [\n get_list(order)\n for order in g.session.query(Order).all()\n ]\n\n return jsonify({\n 'data': orders\n })\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n","repo_name":"Darkmountz/22sp_mcit582","sub_path":"exchange_endpoint.py","file_name":"exchange_endpoint.py","file_ext":"py","file_size_in_byte":12219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74876912375","text":"# To import required modules:\nimport numpy as np\nimport time\nimport os\nimport sys\nimport matplotlib\nimport matplotlib.cm as cm #for color maps\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec #for specifying plot attributes\nfrom matplotlib import ticker #for setting contour plots to log scale\nimport scipy.integrate #for numerical integration\nimport scipy.misc #for factorial function\nfrom scipy.special import erf #error function, used in computing CDF of normal distribution\nimport scipy.interpolate #for interpolation functions\nimport corner #corner.py package for corner plots\n#matplotlib.rc('text', usetex=True)\n\nfrom syssimpyplots.general import *\nfrom syssimpyplots.compare_kepler import *\nfrom syssimpyplots.load_sims import *\nfrom syssimpyplots.plot_catalogs import *\nfrom syssimpyplots.plot_params import *\nfrom syssimpyplots.compute_RVs import *\n\n\n\n\n\n##### This module will be used to plot results of the optimization runs of our clustered model using bboptimize:\n\nsavefigures = False\nplt.ioff()\n\nrun_directory = 'AMD_system/Split_stars/Singles_ecc/Params11_KS/Distribute_AMD_equal/durations_norm_circ_singles_multis_GF2020_KS/'\nloadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Model_Optimization/' + run_directory + 'GP_files/'\nsavefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/' + run_directory #+ 'New_terms/'\n\nsample_names = ['all', 'bluer', 'redder']\nsample_colors = ['k', 'b', 'r']\n\nmodel_name = 'Clustered_P_R_fswp_bprp_AMD_Model'\n\n\n\n\n\n##### To iterate through each of the optimization runs (files), and extract the results:\n\nactive_params_symbols = [#r'$f_{\\sigma_{i,\\rm high}}$',\n #r'$f_{\\rm swpa}$',\n #r'$f_{\\rm swpa,bluer}$',\n #r'$f_{\\rm swpa,redder}$',\n r'$f_{\\rm swpa,med}$',\n r'$d(f_{\\rm swpa})/d(b_p-r_p)$',\n r'$\\ln{(\\lambda_c)}$',\n r'$\\ln{(\\lambda_p)}$',\n r'$\\Delta_c$',\n r'$\\alpha_P$',\n #r'$\\alpha_{P,\\rm med}$',\n #r'$d(\\alpha_P)/d(b_p-r_p)$',\n r'$\\alpha_{R1}$',\n r'$\\alpha_{R2}$',\n r'$\\sigma_e$',\n #r'$\\sigma_i$',\n #r'$\\sigma_{i,\\rm res}$',\n r'$\\sigma_R$',\n r'$\\sigma_N$'\n ] # this list of parameter symbols must match the order of parameters in 'active_params_names'!\n\n##### To read a file of recomputed distances and save it as a table format file for training an emulator:\n\ndef load_split_stars_recomputed_distances_file(file_name):\n sample_names = ['all', 'bluer', 'redder']\n\n active_params_evals = []\n d_used_keys_evals = {key: [] for key in sample_names}\n d_used_vals_evals = {key: [] for key in sample_names}\n d_used_vals_w_evals = {key: [] for key in sample_names}\n total_dist_w_evals = []\n\n with open(file_name, 'r') as file:\n for line in file:\n if line[0:19] == '# Active parameters':\n active_params_names = line[23:-3].split('\", \"')\n elif line[0:13] == 'Active_params':\n active_params = [float(x) for x in line[16:-2].split(', ')]\n active_params_evals.append(active_params)\n elif line[0:12] == 'Total_dist_w':\n total_dist_w = float(line[15:-2])\n total_dist_w_evals.append(total_dist_w)\n\n for key in sample_names:\n n = len(key)\n if line[0:n+2] == '[%s]' % key:\n\n if line[n+3:n+3+12] == 'd_used_keys:':\n d_used_keys = line[n+3+15:-3].split('\", \"')\n d_used_keys_evals[key].append(d_used_keys)\n\n elif line[n+3:n+3+12] == 'd_used_vals:':\n d_used_vals_str, d_used_vals_tot_str = line[n+3+14:-2].split('][')\n d_used_vals = [float(x) for x in d_used_vals_str.split(', ')]\n d_used_vals_evals[key].append(tuple(d_used_vals))\n\n elif line[n+3:n+3+13] == 'd_used_vals_w':\n d_used_vals_w_str, d_used_vals_tot_w_str = line[n+3+16:-2].split('][')\n d_used_vals_w = [float(x) for x in d_used_vals_w_str.split(', ')]\n d_used_vals_tot_w = float(d_used_vals_tot_w_str)\n d_used_vals_w_evals[key].append(tuple(d_used_vals_w))\n\n active_params_evals = np.array(active_params_evals)\n total_dist_w_evals = np.array(total_dist_w_evals)\n\n for sample in sample_names:\n d_used_keys_evals[sample] = np.array(d_used_keys_evals[sample])\n d_used_vals_evals[sample] = np.array(d_used_vals_evals[sample], dtype=[(d_key, 'f8') for d_key in d_used_keys_evals[sample][0]])\n d_used_vals_w_evals[sample] = np.array(d_used_vals_w_evals[sample], dtype=[(d_key, 'f8') for d_key in d_used_keys_evals[sample][0]])\n\n # To compute the sums of weighted distances per iteration, for each sample:\n dtot_samples_evals = {}\n dtot_w_samples_evals = {}\n for sample in sample_names:\n dtot_samples_evals[sample] = np.array([sum(x) for x in d_used_vals_evals[sample]])\n dtot_w_samples_evals[sample] = np.array([sum(x) for x in d_used_vals_w_evals[sample]])\n dtot_w_evals = sum(dtot_w_samples_evals[sample] for sample in sample_names)\n\n for i in range(len(dtot_w_evals)):\n a, b = dtot_w_evals[i], total_dist_w_evals[i]\n #if np.abs(a - b) > 1e-4:\n #print('{:<5}: {:<8}, {:<8}'.format(i, np.round(a,4), np.round(b,4)))\n\n return active_params_names, active_params_evals, d_used_vals_w_evals, dtot_w_samples_evals, dtot_w_evals\n\n\n\n\n\nN_best_save, keep_every = 100000, 10\nactive_params_names, active_params_evals, d_used_vals_w_evals, dtot_w_samples_evals, dtot_w_evals = load_split_stars_recomputed_distances_file(loadfiles_directory + 'Clustered_P_R_recompute_optim_best%s_every%s_targs86760.txt' % (N_best_save, keep_every))\n\n##### To save the best parameter values and the recomputed distances for training a GP emulator:\n'''\nactive_params_distances_table = np.concatenate((active_params_evals, np.array([dtot_w_samples_evals['all'], dtot_w_samples_evals['bluer'], dtot_w_samples_evals['redder'], dtot_w_evals]).transpose()), axis=1)\ntable_header = ' '.join(active_params_names) + ' dtot_w_all dtot_w_bluer dtot_w_redder dist_tot_weighted'\nfields_formats = ['%1.6f']*len(active_params_names) + ['%1.6f']*4\n#####active_params_distances_table = np.concatenate((active_params_evals, np.array([dtot_w_samples_evals['redder'], dtot_w_evals]).transpose()), axis=1)\n#####table_header = ' '.join(active_params_names) + ' dtot_w_redder dist_tot_weighted'\n#####fields_formats = ['%1.6f']*len(active_params_names) + ['%1.6f']*2\nnp.savetxt(loadfiles_directory + 'Active_params_recomputed_distances_table_best%s_every%s.txt' % (N_best_save, keep_every), active_params_distances_table, fmt=fields_formats, header=table_header, comments='')\n'''\n\n\n\n\n\n##### To plot corner plots of the same points, with the new distance terms as a colorscale:\n#'''\n#plot_cornerpy_wrapper(active_params_symbols, active_params_evals, title_kwargs={'fontsize':20}, save_name=savefigures_directory + model_name + '_best%s_every%s_corner.pdf' % (N_best_save, keep_every), save_fig=savefigures)\n\nplot_function_heatmap_averaged_grid_given_irregular_points_corner(active_params_symbols, active_params_evals, dtot_w_evals, flabel=r'$\\mathcal{D}_W$', show_points=False, save_name=savefigures_directory + model_name + '_best%s_every%s_corner_dtot_w.pdf' % (N_best_save, keep_every), save_fig=savefigures)\n\ndist_terms = ['radii_partitioning_KS', 'radii_monotonicity_KS', 'gap_complexity_KS']\n#dist_terms = ['radii_partitioning_AD', 'radii_monotonicity_AD', 'gap_complexity_AD']\nfor (i,key) in enumerate(dist_terms):\n plot_function_heatmap_averaged_grid_given_irregular_points_corner(active_params_symbols, active_params_evals, d_used_vals_w_evals['all'][key], flabel=key, show_points=False, save_name=savefigures_directory + model_name + '_best%s_every%s_corner_%s.pdf' % (N_best_save, keep_every, key), save_fig=savefigures)\nplt.show()\n#'''\n","repo_name":"hematthi/SysSimPyPlots","sub_path":"examples/plot_model_params/collect_and_plot_recomputed_distances.py","file_name":"collect_and_plot_recomputed_distances.py","file_ext":"py","file_size_in_byte":8388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"41135068918","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport pytest\nfrom flask import url_for\n\nimport test_auth as sut\n\n\ndef test_missing_token(config, app, client):\n config.debug = False\n response = client.get(url_for('api.users', uid='0'))\n assert response.status_code == 401\n\n\n@pytest.mark.parametrize('log_level_str,expected', [\n ('DEBUG', logging.DEBUG),\n ('INFO', logging.INFO),\n ('WARNING', logging.WARNING),\n ('ERROR', logging.ERROR),\n ('FATAL', logging.FATAL),\n (' DEBUG ', logging.DEBUG),\n])\ndef test_parse_loglevel_valid(log_level_str, expected):\n assert sut.parse_loglevel(log_level_str) == expected\n\n\n@pytest.mark.parametrize('log_level_str', [\n '',\n 'UNKNOWN',\n None\n])\ndef test_parse_loglevel_invalid(log_level_str):\n with pytest.raises(ValueError):\n sut.parse_loglevel(log_level_str)\n\n\ndef test_handle_exception(request_ctx):\n response = sut.handle_exception(Exception('testmessage'))\n assert response.status_code == 500\n\n\ndef test_handle_ratelimit_exceeded(request_ctx):\n response = sut.handle_rate_limit(Exception('testmessage'))\n assert response.status_code == sut.HTTP_STATUS_TOO_MANY_REQUESTS\n","repo_name":"lightuponcloud/time-waste","sub_path":"dropkitchen/tests/app_test.py","file_name":"app_test.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11131787911","text":"def solution(n,m,k,array):\n answer = 0\n array_sort = sorted(array,reverse = True)\n a = array_sort[0]\n b = array_sort[1]\n\n stack = 0\n for i in range(m):\n if stack == k:\n answer += b\n stack = -1\n else:\n answer += a\n stack += 1\n \n return answer\n# print(solution(4, 6, 2, [1, 2, 3, 4]))","repo_name":"jeongukkim/CodingTest","sub_path":"이것이코딩테스트다with파이썬/01_그리디/01_02_큰수의법칙/hk.py","file_name":"hk.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43070679858","text":"import Models\nimport email\n\nclass StudentDAO:\n \n #Read the students.csv file & append each student to the list \n def __init__(self):\n self.studentsDataList = []\n with open('students.csv') as f:\n for line in f.read().split('\\n'):\n email, s_name, s_password = line.split(',') \n s = Models.Student(email, s_name, s_password)\n self.studentsDataList.append(s)\n \n #This method returns the list of students \n def get_students(self):\n return self.studentsDataList\n \n #This method takes an email and a password from the user input. \n #Returns, whether or not a Student with the given information is found.\n def validate_user(self, email, pw):\n found = False\n for s in self.studentsDataList:\n if (s.email == email) & (s.s_password == pw):\n found = True\n break\n return found\n\n # This method takes a Student’s email as a String & searches the List of Students\n # for a Student with that email and returns a Student Object.\n def get_student_by_email(self, email):\n s = Models.Student\n for s in self.studentsDataList:\n if s.email == email:\n return s\n \nclass CourseDAO:\n def __init__(self):\n self.coursesDataList = []\n with open('courses.csv') as f:\n for line in f.read().split('\\n'):\n course_id, course_name, instructor = line.split(',') \n c = Models.Course(course_id, course_name, instructor)\n self.coursesDataList.append(c)\n \n #This method takes no parameters and returns every Course in the system. \n def get_courses(self):\n return self.coursesDataList\n \n def get_couse_name_And_Instructor_from_Course_ID(self, course_ID):\n for c in self.coursesDataList:\n if c.c_id == course_ID:\n return c.c_name, c.instructor\n \n \n \nclass AttendingDAO:\n def __init__(self, email):\n self.attendingDataList = []\n self.coursesStudentIsAttending = []\n with open('attending.csv') as f:\n for line in f.read().split('\\n'):\n course_id, email = line.split(',') \n a = Models.Attending(course_id, email)\n self.attendingDataList.append(a)\n self.get_student_courses(email)\n \n #This method returns a list of Attending Objects. \n def get_attending(self):\n return self.attendingDataList\n\n #This method takes a Student’s Email as a parameter and searches the Attending List \n #for all the courses that student is registered to.\n #This list of courses the Student is attending is returned.\n def get_student_courses(self, email):\n self.coursesStudentIsAttending = []\n for a in self.attendingDataList:\n if a.student_email == email:\n self.coursesStudentIsAttending.append(a.course_id)\n return self.coursesStudentIsAttending\n\n #This method takes a Student’s email, a Course ID. It checks if a Student with that Email \n #is currently attending a Course with that ID.\n #If the Student is not attending that Course and the Course ID is valid, \n #then add a new Attending object with the Student’s Email and Course ID to the List & save the list on a file and return True.\n #Otherwise, return False.\n def register_student_to_course(self, course_id, email):\n try: \n i = self.coursesStudentIsAttending.index(course_id)\n return False\n except ValueError as valerr:\n a = Models.Attending(course_id, email)\n self.attendingDataList.append(a)\n self.coursesStudentIsAttending.append(course_id)\n self.save_attending()\n return True\n \n \n \n #This method overwrites the original Attending.csv file with the new data.\n def save_attending(self):\n with open('attending.csv', mode = 'w', encoding = 'utf-8') as f:\n a = Models.Attending\n for a in self.attendingDataList:\n stringToWrite = a.course_id + ',' + a.student_email\n f.write(f'{stringToWrite}\\n')\n \n","repo_name":"deepalipandit/School_Management_System","sub_path":"DAO.py","file_name":"DAO.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22496648032","text":"from base import BaseObject\nimport pygame\nimport pandas as pd\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n\n\nclass CarObject(BaseObject):\n def __init__(self, data, stationary=False, *args, **kwargs):\n super(CarObject, self).__init__(*args, **kwargs)\n\n self.stationary = stationary\n self.create_object()\n\n if not stationary:\n x = data['timestep_time'].to_numpy()\n y = data[['vehicle_angle', 'vehicle_x', 'vehicle_y', 'ones']].to_numpy()\n # pc = self.simulator._point_cloud\n # # plt.gca().invert_yaxis()\n # plt.scatter(pc[:, 0], pc[:, 1], s=1)\n # plt.plot(y[:, 1], y[:, 2], 'r')\n # plt.xlim([300, 700])\n # plt.ylim([300, 800])\n # plt.show()\n # print(y, x)\n self._maxTime = x[-1]\n self._minTime = x[0]\n\n self._name = data['vehicle_id'].to_numpy()[0]\n\n self.interp = interp1d(x, y, kind='linear', axis=0)\n\n def create_object(self, rotation=0, x=0, y=0):\n self.width = 1.73 * self.simulator._scale\n self.height = 4.084 * self.simulator._scale\n\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill((255, 0, 0))\n self.image.fill((0, 255, 255), (0, 0, self.width, 2 * self.simulator._scale))\n self.image.set_colorkey((0, 0, 0))\n # pygame.draw.rect(self.image, pygame.Color(0, 255, 255), pygame.Rect(0, 0, self.width, self.height))\n self.image = pygame.transform.rotozoom(self.image, rotation, 1)\n # pygame.draw.rect(self.image, pygame.Color(0, 255, 255), pygame.Rect(0, 0, self.width, self.height))\n\n self.rect = self.image.get_rect(center=(0, 0))\n self.rect.center = [x, y]\n\n def update(self):\n if self.stationary:\n temp = self.simulator.transformMatrix @ self._pos\n self.create_object(self.angle, temp[0], temp[1])\n\n elif self.simulator._MapCompleted:\n\n\n frame = self.simulator.Frame\n if frame / 4 < self._minTime:\n pass\n elif frame / 4 > self._maxTime:\n self.kill()\n print(f\"{self._name} has died\")\n elif not self.simulator._paused:\n temp = self.interp(frame / 4)\n self.angle = np.round(temp[0])\n self._pos = temp[1:].flatten()\n temp = self.simulator.transformMatrix @ temp[1:]\n self.create_object(-self.angle, temp[0], temp[1])\n\n\n\n","repo_name":"JeisonPham/Tracking","sub_path":"Simulation/CarObject.py","file_name":"CarObject.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41462396018","text":"# ------- Advent of Code 2022 -------\n# -------- Day 9: Rope Bridge -------\n# https://adventofcode.com/2022/day/9\n\ndef get_moves(path_to_file):\n # loads the list of head's moves in the form of [direction, number]\n with open(path_to_file) as file:\n moves = [move.split(\" \") for move in file.read().strip().split(\"\\n\")]\n return moves\n\ndef head_move(head, direction):\n # this function returns position of head after one step of head in the given direction\n udrl = {\"U\":[0,1], \"D\":[0,-1], \"R\":[1,1], \"L\":[1,-1]} # letter to coord and its change\n head[udrl[direction][0]] += udrl[direction][1] \n return head\n\ndef knot_move(head, tail):\n # this function returns position of tail given the position of moved head\n for i in [0,1]:\n if head[i] == tail[i]:\n if head[1-i] == tail[1-i]+2: tail[1-i] += 1\n elif head[1-i] == tail[1-i]-2: tail[1-i] -= 1\n if abs(tail[0] - head[0]) + abs(tail[1] - head[1]) > 2:\n tail[0] += int((head[0] - tail[0]) / abs(head[0] - tail[0]))\n tail[1] += int((head[1] - tail[1]) / abs(head[1] - tail[1]))\n return tail\n\ndef trajectory(positions, moves):\n # returns number of visited points by tail\n visited = {(0,0)}\n for move in moves:\n for i in range(int(move[1])):\n positions[0] = head_move(positions[0], move[0])\n for j in range(1,len(positions)):\n positions[j] = knot_move(positions[j-1], positions[j])\n visited.add(tuple(positions[-1]))\n return len(visited)\n\nif __name__ == \"__main__\":\n moves = get_moves(\"input.txt\")\n print(trajectory([[0,0] for i in range(2)], moves)) # Part One\n print(trajectory([[0,0] for i in range(10)], moves)) # Part Two","repo_name":"filomath/Advent-of-Code","sub_path":"2022/day09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"28573898636","text":"from GlyphsApp import *\nfrom GlyphsApp.plugins import *\n\nimport sys, traceback\n\nfrom SkedgeModule import CodeEditor\n\n\n\nclass SkedgePlugin(GeneralPlugin):\n\t@objc.python_method\n\tdef settings(self):\n\t\tself.name = \"Skedge\"\n\t\n\t@objc.python_method\n\tdef start(self):\n\t\ttry:\n\t\t\ttargetMenu = WINDOW_MENU # EDIT_MENU # SCRIPT_MENU\n\t\t\tseparator = NSMenuItem.separatorItem()\n\t\t\tGlyphs.menu[targetMenu].append(separator)\n\t\t\ts = objc.selector(self.skedge_, signature=b'v@:')\n\t\t\tnewMenuItem = NSMenuItem(self.name, s)\n\t\t\tGlyphs.menu[targetMenu].append(newMenuItem)\n\t\texcept:\n\t\t\tNSLog(traceback.format_exc())\n\n\tdef skedge_(self, notification=None):\n\t\ttry:\n\t\t\tCodeEditor.new()\n\t\texcept:\n\t\t\tNSLog(traceback.format_exc())\n","repo_name":"Mark2Mark/Skedge","sub_path":"Skedge.glyphsPlugin/Contents/Resources/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"39383340924","text":"# 2.1最长回文子串\n# 给定一个字符串s,找到s中最长的回文子串.你可以假设s的最大长度为1000.\n# 思路:\n# 1.如果字符串长度是小于2的,则s就是回文.\n# 2.如果字符串头尾字符相等,但是中间部分不构成区间(最多只有 1 个元素);\n# 或者字符串头尾字符相等,且中间部分也是回文,则都返回True .此时回文长度为r-l+1\n# 3.比较 取大值 赋值给result\nclass Solution():\n def longestPalindrome(self, s: str):\n size = len(s)\n if size <= 1:\n return s\n dp = [[False for _ in range(size)] for _ in range(size)]#列表表达式\n longest_l = 1\n res = s[0]\n # 因为只有 1 个字符的情况在最开始做了判断\n # 左边界一定要比右边界小,因此右边界从 1 开始\n for r in range(1, size):\n for l in range(r):\n if s[l] == s[r] and (r - l <= 2 or dp[l + 1][r - 1]):\n dp[l][r] = True\n cur_len = r - l + 1\n if cur_len > longest_l:\n longest_l = cur_len\n res = s[l:r + 1] #把回文赋值给res\n return res\na = Solution()\nres = a.longestPalindrome(\"babad\")\nprint(res)\n\n#2.2回文子串\n#给定一个字符串,计算这个字符串中有多少个回文子串.\n#思路:(动态规划)\n# 1.用动规记录和求出字符串s的所有是回文串的子字符串,然后用计数器counter技术\n# 一样也是先从单个字符是回文的dp[i][i]= True开始记录\n# 2.再到两个字符dp[i][i+1] = s[i]==s[i+1]\n# 3.再到后面的多个字符的回文dp[i][j] = (dp[i+1][j-1] and s[i+1]==s[j-1])\n# 4.要注意遍历的方式\n# for j in range(1,n)\n# for i in range(j-1)\n# 先从j开始遍历,代表以j结束的子串,然后i再从0开始去循环到j\nclass Solution():\n def countSubstrings(self, s):\n n = len(s)\n dp = [[False] * n for _ in range(n)]\n counter = 0\n for i in range(n):\n dp[i][i] = True\n counter += 1\n for i in range(1, n):\n if s[i - 1] == s[i]:\n dp[i - 1][i] = True\n counter += 1\n for j in range(1, n):\n for i in range(j - 1):\n if s[i] == s[j] and dp[i + 1][j - 1]:\n dp[i][j] = True\n counter += 1\n return counter\na = Solution()\nc = a.countSubstrings(\"abababbb\")\nprint(c)\n\n#2.3分割回文串\n#给定一个字符串s,将s分割成一些子串,使每个子串都是回文串。返回符合要求的最少分割次数。\n#思路:(动态规划方法)\n#1.如果字符串长度小于等于1 则分割次数为0\n#2.定义一个初始的标记列表 (从-1 开始)(对应着索引位置字符还没有进入的时候的最大分割次数)\nclass Solution(object):\n def minCut(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) <= 1:\n return 0\n # 定义初始标记列表(对应索引位置字符还没有进入的时候的最大分割次数)\n record = [index for index in range(-1, len(s))]\n for r in range(1, len(s)+1):\n for l in range(r):\n if s[l:r] == s[l:r][::-1]: #判断s[l:r]是否回文(反转相同)\n record[r] = min(record[r], record[l]+1)\n return record[-1]\n\n\nif __name__ == \"__main__\":\n s = \"abaa\"\n min_split = Solution().minCut(s)\n print(min_split)\n\n#2.4字符串的排列\n#输入一个字符串 按字典 打印出该字符串中字符的所有排列。例如输入字符串 abc,则打印出由a,b,c所能排列出来的所有字符串\n# 字符串长度 不超过9(可能会有字符重复),只包括大小写字母\n# 思路:将字符串分为两部分:第一个字符和剩下的字符 先排第一个字符的所有可能 再对应着依次排后面的字符 递推\ndef Permutation(s):\n if len(s) <= 0:\n return []\n res = []\n perm(s, res, '') #初始化s_0 = '',s_0为首字符\n uniq = list(set(res)) #set()函数创建一个无需不重复的元素集,不需要判断元素是否重复\n return sorted(uniq) #sorted()函数对任意对象进行排序\n\ndef perm(s, res, s_0):\n if s == '':\n res.append(s_0)\n else:\n for i in range(len(s)):\n perm(s[:i]+s[i+1:], res, s_0+s[i]) ###???\n\ns = 'abc'\nprint(Permutation(s))\n\n#2.5Pandas基础\n#1.两个series的并集\n#2.两个series的非共有元素\n#3.如何获得series的最小值,25%分位数,中位数,75%分位数和最大值\nimport numpy as np\nimport pandas as pd\nsA = pd.Series([1,2,3,4,5,6])\nsB = pd.Series([5,6,7,8,9,10])\nprint(sA[~sA.isin(sB)])#元素在sA不在sB\nu =pd.Series(np.union1d(sA,sB)) #sA∪sB\ni =pd.Series(np.intersect1d(sA,sB)) #sA∩sB\nprint(u)\nprint(u[~u.isin(i)]) #非共有元素\nprint(np.percentile(sA,q=[0,25,50,75,100]))\n","repo_name":"Zhangbingbin11/xiyu-NLPTrainee","sub_path":"python/Day2.py","file_name":"Day2.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18943687874","text":"from tests.testcases import TestCaseUsingMockAPI\nfrom vortexasdk.endpoints.asset_tanks import AssetTanks\n\nfrom tests.mock_client import example_asset_tanks\nfrom vortexasdk.endpoints.asset_tanks_result import AssetTankResult\n\n\nclass TestAssetTanks(TestCaseUsingMockAPI):\n at = AssetTankResult(records=example_asset_tanks, reference={})\n\n def test_search(self):\n asset_tanks = AssetTanks().search().to_df()\n assert len(asset_tanks) > 0\n\n def test_storage_type_search_term(self):\n asset_tanks = AssetTanks().search(storage_type=[\"refinery\"]).to_df()\n assert len(asset_tanks) > 0\n\n def test_search_ids(self):\n asset_tanks = (\n AssetTanks()\n .search(\n ids=[\n \"6114b93026e61993797db33a46a5d2acbeacdbd63238a4271efaeafcee94b1d2\"\n ]\n )\n .to_list()\n )\n names = [a.name for a in asset_tanks]\n assert \"AAM001\" in names\n\n def test_to_list(self):\n names = [x.name for x in self.at.to_list()]\n\n assert names == [\"AAM001\", \"ASL011\"]\n\n def test_check_columns(self):\n asset_tanks = AssetTanks().search().to_df()\n assert list(asset_tanks.columns) == [\n \"id\",\n \"capacity_bbl\",\n \"crude_confidence\",\n \"location_id\",\n \"name\",\n \"storage_type\",\n \"lat\",\n \"lon\",\n ]\n","repo_name":"VorTECHsa/python-sdk","sub_path":"tests/endpoints/test_asset_tanks.py","file_name":"test_asset_tanks.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"} +{"seq_id":"40301069306","text":"import geopandas as gpd\nimport os\nimport dash_leaflet as dl\nimport numpy as np\nfrom dash import Dash, html\nfrom dash_extensions.javascript import Namespace\nimport dash_bootstrap_components as dbc\nimport plotly.express as px\nfrom itertools import cycle\nfrom typing import List, Dict\nfrom pyproj import Transformer\n\ndef get_rotation_angle(feature: Dict) -> float:\n transformer = Transformer.from_crs(4326, 25832) # Create a transformer from EPSG:4326 to EPSG:25832\n coordinates = feature['geometry']['coordinates'] # Extract coordinates from the feature geometry\n pc = [transformer.transform(coord[1], coord[0]) for coord in coordinates] # Transform coordinates to EPSG:25832\n\n # Calculate the side lengths of the rectangle\n side_lengths = [np.linalg.norm(np.array(pc[i]) - np.array(pc[i - 1])) for i in range(1, 5)]\n\n # Find the indices of the two longest sides (opposite sides in a rectangle)\n long_side_indices = sorted(range(len(side_lengths)), key=lambda i: side_lengths[i])[-2:]\n\n p1 = np.array(pc[long_side_indices[0]])\n p2 = np.array(pc[long_side_indices[0] + 1])\n v = p1 - p2\n \n angle_rad = np.arctan2(v[0], v[1]) # Calculate the angle in radians using arctan2\n angle_deg = np.degrees(angle_rad) + 90 # Convert the angle from radians to degrees\n\n # The next two conditions ensure the text is never upside down by limiting the rotation angle between -90 and 90 degrees\n if angle_deg < -90:\n angle_deg += 180\n elif angle_deg > 90:\n angle_deg -= 180\n\n return angle_deg # Return the rotation angle in degrees\n\ndef get_centroid(gdf: gpd.GeoDataFrame, feature: Dict, properties: List[str]) -> List[float]:\n query_string = \" and \".join([f\"{prop} == '{feature['properties'][prop]}'\" for prop in properties])\n filtered_gdf = gdf.query(query_string)\n \n if not filtered_gdf.empty:\n centroid = filtered_gdf.iloc[0].geometry.centroid\n return [centroid.y, centroid.x]\n else:\n return None\n\ncwd = os.getcwd()\n\ndef find(name, path):\n for root, dirs, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n\ncolors:str = cycle(px.colors.qualitative.Plotly)\n\n# Instantiate dash app\ndash_app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])\napp = dash_app.server\n\n# Read the ViewFrames GeoJSON file\nviewFramesFileName = 'ViewFrames.geojson'\n# Next line is an example ViewFrames.geojson feature\n# {\"type\":\"FeatureCollection\",\"name\":\"ViewFrames\",\"features\":[{\"type\":\"Feature\",\"properties\":{\"DwgNumber\":\"001\",\"PdfLink\":\"https://damgaardri.sharepoint.com/:b:/s/022-1226Egedal-KrogholmvejEtape1/EWqAQXO0zMxAjEQMbuu7oeYB_jAvgBvUlLHfRp8Om0dGlA\"},\"geometry\":{\"coordinates\":[[699583.9,6185399],[699535.3,6185198.5],[699466.9,6185215],[699515.4,6185415.5],[699583.9,6185399]],\"type\":\"LineString\"}}]}\ngdf = gpd.read_file(find(viewFramesFileName, cwd), crs='EPSG:25832')\ngdf = gdf.to_crs(epsg=4326)\ngeojson_data = gdf.__geo_interface__\n# add color property to each feature in the GeoJSON\nfor feature in geojson_data[\"features\"]:\n feature[\"properties\"][\"color\"] = next(colors)\n\n# Read the FJV GeoJSON file\n#fjvFileName = 'Fjernvarme_old.geojson'\nfjvFileName = 'Fjernvarme.geojson'\ngdf_fjv = gpd.read_file(find(fjvFileName, cwd), crs='EPSG:25832')\ngdf_fjv = gdf_fjv.to_crs(epsg=4326)\nfjv_data = gdf_fjv.__geo_interface__\n\n# Split the GeoJSON data into two separate GeoJSON objects based on the feature color property\nnon_black_features = [feature for feature in fjv_data[\"features\"] if feature[\"properties\"][\"color\"] != \"#000000\"]\nblack_features = [feature for feature in fjv_data[\"features\"] if feature[\"properties\"][\"color\"] == \"#000000\"]\n\nnon_black_data = {\"type\": \"FeatureCollection\", \"features\": non_black_features}\nblack_data = {\"type\": \"FeatureCollection\", \"features\": black_features}\n\n# Create the namespace and the layout\nns = Namespace(\"myNamespace\", \"mySubNamespace\")\n\ndash_app.layout = html.Div([\n dl.Map(center=[np.mean(gdf.geometry.centroid.y), np.mean(gdf.geometry.centroid.x)], \n zoom=17, maxZoom=25, children=[\n dl.TileLayer( # this styles base map\n url = 'https://tiles.stadiamaps.com/tiles/alidade_smooth_dark/{z}/{x}/{y}{r}.png',\n attribution = '© Stadia Maps ',\n maxNativeZoom=20\n ),\n # fjv geometry\n dl.GeoJSON(\n data=non_black_data, id=\"fjv_non_black\", options=dict(style=ns(\"style_fjv\"))\n ),\n # fjv geometry\n dl.GeoJSON(\n data=black_data, id=\"fjv_black\", options=dict(style=ns(\"style_fjv\"))\n ),\n # view frames rectangles\n dl.GeoJSON(\n data=geojson_data, id=\"viewframes\", options=dict(style=ns(\"style\"))\n ),\n \n # text marker\n *[\n dl.DivMarker(position=[get_centroid(gdf, feature, ['DwgNumber'])[0],\n get_centroid(gdf, feature, ['DwgNumber'])[1]],\n iconOptions={'className': 'custom-div-icon'},\n children=\n html.A(\n html.Div(\n f\"{feature['properties']['DwgNumber']}\",\n className='custom-text', # assets/custom.css\n style={'color': f\"{feature['properties']['color']}\",\n 'transform': f\"rotate({get_rotation_angle(feature)}deg)\",\n 'font-size': '14px', # Adjust the font size\n 'font-weight': 'bold', # Adjust the font weight\n }),\n href = feature['properties']['PdfLink'], target='_blank' # This is html.A\n ))\n for feature in geojson_data[\"features\"]]\n ], style={'width': '100%', 'height': '100vh', 'margin': \"auto\", \"display\": \"block\"}, id=\"map\")\n]\n)\n\n# @dash_app.callback(Output(\"click-output\", \"children\"), Input(\"viewframes\", \"click_feature\"))\n# def on_viewframe_click(feature):\n# if feature is not None:\n# return f\"You clicked on viewframe {feature['properties']['id']}\"\n\nif __name__ == '__main__':\n dash_app.run_server(debug=True)\n","repo_name":"shtirlitsDva/022-1226_Egedal_Krogholmvej_Etape_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9676156281","text":"import os\r\nimport random\r\nfrom hangman_stuff import *\r\n\r\n\r\ngame_state = None\r\ndifficulty = \"\"\r\n\r\n\r\ndef game_setup():\r\n global difficulty\r\n lives = ()\r\n type_of_game = \"\"\r\n while type_of_game == \"\":\r\n type_of_game = input(\"Do you want to enter a custom word or use a random word? [C/R] \")\r\n if type_of_game == \"C\":\r\n word_to_guess = input(\"What is the word? \")\r\n word_to_guess = word_to_guess.lower()\r\n elif type_of_game == \"R\":\r\n word_to_guess = wordbank[random.randint(0, len(wordbank) + 1)]\r\n else:\r\n type_of_game = \"\"\r\n while type(lives) != int:\r\n os.system(\"cls\")\r\n print(\"What difficulty do you want to play?\\nNormal: 6 lives\\nHard: 3 lives\\nInsane: 1 life\")\r\n difficulty = input(\"Enter here: \")\r\n if difficulty == \"Normal\":\r\n lives = 6\r\n elif difficulty == \"Hard\":\r\n lives = 3\r\n elif difficulty == \"Insane\":\r\n lives = 1\r\n settings = [lives, word_to_guess]\r\n os.system('cls')\r\n return settings\r\n\r\n\r\ndef encode_word(word):\r\n encoded_word = \"\"\r\n for i in word:\r\n if i == \" \":\r\n encoded_word += i\r\n else:\r\n encoded_word += \"_\"\r\n return encoded_word\r\n\r\n\r\ndef hangmandrawing(difficulty, lives):\r\n if difficulty == \"Normal\":\r\n return (HANGMANPICS[-(lives + 1)])\r\n if difficulty == \"Hard\":\r\n return(HANGMANPICS[-(lives * 2 + 1)])\r\n if difficulty == \"Insane\":\r\n return(HANGMANPICS[-(lives * 6 + 1)])\r\n\r\n\r\ndef hangman(word, guessed_letters, lives, key, missed_letters):\r\n global game_state\r\n new_parameters = [word, guessed_letters, lives, key, missed_letters]\r\n while game_state == None:\r\n word = new_parameters[0]\r\n guessed_letters = new_parameters[1]\r\n lives = new_parameters[2]\r\n key = new_parameters[3]\r\n new_parameters = hangmanguess(word, guessed_letters, lives, key, missed_letters)\r\n\r\n\r\ndef hangmanguess(word, guessed_letters, lives, key, missed_letters):\r\n global difficulty\r\n encoded_word = \"\"\r\n lives = lives\r\n if (word != encoded_word) and lives != 0:\r\n guess = input(\"Guess a letter or the whole word: \").lower()\r\n if len(guess) == 1:\r\n if guess in word:\r\n guessed_letters += [guess]\r\n encoded_word = \"\"\r\n for i in word:\r\n if i == \" \" or i in guessed_letters:\r\n encoded_word += i\r\n else:\r\n encoded_word += \"_\"\r\n key = encoded_word\r\n else:\r\n if guess not in missed_letters:\r\n missed_letters += [guess]\r\n lives -= 1\r\n encoded_word = key\r\n elif len(guess) > 1:\r\n if guess == word:\r\n encoded_word = word\r\n key = word\r\n elif guess != word:\r\n lives = 0\r\n missed_letters += [guess]\r\n os.system('cls')\r\n print(hangmandrawing(difficulty, lives))\r\n print(\"Word:\", key, \"\\nMissed Letters:\", missed_letters, \"\\nLives:\", lives)\r\n win_or_lose(word, encoded_word, lives)\r\n return [word, guessed_letters, lives, encoded_word, missed_letters]\r\n\r\n\r\ndef win_or_lose(word, guessed_word, lives):\r\n global game_state\r\n if (word == guessed_word) and lives != 0:\r\n print(\"You guessed the word correctly!\")\r\n game_state = \"Win\"\r\n input()\r\n elif lives == 0:\r\n print(\"You lost!\\nThe word was\", word)\r\n game_state = \"Lose\"\r\n input()\r\n\r\n\r\ndef main():\r\n global game_state, difficulty\r\n settings = game_setup()\r\n guessed_letters = []\r\n missed_letters = []\r\n lives = settings[0]\r\n word = settings[1]\r\n key = encode_word(word)\r\n print(hangmandrawing(difficulty, lives))\r\n print(key)\r\n hangman(word, guessed_letters, lives, key, missed_letters)\r\n\r\n\r\nmain()\r\n\r\n\r\n","repo_name":"3btesham/My-Beginner-Projects","sub_path":"hangman/HANGMAN.py","file_name":"HANGMAN.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70864173815","text":"# Import necessary modules from kivy library\nfrom kivy.app import App\nfrom kivy.uix.image import Image\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.screenmanager import Screen, ScreenManager, NoTransition\nfrom kivy.clock import Clock\nfrom kivy.metrics import dp\n\n\n# Importing configuration and widget modules\nimport user.user_config as user_config\nimport admin.app_config as admin_config\nfrom admin.admin_widgets import *\nfrom user.user_widgets import *\nfrom exercise_details import ExerciseDetails\n\n# Countdown Screen class\nclass CountdownScreen(Screen):\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n # Singleton implementation to have only one instance of CountdownScreen\n if cls._instance is None:\n cls._instance = super(CountdownScreen, cls).__new__(cls, **kwargs)\n return cls._instance\n\n def __init__(self, sm: ScreenManager, app: App, **kwargs):\n super().__init__(**kwargs)\n self._sm = sm # ScreenManager reference\n self._app = app # App reference\n\n # ===============================\n # Main layout\n # ===============================\n layout = FloatLayout(size_hint=[1.0, 1.0], pos_hint={'x': 0, 'y': 0})\n bg = Image(source=admin_config.app['background'], fit_mode=\"fill\")\n layout.add_widget(bg)\n self._layout = layout\n\n # ===============================\n # Label\n # ===============================\n label = Label(\n text=\"Are you ready? Your exercise starts in \",\n size_hint=[0.4, 0.3],\n pos_hint={'center_x': 0.5, 'y': 0.6},\n font_name=admin_config.font_name[3],\n font_size=admin_config.font_size[5],\n )\n layout.add_widget(label)\n self.add_widget(layout)\n\n def load_counter(self, initial_time: int = user_config.countdown_time):\n # Load and start the countdown timer\n layout = self._layout\n self.counter = Label(\n text=str(initial_time),\n size_hint=[0.4, 0.4],\n pos_hint={'center_x': 0.5, 'y': 0.25},\n font_name=admin_config.font_name[2],\n font_size=admin_config.font_size[6],\n )\n layout.add_widget(self.counter)\n\n def on_countdown_update(*args):\n try:\n value = int(self.counter.text)\n except BaseException as err:\n print(f\"An error occurred. \\n{err}\")\n self._timer.cancel()\n\n value -= 1\n self.counter.text = str(value)\n if value < 1:\n # Change screen at this point.\n self._sm.transition = NoTransition()\n self._sm.current = 'exercise_start'\n\n self._timer = Clock.schedule_interval(\n on_countdown_update,\n 1.0\n )\n\n def unload_counter(self):\n # Unload and stop the countdown timer\n while True:\n if not hasattr(self, 'counter'):\n continue\n\n self._layout.remove_widget(self.counter)\n del self.counter\n\n if not hasattr(self, '_timer'):\n continue\n\n self._timer.cancel()\n self._timer = None\n del self._timer\n break\n\n def on_pre_leave(self, *args):\n # Event handler called before leaving the screen\n self.unload_counter()\n\n def on_enter(self, *args):\n # Event handler called when entering the screen\n self.load_counter()","repo_name":"Rrrjieee/Final_repo_Fitquest","sub_path":"app/user/user_exercise_pre_start.py","file_name":"user_exercise_pre_start.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69995963577","text":"def solve(kor, jpn, k_goals, j_goals):\n\t# kor[1] ... kor[n], jpn[1] ... jpn[n]\n\tn = len(kor) - 1\n\n\tdp = [[0 for _ in range(n+1)] for _ in range(n+1)]\n\t# dp[i][j] = kor[1]...kor[i]와 jpn[1]...jpn[j]에 대한 최대 가능 골 수로 정의\n\t# dp[n][n]이 우리가 원하는 답\n\n\tfor i in range(1, n+1):\n\t\tfor j in range(1, n+1):\n\t\t\t# dp[i][j] = 4가지 경우(first, second, third, fourth)가 가능\n\t\t\t# 그 중에서 최대 골 수를 계산\n\t\t\t# kor[i]와 jpn[j]가 짝을 맺는 경우와 둘 다 고려에서 제외되는 경우\n\t\t\tfirst_second = dp[i-1][j-1]\n\t\t\tif kor[i]!=jpn[j]:\n\t\t\t\tif (kor[i]=='W' and k_goals[i] > j_goals[j]) or (jpn[j]=='W' and k_goals[i] < j_goals[j]):\n\t\t\t\t\tfirst_second += k_goals[i] + j_goals[j]\n\t\t\t# kor[i]와 jpn[j] 중 하나만 고려에서 제외되는 경우\n\t\t\tthird = dp[i-1][j]\n\t\t\tfouth = dp[i][j-1]\n\t\t\t# first, second, third, fouth를 계산해보자\n\t\t\tdp[i][j] = max(first_second, third, fouth)\n\n\treturn dp[n][n]\n\n# kor = 한국의 W와 L의 문자열, jpn = 일본의 W와 L의 문자열 (주의: kor[1], jpn[1]부터 시작)\nkor = ' '+input()\nk_goals = [0] + [int(x) for x in input().split()]\njpn = ' '+input()\nj_goals = [0] + [int(x) for x in input().split()]\nprint(solve(kor, jpn, k_goals, j_goals))","repo_name":"bugpigg/algorithm_python","sub_path":"fromSchool/숙명의 라이벌/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10116224226","text":"#A module that interacts with the Open MapQuest APIs.\n#This is where you should do things like building URLs,\n#making HTTP requests, and parsing JSON responses.\n\nimport json\nimport urllib.request\nimport urllib.parse\n\nMAPQUEST_API_KEY='wLAbzAL9QfgHH9B9XpKXLAqdjCbqlvMB'\n\nBASE_MAPQUEST_DIRECTION_URL='http://open.mapquestapi.com/directions/v2/route?'\nBASE_MAPQUEST_ELEVATION_URL='http://open.mapquestapi.com/elevation/v1/profile?'\n\ndef build_direction_url(address_query:[])->str:\n query_parameter=[\n ('key',MAPQUEST_API_KEY),\n ('from',address_query[0])\n ]\n \n for address in address_query:\n if address!=address_query[0]:\n query_parameter.append(('to',address))\n\n return BASE_MAPQUEST_DIRECTION_URL+urllib.parse.urlencode(query_parameter)\n \ndef build_elevation_urls(lat_lng_collection:[[]])->[]:\n url_list=[]\n lat_lng_data=''\n for lat_lng in lat_lng_collection:\n for item in lat_lng:\n lat_lng_data+=str(item)\n lat_lng_data+=','\n url=BASE_MAPQUEST_ELEVATION_URL+'key='+MAPQUEST_API_KEY+'&latLngCollection='+lat_lng_data[:-1]\n url_list.append(url)\n lat_lng_data=''\n\n return url_list\n\ndef get_json_result(url:str)->dict:\n response=None\n try:\n response = urllib.request.urlopen(url)\n json_text=response.read().decode(encoding='utf-8')\n return json.loads(json_text)\n finally:\n if response!=None:\n response.close()\n \n","repo_name":"heqiao1017/Programming-with-Python-Software-Libraries","sub_path":"Direction Info with MapQuest API/interact_API.py","file_name":"interact_API.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40790576670","text":"import factory\n\nfrom .professor import ProfessorFactory\nfrom .section import SectionFactory\nfrom .student import StudentFactory\nfrom scuevals_api import models\nfrom scuevals_api.resources.evaluations import EvaluationSchemaV1\n\neval_v1_data = {\n 'attitude': 1,\n 'availability': 1,\n 'clarity': 1,\n 'grading_speed': 1,\n 'resourcefulness': 1,\n 'easiness': 1,\n 'workload': 1,\n 'recommended': 1,\n 'comment': 'Love the lectures'\n}\n\nEvaluationSchemaV1().load(data=eval_v1_data)\n\n\nclass EvaluationFactory(factory.alchemy.SQLAlchemyModelFactory):\n class Meta:\n model = models.Evaluation\n sqlalchemy_session = models.db.session\n sqlalchemy_session_persistence = 'flush'\n\n version = 1\n data = eval_v1_data\n display_grad_year = True\n display_majors = True\n\n student = factory.SubFactory(StudentFactory)\n professor = factory.SubFactory(ProfessorFactory)\n section = factory.SubFactory(SectionFactory)\n","repo_name":"SCUEvals/scuevals-api","sub_path":"tests/fixtures/factories/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"39424882505","text":"import turtle\n\nsnow = turtle.Turtle()\n\nwindow = turtle.Screen()\n\nf = open(\"./istruzioni.txt\", \"w\")\n\nwindow.bgcolor(\"blue\")\nsnow.color(\"white\")\n\nfor _ in range(10):\n snow.down\n snow.forward(100) #Ramo lungo\n f.write(\"forward:\" + \"100\" + \"\\n\")\n snow.left(10)\n f.write(\"left:\" + \"10\" + \"\\n\")\n snow.forward(40) #Ramo corto\n f.write(\"forward:\" + \"40\" + \"\\n\")\n snow.up\n snow.backward(40) #torna indietro\n f.write(\"backward:\" + \"40\" + \"\\n\")\n\n snow.right(20) #Ramo corto\n f.write(\"right:\" + \"20\" + \"\\n\")\n snow.down\n snow.forward(40)\n f.write(\"forward:\" + \"40\" + \"\\n\")\n snow.up\n snow.backward(40)\n f.write(\"backward:\" + \"40\" + \"\\n\")\n\n snow.right(20) #Ramo corto\n f.write(\"right:\" + \"20\" + \"\\n\")\n snow.down\n snow.forward(40)\n f.write(\"forward:\" + \"40\" + \"\\n\")\n snow.up\n snow.backward(40)\n f.write(\"backward:\" + \"40\" + \"\\n\")\n\n snow.left(10)\n f.write(\"left: \" + \"10\" + \"\\n\")\n snow.backward(100)\n f.write(\"backward:\" + \"100\" + \"\\n\")\n snow.right(20)\n f.write(\"right:\" + \"20\" + \"\\n\")\n\nprint(snow)\n\nf.close\nwindow.exitonclick()","repo_name":"AnthonyRuggero/Sistemi_quarta","sub_path":"Python/esercizio31_fiocco/esercizio_31_FioccoDiNeve.py","file_name":"esercizio_31_FioccoDiNeve.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"36622677141","text":"import sys\nimport time\n\nfrom HW3_Student_Solution import HW3_Student_Solution\n\n\n# These should not have to be customized from hw to hw.\nstudent_return = []\nstudent_time = 0\n\ndef read_file(in_file):\n f = open(in_file, 'r')\n n = int(f.readline())\n vec = []\n\n for x in range(0, n):\n vec.append(int(f.readline()))\n\n f.close()\n return vec\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Please provide the input filepath as the argument\")\n sys.exit\n\n\n input_file = sys.argv[1]\n\n\n print(\"=======================================================================================================\")\n print(\"Reading Input: \" + input_file)\n\n # Read the file\n in_vec = read_file(input_file)\n\n\n start_time = time.clock()\n\n # Run student code\n student_class = HW3_Student_Solution(in_vec)\n student_return = student_class.output_vector()\n\n end_time = time.clock()\n student_time = end_time-start_time\n\n print(\"Your solution:\")\n print(\"=======================================================================================================\")\n print(\"Size: \" + str(len(student_return)))\n print(\"Final vector: \" + str(student_return))\n print(\"=======================================================================================================\")\n\n print(\"Total time taken= \"+str(student_time)+\" secs\")\n\n\n","repo_name":"achrysan/Time-Complexity","sub_path":"Matrix Vector Algorithm/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3711678543","text":"import matplotlib\n\nimport wx\nimport data_structure\nfrom pyTimeSeries import timeseries, data_retrieval\n\n\nmatplotlib.use('WXAgg')\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg\nimport logging\nimport datetime\n\n################################################################################\n\nclass ApplicationMenu(wx.MenuBar):\n \"\"\"\n This is the menu bar for file, edit, help, ... menu items\n \"\"\"\n\n def __init__(self, parent):\n\n logging.info('Creating menu bar')\n\n # Persist parent state for interaction with the listeners\n self.parent = parent\n\n # Add each menu heading in turn\n # (just a File menu for now)\n wx.MenuBar.__init__(self)\n\n self.Append(self.create_filemenu(self.parent), \"&File\")\n\n # Add the MenuBar to the Frame content.\n parent.Show(True)\n\n def create_filemenu(self, parent):\n # Create a file menu\n filemenu = wx.Menu()\n\n # wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.\n menu_about = filemenu.Append(wx.ID_ABOUT, \"&About\", \" Demo\")\n filemenu.AppendSeparator()\n parent.Bind(wx.EVT_MENU, self.on_menu_about, menu_about)\n\n menu_exit = filemenu.Append(wx.ID_EXIT, \"E&xit\", \" Exit the program\")\n parent.Bind(wx.EVT_MENU, self.on_menu_exit, menu_exit)\n\n return filemenu\n\n #\n # Listeners\n #\n\n def on_menu_about(self, _):\n # A message dialog box with an OK button. \n # wx.OK is a standard ID in wxWidgets.\n popup(self.parent,\n 'A simple charting test programme',\n self.parent.config.title,\n wx.OK)\n\n def on_menu_exit(self, _):\n # Close the application\n self.parent.Close(True)\n\n################################################################################\n\nclass InfoPanel():\n\n def __init__(self, parent, container, _):\n logging.info('Creating the info panel')\n\n self.parent = parent\n\n self.header = wx.StaticBox(container, -1, 'Series statistics:')\n\n self.current_label = wx.StaticText(container, -1, 'Current:')\n self.min_val_label = wx.StaticText(container, -1, 'Min:')\n self.max_val_label = wx.StaticText(container, -1, 'Max:')\n self.ave_label = wx.StaticText(container, -1, 'Average:')\n self.sd_label = wx.StaticText(container, -1, 'Std dev:')\n self.zscore_label = wx.StaticText(container, -1, 'Z-Score:')\n\n self.current = wx.StaticText(container)\n self.min_val = wx.StaticText(container)\n self.max_val = wx.StaticText(container)\n self.ave = wx.StaticText(container)\n self.sd = wx.StaticText(container)\n self.zscore = wx.StaticText(container)\n\n def layout(self):\n sizer = wx.StaticBoxSizer(self.header, wx.VERTICAL)\n\n flags = wx.ALIGN_LEFT\n\n sizer.AddSpacer(self.parent.GetFont().GetPointSize())\n stats_grid = wx.FlexGridSizer(cols=2, hgap=5)\n stats_grid.Add(self.current_label, flag=flags)\n stats_grid.Add(self.current, flag=wx.EXPAND)\n stats_grid.Add(self.min_val_label, flag=flags)\n stats_grid.Add(self.min_val, flag=wx.EXPAND)\n stats_grid.Add(self.max_val_label, flag=flags)\n stats_grid.Add(self.max_val, flag=wx.EXPAND)\n stats_grid.Add(self.ave_label, flag=flags)\n stats_grid.Add(self.ave, flag=wx.EXPAND)\n stats_grid.Add(self.sd_label, flag=flags)\n stats_grid.Add(self.sd, flag=wx.EXPAND)\n stats_grid.Add(self.zscore_label, flag=flags)\n stats_grid.Add(self.zscore, flag=wx.EXPAND)\n\n stats_grid.AddGrowableCol(1, 1)\n sizer.Add(stats_grid, flag=flags)\n\n return sizer\n\n def update(self, data, _):\n ts = sorted(zip(data['dates'], data['values']))\n self.current.SetLabel('{0}'.format(ts[-1][1]))\n\n min = timeseries.min(ts)[0]\n self.min_val.SetLabel('{0} ({1})'.format(min[1], min[0].strftime('%Y-%m-%d')))\n max = timeseries.max(ts)[0]\n self.max_val.SetLabel('{0} ({1})'.format(max[1], max[0].strftime('%Y-%m-%d')))\n\n self.ave.SetLabel('{0:.2f}'.format(timeseries.mean(ts)[0][1]))\n self.sd.SetLabel('{0:.2f}'.format(timeseries.sd(ts)[0][1]))\n self.zscore.SetLabel('{0:.2f}'.format(timeseries.zscore(ts)[0][1]))\n\n################################################################################\n\nclass ControlPanel():\n\n def __init__(self, parent, container, config):\n logging.info('Creating the control panel')\n\n # wx.Panel.__init__(self, parent)\n self.parent = parent\n\n # Text box that allows the user to select the series to view\n self.symbol_label = wx.StaticText(container, -1, 'Symbol')\n\n self.symbol_textbox = wx.TextCtrl(\n container, style=wx.TE_PROCESS_ENTER)\n\n self.parent.Bind(\n wx.EVT_TEXT_ENTER,\n self.on_select_symbol,\n self.symbol_textbox)\n self.symbol_textbox.SetValue(str(config.symbol))\n\n # Create a button that allows the user to create a new\n # data set\n self.redraw_button = wx.Button(container, -1, 'Redraw')\n self.parent.Bind(\n wx.EVT_BUTTON,\n self.on_redraw_button,\n self.redraw_button)\n\n def layout(self):\n \"\"\"\n Return a sizer that lays out the control panel elements\n \"\"\"\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL\n\n sizer.Add(self.symbol_label, flag=flags)\n sizer.Add(self.symbol_textbox, 0, border=3, flag=flags)\n sizer.Add(self.redraw_button, 0, border=3, flag=wx.ALIGN_RIGHT)\n\n return sizer\n\n #\n # Listeners\n #\n\n def on_select_symbol(self, _):\n \"\"\"\n Called when the listener fires on the set sample size text box \n \"\"\"\n self.parent.config.symbol = self.symbol_textbox.GetValue()\n logging.debug('Redrawing chart for symbol ' +\n self.parent.config.symbol)\n\n data = get_data(self.parent.config.date, self.parent.config.symbol)\n self.parent.chart.draw(data, self.parent.config)\n self.parent.chart_info.update(data, self.parent.config)\n\n def on_redraw_button(self, event):\n \"\"\"\n Called when the listener fires on the redraw button\n \"\"\"\n logging.debug('User clicked the redraw button')\n self.on_select_symbol(event)\n\n################################################################################\n\ndef popup(parent, message, caption, style):\n \"\"\"\n Wrap the dialogue box popup into a single function call\n \"\"\"\n dlg = wx.MessageDialog(parent, message, caption, style)\n dlg.ShowModal()\n dlg.Destroy()\n\n################################################################################\n\ndef get_data(date, symbol):\n args = dict(\n symbol=symbol,\n start=datetime.datetime(2003, 11, 11),\n end=datetime.datetime(2013, 11, 11))\n\n loader = 'download_yahoo_timeseries'\n\n ts = data_retrieval.get_time_series(loader, args)\n dates, values = zip(*ts[0][data_structure.TIMESERIES])\n return dict(dates=dates, values=[float(v) for v in values])\n\n################################################################################\n\nclass ChartCanvas():\n \"\"\"\n Container for the matplotlib (or any other) chart object\n \"\"\"\n def __init__(self, container, config):\n # Create the matplotlib figure and attach it to a canvas\n self.figure = Figure(\n (config.chart_width, config.chart_height),\n dpi=config.chart_dpi)\n self.canvas = FigureCanvasWxAgg(container, -1, self.figure)\n self.chart = self.figure.add_subplot(111)\n\n def layout(self):\n return self.canvas\n\n def draw(self, data, _):\n \"\"\"\n Redraw figure\n \"\"\"\n logging.debug('Redrawing time series')\n\n self.chart.clear()\n # self.axes.grid(self.cb_grid.IsChecked())\n\n self.chart.plot(data['dates'], data['values'])\n self.figure.autofmt_xdate()\n self.canvas.draw()\n\n################################################################################\n\nclass ApplicationConfig():\n \"\"\"\n Constant config that's shared between the application elements\n \"\"\"\n\n def __init__(self):\n pass\n\n title = 'python-tools'\n\n chart_dpi = 100\n chart_height = 4.0\n chart_width = 5.0\n\n symbol = 'IBM'\n date = datetime.datetime(2011, 11, 11)\n\n################################################################################\n\nclass ApplicationFrame(wx.Frame):\n \"\"\"\n This is the primary application interface.\n It contains panels the user can interact with (menu bar, control\n panel, charting window, ...)\n \"\"\"\n\n def __init__(self):\n self.config = ApplicationConfig()\n\n wx.Frame.__init__(self, None, -1, self.config.title)\n\n # Add the menu bar\n menu = ApplicationMenu(self)\n self.SetMenuBar(menu)\n\n # Create the panel that contains the chart\n self.panel = wx.Panel(self)\n # self.chart_panel = wx.Panel(self)\n self.chart = ChartCanvas(self.panel, self.config)\n self.chart_info = InfoPanel(self, self.panel, self.config)\n chart_sizer = wx.BoxSizer(wx.HORIZONTAL)\n chart_sizer.Add(self.chart.layout(), 3, wx.EXPAND | wx.ALL)\n chart_sizer.Add(self.chart_info.layout(), 1, wx.EXPAND | wx.ALL, border=5)\n # self.chart_panel.SetSizerAndFit(chart_sizer)\n\n # Create a panel for the user input\n self.control_panel = ControlPanel(self, self.panel, self.config)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.control_panel.layout(), 0, wx.ALIGN_CENTER | wx.TOP)\n sizer.Add(chart_sizer, 1, wx.EXPAND | wx.ALL)\n self.panel.SetSizerAndFit(sizer)\n\n # self.CreateStatusBar()\n\n # Draw an initial plot on startup\n data = get_data(self.config.date, self.config.symbol)\n self.chart.draw(data, self.config)\n self.chart_info.update(data, self.config)\n\n################################################################################\n\ndef python_tools():\n logging.info('Starting python-tools')\n\n app = wx.PySimpleApp()\n app.frame = ApplicationFrame()\n app.frame.Fit()\n app.frame.Center()\n app.frame.Show()\n app.MainLoop()\n\n################################################################################\n\nif __name__ == '__main__':\n logging.basicConfig(level='DEBUG')\n python_tools()\n\n################################################################################\n","repo_name":"lhoghu/python-tools","sub_path":"pyTimeSeries/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":10644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"75090505976","text":"__version__ = '0.0.3'\n\n# registers\n_LM75A_TEMP = const(0x00) # Temperature register (r) 16-bit\n_LM75A_CONF = const(0x01) # Configuration register (r/w) 8-bit, default: enabled, comparator, os active low, single fault\n_LM75A_THYST = const(0x02) # Hysteresis register (r/w) 16-bit, default: 75°C\n_LM75A_TOS = const(0x03) # Overtemperature shutdown register (r/w) 16-bit, default: 80°C\n\nclass LM75A:\n\tdef __init__(self, i2c, address=0x48):\n\t\tself._i2c = i2c\n\t\tself._address = address # 0x48-0x4F\n\t\tself._config = 0x00\n\t\tself._buf1 = bytearray(1)\n\t\tself._buf2 = bytearray(2)\n\t\tself.check()\n\t\tself.config()\n\n\tdef check(self):\n\t\tif self._i2c.scan().count(self._address) == 0:\n\t\t\traise OSError('LM75A not found at I2C address {:#x}'.format(self._address))\n\n\tdef config(self, shutdown=None, os_mode=None, os_polarity=None, os_fault_queue=None):\n\t\tif shutdown is not None:\n\t\t\tself._config = (self._config & ~1) | (shutdown & 1)\n\n\t\tif os_mode is not None:\n\t\t\tself._config = (self._config & ~2) | ((os_mode << 1) & 2)\n\n\t\tif os_polarity is not None:\n\t\t\tself._config = (self._config & ~4) | ((os_polarity << 2) & 4)\n\n\t\tif os_fault_queue is not None:\n\t\t\tself._config = (self._config & ~24) | ((os_fault_queue << 3) & 24)\n\n\t\tself._buf1[0] = self._config\n\t\tself._i2c.writeto_mem(self._address, _LM75A_CONF, self._buf1)\n\n\tdef temp(self):\n\t\tself._i2c.readfrom_mem_into(self._address, _LM75A_TEMP, self._buf2)\n\t\tval = (self._buf2[0] << 3) | (self._buf2[1] >> 5)\n\t\treturn self._twos_comp(val, 11) * 0.125\n\n\tdef tos(self, temp):\n\t\tself._temp_to_9bit_reg(temp)\n\t\tself._i2c.writeto_mem(self._address, _LM75A_TOS, self._buf2)\n\n\tdef thyst(self, temp):\n\t\tself._temp_to_9bit_reg(temp)\n\t\tself._i2c.writeto_mem(self._address, _LM75A_THYST, self._buf2)\n\n\tdef _twos_comp(self, val, bits):\n\t\tmask = 2 ** (bits - 1)\n\t\treturn -(val & mask) + (val & ~mask)\n\n\tdef _rev_twos_comp(self, val, bits):\n\t\treturn val & ((1 << bits) -1)\n\n\tdef _temp_to_9bit_reg(self, temp):\n\t\tval = self._rev_twos_comp(int(temp / 0.5), 9)\n\t\tself._buf2[0] = val >> 1\n\t\tself._buf2[1] = val << 7\n","repo_name":"ywz978020607/History_mpy","sub_path":"Micropython_esp32_8266/mpy-温湿度 dht ds18x20等/lm75a.py","file_name":"lm75a.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"22"} +{"seq_id":"73077595256","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n\n dummyhead = dummy = ListNode(0)\n curr = head\n stack = []\n while curr:\n stack.append(curr)\n curr = curr.next\n if len(stack) ==k:\n while stack:\n dummy.next = stack.pop()\n dummy = dummy.next\n \n for node in stack:\n dummy.next = node\n dummy = dummy.next\n\n dummy.next = None\n return dummyhead.next\n \n \n # dummyhead =dummy = ListNode(0)\n # prev = None\n # curr = head\n # nxt = curr.next\n # count =0\n # while nxt:\n # curr.next = prev\n # prev = curr\n # curr = nxt\n # nxt = nxt.next\n # count+=1\n # if count == k:\n # star = new=prev\n # while new:\n # dummy.next = new\n # dummy = dummy.next\n # new = new.next\n # count = 0\n # prev = None\n # curr.next = prev\n # while curr:\n # dummy.next = curr\n # curr = curr.next\n # dummy = dummy.next\n # return dummyhead.next","repo_name":"akashraj98/Coding-Question-Practice","sub_path":"0025-reverse-nodes-in-k-group/0025-reverse-nodes-in-k-group.py","file_name":"0025-reverse-nodes-in-k-group.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"6790031150","text":"# -*- coding: utf-8 -*-\nfrom asyncio.log import logger\nfrom odoo import models, fields, api, _\nfrom functools import partial\nfrom odoo.tools.misc import formatLang\n\n\nclass SaleOrderInherit(models.Model):\n _inherit = 'sale.order'\n\n def write(self, vals):\n res = super(SaleOrderInherit, self).write(vals)\n for r in self:\n if 'carrier_id' not in vals:\n r.changing_order_line()\n amount_tax_usd, amount_untaxed_usd = 0, 0\n for so_line in self.order_line:\n so_line._onchange_price()\n amount_aux = (so_line.price_unit_usd * so_line.product_uom_qty)\n amount_untaxed_usd += (so_line.price_unit_usd * so_line.product_uom_qty)\n if so_line.tax_id:\n amount_tax_usd += ((so_line.tax_id.amount / 100) * amount_aux)\n\n query_update = f\"\"\"UPDATE sale_order SET amount_tax_usd = {amount_tax_usd}, amount_untaxed_usd = {amount_untaxed_usd}, amount_total_usd = {amount_tax_usd + amount_untaxed_usd} WHERE id = {r.id}\"\"\"\n self.env.cr.execute(query_update)\n return res\n\n @api.onchange('order_line')\n def changing_order_line(self):\n if len(self.order_line) > 0:\n amount_real = sum([line.price_total for line in self.order_line if not line.is_delivery])\n if amount_real > 50:\n delivery_line = self.env['sale.order.line'].search([('order_id', '=', self.id), ('is_delivery', '=', True)])\n if not delivery_line:\n return\n delivery_line.price_total = 0.0\n else:\n id_2_search = False\n if len(self.ids) > 0:\n id_2_search = self.ids[0]\n if id_2_search:\n delivery_line_exists = self.env['sale.order.line'].search([('is_delivery', '=', True), ('order_id', '=', id_2_search)])\n if not delivery_line_exists:\n delivery_carrier = self.env['delivery.carrier'].search([('name', '=ilike', 'Delivery')], order='write_date desc', limit=1)\n if not delivery_carrier:\n delivery_carrier = delivery_carrier.create({\n 'name': 'Delivery',\n 'product_id': self.env['product.product'].search([('name', '=ilike', 'Delivery'), ('type', '=', 'service')]).id,\n 'fixed_price': 3\n })\n self._create_delivery_line(delivery_carrier, delivery_carrier.fixed_price)\n\n def _create_delivery_line(self, carrier, price_unit):\n SaleOrderLine = self.env['sale.order.line']\n if self.partner_id:\n # set delivery detail in the customer language\n carrier = carrier.with_context(lang=self.partner_id.lang)\n\n # Apply fiscal position\n taxes = carrier.product_id.taxes_id.filtered(lambda t: t.company_id.id == self.company_id.id)\n taxes_ids = taxes.ids\n if self.partner_id and self.fiscal_position_id:\n taxes_ids = self.fiscal_position_id.map_tax(taxes, carrier.product_id, self.partner_id).ids\n\n # Create the sales order line\n carrier_with_partner_lang = carrier.with_context(lang=self.partner_id.lang)\n if carrier_with_partner_lang.product_id.description_sale:\n so_description = '%s: %s' % (carrier_with_partner_lang.name, carrier_with_partner_lang.product_id.description_sale)\n else:\n so_description = carrier_with_partner_lang.name\n values = {\n 'order_id': self.ids[0],\n 'name': so_description,\n 'product_uom_qty': 1,\n 'product_uom': carrier.product_id.uom_id.id,\n 'product_id': carrier.product_id.id,\n 'tax_id': [(6, 0, taxes_ids)],\n 'is_delivery': True,\n }\n if carrier.invoice_policy == 'real':\n values['price_unit'] = 0\n values['name'] += _(' (Estimated Cost: %s )', self._format_currency_amount(price_unit))\n else:\n values['price_unit'] = price_unit\n if carrier.free_over and self.currency_id.is_zero(price_unit):\n values['name'] += '\\n' + 'Free Shipping'\n if self.order_line:\n values['sequence'] = self.order_line[-1].sequence + 1\n sol = SaleOrderLine.sudo().create(values)\n\n # probando solucionar el bug #13\n self.delivery_set = True\n\n return sol\n\n\nclass SaleOrderLineInherit(models.Model):\n _inherit = 'sale.order.line'\n\n def _onchange_price_sql(self):\n vals = {}\n if self.order_id.pricelist_id:\n if self.order_id.pricelist_id.currency_id.id == 3:\n vals = {\n 'price_unit_usd': self.price_unit,\n 'price_subtotal_usd': self.price_subtotal\n }\n elif self.order_id.pricelist_id.currency_id.id != 3:\n vals = {\n 'price_unit_usd': self.order_id.pricelist_id.currency_id._convert(self.price_unit, self.env.company.currency_id, self.env.company, self.order_id.date_order, False),\n 'price_subtotal_usd': self.order_id.pricelist_id.currency_id._convert(self.price_subtotal, self.env.company.currency_id, self.env.company, self.order_id.date_order, False)\n }\n return vals\n","repo_name":"binaural-dev/pleni-demo","sub_path":"3mit-modules/3mit_delivery_automatic/models/sale_order_inherit.py","file_name":"sale_order_inherit.py","file_ext":"py","file_size_in_byte":5409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33762133916","text":"from collections import deque\ndef solution(p, l):\n \n q = deque()\n cnt = 0\n \n for index, element in enumerate(p):\n q.append((element,index))\n \n while q:\n item, idx = q.popleft()\n maxx = max(p)\n if item == maxx: # 여기서 가장 오래 고민\n p.pop(p.index(maxx))\n cnt += 1\n if idx == l:\n return cnt\n else:\n q.append((item,idx))\n","repo_name":"HyeJoonKim/Algorithm","sub_path":"자료구조/Programmers_프린터.py","file_name":"Programmers_프린터.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75333275575","text":"\ntry:\n h=open(\"01_basic_write.txt\",\"w+\")\n h.write('1,almasud,Abdullah Al Masud\\n'\n '2,rimon,Rimol Ali\\n'\n '3,niloy,Niloy Roy\\n'\n '4,sourov,Sourov Deb Sharma\\n'\n '5,sathi,Sathi Rani Roy\\n'\n )\n\nexcept IOError:\n print(\"An IOError has occurred!\")\nfinally:\n h.close()\n\n\n","repo_name":"zameerhossain/Python_Django_WorkHome","sub_path":"python_exercise_on_files/01_basic_write.py","file_name":"01_basic_write.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24441711536","text":"\nimport math\n\nimport numpy as np\nimport torch\n\ndef zero_mean_unit_variance(data: np.ndarray) -> np.ndarray:\n \"\"\"Normalizes to zero mean and unit variance.\"\"\"\n offset = np.mean(data, axis=0)\n scale = np.std(data, axis=0)\n # Handle columns with zero variance.\n scale = np.where(scale == 0, np.ones_like(scale), scale)\n return (data - offset) / scale\n\n\ndef normalize(data: np.ndarray) -> np.ndarray:\n \"\"\"Normalizes to range [0, 1].\"\"\"\n data_min = data.min(axis=0)\n data_max = data.max(axis=0)\n return (data - data_min) / (data_max - data_min)\n\n\ndef make_regularized_pca_loss(lambd: float, *, norm: int = 2):\n \"\"\"Sets up the objective function\n\n Args:\n lambd (float): shrinkage penalty for regularization\n norm (int, optional): Shrinkage penalty:Lasso = 1, Ridge = 2. Defaults to 2.\n \"\"\"\n\n def loss(X, Y, A):\n \"\"\"squared frobenius norm loss funnction.\"\"\"\n\n n, m = A.shape\n n_x, k = X.shape\n k, m_y = Y.shape\n assert n_x == n\n assert m_y == m\n\n mse = torch.norm(A - X @ Y) ** 2 / (n * m)\n x_regulariser = torch.norm(X, norm) ** norm / (n * k)\n y_regulariser = torch.norm(Y, norm) ** norm / (m * k)\n\n return mse + lambd * (x_regulariser + y_regulariser)\n\n return loss\n\n\ndef SVD_initialization(A: torch.Tensor, rank: int):\n \"\"\"Args: Transformed Data Matrix A\n\n Initializes X = U ̃ * (Sigma ̃)^1/2, and Y = (Sigma ̃)^1/2 * V ̃T diag(sigma), with\n offset row initialized with the means.\n \"\"\"\n\n # SVD to get initial point.\n A = A.cpu().detach().numpy()\n stdev = A.std(0)\n u, s, v = np.linalg.svd(A, full_matrices=False)\n u = u[:, :rank]\n s = np.diag(np.sqrt(s[:rank]))\n v = v[:rank, :]\n\n X_init, Y_init = np.asarray(u.dot(s)), np.asarray(s.dot(v)) * np.asarray(stdev)\n\n X_init, Y_init = torch.from_numpy(X_init), torch.from_numpy(Y_init)\n X_init.requires_grad = True\n Y_init.requires_grad = True\n\n return X_init, Y_init\n","repo_name":"kiranikram/GLRM","sub_path":"src/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16939799187","text":"\"\"\"\n划分数据集,分为train和val\n\"\"\"\n\n\nimport os\nfrom PIL import Image\nimport random\nimport json\n\nif __name__ == '__main__':\n path = '../data_pro'\n all_img = 0\n label = 0 \n val_data = list()\n train_data = list()\n with open(path + '/3_normalized_data.json', 'r') as file:\n file = json.load(file)\n # print(file)\n sample = random.sample(range(len(file)), int(len(file) * 0.2))\n # print(len(sample))\n for i in sample:\n val_data.append(file[i])\n # print(file)\n for _data in file:\n # print(_data)\n if _data not in val_data:\n train_data.append(_data)\n print(len(val_data))\n print(len(train_data))\n # imgsize_count = dict()\n # # print(dirpath + filepath)\n # img_num = len(filenames)\n # all_img += img_num\n # sample = random.sample(range(len(filenames)), int(len(filenames) * 0.2))\n # label = int(dirpath[-1])\n # for name in sample:\n # sample_name = dict()\n # sample_name['file_name'] = dirpath + \"/\" + filenames[name]\n # # print(dirpath)\n # print(sample_name)\n # sample_name['label'] = label\n # val_data.append(sample_name)\n # for i in range(len(filenames)):\n # if i not in sample:\n # train_name = dict()\n # train_name['file_name'] = dirpath + \"/\" + filenames[i]\n # train_name['label'] = label\n # train_data.append(train_name)\n # # val_data[label] = sample_name\n # # train_data[label] = train_name\n # # print(sample)\n # # for filepath in filenames:\n # # img = Image.open(dirpath + \"\\\\\" + filepath)\n # # imgSize = img.size # 图片的长和宽\n # # # print(imgSize)\n # # if imgSize not in imgsize_count.keys():\n # # imgsize_count[imgSize] = 0\n # # imgsize_count[imgSize] += 1\n # # # img.show()\n # # print(imgsize_count)\n # # print(img_num)\n # # print(all_img)\n # # print(val_data)\n # # print(train_data)\n json_data = json.dumps(val_data)\n with open(path + '/4_val_norm.json', 'w') as f_six:\n f_six.write(json_data)\n json_data = json.dumps(train_data)\n with open(path + '/4_train_norm.json', 'w') as f_six:\n f_six.write(json_data)","repo_name":"DuanChenL/FGVC10","sub_path":"utils/4_divid_train_Data.py","file_name":"4_divid_train_Data.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"73779455416","text":"while True:\n reply = input('Enter text:')\n if reply == 'stop': break\n print(reply.upper())\n\n\n# Doing Math on User Inputs\nwhile True:\n reply = input('Enter text for math on user:')\n if reply == 'stop': break\n print(int(reply) ** 2)\nprint('Bye')\n\n\n# Handling Errors by Testing Inputs\nS = '123'\nT = 'xxx'\nprint(S.isdigit(), T.isdigit())\n\nwhile True:\n reply = input('Enter text for handling errors segment:')\n if reply == 'stop':\n break \n elif not reply.isdigit():\n print('Bad' * 8)\n else:\n print(int(reply) ** 2)\nprint('Bye')\n\n\n# Handling Errors with try statements\nwhile True:\n reply = input('Enter text for try statement:')\n if reply == 'stop': break\n try:\n num = int(reply)\n except:\n print('Bad!' * 8)\n else:\n print(num ** 2)\nprint('Bye')\n\n\n# Supporting floating-point numbers\nwhile True:\n reply = input('Enter text to show support for floating-point no.:')\n if reply == 'stop': break\n try:\n print(float(reply) ** 2) \n except:\n print('Bad!' * 8)\nprint('Kwaheri')\n\n\n# Nesting Code Three Levels Deep\nwhile True:\n reply = input('Enter text for 3 levels deep:')\n if reply == 'stop':\n break\n elif not reply.isdigit():\n print('Bad!' * 8)\n else:\n num = int(reply)\n if num < 20:\n print('low')\n else:\n print(num ** 2)\nprint(\"Tothi'e\")\n","repo_name":"bimri/learning-python","sub_path":"chapter_10/user_input.py","file_name":"user_input.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41400120003","text":"#Néfi Leite - Byu\n\nfrom math import prod\n\n\ncomprasProdutos = []\ncomprasQuantidade = []\ncomprasPrecos = []\ncarrinho = []\n\npreco = 0\nproduto = \" \"\n\nprint(\"\\nWelcome to the Gift Supermarket!\\n\")\n\nop = 0\nwhile op != 5:\n print(\"-----------------------\"\n \"\\n1- Add item\\n\"\n \"2- View cart\\n\"\n \"3- Remove item\\n\"\n \"4- Compute total\\n\"\n \"5- Quit\\n\"\n \"-----------------------\\n\")\n op = int(input(\"Please select one of the following: \"))\n \n if op == 1:\n print()\n \n produto = str(input(\"What item would you like to add? \"))\n quantidade = int(input(\"How many units of this product? \"))\n preco = float(input(f\"What is the price of '{produto}'? \"))\n print(f'{produto} has a beem added to the cart.\\n')\n \n comprasProdutos.append(produto)\n comprasQuantidade.append(int(quantidade))\n comprasPrecos.append(float(preco))\n carrinho.append(str(\"---\"))\n \n elif op == 2:\n if len(comprasProdutos) == 0:\n print(\"Empty list!\\n\") \n \n i = 1\n print(\"----------------------------------------------------\") \n for x in range(len(comprasProdutos)):\n print(i,\"- Products: \",comprasProdutos[x],\" - Units: \",comprasQuantidade[x],\" - Price: R$ \",comprasPrecos[x],\"\", carrinho[x]) \n i += 1\n print(\"----------------------------------------------------\") \n \n elif op == 3: #I had difficult to remove the items\n index = 1\n print(\"----------------------------------------------------\") \n for x in range(len(comprasProdutos)):\n print(index,\"- Products: \",comprasProdutos[x],\" - Units: \",comprasQuantidade[x],\" - Price: R$ \",comprasPrecos[x],\"\", carrinho[x]) \n index += 1\n print(\"----------------------------------------------------\") \n remove = int(input(\"What's your choice? Type the code product \"))\n index = remove\n comprasProdutos.pop(index)\n comprasQuantidade.pop(index)\n comprasPrecos.pop(index)\n \n elif op == 4:\n resultado = 0\n for x in range(len(comprasProdutos)):\n resultado = (resultado + (comprasQuantidade[x] * comprasPrecos[x]))\n print(\"\\nTotal: R$\", resultado) \n\n elif op == 5:\n print(\"Thank you. See you later!\")\n \n else:\n print(\"Wrong code, type again!\") \n\n","repo_name":"neficl/Byu_cse110","sub_path":"compras.py","file_name":"compras.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11401309769","text":"import csv, collections\r\n\r\ndef load_tower_times():\r\n\tf = csv.reader(open(\"../output/time_by_tower.csv\", 'rb'), delimiter=',')\r\n\ttowers = {}\r\n\tfor line in f:\r\n\t\ttower,region,time,count = line\r\n\t\tif tower not in towers:\r\n\t\t\ttowers[tower] = {}\r\n\t\ttowers[tower][time] = count\r\n\treturn towers\r\n\t\r\ndef load_tower_stats():\r\n\tf = csv.reader(open(\"../output/towers_with_stats.csv\", 'rb'), delimiter='|')\r\n\ttowers = {}\r\n\ti = 0\r\n\tfor line in f:\r\n\t\tif i != 0:\r\n\t\t\tsitename, lat, lon, urban = line[1], line[2], line[3], line[21]\r\n\t\t\ttowers[sitename] = (lat, lon, int(urban))\r\n\t\ti += 1\r\n\treturn towers\r\n\t\r\ntowers_stats = load_tower_stats()\r\ntowers_times = load_tower_times()\r\nurban = {}\r\nrural = {}\r\n\r\nfor tower, val in towers_stats.items():\r\n\ttry:\r\n\t\tif val[2] == 1: \t# tower is urban\r\n\t\t\turban[tower] = towers_times[tower]\r\n\t\telse:\r\n\t\t\trural[tower] = towers_times[tower]\r\n\texcept: pass\r\n\t\t\r\nwurban = open(\"../output/urban_times.csv\", 'w')\r\nwurban.write(\"tower,time,count\\n\")\r\nalltimes = collections.defaultdict(int)\r\nfor tower, values in urban.items():\r\n\tfor t, ct in values.items():\r\n\t\talltimes[t] += int(ct)\r\n\t\twurban.write(\"%s,%s,%s\\n\" % (tower, t, ct))\r\nfor t, v in alltimes.items():\r\n\twurban.write(\"total,%s,%s\\n\" % (t, v))\r\nwurban.close()\r\n\r\nwrural = open(\"../output/rural_times.csv\", 'w')\r\nwrural.write(\"tower,time,count\\n\")\r\nalltimes = collections.defaultdict(int)\r\nfor tower, values in rural.items():\r\n\tfor t, ct in values.items():\r\n\t\talltimes[t] += int(ct)\r\n\t\twrural.write(\"%s,%s,%s\\n\" % (tower, t, ct))\r\nfor t, v in alltimes.items():\r\n\twrural.write(\"total,%s,%s\\n\" % (t, v))\r\nwrural.close()","repo_name":"shangyian/greatlakes-call-data","sub_path":"uganda/region_features/compare_times_by_region.py","file_name":"compare_times_by_region.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27733577694","text":"import json, boto3, requests, subprocess, os, http.cookiejar\nfrom threading import Thread\nfrom glob import glob\n\n\"\"\"\n1. Using cookies, go to your feed and get a list of all authors.\n2. Iterate through the dictionary and create several {author: [video_id, video_id1,...]} dictionaries.\n3. Iterate through the dictionaries and push each video to Telegram.\n\"\"\"\n\n# Converting duration from ffmpeg's output into seconds Telegram will comprehend.\ndef time(time):\n chunks = time.split(\"\\n\")[0].split(\":\")\n seconds = int(chunks[0]) * 60 * 60 + int(chunks[1]) * 60 + int(chunks[2])\n\n return int(seconds)\n\n# Opening cookies.txt file and using the data from there. Has to be uploaded to S3 manually by someone.\n# Pulling a file with TikTok cookies from the browser because TikTok authentication via passport/web/user/login/ is too complicated for my brain. :^)\ndef get_cookies(cookies_path):\n cookies = {}; cj = http.cookiejar.MozillaCookieJar(); cj.load(filename=cookies_path)\n for c in cj:\n cookies[str(c).split(\"Cookie \")[1].split(\"=\")[0]] = str(c).split(\"Cookie \")[1].split(\"=\")[1].split(\" for\")[0]\n\n return cookies\n\n# Not used right now. Potential future functionality. Collecting IDs of all videos from a given account.\ndef collectVideoIds(username):\n res = requests.get(f\"https://www.tiktok.com/@{username}\", headers={\"Content-Type\": \"application/json\"})\n output = res.content.decode().replace('')\n for line in output.split(\"\\n\"):\n if \"SIGI_STATE\" in line:\n target = line\n break\n target = target.replace('', \"\")\n target = json.loads(target)\n\n return target[\"ItemList\"][\"user-post\"][\"list\"]\n\n# The multithreaded function.\ndef downloadVideo(username, video):\n cmd_str = f\"/var/task/yt-dlp --config-locations /var/task/yt-dlp.conf https://www.tiktok.com/@{username}/video/{video}\"\n event = subprocess.run(cmd_str, shell=True, capture_output=True, text=True)\n try:\n print (f\"All good: {event.stdout}\")\n except:\n print (f\"All bad: {event.stderr}\")\n\ndef shipToTelegram(username, videos):\n files = {}; media = []\n # https://stackoverflow.com/questions/11968689/python-multithreading-wait-till-all-threads-finished\n threadz = [Thread(target=downloadVideo, args=[username, video]) for video in videos]\n [t.start() for t in threadz]\n [t.join() for t in threadz]\n\n if len(videos) > 1:\n # Request requirements with a single video are different from those of a request with several videos.\n for _file in glob(f\"/tmp/{username}*.mp4\"):\n thumb = _file.split(\".mp4\")[0] + \"_thumb.jpg\"\n # Do three things simultaneously: 1) create a thumbnail, 2) get video duration, and 3) get video resolution.\n cmd_str = f\"\"\"/var/task/ffmpeg -y -i {_file} -vf scale=w='min(320\\, iw*3/2):h=-1' -vframes 1 {thumb} 2>&1 | grep -oP '(Duration: \\K[0-9]+:[0-9]+:[0-9]+)|(Stream .*, \\K[0-9]+x[0-9]+)' | head -2\"\"\"\n event = subprocess.run(cmd_str, shell=True, capture_output=True, text=True)\n duration = time(event.stdout); height = int(event.stdout.split(\"\\n\")[1].split(\"x\")[1]); width = int(event.stdout.split(\"\\n\")[1].split(\"x\")[0])\n files[_file] = open(_file, \"rb\"); files[thumb] = open(thumb, \"rb\")\n media.append({\"type\": \"video\", \"media\": f\"attach://{_file}\", \"thumbnail\": f\"attach://{thumb}\", \"supports_streaming\": True, \"width\": width, \"height\": height, \"duration\": duration})\n # https://stackoverflow.com/questions/58893142/how-to-send-telegram-mediagroup-with-caption-text\n media[0][\"caption\"] = f\"Завантажено з допомогою tiktok-hoarder.\\nhttps://www.tiktok.com/@{username}\"\n payload = {\n \"chat_id\": CHAT_ID,\n \"media\": json.dumps(media),\n \"disable_notification\": True\n }\n msg = requests.post(f\"https://api.telegram.org/bot{TOKEN}/sendMediaGroup\", params=payload, files=files)\n if not msg.json()[\"ok\"]:\n print (f\"Caught an error uploading {media} to Telegram:\", msg.json())\n if FEEDBACK_CHANNEL != \"/dev/null\":\n report = requests.post(f\"https://api.telegram.org/bot{TOKEN}/sendMessage\", headers={\"Content-Type\": \"application/json\", \"Cache-Control\": \"no-cache\"}, json={\"chat_id\":FEEDBACK_CHANNEL, \"is_personal\": False, \"text\": f\"Caught an error uploading {['https://www.tiktok.com/@' + username + '/video/' + _item + '; ' for _item in videos]} to Telegram:\\n{msg.json()}\"})\n if not report.json[\"ok\"]:\n print (f\"Caught an error reporting another error to the feedback channel:\\n{report.json()}\")\n elif len(videos) != 0:\n thumb = f\"/tmp/{username}-{videos[0]}_thumb.jpg\"\n # Do three things simultaneously: 1) create a thumbnail, 2) get video duration, and 3) get video resolution.\n cmd_str = f\"\"\"/var/task/ffmpeg -y -i /tmp/{username}-{videos[0]}.mp4 -vf scale=w='min(320\\, iw*3/2):h=-1' -vframes 1 {thumb} 2>&1 | grep -oP '(Duration: \\K[0-9]+:[0-9]+:[0-9]+)|(Stream .*, \\K[0-9]+x[0-9]+)' | head -2\"\"\"\n event = subprocess.run(cmd_str, shell=True, capture_output=True, text=True)\n duration = time(event.stdout); height = int(event.stdout.split(\"\\n\")[1].split(\"x\")[1]); width = int(event.stdout.split(\"\\n\")[1].split(\"x\")[0])\n files = {\"video\": (f\"{username}-{videos[0]}.mp4\", open(f\"/tmp/{username}-{videos[0]}.mp4\", \"rb\")), \"thumbnail\": (f\"{thumb}\", open(f\"{thumb}\", \"rb\"))}\n payload = {\n \"chat_id\": CHAT_ID,\n \"caption\": f\"Завантажено з допомогою tiktok-hoarder.\\nhttps://www.tiktok.com/@{username}\",\n \"is_personal\": False,\n \"disable_notification\": True,\n \"supports_streaming\": True,\n \"duration\": duration,\n \"height\": height,\n \"width\": width\n }\n msg = requests.post(f\"https://api.telegram.org/bot{TOKEN}/sendVideo\", data=payload, files=files)\n if not msg.json()[\"ok\"]:\n print (f\"Caught an error uploading {files} to Telegram:\", msg.json())\n if FEEDBACK_CHANNEL != \"/dev/null\":\n report = requests.post(f\"https://api.telegram.org/bot{TOKEN}/sendMessage\", headers={\"Content-Type\": \"application/json\", \"Cache-Control\": \"no-cache\"}, json={\"chat_id\": FEEDBACK_CHANNEL, \"is_personal\": False, \"text\": f\"Caught an error uploading {'https://www.tiktok.com/@' + username + '/video/' + videos[0]} to Telegram:\\n{msg.json()}\"})\n if not report.json[\"ok\"]:\n print (f\"Caught an error reporting another error to the feedback channel:\\n{report.json()}\")\n\n# Not used right now. Potential functionality. Pushing videos to S3.\ndef pushVideo(filename):\n S3.upload_file(f\"{'/tmp/' + filename}\", BUCKET, f\"videos/{filename}\")\n os.remove(f\"{'/tmp/' + filename}\")\n print (f\"Successfully pushed and removed {filename}\")\n\n# Master function Lambda is configured to execute.\ndef lambda_handler(event, context):\n global S3, CHAT_ID, BUCKET, TOKEN, FEEDBACK_CHANNEL, GEOLOCK # see CF template for the description of all but S3\n CHAT_ID = os.environ[\"chat_id\"]; BUCKET = os.environ[\"bucket\"]; TOKEN = os.environ[\"token\"]; FEEDBACK_CHANNEL = os.environ[\"feedback\"]; GEOLOCK = os.environ[\"geolock\"]\n if \"local_storage:\" not in BUCKET:\n S3 = boto3.client(\"s3\"); S3.download_file(BUCKET, \"cookies.txt\", \"/tmp/cookies.txt\")\n cookies = get_cookies(\"/tmp/cookies.txt\")\n else:\n cookies = get_cookies(BUCKET.split(\":\")[1])\n\n\n # Getting videos from the personal feed.\n # Apparently, regardless of the query parameters TikTok will give you exactly 8 videos per GET request.\n res = requests.get(f\"https://api19-va.tiktokv.com/aweme/v1/feed/?type=0&app_name=trill&min_cursor=-1&max_cursor=0®ion={GEOLOCK}\", cookies=cookies, headers={\"Content-Type\": \"application/json\"}).json()[\"aweme_list\"]\n authors_and_videos = {}\n # This is how geolock is additionally enforced, apart from passing geolock parameters.\n for video in res:\n if video[\"author\"][\"region\"] != GEOLOCK:\n print (f\"Skipping https://www.tiktok.com/@{video['author']['unique_id']}/video/{video['aweme_id']} because it came from {video['author']['region']} instead of {GEOLOCK}.\")\n continue\n else:\n authors_and_videos.setdefault(video[\"author\"][\"unique_id\"], []).append(video[\"aweme_id\"])\n\n threadz = [Thread(target=shipToTelegram, args=[author, authors_and_videos[author]]) for author in authors_and_videos]\n [t.start() for t in threadz]\n [t.join() for t in threadz]\n\n removal_counter = 0\n for _object in glob(f\"/tmp/*.mp4\"):\n try:\n print (f\"Removing {_object}\"); os.remove(f\"{_object}\"); removal_counter += 1\n except Exception as e:\n print (f\"Removed {removal_counter} in total and failed to remove {_object}. Reason:\\n{e}\")\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(f\"Pushed {removal_counter} videos this time.\")\n }\n","repo_name":"nariman-namazov/tiktok-hoarder","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":9208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28358542614","text":"import keras.backend as K\nfrom keras.optimizers import Optimizer\n\n\nclass Eve(Optimizer):\n '''Eve optimizer.\n\n Default parameters follow those provided in the original paper.\n\n # Arguments\n lr: float >= 0. Learning rate.\n beta_1/beta_2/beta_3: floats, 0 < beta < 1. Generally close to 1.\n small_k/big_K: floats\n epsilon: float >= 0. Fuzz factor.\n\n # References\n - [Improving Stochastic Gradient Descent With FeedBack](http://arxiv.org/abs/1611.01505v1.pdf)\n '''\n\n def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,\n beta_3=0.999, small_k=0.1, big_K=10,\n epsilon=1e-8, decay=0., **kwargs):\n super(Eve, self).__init__(**kwargs)\n self.__dict__.update(locals())\n self.iterations = K.variable(0)\n self.lr = K.variable(lr)\n self.beta_1 = K.variable(beta_1)\n self.beta_2 = K.variable(beta_2)\n self.beta_3 = K.variable(beta_3)\n self.small_k = K.variable(small_k)\n self.big_K = K.variable(big_K)\n self.decay = K.variable(decay)\n self.inital_decay = decay\n\n def get_updates(self, params, loss):\n grads = self.get_gradients(loss, params)\n self.updates = [K.update_add(self.iterations, 1)]\n\n lr = self.lr\n if self.inital_decay > 0:\n lr *= (1. / (1. + self.decay * self.iterations))\n\n t = self.iterations + 1\n lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))\n\n shapes = [K.get_variable_shape(p) for p in params]\n ms = [K.zeros(shape) for shape in shapes]\n vs = [K.zeros(shape) for shape in shapes]\n f = K.variable(0)\n d = K.variable(1)\n self.weights = [self.iterations] + ms + vs + [f, d]\n\n cond = K.greater(t, K.variable(1))\n small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1, 1. / (self.big_K + 1))\n big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1, 1. / (self.small_k + 1))\n\n c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)), big_delta_t)\n f_t = c_t * f\n r_t = K.abs(f_t - f) / (K.minimum(f_t, f))\n d_t = self.beta_3 * d + (1 - self.beta_3) * r_t\n\n f_t = K.switch(cond, f_t, loss)\n d_t = K.switch(cond, d_t, K.variable(1.))\n\n self.updates.append(K.update(f, f_t))\n self.updates.append(K.update(d, d_t))\n\n for p, g, m, v in zip(params, grads, ms, vs):\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)\n\n self.updates.append(K.update(m, m_t))\n self.updates.append(K.update(v, v_t))\n\n new_p = p_t\n self.updates.append(K.update(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {'lr': float(K.get_value(self.lr)),\n 'beta_1': float(K.get_value(self.beta_1)),\n 'beta_2': float(K.get_value(self.beta_2)),\n 'beta_3': float(K.get_value(self.beta_3)),\n 'small_k': float(K.get_value(self.small_k)),\n 'big_K': float(K.get_value(self.big_K)),\n 'epsilon': self.epsilon}\n base_config = super(Eve, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n","repo_name":"tdeboissiere/DeepLearningImplementations","sub_path":"Eve/Eve.py","file_name":"Eve.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":1807,"dataset":"github-code","pt":"22"} +{"seq_id":"29842109807","text":"class Character :\n def __init__(self, name):\n self.name = name\n self.health = 50\n self.power = 50\n self.defense = 30\n self.weapon = None\n \n def attack(self, target):\n \n print(f\"[ATTACK] {self.name} attacked {target.name}.\")\n damage = target.defend(self.power) #! Abstraction\n print(f\"AND cause Damage equal {damage}\")\n target.health -= damage\n return self\n\n def defend(self, damage):\n print(f\"[DEFEND] {self.name} defended {damage} AND reduce it by {self.defense}.\")\n damage -= self.defense\n return damage\n\n\n\nclass Barbarian(Character): #! Inheritance\n def __init__(self, name):\n super().__init__(name)\n self.power+=30 #! Polymorphism change parent attribute\n self.health+=20 #! Polymorphism change parent attribute\n self.rage = 30 #! Polymorphism add new attribute\n \n\nclass Elf(Character):\n def __init__(self, name):\n super().__init__(name)\n\n # ! Polymorphism \n def magic_attack(self, target):\n target.health -= self.power\n target.power -= 20\n target.defense -= 20\n\n\nclass Seer:\n def __init__(self) :\n self.hidden_type = Barbarian(\"SEER\") #! Abstraction\n self.see_range = 100\n\n\n# class Seer (Barbarian):\n# def __init__(self, name) :\n# super().__init__(name)\n# # self.hidden_type = Barbarian(\"SEER\") #! Abstraction\n\n\njohn = Character(\"JOHN\")\nconan = Barbarian(\"CONAN\")\nelon = Seer()\nelon.hidden_type.attack(john)\nprint(\"CONAN Health : \",conan.health)\nconan.attack(john)\n\njane = Character(\"JANE\")","repo_name":"IyadJuini/python_stack","sub_path":"02-python-oop/w01-d03-01-four-pillars/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27189331421","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 7 10:25:52 2019\n\n@author: yifei\n\"\"\"\n\nimport os\nimport time\nimport argparse\nimport numpy as np\nfrom DataBuilder import d_Builder\nfrom ModelBuilder import m_Builder\n\ndef ranking_MAP(data, result, rank_k):\n \n n, t = data.shape\n m_MAP = np.zeros([rank_k, 1])\n for i in range(0, t):\n sort_data = sorted(enumerate(data[:, i]), key = lambda x:x[1], reverse=True)\n sort_result = sorted(enumerate(result[:, i]), key = lambda x:x[1], reverse=True)\n sort_data = np.array(sort_data)\n sort_result = np.array(sort_result)\n flags = sort_data[:, 0] == sort_result[:, 0]\n flags = flags.reshape([n, 1])\n for j in range(0, rank_k):\n temp = flags[0:j+1].reshape([1, j+1])\n m_MAP[j] = m_MAP[j] + len(np.where(temp == True)[0]) / (j+1)\n m_MAP = m_MAP / t\n return m_MAP\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name', type=str, default='Linear',\n help='model name (default: Linear)')\n parser.add_argument('--ar_days', type=int, default=7,\n help='historical days in prediction (default: 7)')\n parser.add_argument('--kernel_names', type=str, default='dis', nargs='+',\n help='kernel type for inference (default: dis)')\n # sequential data file path\n parser.add_argument('--data_path', type=str, \n default=\"D:/yifei/Documents/Codes_on_GitHub/External_data/CHI_Region/\",\n help='data path')\n # settings of data preprocessing for training and evaluation when sequential data is available\n parser.add_argument('--train_end', type=int, default=910,\n help='end date of training dataset (default: 910)')\n parser.add_argument('--train_days', type=int, default=180,\n help='num of train days (default: 180)')\n parser.add_argument('--eval_days', type=int, default=21,\n help='num of eval days (default: 21)')\n parser.add_argument('--simi_len', type=int, default=90,\n help='days for building dis kernel (default: 90)')\n \n parser.add_argument('--learning_rate', type=float, default=1e-2)\n parser.add_argument('--iters', type=int, default=100)\n parser.add_argument('--threshold', type=float, default=1e-3)\n parser.add_argument('--hyper_parameters', type=int, nargs='+')\n \n args = parser.parse_args()\n train_end = args.train_end\n train_days = args.train_days\n eval_days = args.eval_days\n ar_days = args.ar_days\n data_path = args.data_path\n model_name = args.model_name\n kernel_names = args.kernel_names\n simi_len = args.simi_len\n learning_rate = args.learning_rate\n iters = args.iters\n threshold = args.threshold\n hyper_parameters = args.hyper_parameters\n if model_name == 'NN-CCRF':\n is_nnccrf = True\n else:\n is_nnccrf = False\n if model_name == 'ARMA':\n ar_days = hyper_parameters[0]\n \n # Build training and evaluation dataset when you have sequential data\n '''\n m_dataBuilder = d_Builder(data_path, ar_days, \n train_end, train_days, eval_days)\n (train_x, train_y), (eval_x, eval_y) = m_dataBuilder.load_xy(is_nnccrf)\n '''\n \n path = os.getcwd()\n \n # Load preprocessed dataset\n # train_days = 180, eval_days = 21, simi_len = 90\n if is_nnccrf:\n train_x = np.load(r'{}\\data\\nnccrf\\train_x.npy'.format(path))\n train_y = np.load(r'{}\\data\\nnccrf\\train_y.npy'.format(path))\n eval_x = np.load(r'{}\\data\\nnccrf\\eval_x.npy'.format(path))\n eval_y = np.load(r'{}\\data\\nnccrf\\eval_y.npy'.format(path))\n \n else:\n train_x = np.load(r'{}\\data\\non-nnccrf\\train_x.npy'.format(path))\n train_y = np.load(r'{}\\data\\non-nnccrf\\train_y.npy'.format(path))\n eval_x = np.load(r'{}\\data\\non-nnccrf\\eval_x.npy'.format(path))\n eval_y = np.load(r'{}\\data\\non-nnccrf\\eval_y.npy'.format(path))\n \n \n if model_name == 'TCP':\n #tcp_kernel = m_dataBuilder.load_tcp_kernel(kernel_names)\n tcp_kernel = np.load(r'{}\\data\\kernel_tcp.npy'.format(path))\n if model_name == 'CRFasRNN':\n #crf_kernels = m_dataBuilder.load_kernels(kernel_names, simi_len)\n crf_kernel = np.load(r'{}\\data\\kernel_crf.npy'.format(path))\n \n # Train the model\n m_modelBuilder = m_Builder(model_name, learning_rate, iters, threshold)\n if model_name == 'TCP':\n hyper_parameters = [hyper_parameters]\n hyper_parameters.extend([tcp_kernel])\n if model_name == 'CRFasRNN':\n hyper_parameters = [hyper_parameters]\n hyper_parameters.extend([crf_kernel])\n m_modelBuilder.train_model(hyper_parameters, train_x, train_y)\n rmse, rmse_std, pred_y, true_y = m_modelBuilder.eval_model(eval_x, eval_y)\n \n # rmse results\n n_regions = pred_y.shape[0]\n error_1 = (pred_y[:, 0:1] - true_y[:, 0:1])**2\n error_7 = (pred_y[:, 0:7] - true_y[:, 0:7])**2\n error_14 = (pred_y[:, 0:14] - true_y[:, 0:14])**2\n error_21 = (pred_y[:, 0:21] - true_y[:, 0:21])**2\n rmse_1 = np.sqrt(np.sum(error_1) / (n_regions*1))\n rmse_7 = np.sqrt(np.sum(error_7) / (n_regions*7))\n rmse_14 = np.sqrt(np.sum(error_14) / (n_regions*14))\n rmse_21 = np.sqrt(np.sum(error_21) / (n_regions*21))\n \n \n # ranking results\n rankings_1 = ranking_MAP(true_y[:, 0:1], pred_y[:, 0:1], 10)\n rankings_7 = ranking_MAP(true_y[:, 0:7], pred_y[:, 0:7], 10)\n rankings_14 = ranking_MAP(true_y[:, 0:14], pred_y[:, 0:14], 10)\n rankings_21 = ranking_MAP(true_y[:, 0:21], pred_y[:, 0:21], 10)\n results = np.zeros([1, 8])\n results[0, 0] = np.mean(rankings_1[0:5])\n results[0, 1] = np.mean(rankings_1[0:10])\n results[0, 2] = np.mean(rankings_7[0:5])\n results[0, 3] = np.mean(rankings_7[0:10])\n results[0, 4] = np.mean(rankings_14[0:5])\n results[0, 5] = np.mean(rankings_14[0:10])\n results[0, 6] = np.mean(rankings_21[0:5])\n results[0, 7] = np.mean(rankings_21[0:10])\n \n print('{} prediction performance:'.format(model_name))\n print('RMSE:\\t{:.5f}\\tRMSE_STD:\\t{:.5f}'.format(rmse, rmse_std))\n if rmse != -1 and rmse is not None:\n if model_name == 'TCP' or model_name == 'CRFasRNN':\n hyper_parameters = hyper_parameters[0]\n file = open(r'{}\\data\\results_{}.txt'.format(path, model_name), 'a+')\n # save parameteres and hyper-parameters\n file.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t'.format(train_end, \n train_days, eval_days, ar_days, simi_len, kernel_names, learning_rate, \n hyper_parameters, iters))\n # save prediction and ranking results, short-term: rmse_1/7 and long-term rmse_14/21\n file.write('{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t'.format(rmse_1, rmse_7, rmse_14, rmse_21))\n file.write('{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\t{:.5f}\\n'.format(\n results[0, 0], results[0, 1], results[0, 2], results[0, 3],\n results[0, 4], results[0, 5], results[0, 6], results[0, 7]))\n file.close()\n \n \n \n \n \n \n \n \n \n","repo_name":"SZDAVE93/crime_models","sub_path":"train_eval_models.py","file_name":"train_eval_models.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"22565148706","text":"#!/Users/soonio/miniconda3/envs/honeypi_env/bin/python\n\n############################################################\n# Argument Options\n# Header needs to have phylum -> species all separated by '_'\n\nimport argparse\nparser = argparse.ArgumentParser(\"Joins two honeypi outputs. Usage: honeypi_joinTwoResults.py -i1 ASVs_1.fasta,ASVs_counts_1.txt,ASVs_taxonomy_1.txt -i2 ASVs_2.fasta,ASVs_counts_2.txt,ASVs_taxonomy_2.txt\")\nparser.add_argument(\"-i1\",\n action = \"store\", \n dest = \"input1\", \n metavar = \"input1\",\n help = \"[REQUIRED] Master files\", \n required = True)\nparser.add_argument(\"-i2\",\n action = \"store\", \n dest = \"input2\", \n metavar = \"input2\",\n help = \"[REQUIRED] Tables to add\", \n required = True)\noptions = parser.parse_args()\n\n############################################################\n\nfrom Bio import SeqIO, AlignIO\nimport pandas as pd\n\n\noutfile_table = open(\"ASVs_counts_combined.txt\", \"w\")\noutfile_taxonomy = open(\"ASVs_taxonomy_combined.txt\", \"w\")\noutfile_fasta = open(\"ASVs_combined.fasta\", \"w\")\n\n# 1\ninfile_fasta_1 = open(options.input1.split(\",\")[0], \"r\")\ninfile_table_1 = open(options.input1.split(\",\")[1], \"r\")\ninfile_taxonomy_1 = open(options.input1.split(\",\")[2], \"r\")\n\nUID2seq_1 = {}\nfor record in SeqIO.parse(infile_fasta_1, \"fasta\"):\n UID2seq_1[str(record.description)] = str(record.seq)\nseq2UID_1 = {v: k for k, v in UID2seq_1.items()}\n\ntable_1 = pd.read_csv(infile_table_1, sep = \"\\t\", index_col=0)\ntable_1 = table_1.rename(index = UID2seq_1)\n\ntaxonomy_1 = pd.read_csv(infile_taxonomy_1, sep = \"\\t\", index_col=0, names = [\"taxonomy\", \"score\"])\n# taxonomy_1.set_index(\"id\", inplace = True)\ntaxonomy_1 = taxonomy_1.rename(index = UID2seq_1)\n\n\n# 2\ninfile_fasta_2 = open(options.input2.split(\",\")[0], \"r\")\ninfile_table_2 = open(options.input2.split(\",\")[1], \"r\")\ninfile_taxonomy_2 = open(options.input2.split(\",\")[2], \"r\")\n\nUID2seq_2 = {}\nfor record in SeqIO.parse(infile_fasta_2, \"fasta\"):\n UID2seq_2[str(record.description)] = str(record.seq)\nseq2UID_2 = {v: k for k, v in UID2seq_2.items()}\n\ntable_2 = pd.read_csv(infile_table_2, sep = \"\\t\", index_col=0)\ntable_2 = table_2.rename(index = UID2seq_2)\n\ntaxonomy_2 = pd.read_csv(infile_taxonomy_2, sep = \"\\t\", index_col=0, names = [\"taxonomy\", \"score\"])\n# taxonomy_2.set_index(\"id\", inplace = True)\ntaxonomy_2 = taxonomy_2.rename(index = UID2seq_2)\n\n\n# Join two tables\ntable_joined = table_1.join(table_2, how = \"outer\")\n\n\n# Dictionary with sequence:UID (new)\nseqsBeingAdded = set(UID2seq_2.values()) - set(UID2seq_1.values()) # New sequences in table_2\nstartingUIDIndex = max([int(i.split(\"ASV_\")[1]) for i in seq2UID_1.values()]) + 1\nseq2newUID = {}\nfor seq in seqsBeingAdded:\n newUID = \"ASV_%010d\" % startingUIDIndex\n startingUIDIndex += 1\n seq2newUID[seq] = newUID\n\n# Final seq2UID\nseq2UID_updated = {**seq2UID_1, **seq2newUID}\n\n# Rename Table\ntable_joined = table_joined.rename(index = seq2UID_updated)\ntable_joined = table_joined.fillna(0)\ntable_joined.sort_index(inplace=True)\n\n\n# Deal with taxonomy\ntaxonomyBeingAdded = taxonomy_2[taxonomy_2.index.isin(seqsBeingAdded)]\ntaxonomy_joined = pd.concat([taxonomy_1, taxonomyBeingAdded])\ntaxonomy_joined = taxonomy_joined.rename(index = seq2UID_updated)\ntaxonomy_joined.sort_index(inplace=True)\n\n\n#############\n## Outputs ##\n#############\n\n# Output ASVs_counts_combined.txt\ntable_joined.to_csv(\"ASVs_counts_combined.txt\", index = True, sep = \"\\t\")\n\n# Output ASVs_counts_combined.txt\ntaxonomy_joined.to_csv(\"ASVs_taxonomy_combined.txt\", index = True, sep = \"\\t\", header = False)\n\n# Output ASV_combined.fasta\nsorted(seq2UID_updated.items(), key=lambda x: x[1])\nfor key, value in seq2UID_updated.items():\n outfile_fasta.write(\">\" + value + \"\\n\")\n outfile_fasta.write(key + \"\\n\")\n\nexit(0)\n\n","repo_name":"hsgweon/honeypi","sub_path":"bin/honeypi_joinTwoResults.py","file_name":"honeypi_joinTwoResults.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16590865925","text":"import re\n\ntext = input()\nsymbols = ['+', '-', '*', '/', '.', 1, 1, 3, 4, 5, 6, 7]\ndemon_regex = r'([a-zA-z0-9-*/.]+[^, ])'\ndamage_regex = r'([-+*\\/]?[\\d+?:\\.\\d+?]+)'\nmultiplying = []\ndeleting = []\ndemons = {}\ndemon_names = re.findall(demon_regex, text)\n\nfor dm in demon_names:\n damage = 0\n demon_health = 0\n demons[dm] = {}\n demons_damage = re.findall(damage_regex, dm)\n for ch in dm:\n if ch.isdigit():\n continue\n elif ch not in symbols:\n demon_health += ord(ch)\n elif ch == \"*\":\n multiplying.append(ch)\n elif ch == \"/\":\n deleting.append(ch)\n damage = sum([float(i) for i in demons_damage])\n if len(multiplying) > 0:\n for el in multiplying:\n damage = 2 * damage\n if len(deleting) > 0:\n for el in deleting:\n damage = damage / 2\n\n demons[dm][demon_health] = damage\n\nfor key, index in sorted(demons.items()):\n print(f\"{key} -\", end=\"\")\n for k, v in index.items():\n print(f\" {k} health, {v:.2f} damage\")","repo_name":"DKolev1978/SoftUni-Python","sub_path":"fundamentals/regular_expressions_more_exercises/nether_realms.py","file_name":"nether_realms.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73505000055","text":"\"\"\"Trie tree data structure.\"\"\"\nfrom stack import Stack\n\nclass Node(object):\n \"\"\"Node object for trietree data structure.\"\"\"\n\n def __init__(self, value, level, parent):\n \"\"\"Set up Node.\"\"\"\n self.parent = parent\n self.value = value\n self.next_list = []\n self.level = level\n self.end = False\n\n\nclass TrieTree(object):\n \"\"\"A trietree data structure.\"\"\"\n\n def __init__(self):\n \"\"\"Set up die tree.\"\"\"\n self.length = 0\n self.root = Node('.', 0, None)\n\n def insert(self, string):\n \"\"\"Insert a node into the tree.\"\"\"\n if not isinstance(string, str):\n raise TypeError('Must be a string, please try again.')\n current = self.root\n for i in range(len(string)):\n try:\n current = list(filter(lambda x: x.value == string[i], current.next_list))[0]\n except IndexError:\n current.next_list.append(Node(string[i], i + 1, current))\n current = current.next_list[-1]\n self.length += 1\n current.end = True\n\n def contains(self, string):\n \"\"\"Return true if string in tree.\"\"\"\n current = self.root\n for i in range(len(string)):\n try:\n current = list(filter(lambda x: x.value == string[i], current.next_list))[0]\n except IndexError:\n return False\n return current.end\n\n def size(self):\n \"\"\"Return total number of words in tree.\"\"\"\n return self.length\n\n def remove(self, string):\n \"\"\"Remove string from tree.\"\"\"\n current = self.root\n for i in range(len(string)):\n try:\n current = list(filter(lambda x: x.value == string[i], current.next_list))[0]\n except IndexError:\n raise IndexError(\"String not in tree.\")\n if current.end is False:\n raise IndexError(\"String not in tree.\")\n if len(current.next_list) > 0:\n current.end = False\n self.length -= 1\n else:\n while True:\n child = current\n current = current.parent\n if len(current.next_list) > 1 or current == self.root:\n current.next_list.remove(child)\n self.length -= 1\n break\n\n def traversal(self, start):\n \"\"\"Traverse through the tree and return a list of all strings with the given start.\"\"\"\n seen = []\n next_up = Stack()\n current = self.root\n for i in range(len(start)):\n try:\n current = list(filter(lambda x: x.value == start[i], current.next_list))[0]\n except IndexError:\n raise IndexError(\"String not in tree.\")\n try:\n while True:\n if current not in seen:\n seen.append(current)\n for i in current.next_list:\n next_up.push(i)\n if current.end:\n word = \"\"\n while True:\n if current != self.root:\n word += current.value\n current = current.parent\n else:\n yield word[::-1]\n break\n if len(next_up) == 0:\n break\n current = next_up.pop().value\n except KeyError:\n\n raise KeyError('Given value does not exist.')","repo_name":"endere/Data-structures-2nd-half","sub_path":"src/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"36717636918","text":"import numpy as np\nimport cv2\nimport time\nimport calendar\nimport modoscrape\nimport modoscrape.tools\nimport operator\n\n__all__ = [\"tools\", \"locators\", \"chatbot\"]\n\nclass Config:\n def __init__(self):\n self.ASPECT_MIN = 0.68\n self.ASPECT_MAX = 0.74\n self.AREA_MIN = 300\n self.CLIENT_WIDTH = 1920\n self.CLIENT_HEIGHT = 1080\n self.CLIENT_X = 0\n self.CLIENT_Y = 0\n self.MIN_CARD_WIDTH = int(self.CLIENT_WIDTH * 0.022)\n\n #print \"calculated values from width \", self.CLIENT_WIDTH, \": min_card_width \", self.MIN_CARD_WIDTH\n self.channel = '#zeroxtwoa'\n self.nickname = 'zeroxtwoa'\n self.server = 'irc.chat.twitch.tv'\n self.port = 6667\n self.vote_wait = 20\n\n self.progress_msg = [\n 'randomizing /dev/null...',\n 'mining bitcoins...',\n 'pile shuffling...',\n 'mining salt...',\n 'rendering card backs...',\n \"analyzing opponent's basics...\"\n ]\n\n\n\n\nclass SmartCursor:\n def __init__(self):\n self.c = Config()\n self.t = modoscrape.tools.Tools\n self.relx = 1500\n self.rely = 300\n\n def go(self, coord):\n self.relx = int(coord[0])\n self.rely = int(coord[1])\n\n def window_corners(self, bgr):\n img = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)\n\n topright = []\n bottomleft = []\n\n for i, t in enumerate(['close', 'settings']):\n templatefile = './img/template_' + t + '.png'\n template = cv2.imread(templatefile, cv2.IMREAD_GRAYSCALE)\n w, h = template.shape[::-1]\n res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n if (i == 0 and max_val > 0.999):\n topright = max_loc\n elif (i == 1 and max_val > 0.999):\n bottomleft = max_loc\n\n if topright == [] or bottomleft == []:\n return []\n\n locprops = {}\n locprops['bottomleft'] = bottomleft\n locprops['bottomleftx'] = bottomleft[0]\n locprops['bottomlefty'] = bottomleft[1]\n locprops['topright'] = topright\n locprops['toprightx'] = topright[0]\n locprops['toprighty'] = topright[1]\n locprops['width'] = topright[0] - bottomleft[0]\n locprops['height'] = bottomleft[1] - topright[1]\n locprops['centerx'] = bottomleft[0] + int(locprops['width']/2)\n locprops['centery'] = topright[1] + int(locprops['height'] / 2.0)\n\n\n return locprops\n\n def label_point(self, i, direction, bgr, pointx, pointy):\n if i == 4 or i == 9 or i == 12:\n if direction == 'R' :\n pointy -= 10\n pointx -= 18\n elif direction == 'L':\n pointy += 30\n pointx -= 18\n elif direction == 'D':\n pointy += 10\n pointx += 5\n elif direction == 'U':\n pointy += 10\n pointx -= 50\n\n self.t.pointerlabel(bgr, direction + str(i), pointx, pointy)\n\n def draw(self, bgr):\n # draw cursor and return coordinates\n locprops = self.window_corners(bgr)\n\n\n if locprops == []:\n return False\n\n capturex = self.relx + locprops['bottomleftx']\n capturey = self.rely + locprops['toprighty']\n cursor_markers = {'X': (capturex, capturey), 'R': [], 'L': [], 'D': [], 'U': []}\n default_diameter = 2\n\n self.draw_point(bgr, capturex, capturey, 4, (0, 255, 255))\n\n step = 8\n\n # add points to the right, geometrically increasing\n lastx = capturex\n for i in range(1, 1000):\n pointx = lastx + step * i\n if pointx > locprops['toprightx']:\n break\n else:\n lastx = pointx\n cursor_markers['R'].append((pointx, capturey))\n self.draw_point(bgr, pointx, capturey, default_diameter)\n self.label_point(i, 'R', bgr, pointx, capturey)\n\n # add points to the left, geometrically increasing\n lastx = capturex\n for i in range(1, 1000):\n pointx = lastx - step * i\n if pointx < locprops['bottomleftx']:\n break\n else:\n lastx = pointx\n cursor_markers['L'].append((pointx, capturey))\n self.draw_point(bgr, pointx, capturey, default_diameter)\n self.label_point(i, 'L', bgr, pointx, capturey)\n\n # add points downwards\n lasty = capturey\n for i in range(1, 1000):\n pointy = lasty + step * i\n if pointy > locprops['bottomlefty']:\n break\n else:\n lasty = pointy\n cursor_markers['D'].append((capturex, pointy))\n self.draw_point(bgr, capturex, pointy, default_diameter)\n self.label_point(i, 'D', bgr, capturex, pointy)\n\n # add points upwards\n lasty = capturey\n for i in range(1, 1000):\n pointy = lasty - step * i\n if pointy < locprops['toprighty']:\n break\n else:\n lasty = pointy\n cursor_markers['U'].append((capturex, pointy))\n self.draw_point(bgr, capturex, pointy, default_diameter)\n self.label_point(i, 'U', bgr, capturex, pointy)\n\n\n self.t.show('cursor drawn', bgr)\n return cursor_markers\n\n def draw_point(self, bgr, x, y, diameter, color=(255, 255, 255)):\n\n cv2.circle(bgr, (x, y), diameter + 2, color, -1)\n cv2.circle(bgr, (x, y), diameter + 1, (0, 0, 0), -1)\n cv2.circle(bgr, (x, y), diameter, color, -1)\n\n # dynamic version of the points in NESW directions above, TODO\n # def point_series(self, originpos, direction, locprops):\n #\n # step = 8\n # pointlist = []\n #\n # op = operator.sub\n # if direction == 'W' or direction == 'S':\n # op = operator.add\n #\n # calc = {\n # 'E': {'op': operator.add, 'limit:': locprops['toprightx'], 'comp': operator.gt, 'axiscoord': 'y'}\n # }\n #\n # lastpos = originpos\n # for i in range(1, 1000):\n # op = calc[direction]['op']\n # point = op(lastpos, step * i)\n #\n # if (direction == 'E' and point > locprops['toprightx']):\n # break\n # else:\n # lastpos = point\n # pointlist.append((point, axiscoord))\n\n\n","repo_name":"rc9000/modoscrape","sub_path":"modoscrape/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3225506860","text":"# 简单计算器程序\n\n# 这个函数用于两个数相加\ndef add(x, y):\n return x + y\n\n# 这个函数用于两个数相减\ndef subtract(x, y):\n return x - y\n\n# 这个函数用于两个数相乘\ndef multiply(x, y):\n return x * y\n\n# 这个函数用于两个数相除\ndef divide(x, y):\n if(y==0):\n raise Exception(\"除数不能为零\")\n return x / y\n\n\nprint(\"选择运算.\")\nprint(\"1.相加\")\nprint(\"2.相减\")\nprint(\"3.相乘\")\nprint(\"4.相除\")\n\nwhile True:\n # 从用户处获取输入\n choice = input(\"输入你的选择(1/2/3/4): \")\n\n # 检查选择是否为四个选项之一\n if choice in ('1', '2', '3', '4'):\n num1 = float(input(\"输入第一个数字: \"))\n num2 = float(input(\"输入第二个数字: \"))\n\n if choice == '1':\n print(num1, \"+\", num2, \"=\", add(num1, num2))\n\n elif choice == '2':\n print(num1, \"-\", num2, \"=\", subtract(num1, num2))\n\n elif choice == '3':\n print(num1, \"*\", num2, \"=\", multiply(num1, num2))\n\n elif choice == '4':\n print(num1, \"/\", num2, \"=\", divide(num1, num2))\n break\n else:\n print(\"无效输入\")","repo_name":"Hellohistory/Python-Study_CH","sub_path":"数学计算及相关/简单计算器程序.py","file_name":"简单计算器程序.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"1279703257","text":"import argparse\nimport os\nfrom matplotlib.pyplot import sca\nimport torch\nfrom torch._C import device\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nfrom datasets import find_dataset_def\nfrom model import *\nimport sys\nfrom datasets.data_io import read_pfm, save_pfm\nimport cv2\nfrom plyfile import PlyData, PlyElement\nfrom PIL import Image\nfrom datasets.dtu_yan_eval import DTU_MVSDatasetEval\nfrom datasets.tankstemple_yan_eval import TanksTemple_MVSDatasetEval\nimport utils \n\ncudnn.benchmark = True\n\ndef print_args(args):\n print(\"################################ args ################################\")\n for k, v in args.__dict__.items():\n print(\"{0: <10}\\t{1: <30}\\t{2: <20}\".format(k, str(v), str(type(v))))\n print(\"########################################################################\")\n\n# view fuse; pixel diff; relative depth diff;\ntankstemple_hyper = {\n 'Family': [5,2.0, 0.005],\n 'Francis': [8,2.0,0.0025],\n 'Horse': [4,1.0, 0.01],\n 'Lighthouse':[7,3.0, 0.005],\n 'M60': [5,2.0,0.0025],\n 'Panther': [6,4.0, 0.005],\n 'Playground':[7,3.0, 0.005],\n 'Train': [6,2.5, 0.005],\n 'Auditorium':[3,1.0, 0.01],\n 'Ballroom': [3,2.5, 0.01],\n 'Courtroom': [4,1.0, 0.01],\n 'Museum': [4,1.0, 0.01],\n 'Palace': [4,2.5, 0.01],\n 'Temple': [3,2.5, 0.01]\n}\n\ndtu_hyper = {\n 'scan1': [3, 0.5,0.01],\n 'scan4': [3, 0.5,0.01],\n 'scan9': [3,0.25,0.01],\n 'scan10': [3,0.25,0.01],\n 'scan11': [3, 0.5,0.01],\n 'scan12': [3,0.25,0.01],\n 'scan13': [3,0.75,0.01],\n 'scan15': [3,0.25,0.01],\n 'scan23': [3,0.25,0.01],\n 'scan24': [3, 0.5,0.01],\n 'scan29': [3, 0.5,0.01],\n 'scan32': [3,0.25,0.01],\n 'scan33': [3, 0.5,0.01],\n 'scan34': [3,0.25,0.01],\n 'scan48': [3, 0.5,0.01],\n 'scan49': [3,0.25,0.01],\n 'scan62': [3, 0.5,0.01],\n 'scan75': [3,0.25,0.01],\n 'scan77': [3,0.25,0.01],\n 'scan110': [3,0.25,0.01],\n 'scan114': [3, 0.5,0.01],\n 'scan118': [3,0.75,0.01]\n}\n\n\nparser = argparse.ArgumentParser(description='Predict depth, filter, and fuse. May be different from the original implementation')\nparser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')\nparser.add_argument('--dataset', default='dtu', help='select dataset')\nparser.add_argument('--outdir', default='./outputs', help='output dir')\nparser.add_argument('--testlist', help='testing scan list')\nparser.add_argument('--number_views_pred', type=int, default=3, help='number of views used to estimate depth map')\nparser.add_argument('--iter', nargs=\"+\", type=int, default=[10, 2], help='number of iteration')\nparser.add_argument('--abs_pixel_diff', type=float, default=1, help='pixel difference threshold')\nparser.add_argument('--relative_depth_diff', type=float, default=0.01, help='relative depth difference threshold')\nparser.add_argument('--number_views_fuse', type=int, default=2, help='min agree views number')\nparser.add_argument('--small', action='store_true', help='use small model')\nparser.add_argument('--dropout', type=float, default=0.0)\nparser.add_argument('--name', type=str, default='multi')\nparser.add_argument('--run_depth', dest='run_depth', action='store_true') \nparser.add_argument('--run_fusion', dest='run_fusion', action='store_true')\n\n\n# parse arguments and check\nargs = parser.parse_args()\nprint(\"argv:\", sys.argv[1:])\nprint_args(args)\n\n\n# run MVS model to save depth maps and confidence maps\ndef save_depth(eval_dataset):\n \n test_loader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=1,\n shuffle=False)\n\n # model\n model = DispMVS(args)\n model.cuda()\n\n # load checkpoint file specified by args.loadckpt\n print(\"loading model {}\".format(args.loadckpt))\n state_dict = torch.load(args.loadckpt)\n model.load_state_dict(state_dict)\n model.eval()\n\n with torch.no_grad():\n for i, (imgs,Ks,Rs,Ts,depth_min,depth_max,depth_filename) in enumerate(test_loader):\n\n N = imgs.shape[1]\n if N<=1:\n continue\n \n device = 'cuda'\n imgs = imgs.to(device)\n Ks = Ks.to(device)\n Rs = Rs.to(device)\n Ts = Ts.to(device)\n depth_min = 1.0/depth_min.float().to(device)\n depth_max = 1.0/depth_max.float().to(device)\n depth_filename = depth_filename[0]\n\n # print(imgs.shape)\n b,_,_,_,h,w = imgs.shape\n init_depth = torch.rand((b,1,h//16,w//16),device=device)*(depth_min-depth_max) + depth_max\n init_depth = 1.0/init_depth\n\n torch.cuda.reset_max_memory_allocated()\n begin_time = time.time()\n depth_fusion = model(imgs,Ks,Rs,Ts,args.iter,init_depth,depth_min,depth_max)\n end_time = time.time()\n print('time:',end_time-begin_time)\n print('Memory Allocation:',torch.cuda.memory_allocated()/1024/1024,torch.cuda.max_memory_allocated()/1024/1024)\n\n depth_fusion = depth_fusion[-1]\n depth_fusion = depth_fusion.cpu().numpy()[0,0]\n\n depth_filename = os.path.join(args.outdir,depth_filename)\n\n # save depth maps\n os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)\n save_pfm(depth_filename+'.pfm', depth_fusion)\n print('process:',depth_filename)\n \n\n\n# save a binary mask\ndef save_mask(filename, mask):\n assert mask.dtype == np.bool\n mask = mask.astype(np.uint8) * 255\n Image.fromarray(mask).save(filename)\n\n\n# read a pair file, [(ref_view1, [src_view1-1, ...]), (ref_view2, [src_view2-1, ...]), ...]\ndef read_pair_file(filename):\n data = []\n with open(filename) as f:\n num_viewpoint = int(f.readline())\n # 49 viewpoints\n for view_idx in range(num_viewpoint):\n ref_view = int(f.readline().rstrip())\n src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]\n if len(src_views) != 0:\n data.append((ref_view, src_views))\n return data\n\n\n# project the reference point cloud into the source view, then project back\ndef reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):\n width, height = depth_ref.shape[1], depth_ref.shape[0]\n ## step1. project reference pixels to the source view\n # reference view x, y\n x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))\n x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])\n # reference 3D space\n xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),\n np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))\n # source 3D space\n xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),\n np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]\n # source view x, y\n K_xyz_src = np.matmul(intrinsics_src, xyz_src)\n xy_src = K_xyz_src[:2] / K_xyz_src[2:3]\n\n ## step2. reproject the source view points with source view depth estimation\n # find the depth estimation of the source view\n x_src = xy_src[0].reshape([height, width]).astype(np.float32)\n y_src = xy_src[1].reshape([height, width]).astype(np.float32)\n sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)\n # mask = sampled_depth_src > 0\n\n # source 3D space\n # NOTE that we should use sampled source-view depth_here to project back\n xyz_src = np.matmul(np.linalg.inv(intrinsics_src),\n np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))\n # reference 3D space\n xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),\n np.vstack((xyz_src, np.ones_like(x_ref))))[:3]\n # source view x, y, depth\n depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)\n K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)\n xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3]\n x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)\n y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)\n\n return depth_reprojected, x_reprojected, y_reprojected\n\n\ndef check_geometric_consistency(\n depth_ref, intrinsics_ref, extrinsics_ref, \n depth_src, intrinsics_src, extrinsics_src):\n width, height = depth_ref.shape[1], depth_ref.shape[0]\n x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))\n depth_reprojected, x2d_reprojected, y2d_reprojected = reproject_with_depth(\n depth_ref, intrinsics_ref, extrinsics_ref,\n depth_src, intrinsics_src, extrinsics_src)\n # check |p_reproj-p_1| < 1\n dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)\n\n # check |d_reproj-d_1| / d_1 < 0.01\n depth_diff = np.abs(depth_reprojected - depth_ref)\n relative_depth_diff = depth_diff / depth_ref\n\n mask = np.logical_and(dist < args.abs_pixel_diff, relative_depth_diff < args.relative_depth_diff)\n depth_reprojected[~mask] = 0\n\n return mask, depth_reprojected\n\n\ndef filter_depth(eval_dataset, scan_folder, out_folder, plyfilename):\n # the pair file\n pair_file = os.path.join(scan_folder, \"pair.txt\")\n # for the final point cloud\n vertexs = []\n vertex_colors = []\n\n pair_data = read_pair_file(pair_file)\n\n # for each reference view and the corresponding source views\n for ref_view, src_views in pair_data:\n\n img_path = os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(ref_view))\n proj_path = os.path.join(scan_folder, 'cams_1/{:0>8}_cam.txt'.format(ref_view))\n depth_path = os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view))\n\n # skip empty depth\n if not os.path.exists(depth_path):\n continue\n\n # load the camera parameters\n ref_intrinsics, ref_extrinsics_r,ref_extrinsics_t,depth_min,depth_max = eval_dataset.read_cam_file(proj_path)\n ref_extrinsics = np.zeros((4,4))\n ref_extrinsics[0:3,0:3]=ref_extrinsics_r\n ref_extrinsics[0:3,3:4]=ref_extrinsics_t\n ref_extrinsics[3,3] = 1\n # load the reference image\n ref_img = eval_dataset.read_img(img_path)\n # load the estimated depth of the reference view\n ref_depth_est = read_pfm(depth_path)[0]\n\n all_srcview_depth_ests = []\n\n # compute the geometric mask\n geo_mask_sum = 0\n for src_view in src_views:\n\n img_path = os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(src_view))\n proj_path = os.path.join(scan_folder, 'cams_1/{:0>8}_cam.txt'.format(src_view))\n depth_path = os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view))\n\n # skip empty depth\n if not os.path.exists(depth_path):\n continue\n\n # camera parameters of the source view\n src_intrinsics, src_extrinsics_r,src_extrinsics_t,_,_ = eval_dataset.read_cam_file(\n proj_path)\n src_extrinsics = np.zeros((4,4))\n src_extrinsics[0:3,0:3]=src_extrinsics_r\n src_extrinsics[0:3,3:4]=src_extrinsics_t\n src_extrinsics[3,3] = 1\n # the estimated depth of the source view\n src_depth_est = read_pfm(depth_path)[0]\n geo_mask, depth_reprojected = check_geometric_consistency(ref_depth_est, ref_intrinsics, ref_extrinsics,\n src_depth_est, src_intrinsics, src_extrinsics)\n geo_mask_sum += geo_mask.astype(np.int32)\n all_srcview_depth_ests.append(depth_reprojected)\n\n\n depth_est_averaged = (sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1)\n\n # remove out of range\n depth_range_mask = np.logical_and(depth_est_averaged>depth_min, depth_est_averaged= args.number_views_fuse\n geo_mask = np.logical_and(geo_mask,depth_range_mask)\n\n os.makedirs(os.path.join(out_folder, \"mask\"), exist_ok=True)\n save_mask(os.path.join(out_folder, \"mask/{:0>8}_geo.png\".format(ref_view)), geo_mask)\n\n print(\"processing {}, ref-view{:0>2}, final-mask:{}\".format(scan_folder, ref_view,geo_mask.mean()))\n\n height, width = depth_est_averaged.shape[:2]\n x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))\n valid_points = geo_mask\n print(\"valid_points\", valid_points.mean())\n x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[valid_points]\n color = ref_img[:, :, :][valid_points] # hardcoded for DTU dataset\n color = color*0.5 + 0.5\n xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics),\n np.vstack((x, y, np.ones_like(x))) * depth)\n xyz_world = np.matmul(np.linalg.inv(ref_extrinsics),\n np.vstack((xyz_ref, np.ones_like(x))))[:3]\n vertexs.append(xyz_world.transpose((1, 0)))\n vertex_colors.append((color * 255).astype(np.uint8))\n\n vertexs = np.concatenate(vertexs, axis=0)\n vertex_colors = np.concatenate(vertex_colors, axis=0)\n vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])\n vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n\n vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)\n for prop in vertexs.dtype.names:\n vertex_all[prop] = vertexs[prop]\n for prop in vertex_colors.dtype.names:\n vertex_all[prop] = vertex_colors[prop]\n\n el = PlyElement.describe(vertex_all, 'vertex')\n PlyData([el]).write(plyfilename)\n print(\"saving the final model to\", plyfilename)\n\n\n# decide the dataset\nif args.dataset == 'dtu':\n # dataset, dataloader\n eval_dataset = DTU_MVSDatasetEval(\n args.outdir, \n args.testlist,\n 'test',\n args.number_views_pred)\nelif args.dataset == 'tankstemple':\n # dataset, dataloader\n eval_dataset = TanksTemple_MVSDatasetEval(\n args.outdir, \n args.testlist,\n 'test',\n args.number_views_pred)\nelif args.dataset == 'eth3d':\n # dataset, dataloader\n eval_dataset = Eth3D_MVSDatasetEval(\n args.outdir, \n args.testlist,\n 'test',\n args.number_views_pred)\nelse:\n print(\"Error Wrong Dataset\")\n\nprint('run_depth:',args.run_depth)\nprint('run_fusion:',args.run_fusion)\n\n# generate depth map\nif args.run_depth:\n save_depth(eval_dataset)\n\n# fusion point cloud\nif args.run_fusion:\n with open(args.testlist) as f:\n scans = f.readlines()\n scans = [line.rstrip() for line in scans]\n for scan in scans:\n\n if args.dataset == 'dtu':\n args.number_views_fuse = dtu_hyper[scan][0]\n args.abs_pixel_diff = dtu_hyper[scan][1]\n args.relative_depth_diff = dtu_hyper[scan][2]\n ply_out_path = os.path.join(args.outdir, args.name+'_{}_l3.ply'.format(scan))\n\n if args.dataset == 'tankstemple':\n args.number_views_fuse = tankstemple_hyper[scan][0]\n args.abs_pixel_diff = tankstemple_hyper[scan][1]\n args.relative_depth_diff = tankstemple_hyper[scan][2]\n ply_out_path = os.path.join(args.outdir,'{}.ply'.format(scan))\n \n print('scan info:', scan)\n print('scan info number_views_fuse:',args.number_views_fuse)\n print('scan info abs_pixel_diff:',args.abs_pixel_diff)\n print('scan info relative_depth_diff:',args.relative_depth_diff)\n\n # scan_id = int(scan[4:])\n scan_folder = os.path.join(args.outdir, scan)\n out_folder = os.path.join(args.outdir, scan)\n # step2. filter saved depth maps with photometric confidence maps and geometric constraints\n filter_depth(eval_dataset, scan_folder, out_folder, ply_out_path )","repo_name":"Yannnnnnnnnnnn/DispMVS_release","sub_path":"gen_depth_point.py","file_name":"gen_depth_point.py","file_ext":"py","file_size_in_byte":16378,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"73167385657","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n# the year\nyear = input(\"Enter the year: \")\n\n## url of the page\nurl = \"https://www.basketball-reference.com/leagues/NBA_{}_per_game.html\".format(year)\n\nhtml = urlopen(url)\n\nsoup = BeautifulSoup(html, features=\"html.parser\")\n\n# get column headers\nsoup.findAll('tr', limit=2)\n\n# create a list of the headers\nheaders = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]\n\nheaders = headers[1:]\n\nrows = soup.findAll('tr')[1:]\nplayer_stats = [[td.getText() for td in rows[i].findAll('td')] for i in range(len(rows))]\n\nstats = pd.DataFrame(player_stats, columns = headers)\n\nstats_clean = stats.dropna()\nprint(stats_clean)\n\nstats_clean.to_csv('nba_stats_{}.csv'.format(year))\n","repo_name":"jrschmiedl/SportsReferenceWebScrapper","sub_path":"WebScrappingNBA_Averages.py","file_name":"WebScrappingNBA_Averages.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33246439638","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import urlopen\nimport requests\nimport hashlib\nimport MySQLdb\nimport pymysql\nimport time\n\ndef connect_to_db():\n global connection\n global cursor\n\n connection = MySQLdb.connect(\n user=\"scrapingman\",\n passwd=\"myPassword-1\",\n host=\"localhost\",\n db=\"scrapingdata\",\n charset=\"utf8\")\n\n cursor=connection.cursor()\n\n\ndef unconnect_to_db():\n connection.commit()\n connection.close()\n\n\ndef sql_datetime_form(written_timestamp):\n if (written_timestamp[-1] == '전'):\n time_list = written_timestamp.split('시')\n time_before = int(time_list[0])\n now = time.localtime()\n if (time_before < now.tm_hour):\n written_time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour-time_before, now.tm_min, now.tm_sec)\n else:\n if (now.tm_mday != 1):\n written_time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday-1, now.tm_hour-time_before+24, now.tm_min, now.tm_sec)\n else:\n if (now.tm_mon != 1):\n written_time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n else:\n written_time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n else: \n written_time = written_timestamp.replace('. ', '-', 2).replace('.', \"\") + \":00\"\n\n return written_time\n\n\ndef search_article(drama_name, drama_id, pageNo):\n print(\"===== Search naver blog \", drama_name, \" pageNo=\", pageNo, \" =====\")\n links = []\n # hdr = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9','Accept-Encoding': 'gzip, deflate, br','Accept-Language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7','Cache-Control': 'max-age=0','Connection': 'keep-alive','Host': 'section.blog.naver.com','User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'}\n \n browser = webdriver.Chrome('./chromedriver')\n url = 'https://section.blog.naver.com/Search/Post.nhn?pageNo=' + str(pageNo) +'&rangeType=ALL&orderBy=sim&keyword='+drama_name\n browser.get(url)\n # response = requests.get(url, params=hdr)\n source = browser.page_source\n # time.sleep(5)\n # source = response.text\n data = bs(source, 'html.parser')\n body = data.find('body')\n a_tags = body.select('ui-view > div#wrap > main#container > div.layout_content > div#content > section.wrap_search_list > div.area_list_search > div.list_search_post > div.item.multi_pic > div.info_post > div.desc > a.desc_inner')\n for a_tag in a_tags:\n links.append(a_tag.attrs['href'])\n\n print(links)\n return links\n\n\ndef get_each_article(links, drama_id):\n try:\n browser = webdriver.Chrome('./chromedriver')\n for link in links:\n print(\"===== Start to crawl \", link, \" =====\")\n url_hash = hashlib.sha256(link.encode()).hexdigest()\n if (cursor.execute(\"SELECT title FROM naver_blog WHERE url_hash=%s\", [url_hash]) != 0):\n print(\"Already crawled blog\")\n continue\n\n browser.get(link)\n browser.switch_to.frame('mainFrame')\n # hdr = {'User-Agent': 'Mozilla/5.0'}\n # response = requests.get(link, params=hdr)\n source = browser.page_source\n # source = response.text\n data = bs(source, 'html.parser')\n body = data.find('body')\n main = body.select_one('div#head-skin > div#body > div#whole-border > div#whole-body > div#wrapper > div#twocols > div#content-area > div > div > div > div > table > tbody > tr > td.bcc > div > div.se-viewer')\n # footer = body.select_one('div#head-skin > div#body > div#whole-border > div#whole-body > div#wrapper > div#twocols > div#content-area > div > div > div > div > table > tbody > tr > td.bcc > div > div.wrap_postcomment')\n # footer = body.select_one('div#head-skin > div#body > div#whole-border > div#whole-body > div#wrapper > div#twocols > div#content-area > div > div > div > div > table > tbody > tr > td.bcc > div.post-btn > div.wrap_postcomment')\n if (main == None):\n main = body.select_one('div#head-skin > div#body > div#whole-border > div#whole-body > div#wrapper > div#twocols > div#content-area > div > div > div > div > table.post-body > tbody > tr > td.bcc > div > div.view > div.se-viewer')\n if (main == None):\n main = body.select_one('div#head-skin > div#body > div#whole-border > div#whole-body > div#wrapper > div#twocols > div#content-area > div > div > div > div > table.post-body > tbody > tr > td.bcc > div > div > div.view')\n\n if (main != None):\n title = main.select_one('div > div > div > div > div > p').get_text()\n written_timestamp = main.select_one('div > div > div > div > span.se_publishDate').get_text()\n body_text_list = main.select('div.se-main-container > div.se-component > div.se-component-content > div.se-section > div.se-module > p.se-text-paragraph')\n body_text = \"\"\n for text_piece in body_text_list:\n body_text = body_text + text_piece.get_text()\n # likes = footer.select('div.area_sympathy > a > div > span > em')[1].get_text()\n # comments = int(footer.select_one('div.area_comment > a > em').get_text().strip())\n \n # news_tuple = (drama_id, url_hash, title, written_time, body_text, likes, comments)\n \n\n # try:\n # # cursor.execute(\"INSERT INTO naver_blog(id, url_hash, title, modified_time, body_text, likes, comments) values (%s, %s, %s, %s, %s, %s, %s)\", news_tuple)\n \n # except Exception as e:\n # print(e)\n \n else:\n main = body.find_all('td', {'class':'bcc'})[1]\n title = main.select_one('table.post-top > tbody > tr > td > div.htitle > span').get_text().strip()\n written_timestamp = main.select_one('table.post-top > tbody > tr > td > p.date').get_text().strip()\n body_text = main.select_one('div#postViewArea > div.post-view').get_text().strip()\n \n\n written_time = sql_datetime_form(written_timestamp)\n news_tuple = (drama_id, url_hash, title, written_time, body_text)\n cursor.execute(\"INSERT INTO naver_blog(id, url_hash, title, modified_time, body_text) values (%s, %s, %s, %s, %s)\", news_tuple)\n\n except Exception as e:\n print(e)\n \n \n\n\nif __name__ == \"__main__\":\n connect_to_db()\n cursor.execute(\"SELECT id, drama_name from drama_info order by drama_name\")\n drama_id_name_list = cursor.fetchall()\n\n for drama in drama_id_name_list:\n for pageNo in range(1, 10):\n links = search_article(drama[1], drama[0], pageNo)\n get_each_article(links, drama[0])\n connection.commit()\n\n unconnect_to_db()\n","repo_name":"Park-Yegi/Drama-View-Rate-Prediction","sub_path":"WebCrawlingBot/crawl_naver_blog.py","file_name":"crawl_naver_blog.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18479125038","text":"assignments = []\n\ndef assign_value(values, box, value):\n \"\"\"\n Please use this function to update your values dictionary!\n Assigns a value to a given box. If it updates the board record it.\n \"\"\"\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values\n\n# PROBLEM 1: NAKED TWINS\n\n# Using the same units and peers dictionaries from the lectures.\n# Using the same reduce_puzzle() function from the lectures.\ndef naked_twins(values):\n \"\"\"Eliminate values using the naked twins strategy.\n Args:\n values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n Returns:\n the values dictionary with the naked twins eliminated from peers.\n \"\"\"\n # Find all instances of naked twins\n # Eliminate the naked twins as possibilities for their peers\n\n new_values = values.copy()\n naked_twins = []\n for box in new_values:\n if len(new_values[box]) == 2:\n for peer in peers[box]:\n if box < peer and new_values[peer] == new_values[box]:\n naked_twins.append([box, peer])\n for nt in naked_twins:\n # Find the units that contains these two naked twins\n units = [u for u in unitlist if nt[0] in u and nt[1] in u]\n for unit in units:\n for box in unit:\n if box != nt[0] and box != nt[1]:\n \n new_values[box] = new_values[box].replace(new_values[nt[0]][0], '')\n assign_value(new_values, box, new_values[box]) # viz\n \n new_values[box] = new_values[box].replace(new_values[nt[0]][1], '')\n assign_value(new_values, box, new_values[box]) # viz\n \n if len([box for box in new_values.keys() if len(new_values[box]) == 0]):\n return False\n return new_values\n\n#PROBLEM 2: DIAGONAL SUDOKU\n\ndigits = '123456789'\nrows = 'ABCDEFGHI'\ncols = digits\n\ndef cross(A, B):\n \"Cross product of elements in A and elements in B.\"\n return [a+b for a in A for b in B]\n\nsquares = cross(rows, cols)\nunitlist = ([cross(rows, c) for c in cols] +\n [cross(r, cols) for r in rows] +\n [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')])\nunits = dict((s, [u for u in unitlist if s in u])\n for s in squares)\npeers = dict((s, set(sum(units[s],[]))-set([s]))\n for s in squares)\n\ndiagonal1 = [a[0]+a[1] for a in zip(rows, cols)]\ndiagonal2 = [a[0]+a[1] for a in zip(rows, cols[::-1])]\ndiag_unitlist = unitlist + [diagonal1, diagonal2]\ndiag_units = dict((s, [u for u in diag_unitlist if s in u])\n for s in squares)\ndiag_peers = dict((s, set(sum(diag_units[s],[]))-set([s]))\n for s in squares)\n\ndef grid_values(grid, show_dots=False):\n \"\"\"\n Convert grid into a dict of {square: char} with '123456789' for empties.\n Args:\n grid(string) - A grid in string form.\n Returns:\n A grid in dictionary form\n Keys: The boxes, e.g., 'A1'\n Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.\n \"\"\"\n chars = []\n for c in grid:\n if c in digits:\n chars.append(c)\n if c == '.':\n if show_dots:\n chars.append('.')\n else:\n chars.append(digits)\n assert len(chars) == 81\n return dict(zip(squares, chars))\n\ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n Args:\n values(dict): The sudoku in dictionary form\n \"\"\"\n width = 1+max(len(values[s]) for s in squares)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n print()\n\ndef eliminate(values):\n '''\n Goes through all the boxes. If a box has only one available value,\n it will remove this value from all the peers of this box.\n '''\n new_values = values.copy()\n solved_values = [box for box in new_values.keys() if len(new_values[box]) == 1]\n for box in solved_values:\n digit = new_values[box]\n for peer in diag_peers[box]:\n new_values[peer] = new_values[peer].replace(digit,'')\n assign_value(new_values, peer, new_values[peer]) # viz\n return new_values\n\ndef only_choice(values):\n '''\n Goes through all the units u. If a unit has a certain value d that will only\n fit in one box of u, it will assign d to this box.\n '''\n new_values = values.copy()\n for unit in diag_unitlist:\n for digit in '123456789':\n dplaces = [box for box in unit if digit in new_values[box]]\n if len(dplaces) == 1:\n new_values[dplaces[0]] = digit\n assign_value(new_values, dplaces[0], new_values[dplaces[0]]) # viz\n return new_values\n\ndef reduce_puzzle(values):\n '''\n It will apply eliminate and only_choice repeatedly.\n If at any point, there is a box with zero available values, it will return False.\n Otherwise, the loop will stop whenever the sudoku stays the same during one iteration.\n '''\n new_values = values.copy()\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n stalled = False\n while not stalled:\n #display(values)\n # Check how many boxes have a determined value\n solved_values_before = len([box for box in new_values.keys() if len(new_values[box]) == 1])\n # Use the Only Choice Strategy\n new_values = eliminate(new_values)\n # Use the Single Possibility Strategy\n new_values = only_choice(new_values)\n # Check how many boxes have a determined value, to compare\n solved_values_after = len([box for box in new_values.keys() if len(new_values[box]) == 1])\n # If no new values were added, stop the loop.\n stalled = solved_values_before == solved_values_after\n # Sanity check, return False if there is a box with zero available values:\n if len([box for box in new_values.keys() if len(new_values[box]) == 0]):\n return False\n return new_values\n\ndef search(values):\n '''\n Using depth-first search and propagation, try all possible values.\n At any given point, it picks the box with fewer available values\n (if there is more than one, it will pick some box), and propagate over that box.\n '''\n new_values = reduce_puzzle(values.copy())\n if new_values is False:\n return False ## Failed earlier\n if all(len(new_values[s]) == 1 for s in squares):\n return new_values ## Solved!\n ## Chose the unfilled square s with the fewest possibilities\n n,s = min((len(new_values[s]), s) for s in squares if len(new_values[s]) > 1)\n for value in new_values[s]:\n new_sudoku = new_values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt\n\ndef solve(grid):\n \"\"\"\n Find the solution to a Sudoku grid.\n Args:\n grid(string): a string representing a sudoku grid.\n Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns:\n The dictionary representation of the final sudoku grid. False if no solution exists.\n \"\"\"\n grid = grid_values(grid)\n return search(grid)\n\nif __name__ == '__main__':\n diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n display(solve(diag_sudoku_grid))\n\n try:\n from visualize import visualize_assignments\n visualize_assignments(assignments)\n\n except SystemExit:\n pass\n except:\n print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","repo_name":"eliekawerk/udacity","sub_path":"Artificial Intelligence Nanodegree/Solving a Sudoku with AI/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"18331072983","text":"from class_drawing import drawing\nfrom base import Session, engine, Base\n\n#Base.metadata.create_all(engine)\n\nsession = Session()\n\n#dwg1 = drawing(\"C2010001\", \"A\")\n#session.add(dwg1)\n#session.commit()\nmatch_dwg = session.query(drawing).filter(drawing.dwg_num == 'C200345566').order_by(-drawing.created).first()\nif 'A1' == match_dwg.revision:\n print(f'

    {match_dwg.dwg_num}{match_dwg.revision} is Good for Construction

    ')\nelse:\n print(f'

    This drawing is Not Good for Construction

    ')\n\n#content = session.query(drawing).filter(drawing.dwg_num.like('%345%')).first()\n#print(content)\nprint(session.query(drawing).order_by(drawing.created.desc()).all())\nsession.close()\n","repo_name":"anurakp54/DrawingControl","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26176576184","text":"import math\n\ndef fuel(mass):\n ff = math.floor(float(mass)/3.0)-2\n return ff\n \n#tests \nassert(fuel(12))==2\nassert(fuel(14))==2\nassert(fuel(1969))==654\nassert(fuel(100756))==33583\n\n\ndef fuelforfuel(mass,s):\n if mass < 9:\n return s\n else: \n mass = fuel(mass)\n s = s+mass\n return fuelforfuel(mass,s)\n\n\n#tests\nassert(fuelforfuel(14,0)==2)\nassert(fuelforfuel(1969,0)==966)\nassert(fuelforfuel(100756,0)==50346)\n\n# 1\ninp = inp.split()\nsumm1 = 0\nfor i in range(len(inp)):\n summ1+=fuel(inp[i])\nprint(summ1)\n\n# 2\ninp = inp.split()\nsumm2 = 0\nfor i in range(len(inp)):\n summ2+=fuelforfuel(int(inp[i]),0)\nprint(summ2)\n","repo_name":"meggerrsss/Advent-of-Code","sub_path":"2019/2019d1.py","file_name":"2019d1.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26051327162","text":"from HTMLParser import HTMLParser\nfrom textwrap import TextWrapper\n\nclass HtmlReader(HTMLParser):\n LINE_SEPARATOR = '\\n'\n\n def __init__(self, url, config):\n HTMLParser.__init__(self)\n self.url = url\n self.config = config\n self.data = []\n self.data_adding = False\n self.tag_level = 0\n self.ignored_tag_level = 0\n self.current_href = ''\n\n def handle_starttag(self, tag, attributes):\n if tag in self.config.get('ParseOptions', 'search_tags').split():\n self.tag_level += 1\n\n if tag in self.config.get('ParseOptions', 'ignored_tags').split():\n self.ignored_tag_level += 1\n\n if self.tag_level and tag == 'a':\n for attr in attributes:\n if attr[0] == 'href':\n self.current_href = attr[1]\n break\n\n def handle_data(self, data):\n if not self.ignored_tag_level and self.tag_level and data.strip() != '':\n data = self.__sanitize(data)\n if self.current_href:\n data = data + ' [' + self.current_href + '] '\n\n self.data.append(data)\n self.data_adding = True\n\n def handle_endtag(self, tag):\n self.current_href = ''\n if tag in self.config.get('ParseOptions', 'search_tags').split() and self.tag_level:\n self.tag_level -= 1\n\n if self.data_adding:\n self.data.append(2 * self.LINE_SEPARATOR)\n self.data_adding = False\n\n if tag in self.config.get('ParseOptions', 'ignored_tags').split():\n self.ignored_tag_level -= 1\n\n def get_text(self):\n text = ''.join(self.data)\n text = text.replace(' ', ' ')\n text = text.splitlines()\n\n wrapper = TextWrapper()\n wrapper.replace_whitespace = False\n wrapper.width = int(self.config.get('FormatOptions', 'line_width'))\n wrapped_text = ''\n\n for line in text:\n wrapped_text += wrapper.fill(line) + self.LINE_SEPARATOR\n\n return wrapped_text\n\n def __sanitize(self, str):\n chars = ['laquo;', 'raquo;', 'nbsp;']\n for char in chars:\n str = str.replace(char, '')\n\n str = ' '.join(str.split())\n return str","repo_name":"ramil350/pm_reader","sub_path":"app/lib/html_reader.py","file_name":"html_reader.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41291691723","text":"# -*- coding:utf-8 -*-\n# Power by godaipl 2018-11-26 17:28:15\n\nimport requests\n\nimport ssl\n\nimport time\n\nimport json\n\nfrom test.ds_rhzx.MySqlHelper import insert_batch\n\nurl = 'https://db.99gfd.com/query/getresult/'\n\n\ndef test_get_latest1000_userids():\n \"\"\"\n 测试http get请求\n \"\"\"\n headers = {}\n headers['Cookie'] = 'sessionid=awtop4tsbz6hzjctssbc3f81jrd2u2em'\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' # 测试用\n headers['Host'] = 'db.99gfd.com' # 测试用\n headers['Referer'] = 'https://db.99gfd.com/query/index/' # 测试用\n headers['X-Requested-With'] = 'XMLHttpRequest' # 测试用\n\n params = {}\n params[\n 'sql'] = \"select t_loan_order.UserId from t_loan_order where exists(select * from t_repayment_schedule where t_loan_order.Id = t_repayment_schedule.LoanOrderId) order by t_loan_order.CreateDate desc limit 1000;\"\n params['db'] = \"172.16.100.204\"\n params['db_name'] = \"loandb\"\n\n ssl._create_default_https_context = ssl._create_unverified_context\n # params就是普通的url后加参数的方式\n r = requests.get(\n url=url, params=params, headers=headers, verify=ssl.CERT_NONE)\n print('r\\'s json is ', r.json)\n print('r\\'s json is ', r)\n userIdStr = ''\n for userIdTmp in r.json().get('results'):\n userIdStr += userIdTmp.get('UserId')\n userIdStr += ','\n\n print(\"userIdStr is \", userIdStr)\n\n\ndef test_get_non_userids():\n \"\"\"\n 测试http get请求\n \"\"\"\n headers = {}\n headers['Cookie'] = 'sessionid=losb3rs5b4ucvlv4f0l0w8mj07uap2tt'\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' # 测试用\n headers['Host'] = 'db.99gfd.com' # 测试用\n headers['Referer'] = 'https://db.99gfd.com/query/index/' # 测试用\n headers['X-Requested-With'] = 'XMLHttpRequest' # 测试用\n\n params = {}\n params[\n 'sql'] = \"select distinct tu.user_id from loandb.tmp_userid tu where tu.user_id not in (select distinct UserId from loandb.t_fdd_contract tfc where tfc.ContractType = 'CREDIT_QUERY_AGE') and tu.user_id not in (select distinct UserId from loandb.t_loan_order tlo where tlo.GlobalPlatformId = 17 and Status = 4) limit 100000;\"\n params['db'] = \"172.16.100.204\"\n params['db_name'] = \"loandb\"\n\n ssl._create_default_https_context = ssl._create_unverified_context\n # params就是普通的url后加参数的方式\n r = requests.get(\n url=url, params=params, headers=headers, verify=ssl.CERT_NONE)\n print('r\\'s json is ', r.json)\n print('r\\'s json is ', r)\n userIdStr = ''\n for userIdTmp in r.json().get('results'):\n userIdStr += userIdTmp.get('user_id')\n userIdStr += ','\n\n print(\"userIdStr is \", userIdStr)\n\n\ndef test_userIds_get():\n \"\"\"\n 测试http get请求\n \"\"\"\n headers = {}\n headers['Cookie'] = 'sessionid=losb3rs5b4ucvlv4f0l0w8mj07uap2tt'\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36' # 测试用\n headers['Host'] = 'db.99gfd.com' # 测试用\n headers['Referer'] = 'https://db.99gfd.com/query/index/' # 测试用\n headers['X-Requested-With'] = 'XMLHttpRequest' # 测试用\n\n params = {}\n params[\n 'sql'] = \"select user_id, create_time from credit_report_search_stats where task_id is not null and create_time < '2019-06-01' limit 10000000;\"\n params['db'] = \"172.16.100.203\"\n params['db_name'] = \"thirdparty2\"\n\n ssl._create_default_https_context = ssl._create_unverified_context\n # params就是普通的url后加参数的方式\n r = requests.get(\n url=url, params=params, headers=headers, verify=ssl.CERT_NONE)\n print('r\\'s json is ', r.json)\n print('r\\'s json is ', r)\n userIdStr = ''\n array = []\n columns = ['user_id', 'create_time']\n for userIdJson in r.json().get('results'):\n array_per = []\n print(userIdJson)\n array_per.append(userIdJson.get('user_id'))\n dateStr = userIdJson.get('create_time')\n dateRight = dateStr.replace(\"\\xa0\", \" \")\n array_per.append(dateRight)\n array.append(array_per)\n\n multi_mig_oracle_datas(array, 'tmp_userid', columns)\n\n\ndef multi_mig_oracle_datas(data_array, table_name, table_columns):\n \"\"\"\n 具体同步oralce数据库中的数据至Mysql中\n :param data_array: 从oracle数据库中查询出来的数据\n :param table_name: 最终需要生成的目标mysql数据库表\n :return:\n \"\"\"\n question_marks = \",\".join([\"%s\" for i in range(len(table_columns))])\n table_columns_marks = \",\".join(table_columns)\n\n sql_insert = \"INSERT IGNORE INTO %s (%s) VALUES (%s)\" % \\\n (table_name, table_columns_marks, question_marks)\n\n t1 = time.time()\n insert_batch(sql_insert, data_array)\n t2 = time.time()\n print(\"insert_batch 单个批量插入耗时 \", t2 - t1, \" s\")\n","repo_name":"zlbl/AutoTestPytestN","sub_path":"test/ds_rhzx/test_thirdparty2_db.py","file_name":"test_thirdparty2_db.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71325629522","text":"import unittest\n\nfrom dtk.utils.core.DTKConfigBuilder import DTKConfigBuilder\nfrom dtk.utils.reports import *\n\nclass TestSummaryReport(unittest.TestCase):\n\n def setUp(self):\n self.cb = DTKConfigBuilder.from_defaults('MALARIA_SIM')\n\n def test_summary_report(self):\n add_summary_report(self.cb)\n reportsJSON = format(self.cb.custom_reports)\n self.assertEqual(len(reportsJSON['Custom_Reports']['MalariaSummaryReport']['Reports']), 1)\n self.assertTrue('libmalariasummary_report_plugin.dll' in list(self.cb.dlls)[0])\n\n def test_immunity_report(self):\n add_immunity_report(self.cb)\n reportsJSON = format(self.cb.custom_reports)\n self.assertEqual(len(reportsJSON['Custom_Reports']['MalariaImmunityReport']['Reports']), 1)\n self.assertTrue('libmalariaimmunity_report_plugin.dll' in list(self.cb.dlls)[0])\n\n def test_survey_reports(self):\n add_survey_report(self.cb, survey_days=[365, 730])\n reportsJSON = format(self.cb.custom_reports)\n self.assertEqual(len(reportsJSON['Custom_Reports']['MalariaSurveyJSONAnalyzer']['Reports']), 2)\n self.assertTrue('libmalariasurveyJSON_analyzer_plugin.dll' in list(self.cb.dlls)[0])\n\n def test_multiple_reports(self):\n add_summary_report(self.cb)\n add_immunity_report(self.cb)\n reportsJSON = format(self.cb.custom_reports)\n self.assertEqual(len(reportsJSON['Custom_Reports']['MalariaSummaryReport']['Reports']), 1)\n self.assertEqual(len(reportsJSON['Custom_Reports']['MalariaImmunityReport']['Reports']), 1)\n self.assertTrue(any(['libmalariasummary_report_plugin.dll' in d for d in self.cb.dlls]))\n self.assertTrue(any(['libmalariaimmunity_report_plugin.dll' in d for d in self.cb.dlls]))\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"m-v-nikolov/dtk-tools-py3","sub_path":"test/test_reports.py","file_name":"test_reports.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11228523246","text":"import StringIO\nimport subprocess\n\n# Base field Z_p\np = 2**255 - 19\n\ndef modp_inv(x):\n return pow(x, p-2, p)\n\n# Square root of -1\nmodp_sqrt_m1 = pow(2, (p-1) // 4, p)\n\n# Compute corresponding x-coordinate, with low bit corresponding to\n# sign, or return None on failure\ndef recover_x(y, sign):\n if y >= p:\n return None\n x2 = (y*y-1) * modp_inv(d*y*y+1)\n if x2 == 0:\n if sign:\n return None\n else:\n return 0\n\n # Compute square root of x2\n x = pow(x2, (p+3) // 8, p)\n if (x*x - x2) % p != 0:\n x = x * modp_sqrt_m1 % p\n if (x*x - x2) % p != 0:\n return None\n\n if (x & 1) != sign:\n x = p - x\n return x\n\n# Curve constant\nd = -121665 * modp_inv(121666) % p\n\n# Base point\ng_y = 4 * modp_inv(5) % p\ng_x = recover_x(g_y, 0)\n\n# Points are represented as affine tuples (x, y).\n\ndef point_add(P, Q):\n x1, y1 = P\n x2, y2 = Q\n x3 = ((x1*y2 + y1*x2) * modp_inv(1 + d*x1*x2*y1*y2)) % p\n y3 = ((y1*y2 + x1*x2) * modp_inv(1 - d*x1*x2*y1*y2)) % p\n return (x3, y3)\n\n# Computes Q = s * P\ndef point_mul(s, P):\n Q = (0, 1) # Neutral element\n while s > 0:\n if s & 1:\n Q = point_add(Q, P)\n P = point_add(P, P)\n s >>= 1\n return Q\n\ndef to_bytes(x):\n ret = bytearray(32)\n for i in range(len(ret)):\n ret[i] = x % 256\n x >>= 8\n assert x == 0\n return ret\n\ndef to_ge_precomp(P):\n # typedef struct {\n # fe_loose yplusx;\n # fe_loose yminusx;\n # fe_loose xy2d;\n # } ge_precomp;\n x, y = P\n return ((y + x) % p, (y - x) % p, (x * y * 2 * d) % p)\n\ndef to_base_25_5(x):\n limbs = (26, 25, 26, 25, 26, 25, 26, 25, 26, 25)\n ret = []\n for l in limbs:\n ret.append(x & ((1<>= l\n assert x == 0\n return ret\n\ndef to_base_51(x):\n ret = []\n for _ in range(5):\n ret.append(x & ((1<<51) - 1))\n x >>= 51\n assert x == 0\n return ret\n\ndef to_literal(x):\n ret = \"{{\\n#if defined(OPENSSL_64_BIT)\\n\"\n ret += \", \".join(map(str, to_base_51(x)))\n ret += \"\\n#else\\n\"\n ret += \", \".join(map(str, to_base_25_5(x)))\n ret += \"\\n#endif\\n}}\"\n return ret\n\ndef main():\n d2 = (2 * d) % p\n\n small_precomp = bytearray()\n for i in range(1, 16):\n s = (i&1) | ((i&2) << (64-1)) | ((i&4) << (128-2)) | ((i&8) << (192-3))\n P = point_mul(s, (g_x, g_y))\n small_precomp += to_bytes(P[0])\n small_precomp += to_bytes(P[1])\n\n large_precomp = []\n for i in range(32):\n large_precomp.append([])\n for j in range(8):\n P = point_mul((j + 1) << (i * 8), (g_x, g_y))\n large_precomp[-1].append(to_ge_precomp(P))\n\n bi_precomp = []\n for i in range(8):\n P = point_mul(2*i + 1, (g_x, g_y))\n bi_precomp.append(to_ge_precomp(P))\n\n\n buf = StringIO.StringIO()\n buf.write(\"\"\"/* Copyright (c) 2020, Google Inc.\n *\n * Permission to use, copy, modify, and/or distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION\n * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN\n * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */\n\n// This file is generated from\n// ./make_curve25519_tables.py > curve25519_tables.h\n\n\nstatic const fe d = \"\"\")\n buf.write(to_literal(d))\n buf.write(\"\"\";\n\nstatic const fe sqrtm1 = \"\"\")\n buf.write(to_literal(modp_sqrt_m1))\n buf.write(\"\"\";\n\nstatic const fe d2 = \"\"\")\n buf.write(to_literal(d2))\n buf.write(\"\"\";\n\n#if defined(OPENSSL_SMALL)\n\n// This block of code replaces the standard base-point table with a much smaller\n// one. The standard table is 30,720 bytes while this one is just 960.\n//\n// This table contains 15 pairs of group elements, (x, y), where each field\n// element is serialised with |fe_tobytes|. If |i| is the index of the group\n// element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀\n// is the most significant bit). The value of the group element is then:\n// (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator.\nstatic const uint8_t k25519SmallPrecomp[15 * 2 * 32] = {\"\"\")\n for i, b in enumerate(small_precomp):\n buf.write(\"0x%02x, \" % b)\n buf.write(\"\"\"\n};\n\n#else\n\n// k25519Precomp[i][j] = (j+1)*256^i*B\nstatic const ge_precomp k25519Precomp[32][8] = {\n\"\"\")\n for child in large_precomp:\n buf.write(\"{\\n\")\n for val in child:\n buf.write(\"{\\n\")\n for term in val:\n buf.write(to_literal(term) + \",\\n\")\n buf.write(\"},\\n\")\n buf.write(\"},\\n\")\n buf.write(\"\"\"};\n\n#endif // OPENSSL_SMALL\n\n// Bi[i] = (2*i+1)*B\nstatic const ge_precomp Bi[8] = {\n\"\"\")\n for val in bi_precomp:\n buf.write(\"{\\n\")\n for term in val:\n buf.write(to_literal(term) + \",\\n\")\n buf.write(\"},\\n\")\n buf.write(\"\"\"};\n\"\"\")\n\n proc = subprocess.Popen([\"clang-format\"], stdin=subprocess.PIPE)\n proc.communicate(buf.getvalue())\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"WebKit/WebKit","sub_path":"Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/curve25519/make_curve25519_tables.py","file_name":"make_curve25519_tables.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"40118023111","text":"from typing import List\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n for i in nums:\n for j in nums:\n if i + j == target:\n print([i, j])\n return [i, j]\n\ns = Solution()\n\nprint(s.twoSum([2,7,11,15], 9))\n","repo_name":"Lobbyra/leetcode-training","sub_path":"twoSum1/twoSum1.py","file_name":"twoSum1.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"75075710160","text":"from isaacgym import gymapi\nfrom isaacgym import gymutil\nfrom isaacgym import gymtorch\nfrom isaacgym.torch_utils import *\nimport matplotlib.pyplot as plt\n\nimport math\nimport numpy as np\nimport torch\nimport random\nimport time\n\n\ndef quat_axis(q, axis=0):\n basis_vec = torch.zeros(q.shape[0], 3, device=q.device)\n basis_vec[:, axis] = 1\n return quat_rotate(q, basis_vec)\n\n\ndef orientation_error(desired, current):\n cc = quat_conjugate(current)\n q_r = quat_mul(desired, cc)\n return q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1)\n\n\ndef cube_grasping_yaw(q, corners):\n \"\"\" returns horizontal rotation required to grasp cube \"\"\"\n rc = quat_rotate(q, corners)\n yaw = (torch.atan2(rc[:, 1], rc[:, 0]) - 0.25 * math.pi) % (2 * math.pi)\n theta = 0.5 * yaw\n w = theta.cos()\n x = torch.zeros_like(w)\n y = torch.zeros_like(w)\n z = theta.sin()\n yaw_quats = torch.stack([x, y, z, w], dim=-1)\n return yaw_quats\n\n\ndef compute_franka_reward(\n actions, device,\n hammer_pose, object_pose, object_linvel,\n franka_lfinger_pos, franka_lfinger_rot, franka_rfinger_pos, franka_rfinger_rot, down_dir,\n num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale,\n finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length\n):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float) -> Tuple[Tensor, Tensor]\n # how far the hand should be from hammer for grasping\n grasp_offset = 0.06\n gripper_close_offset = 0.025\n\n # set the reward weight and some configs here\n gripper_above_hammer_weight = 5.0\n gripper_to_hammer_dist_weight = 30.0\n two_finger_dist_reward_weight = 35.0\n gripper_downward_weight = 5.0\n gripper_hammer_y_weight = 3.0\n gripper_both_sides_weight = 5.0\n hammer_lift_weight = 10000.0\n hammer_init_height = 0.42\n hammer_max_height = 0.8\n\n # calculate grasp reward\n\n # calculate the gripper to hammer direction\n franka_gripper_pos = (franka_lfinger_pos + franka_rfinger_pos) / 2.0\n hammer_pos = hammer_pose[:, :3]\n\n to_hammer = (hammer_pos - franka_gripper_pos).to(device)\n gripper_hammer_dist = torch.norm(to_hammer, dim=-1).unsqueeze(-1)\n gripper_hammer_dir = to_hammer / gripper_hammer_dist\n\n gripper_hammer_dot = gripper_hammer_dir @ down_dir.view(3, 1)\n above_hammer_reward = (gripper_above_hammer_weight * (gripper_hammer_dot ** 2)*torch.sign(gripper_hammer_dot)).squeeze()\n\n\n # calculate the gripper to hammer distance\n d = torch.norm(franka_gripper_pos - hammer_pos, p=2, dim=-1).to(device)\n dist_reward = torch.where(d > grasp_offset, 1.0 - torch.tanh(d), to_torch([1.0]).repeat(num_envs).to(device))\n gripper_to_hammer_reward = dist_reward * gripper_to_hammer_dist_weight\n\n rewards = dist_reward * gripper_to_hammer_dist_weight\n rewards += above_hammer_reward\n\n # calculate the gripper towards direction should be facing downward\n gripper_rot = (franka_lfinger_rot + franka_rfinger_rot).to(device) / 2.0\n z_dir = to_torch([0, 0, 1]).repeat(num_envs, 1).to(device)\n gripper_z_dir = quat_apply(gripper_rot, z_dir)\n gripper_z_dot = gripper_z_dir @ down_dir.view(3, 1)\n gripper_downward_reward = (torch.sign(gripper_z_dot)*(gripper_z_dot**2) * gripper_downward_weight).squeeze()\n rewards += gripper_downward_reward\n\n # calculate the reward if the gripper_y and hammer_y alignment\n hammer_rot = hammer_pose[:, 3:7].to(device)\n y_dir = to_torch([0, 1, 0]).repeat(num_envs, 1).to(device)\n hammer_y_dir = quat_apply(hammer_rot, y_dir)\n gripper_y_dir = quat_apply(gripper_rot, y_dir)\n gripper_hammer_y_dot = torch.bmm(hammer_y_dir.view(num_envs, 1, 3), gripper_y_dir.view(num_envs, 3, 1))\n gripper_hammer_y_reward = ((gripper_hammer_y_dot ** 2) * gripper_hammer_y_weight).squeeze()\n rewards += gripper_hammer_y_reward\n\n # if gripper is close to hammer, add bonus for gripper on both sides of hammer\n hammer_to_lfinger = franka_lfinger_pos - hammer_pos\n hammer_to_lfinger_dist = torch.norm(hammer_to_lfinger, dim=-1).unsqueeze(-1)\n hammer_to_lfinger_dir = hammer_to_lfinger / hammer_to_lfinger_dist\n\n hammer_to_rfinger = franka_rfinger_pos - hammer_pos\n hammer_to_rfinger_dist = torch.norm(hammer_to_rfinger, dim=-1).unsqueeze(-1)\n hammer_to_rfinger_dir = hammer_to_rfinger / hammer_to_rfinger_dist\n\n hammer_both_sides_dot = torch.bmm(hammer_to_lfinger_dir.view(num_envs, 1, 3), hammer_to_rfinger_dir.view(num_envs, 3, 1)).squeeze()\n both_sides_reward = -torch.sign(hammer_both_sides_dot) * hammer_both_sides_dot ** 2\n\n rewards += both_sides_reward * gripper_both_sides_weight\n\n\n # add reward of distance of two fingers to hammer to encourage gripper close\n lfinger_dist = torch.norm(franka_lfinger_pos - hammer_pos, dim=-1)\n rfinger_dist = torch.norm(franka_rfinger_pos - hammer_pos, dim=-1)\n\n two_finger_dist = 2 - torch.tanh(lfinger_dist) - torch.tanh(rfinger_dist)\n\n # hand_hammer_close = (gripper_hammer_dist < grasp_offset).squeeze()\n two_finger_dist_reward = two_finger_dist_reward_weight * (two_finger_dist).squeeze()\n rewards += two_finger_dist_reward\n\n # determine if we're holding the hammer (grippers are closed and box is near)\n gripper_sep = torch.norm(franka_lfinger_pos - franka_rfinger_pos, dim =-1).unsqueeze(-1).to(device)\n gripped = (gripper_sep < gripper_close_offset) & (gripper_hammer_dist < grasp_offset)\n\n # add penalty for dropping off the table\n hammer_drop_table = (hammer_pos[:, 2] < 0.4).float().squeeze().to(device)\n hammer_drop_penalty = -1000 * hammer_drop_table\n rewards += hammer_drop_penalty\n\n # add reward for lifting the hammer\n hammer_lift_height = torch.clamp(hammer_pos[:, 2] - hammer_init_height, 0, hammer_max_height).to(device)\n hammer_lift_reward = gripped.float().squeeze() * hammer_lift_height * hammer_lift_weight\n rewards += hammer_lift_reward\n\n # regularization on the actions (summed for each environment)\n action_penalty = torch.sum(actions.squeeze() ** 2, dim=-1).to(device)\n rewards -= action_penalty_scale * action_penalty\n\n return rewards, above_hammer_reward, gripper_to_hammer_reward, gripper_downward_reward, gripper_hammer_y_reward, hammer_lift_reward, both_sides_reward, two_finger_dist_reward\n\n\n# set random seed\nnp.random.seed(42)\n\ntorch.set_printoptions(precision=4, sci_mode=False)\n\n# acquire gym interface\ngym = gymapi.acquire_gym()\n\n# parse arguments\nargs = gymutil.parse_arguments(description=\"Franka Jacobian Inverse Kinematics Example\")\n\n# configure sim\nsim_params = gymapi.SimParams()\nsim_params.up_axis = gymapi.UP_AXIS_Z\nsim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.8)\nsim_params.dt = 1.0 / 60.0\nsim_params.substeps = 2\nsim_params.use_gpu_pipeline = args.physx_gpu\nif args.physics_engine == gymapi.SIM_PHYSX:\n sim_params.physx.solver_type = 1\n sim_params.physx.num_position_iterations = 8\n sim_params.physx.num_velocity_iterations = 1\n sim_params.physx.rest_offset = 0.0\n sim_params.physx.contact_offset = 0.001\n sim_params.physx.friction_offset_threshold = 0.001\n sim_params.physx.friction_correlation_distance = 0.0005\n sim_params.physx.num_threads = args.num_threads\n sim_params.physx.use_gpu = args.use_gpu\nelse:\n raise Exception(\"This example can only be used with PhysX\")\n\n# set torch device\n# device = 'cuda'\ndevice = 'cuda' if sim_params.use_gpu_pipeline else 'cpu'\n\n# create sim\nsim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)\nif sim is None:\n raise Exception(\"Failed to create sim\")\n\n# create viewer\nviewer = gym.create_viewer(sim, gymapi.CameraProperties())\nif viewer is None:\n raise Exception(\"Failed to create viewer\")\n\nasset_root = \"../../assets\"\n\n# create table asset\ntable_dims = gymapi.Vec3(0.6, 1.0, 0.4)\nasset_options = gymapi.AssetOptions()\nasset_options.fix_base_link = True\ntable_asset = gym.create_box(sim, table_dims.x, table_dims.y, table_dims.z, asset_options)\n\n\n# create box asset\nbox_size = 0.03\nbox_dims = gymapi.Vec3(0.04, 0.04, 0.04)\nasset_options = gymapi.AssetOptions()\nbox_asset = gym.create_box(sim, box_dims.x, box_dims.y, box_dims.z, asset_options)\n\n\n# create hammer asset\nbox_asset_file = \"urdf/hammer_convex.urdf\"\nasset_options = gymapi.AssetOptions()\nbox_size = 0.035\nbox_asset = gym.load_asset(sim, asset_root, box_asset_file, asset_options)\n\n# load franka asset\nfranka_asset_file = \"urdf/franka_description/robots/franka_panda.urdf\"\nasset_options = gymapi.AssetOptions()\nasset_options.armature = 0.01\nasset_options.fix_base_link = True\nasset_options.disable_gravity = True\nasset_options.flip_visual_attachments = True\nfranka_asset = gym.load_asset(sim, asset_root, franka_asset_file, asset_options)\n\n# configure franka dofs\nfranka_dof_props = gym.get_asset_dof_properties(franka_asset)\nfranka_lower_limits = franka_dof_props[\"lower\"]\nfranka_upper_limits = franka_dof_props[\"upper\"]\nfranka_ranges = franka_upper_limits - franka_lower_limits\nfranka_mids = 0.3 * (franka_upper_limits + franka_lower_limits)\n\n# use position drive for all dofs\nfranka_dof_props[\"driveMode\"].fill(gymapi.DOF_MODE_POS)\nfranka_dof_props[\"stiffness\"][:7].fill(400.0)\nfranka_dof_props[\"damping\"][:7].fill(40.0)\n# grippers\nfranka_dof_props[\"stiffness\"][7:].fill(800.0)\nfranka_dof_props[\"damping\"][7:].fill(40.0)\n\n# default dof states and position targets\nfranka_num_dofs = gym.get_asset_dof_count(franka_asset)\ndefault_dof_pos = np.zeros(franka_num_dofs, dtype=np.float32)\ndefault_dof_pos[:7] = franka_mids[:7]\n# grippers open\ndefault_dof_pos[7:] = franka_upper_limits[7:]\n\ndefault_dof_state = np.zeros(franka_num_dofs, gymapi.DofState.dtype)\ndefault_dof_state[\"pos\"] = default_dof_pos\n\n# get link index of panda hand, which we will use as end effector\nfranka_link_dict = gym.get_asset_rigid_body_dict(franka_asset)\nfranka_hand_index = franka_link_dict[\"panda_hand\"]\n\ntable_dof = gym.get_asset_rigid_body_count(table_asset)\nbox_dof = gym.get_asset_rigid_body_count(box_asset)\n\n\n# configure env grid\nnum_envs = 1\nnum_per_row = int(math.sqrt(num_envs))\nspacing = 1.0\nenv_lower = gymapi.Vec3(-spacing, 0.0, -spacing)\nenv_upper = gymapi.Vec3(spacing, spacing, spacing)\nprint(\"Creating %d environments\" % num_envs)\n\nfranka_pose = gymapi.Transform()\nfranka_pose.p = gymapi.Vec3(0, 0, 0)\n\ntable_pose = gymapi.Transform()\ntable_pose.p = gymapi.Vec3(0.5, 0.0, 0.5 * table_dims.z)\n\nbox_pose = gymapi.Transform()\n\nenvs = []\nbox_idxs = []\nhand_idxs = []\ninit_pos_list = []\ninit_rot_list = []\nhammer_actor_idxs = []\n\n# add ground plane\nplane_params = gymapi.PlaneParams()\nplane_params.normal = gymapi.Vec3(0, 0, 1)\ngym.add_ground(sim, plane_params)\n\nfor i in range(num_envs):\n # create env\n env = gym.create_env(sim, env_lower, env_upper, num_per_row)\n envs.append(env)\n\n # add table\n table_handle = gym.create_actor(env, table_asset, table_pose, \"table\", i, 0)\n\n # add box\n box_pose.p.x = table_pose.p.x + np.random.uniform(-0.1, 0.1)\n box_pose.p.y = table_pose.p.y + np.random.uniform(-0.2, 0.2)\n box_pose.p.z = table_dims.z + 0.5 * box_size\n box_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), np.random.uniform(-math.pi, math.pi))\n box_handle = gym.create_actor(env, box_asset, box_pose, \"box\", i, 0)\n color = gymapi.Vec3(np.random.uniform(0, 1), np.random.uniform(0, 1), np.random.uniform(0, 1))\n gym.set_rigid_body_color(env, box_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color)\n\n # get hammer actor index\n hammer_actor_idx = gym.get_actor_index(env, box_handle, gymapi.DOMAIN_SIM)\n hammer_actor_idxs.append(hammer_actor_idx)\n\n # get global index of box in rigid body state tensor\n box_idx = gym.get_actor_rigid_body_index(env, box_handle, 0, gymapi.DOMAIN_SIM)\n box_idxs.append(box_idx)\n\n # add franka\n franka_handle = gym.create_actor(env, franka_asset, franka_pose, \"franka\", i, 2)\n\n\n # set dof properties\n gym.set_actor_dof_properties(env, franka_handle, franka_dof_props)\n\n # set initial dof states\n gym.set_actor_dof_states(env, franka_handle, default_dof_state, gymapi.STATE_ALL)\n\n # set initial position targets\n gym.set_actor_dof_position_targets(env, franka_handle, default_dof_pos)\n\n # get initial hand pose\n hand_handle = gym.find_actor_rigid_body_handle(env, franka_handle, \"panda_hand\")\n hand_pose = gym.get_rigid_transform(env, hand_handle)\n init_pos_list.append([hand_pose.p.x, hand_pose.p.y, hand_pose.p.z])\n init_rot_list.append([hand_pose.r.x, hand_pose.r.y, hand_pose.r.z, hand_pose.r.w])\n\n # get global index of hand in rigid body state tensor\n hand_idx = gym.find_actor_rigid_body_index(env, franka_handle, \"panda_hand\", gymapi.DOMAIN_SIM)\n hand_idxs.append(hand_idx)\n\n # save left and right finger handle\n lfinger_handle = gym.find_actor_rigid_body_handle(env, franka_handle, \"panda_leftfinger\")\n rfinger_handle = gym.find_actor_rigid_body_handle(env, franka_handle, \"panda_rightfinger\")\n\n\n# point camera at middle env\ncam_pos = gymapi.Vec3(4, 3, 2)\ncam_target = gymapi.Vec3(-4, -3, 0)\nmiddle_env = envs[num_envs // 2 + num_per_row // 2]\ngym.viewer_camera_look_at(viewer, middle_env, cam_pos, cam_target)\n\n# ==== prepare tensors =====\n# from now on, we will use the tensor API that can run on CPU or GPU\ngym.prepare_sim(sim)\n\n# initial hand position and orientation tensors\ninit_pos = torch.Tensor(init_pos_list).view(num_envs, 3).to(device)\ninit_rot = torch.Tensor(init_rot_list).view(num_envs, 4).to(device)\n\n# hand orientation for grasping\ndown_q = torch.stack(num_envs * [torch.tensor([1.0, 0.0, 0.0, 0.0])]).to(device).view((num_envs, 4))\n\n# box corner coords, used to determine grasping yaw\nbox_half_size = 0.5 * box_size\ncorner_coord = torch.Tensor([box_half_size, box_half_size, box_half_size])\ncorners = torch.stack(num_envs * [corner_coord]).to(device)\n\n# downward axis\ndown_dir = torch.Tensor([0, 0, -1]).to(device).view(1, 3)\n\n# get jacobian tensor\n# for fixed-base franka, tensor has shape (num envs, 10, 6, 9)\n_jacobian = gym.acquire_jacobian_tensor(sim, \"franka\")\njacobian = gymtorch.wrap_tensor(_jacobian)\n\n\n# jacobian entries corresponding to franka hand\nj_eef = jacobian[:, franka_hand_index - 1, :]\n\n# Actor root state tensor\n_actor_root_state_tensor = gym.acquire_actor_root_state_tensor(sim)\nactor_root_state_tensor = gymtorch.wrap_tensor(_actor_root_state_tensor).view(-1, 13)\n\n# get rigid body state tensor\n_rb_states = gym.acquire_rigid_body_state_tensor(sim)\nrb_states = gymtorch.wrap_tensor(_rb_states)\n\n# get dof state tensor\n_dof_states = gym.acquire_dof_state_tensor(sim)\ndof_states = gymtorch.wrap_tensor(_dof_states)\ndof_pos = dof_states[:, 0].view(num_envs, 9, 1)\n\n# Create a tensor noting whether the hand should return to the initial position\nhand_restart = torch.full([num_envs], False, dtype=torch.bool).to(device)\n\n# save some reward list\nrewards = []\ngripper_hammer_dir_rewards = []\ngripper_hammer_dis_rewards = []\ngripper_down_rewards = []\ngripper_hammer_y_rewards = []\nhammer_lift_rewards = []\nboth_side_rewards = []\ntwo_finger_dist_rewards = []\n\nitr = 0\n\n# simulation loop\nwhile not gym.query_viewer_has_closed(viewer):\n # step the physics\n gym.simulate(sim)\n gym.fetch_results(sim, True)\n\n # refresh tensors\n gym.refresh_rigid_body_state_tensor(sim)\n gym.refresh_dof_state_tensor(sim)\n gym.refresh_jacobian_tensors(sim)\n gym.refresh_actor_root_state_tensor(sim)\n\n box_pos = rb_states[box_idxs, :3]\n box_rot = rb_states[box_idxs, 3:7]\n \n hand_pos = rb_states[hand_idxs, :3]\n hand_rot = rb_states[hand_idxs, 3:7]\n\n to_box = box_pos - hand_pos\n box_dist = torch.norm(to_box, dim=-1).unsqueeze(-1)\n box_dir = to_box / box_dist\n box_dot = box_dir @ down_dir.view(3, 1)\n\n # get pose for hammer, box and finger\n hammer_pose = actor_root_state_tensor[hammer_actor_idxs, 0:7]\n hammer_pos = actor_root_state_tensor[hammer_actor_idxs, 0:3]\n hammer_rot = actor_root_state_tensor[hammer_actor_idxs, 3:7]\n hammer_linvel = actor_root_state_tensor[hammer_actor_idxs, 7:10]\n hammer_angvel = actor_root_state_tensor[hammer_actor_idxs, 10:13]\n\n rb_states_reshape = rb_states.view(num_envs, -1, 13)\n\n franka_lfinger_pos = rb_states_reshape[:, lfinger_handle][:, 0:3]\n franka_rfinger_pos = rb_states_reshape[:, rfinger_handle][:, 0:3]\n franka_lfinger_rot = rb_states_reshape[:, lfinger_handle][:, 3:7]\n franka_rfinger_rot = rb_states_reshape[:, rfinger_handle][:, 3:7]\n\n # how far the hand should be from box for grasping\n grasp_offset = 0.1\n\n # determine if we're holding the box (grippers are closed and box is near)\n gripper_sep = dof_pos[:, 7] + dof_pos[:, 8]\n gripped = (gripper_sep < box_size) & (box_dist < grasp_offset + 0.5 * box_size)\n\n yaw_q = cube_grasping_yaw(box_rot, corners)\n box_yaw_dir = quat_axis(yaw_q, 0)\n hand_yaw_dir = quat_axis(hand_rot, 0)\n yaw_dot = torch.bmm(box_yaw_dir.view(num_envs, 1, 3), hand_yaw_dir.view(num_envs, 3, 1)).squeeze(-1)\n\n # determine if we have reached the initial position; if so allow the hand to start moving to the box\n to_init = init_pos - hand_pos\n init_dist = torch.norm(to_init, dim=-1)\n hand_restart = (hand_restart & (init_dist > 0.02)).squeeze(-1)\n return_to_start = (hand_restart | gripped.squeeze(-1)).unsqueeze(-1)\n\n # if hand is above box, descend to grasp offset\n # otherwise, seek a position above the box\n above_box = ((box_dot >= 0.99) & (yaw_dot >= 0.95) & (box_dist < grasp_offset * 3)).squeeze(-1)\n grasp_pos = box_pos.clone()\n grasp_pos[:, 2] = torch.where(above_box, box_pos[:, 2] + grasp_offset, box_pos[:, 2] + grasp_offset * 2.5)\n\n # compute goal position and orientation\n goal_pos = torch.where(return_to_start, init_pos, grasp_pos)\n goal_rot = torch.where(return_to_start, init_rot, quat_mul(down_q, quat_conjugate(yaw_q)))\n\n # compute position and orientation error\n pos_err = goal_pos - hand_pos\n orn_err = orientation_error(goal_rot, hand_rot)\n dpose = torch.cat([pos_err, orn_err], -1).unsqueeze(-1)\n\n # solve damped least squares\n j_eef_T = torch.transpose(j_eef, 1, 2)\n d = 0.05 # damping term\n lmbda = torch.eye(6).to(device) * (d ** 2)\n u = (j_eef_T @ torch.inverse(j_eef @ j_eef_T + lmbda) @ dpose).view(num_envs, 9, 1)\n\n # update position targets\n pos_target = dof_pos + u\n\n # gripper actions depend on distance between hand and box\n close_gripper = (box_dist < grasp_offset + 0.02) | gripped\n # always open the gripper above a certain height, dropping the box and restarting from the beginning\n hand_restart = hand_restart | (box_pos[:, 2] > 0.7)\n keep_going = torch.logical_not(hand_restart)\n close_gripper = close_gripper & keep_going.unsqueeze(-1)\n grip_acts = torch.where(close_gripper, torch.Tensor([[0., 0.]] * num_envs).to(device), torch.Tensor([[0.04, 0.04]] * num_envs).to(device))\n pos_target[:, 7:9] = grip_acts.unsqueeze(-1)\n\n # set new position targets\n gym.set_dof_position_target_tensor(sim, gymtorch.unwrap_tensor(pos_target))\n\n # update viewer\n gym.step_graphics(sim)\n gym.draw_viewer(viewer, sim, False)\n gym.sync_frame_time(sim)\n\n # check reward plot\n down_dir = torch.Tensor([0, 0, -1]).to(device).view(1, 3)\n dist_reward_scale = 1.5\n rot_reward_scale = 0.5\n around_handle_reward_scale = 1.0\n open_reward_scale = 4.0\n finger_dist_reward_scale = 10.0\n action_penalty_scale = 7.5\n distX_offset = 0.04\n max_episode_length = 500\n\n # compute the reward after this state\n reward, above_hammer_reward, gripper_to_hammer_reward, gripper_downward_reward, gripper_hammer_y_reward, hammer_lift_reward, both_side_reward, two_finger_dist_reward = \\\n compute_franka_reward(\n u, device,\n hammer_pose, None, None,\n franka_lfinger_pos, franka_lfinger_rot, franka_rfinger_pos, franka_rfinger_rot, down_dir,\n num_envs, dist_reward_scale, rot_reward_scale, around_handle_reward_scale, open_reward_scale,\n finger_dist_reward_scale, action_penalty_scale, distX_offset, max_episode_length\n )\n\n rewards.append(float(reward.squeeze().cpu().numpy()))\n gripper_hammer_dir_rewards.append(above_hammer_reward.squeeze().cpu().numpy())\n gripper_hammer_dis_rewards.append(gripper_to_hammer_reward.squeeze().cpu().numpy())\n gripper_down_rewards.append(gripper_downward_reward.squeeze().cpu().numpy())\n gripper_hammer_y_rewards.append(gripper_hammer_y_reward.squeeze().cpu().numpy())\n hammer_lift_rewards.append(hammer_lift_reward.squeeze().cpu().numpy())\n both_side_rewards.append(both_side_reward.squeeze().cpu().numpy())\n two_finger_dist_rewards.append(two_finger_dist_reward.squeeze().cpu().numpy())\n\n itr += 1\n print(itr)\n\n if itr >= 200:\n break\n\nplt.plot(range(199), rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Mean Reward')\nplt.savefig(\"reward_plot.png\")\nplt.clf()\n\nplt.plot(range(199), gripper_hammer_dir_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Gripper Hammer dir Reward')\nplt.savefig(\"gripper_hammer_dir_plot.png\")\nplt.clf()\n\nplt.plot(range(199), gripper_hammer_dis_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Gripper Hammer dis Reward')\nplt.savefig(\"gripper_hammer_dis_plot.png\")\nplt.clf()\n\nplt.plot(range(199), gripper_down_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Gripper Downward Reward')\nplt.savefig(\"gripper_down_plot.png\")\nplt.clf()\n\nplt.plot(range(199), gripper_hammer_y_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Gripper Y Reward')\nplt.savefig(\"gripper_y_plot.png\")\nplt.clf()\n\nplt.plot(range(199), hammer_lift_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Hammer lift Reward')\nplt.savefig(\"hammer_lift_plot.png\")\nplt.clf()\n\nplt.plot(range(199), both_side_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Both Side Reward')\nplt.savefig(\"both_side_plot.png\")\nplt.clf()\n\nplt.plot(range(199), two_finger_dist_rewards[1:])\nplt.xlabel('Iteration')\nplt.ylabel('Two finger dist Reward')\nplt.savefig(\"two_finger_dist_plot.png\")\nplt.clf()\n\n# cleanup\ngym.destroy_viewer(viewer)\ngym.destroy_sim(sim)\n\n\n","repo_name":"homangab/isaacgym_hammering","sub_path":"python/examples/franka_hammer_ik.py","file_name":"franka_hammer_ik.py","file_ext":"py","file_size_in_byte":22192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23801354677","text":"from PyQt5 import QtWidgets\nfrom pysat.spectral.baseline_code.airpls import AirPLS\n\nfrom point_spectra_gui.ui.AirPLS import Ui_Form\nfrom point_spectra_gui.util.BasicFunctionality import Basics\n\n\nclass Ui_Form(Ui_Form, Basics):\n def setupUi(self, Form):\n super().setupUi(Form)\n self.checkMinAndMax()\n Basics.setupUi(self, Form)\n\n def get_widget(self):\n return self.groupbox\n\n def setHidden(self, bool):\n self.get_widget().setHidden(bool)\n\n def connectWidgets(self):\n airPLS = AirPLS()\n self.smoothnessSpinBox.setValue(airPLS.smoothness_)\n self.convergenceThresholdDoubleSpinBox.setValue(airPLS.conv_thresh_)\n self.maxNumOfIterationsSpinBox.setValue(airPLS.max_iters_)\n\n def function(self):\n methodParameters = {'smoothness_': float(self.smoothnessSpinBox.value()),\n 'conv_thresh_': int(self.convergenceThresholdDoubleSpinBox.value()),\n 'max_iters_': float(self.maxNumOfIterationsSpinBox.value())}\n return methodParameters\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n","repo_name":"tisaconundrum2/PySAT-GUI","sub_path":"point_spectra_gui/core/baselineRemovalMethods/AirPLS.py","file_name":"AirPLS.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"22179314186","text":"import json\nimport ics\nfrom collections import defaultdict\nfrom datetime import datetime, time, timedelta\nfrom time import mktime\n\nimport cherrypy\nfrom pockets import listify\nfrom sqlalchemy.orm import joinedload\n\nfrom uber.config import c\nfrom uber.decorators import ajax, all_renderable, cached, csrf_protected, csv_file, render, schedule_view, site_mappable\nfrom uber.errors import HTTPRedirect\nfrom uber.models import AdminAccount, AssignedPanelist, Attendee, Event, PanelApplication\nfrom uber.utils import check, localized_now, normalize_newlines\n\n\ndef get_schedule_data(session, message):\n schedule = defaultdict(lambda: defaultdict(list))\n for event in session.query(Event).all():\n schedule[event.start_time_local][event.location].append(event)\n for i in range(1, event.duration):\n half_hour = event.start_time_local + timedelta(minutes=30 * i)\n schedule[half_hour][event.location].append(c.EVENT_BOOKED)\n\n max_simul = {}\n for id, name in c.EVENT_LOCATION_OPTS:\n max_events = 1\n for i in range(c.PANEL_SCHEDULE_LENGTH):\n half_hour = c.EPOCH + timedelta(minutes=30 * i)\n max_events = max(max_events, len(schedule[half_hour][id]))\n max_simul[id] = max_events\n\n for half_hour in schedule:\n for location in schedule[half_hour]:\n for event in schedule[half_hour][location]:\n if isinstance(event, Event):\n simul = max(len(schedule[half_hour][event.location]) for half_hour in event.half_hours)\n event.colspan = 1 if simul > 1 else max_simul[event.location]\n for i in range(1, event.duration):\n schedule[half_hour + timedelta(minutes=30 * i)][event.location].remove(c.EVENT_BOOKED)\n schedule[half_hour + timedelta(minutes=30 * i)][event.location].append(event.colspan)\n\n for half_hour in schedule:\n for id, name in c.EVENT_LOCATION_OPTS:\n span_sum = sum(getattr(e, 'colspan', e) for e in schedule[half_hour][id])\n for i in range(max_simul[id] - span_sum):\n schedule[half_hour][id].append(c.EVENT_OPEN)\n\n schedule[half_hour] = sorted(\n schedule[half_hour].items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))\n\n max_simul = [(id, c.EVENT_LOCATIONS[id], colspan) for id, colspan in max_simul.items()]\n return {\n 'message': message,\n 'schedule': sorted(schedule.items()),\n 'max_simul': sorted(max_simul, key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0]))\n }\n\n\n@all_renderable()\nclass Root:\n @cached\n @schedule_view\n def index(self, session, message=''):\n if c.ALT_SCHEDULE_URL:\n raise HTTPRedirect(c.ALT_SCHEDULE_URL)\n else:\n # external view attendees can look at with no admin menus/etc\n # we cache this view because it takes a while to generate\n return get_schedule_data(session, message)\n\n @schedule_view\n @csv_file\n def time_ordered(self, out, session):\n for event in session.query(Event).order_by('start_time', 'duration', 'location').all():\n out.writerow([event.timespan(30), event.name, event.location_label])\n\n @site_mappable(download=True)\n @schedule_view\n def xml(self, session):\n cherrypy.response.headers['Content-type'] = 'text/xml'\n schedule = defaultdict(list)\n for event in session.query(Event).order_by('start_time').all():\n schedule[event.location_label].append(event)\n return render('schedule/schedule.xml', {\n 'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0].location))\n })\n\n @site_mappable(download=True)\n @schedule_view\n def schedule_tsv(self, session):\n cherrypy.response.headers['Content-Type'] = 'text/tsv'\n cherrypy.response.headers['Content-Disposition'] = 'attachment;filename=Schedule-{}.tsv'.format(\n int(localized_now().timestamp()))\n\n schedule = defaultdict(list)\n for event in session.query(Event).order_by('start_time').all():\n schedule[event.location_label].append(dict(event.to_dict(), **{\n 'date': event.start_time_local.strftime('%m/%d/%Y'),\n 'start_time': event.start_time_local.strftime('%I:%M:%S %p'),\n 'end_time': (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),\n 'description': normalize_newlines(event.description).replace('\\n', ' ')\n }))\n\n return render('schedule/schedule.tsv', {\n 'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0]['location']))\n })\n\n @site_mappable(download=True)\n def ical(self, session, **params):\n icalendar = ics.Calendar()\n\n if 'locations' not in params or not params['locations']:\n locations = [id for id, name in c.EVENT_LOCATION_OPTS]\n calname = \"full\"\n else:\n locations = json.loads(params['locations'])\n if len(locations) > 3:\n calname = \"partial\"\n else:\n calname = \"_\".join([name for id, name in c.EVENT_LOCATION_OPTS\n if str(id) in locations])\n\n calname = '{}_{}_schedule'.format(c.EVENT_NAME, calname).lower().replace(' ', '_')\n\n for location in locations:\n for event in session.query(Event)\\\n .filter_by(location=int(location))\\\n .order_by('start_time').all():\n icalendar.events.add(ics.Event(\n name=event.name,\n begin=event.start_time,\n end=(event.start_time + timedelta(minutes=event.minutes)),\n description=normalize_newlines(event.description),\n created=event.created.when,\n location=event.location_label))\n\n cherrypy.response.headers['Content-Type'] = \\\n 'text/calendar; charset=utf-8'\n cherrypy.response.headers['Content-Disposition'] = \\\n 'attachment; filename=\"{}.ics\"'.format(calname)\n\n return icalendar\n\n if not c.HIDE_SCHEDULE:\n ical.restricted = False\n\n @csv_file\n def csv(self, out, session):\n out.writerow(['Session Title', 'Date', 'Time Start', 'Time End', 'Room/Location',\n 'Schedule Track (Optional)', 'Description (Optional)', 'Allow Checkin (Optional)',\n 'Checkin Begin (Optional)', 'Limit Spaces? (Optional)', 'Allow Waitlist (Optional)'])\n rows = []\n for event in session.query(Event).order_by('start_time').all():\n rows.append([\n event.name,\n event.start_time_local.strftime('%m/%d/%Y'),\n event.start_time_local.strftime('%I:%M:%S %p'),\n (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'),\n event.location_label,\n event.guidebook_track,\n normalize_newlines(event.description).replace('\\n', ' '),\n '', '', '', ''\n ])\n for r in sorted(rows, key=lambda tup: tup[4]):\n out.writerow(r)\n\n @csv_file\n def panels(self, out, session):\n out.writerow(['Panel', 'Time', 'Duration', 'Room', 'Description', 'Panelists'])\n for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label]):\n if 'Panel' in event.location_label or 'Autograph' in event.location_label:\n panelist_names = ' / '.join(ap.attendee.full_name for ap in sorted(\n event.assigned_panelists, key=lambda ap: ap.attendee.full_name))\n\n out.writerow([\n event.name,\n event.start_time_local.strftime('%I%p %a').lstrip('0'),\n '{} minutes'.format(event.minutes),\n event.location_label,\n event.description,\n panelist_names])\n\n @schedule_view\n def panels_json(self, session):\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return json.dumps([\n {\n 'name': event.name,\n 'location': event.location_label,\n 'start': event.start_time_local.strftime('%I%p %a').lstrip('0'),\n 'end': event.end_time_local.strftime('%I%p %a').lstrip('0'),\n 'start_unix': int(mktime(event.start_time.utctimetuple())),\n 'end_unix': int(mktime(event.end_time.utctimetuple())),\n 'duration': event.minutes,\n 'description': event.description,\n 'panelists': [panelist.attendee.full_name for panelist in event.assigned_panelists]\n }\n for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label])\n ], indent=4).encode('utf-8')\n\n @schedule_view\n def now(self, session, when=None):\n if when:\n now = c.EVENT_TIMEZONE.localize(datetime(*map(int, when.split(','))))\n else:\n now = c.EVENT_TIMEZONE.localize(datetime.combine(localized_now().date(), time(localized_now().hour)))\n\n current, upcoming = [], []\n for loc, desc in c.EVENT_LOCATION_OPTS:\n approx = session.query(Event).filter(Event.location == loc,\n Event.start_time >= now - timedelta(hours=6),\n Event.start_time <= now).all()\n for event in approx:\n if now in event.half_hours:\n current.append(event)\n\n next_events = session.query(Event).filter(\n Event.location == loc,\n Event.start_time >= now + timedelta(minutes=30),\n Event.start_time <= now + timedelta(hours=4)).order_by('start_time').all()\n\n if next_events:\n upcoming.extend(event for event in next_events if event.start_time == next_events[0].start_time)\n\n return {\n 'now': now if when else localized_now(),\n 'current': current,\n 'upcoming': upcoming\n }\n\n def form(self, session, message='', panelists=(), **params):\n event = session.event(params, allowed=['location', 'start_time'])\n if 'name' in params:\n session.add(event)\n\n # Associate a panel app with this event, and if the event is new, use the panel app's name and title\n if 'panel_id' in params and params['panel_id']:\n add_panel = session.panel_application(id=params['panel_id'])\n add_panel.event_id = event.id\n session.add(add_panel)\n if event.is_new:\n event.name = add_panel.name\n event.description = add_panel.description\n for pa in add_panel.applicants:\n if pa.attendee_id:\n assigned_panelist = AssignedPanelist(attendee_id=pa.attendee.id, event_id=event.id)\n session.add(assigned_panelist)\n\n message = check(event)\n if not message:\n new_panelist_ids = set(listify(panelists))\n old_panelist_ids = {ap.attendee_id for ap in event.assigned_panelists}\n for ap in event.assigned_panelists:\n if ap.attendee_id not in new_panelist_ids:\n session.delete(ap)\n for attendee_id in new_panelist_ids:\n if attendee_id not in old_panelist_ids:\n attendee = session.attendee(id=attendee_id)\n session.add(AssignedPanelist(event=event, attendee=attendee))\n raise HTTPRedirect('edit#{}', event.start_slot and (event.start_slot - 1))\n\n assigned_panelists = sorted(event.assigned_panelists, reverse=True, key=lambda a: a.attendee.first_name)\n\n approved_panel_apps = session.query(PanelApplication).filter(\n PanelApplication.status == c.ACCEPTED,\n PanelApplication.event_id == None).order_by('applied') # noqa: E711\n\n return {\n 'message': message,\n 'event': event,\n 'assigned': [ap.attendee_id for ap in assigned_panelists],\n 'panelists': [(a.id, a.full_name) for a in session.all_panelists()],\n 'approved_panel_apps': approved_panel_apps\n }\n\n @csrf_protected\n def delete(self, session, id):\n session.delete(session.event(id))\n raise HTTPRedirect('edit?message={}', 'Event successfully deleted')\n\n @ajax\n def move(self, session, id, location, start_slot):\n event = session.event(id)\n event.location = int(location)\n event.start_time = c.EPOCH + timedelta(minutes=30 * int(start_slot))\n resp = {'error': check(event)}\n if not resp['error']:\n session.commit()\n return resp\n\n @ajax\n def swap(self, session, id1, id2):\n from uber.model_checks import overlapping_events\n e1, e2 = session.event(id1), session.event(id2)\n e1.location, e2.location = e2.location, e1.location\n e1.start_time, e2.start_time = e2.start_time, e1.start_time\n\n resp = {'error': overlapping_events(e1, e2.id) or overlapping_events(e2, e1.id)}\n if not resp['error']:\n session.commit()\n return resp\n\n def edit(self, session, message=''):\n panelists = defaultdict(dict)\n assigned_panelists = session.query(AssignedPanelist).options(\n joinedload(AssignedPanelist.event), joinedload(AssignedPanelist.attendee)).all()\n\n for ap in assigned_panelists:\n panelists[ap.event.id][ap.attendee.id] = ap.attendee.full_name\n\n events = []\n for e in session.query(Event).order_by('start_time').all():\n d = {attr: getattr(e, attr) for attr in ['id', 'name', 'duration', 'start_slot', 'location', 'description']}\n d['panelists'] = panelists[e.id]\n events.append(d)\n\n return {\n 'events': events,\n 'message': message\n }\n\n def panelists_owed_refunds(self, session):\n return {\n 'panelists': [a for a in session.query(Attendee)\n .filter_by(ribbon=c.PANELIST_RIBBON)\n .options(joinedload(Attendee.group))\n .order_by(Attendee.full_name).all()\n if a.paid_for_badge and not a.has_been_refunded]\n }\n\n @schedule_view\n def panelist_schedule(self, session, id):\n attendee = session.attendee(id)\n events = defaultdict(lambda: defaultdict(lambda: (1, '')))\n for ap in attendee.assigned_panelists:\n for timeslot in ap.event.half_hours:\n rowspan = ap.event.duration if timeslot == ap.event.start_time else 0\n events[timeslot][ap.event.location_label] = (rowspan, ap.event.name)\n\n schedule = []\n when = min(events)\n locations = sorted(set(sum([list(locations) for locations in events.values()], [])))\n while when <= max(events):\n schedule.append([when, [events[when][where] for where in locations]])\n when += timedelta(minutes=30)\n\n return {\n 'attendee': attendee,\n 'schedule': schedule,\n 'locations': locations\n }\n\n @schedule_view\n @csv_file\n def panel_tech_needs(self, out, session):\n panels = defaultdict(dict)\n panel_applications = session.query(PanelApplication).filter(\n PanelApplication.event_id == Event.id, Event.location.in_(c.PANEL_ROOMS))\n\n for panel in panel_applications:\n panels[panel.event.start_time][panel.event.location] = panel\n\n if not panels:\n raise HTTPRedirect('../accounts/homepage?message={}', \"No panels have been scheduled yet!\")\n\n curr_time, last_time = min(panels).astimezone(c.EVENT_TIMEZONE), max(panels).astimezone(c.EVENT_TIMEZONE)\n out.writerow(['Panel Starts'] + [c.EVENT_LOCATIONS[room] for room in c.PANEL_ROOMS])\n while curr_time <= last_time:\n row = [curr_time.strftime('%H:%M %a')]\n for room in c.PANEL_ROOMS:\n p = panels[curr_time].get(room)\n row.append('' if not p else '{}\\n{}\\n{}\\n{}'.format(\n p.event.name,\n ' / '.join(p.tech_needs_labels),\n p.other_tech_needs,\n 'Panelists are bringing themselves: {}'.format(p.panelist_bringing) if p.panelist_bringing else ''\n ).strip())\n out.writerow(row)\n curr_time += timedelta(minutes=30)\n","repo_name":"magfest/ubersystem","sub_path":"uber/site_sections/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":16924,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"3"} +{"seq_id":"24747532508","text":"__author__ = \"Victor Armegioiu\"\n\nfrom math import sqrt\nimport random\n\n\"\"\"\n @param: base \n base to be used for modular exponentiation\n\n @param: exp\n power to raise the base to\n\n @param: modulo\n value of the field we're working in, (Z/pZ)*\n\n @returns: \n Computes (base ^ exp) % modulo in log2(exp) time.\n\"\"\"\ndef mod_exp(base, exp, modulo):\n ans = 1\n while exp:\n if exp & 1:\n ans = (ans * base) % modulo\n\n base = (base * base) % modulo\n exp >>= 1\n\n return ans\n\n\"\"\"\n Computation of the Jacobi symbol (a / n) using modular congruences.\n Calculated using the properties listed here :\n https://en.wikipedia.org/wiki/Jacobi_symbol#Calculating_the_Jacobi_symbol \n\n @param: a\n numerator of the jacobi symbol\n\n @param: b\n denominator of the jacobi symbol\n\n @returns:\n value of the jacobi symbol (a / n)\n\"\"\"\ndef jacobi_symbol(a, n):\n if n == 1:\n return 1\n\n elif a == 0:\n return 0\n\n elif a == 1:\n return 1\n\n elif a == 2:\n if n % 8 in [3, 5]:\n return -1\n elif n % 8 in [1, 7]:\n return 1\n\n elif a < 0:\n return (-1) ** ((n - 1) / 2) * jacobi_symbol(-1 * a, n)\n\n if a % 2 == 0:\n return jacobi_symbol(2, n) * jacobi_symbol(a / 2, n)\n\n elif a % n != a:\n return jacobi_symbol(a % n, n)\n\n else:\n if a % 4 == n % 4 == 3:\n return -1 * jacobi_symbol(n, a)\n else:\n return jacobi_symbol(n, a)\n\n\n\"\"\"\n @param: p\n p is a prime s.t. p = 2 * q + 1 and q == prime\n As such, the only prime divisors of p - 1 are\n 2 and (p - 1) / 2 = q;\n\n @returns:\n primitive root of the prime p iff p = 2 * q + 1\n\"\"\"\ndef primitive_root(p):\n s = p - 1\n divisors = (2, s // 2)\n\n while True:\n g = random.randint(2, p - 1)\n if mod_exp(g, s // divisors[0], p) != 1 and mod_exp(g, s // divisors[1], p) != 1:\n return g\n\n\"\"\"\n Quick computation of the U_{n + 1} and V_{n + 1} without\n calculating intermediate terms.\n\n https://en.wikipedia.org/wiki/Lucas_pseudoprime#Implementing_a_Lucas_probable_prime_test\n\"\"\"\ndef U_V_subscript(k, n, U, V, P, Q, D):\n k, n, U, V, P, Q, D = map(int, (k, n, U, V, P, Q, D))\n digits = list(map(int, str(bin(k))[2 : ]))\n subscript = 1\n\n for digit in digits[1 : ]:\n U, V = U * V % n, (pow(V, 2, n) - 2 * pow(Q, subscript, n)) % n\n subscript *= 2\n\n if digit == 1:\n if not (P * U + V) & 1:\n if not (D * U + P * V) & 1:\n U, V = (P * U + V) >> 1, (D * U + P * V) >> 1\n\n else:\n U, V = (P * U + V) >> 1, (D * U + P * V + n) >> 1\n\n elif not (D * U + P * V) & 1:\n U, V = (P * U + V + n) >> 1, (D * U + P * V) >> 1\n\n else:\n U, V = (P * U + V + n) >> 1, (D * U + P * V + n) >> 1\n\n subscript += 1\n U, V = U % n, V % n\n\n return U, V\n\n\ndef encode(plaintext, bits):\n\tbyte_array = bytearray(plaintext, 'utf-16')\n\tencoded = []\n\n\tchunk = bits // 8\n\tj = -chunk\n\n\tfor i in range(len(byte_array)):\n\t\tif i % chunk == 0:\n\t\t\tj += chunk\n\t\t\tencoded.append(0)\n\n\t\tencoded[j // chunk] += byte_array[i] * (2 ** (8 * (i % chunk)))\n\n\treturn encoded\n\n\ndef decode(encoded, bits):\n\tbyte_array = []\n\tchunk = bits // 8\n\n\tfor integer in encoded:\n\t\tfor i in range(chunk):\n\t\t\tcopy = integer\n\n\t\t\tfor j in range(i + 1, chunk):\n\t\t\t\tcopy %= 2 ** (8 * j)\n\n\t\t\tletter = copy // (2 ** (8 * i))\n\t\t\tbyte_array.append(letter)\n\t\t\n\t\t\tinteger -= letter * 2 ** (8 * i)\n\n\n\tdecoded = bytearray(b for b in byte_array).decode('utf-16')\n\treturn decoded\n","repo_name":"victor-armegioiu/ElGamal-Encrypted-Text-Hidden-via-Steganography","sub_path":"ntheory_utilities.py","file_name":"ntheory_utilities.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"71197029843","text":"from sqlalchemy import (\n Column,\n String,\n Integer,\n ForeignKey,\n UniqueConstraint,\n DateTime,\n)\nfrom sqlalchemy.orm import relationship\n\nfrom db.models.base import BaseModel\n\n\nclass User(BaseModel):\n __tablename__ = \"user\"\n name = Column(String(255), nullable=False, index=True)\n datetime = Column(DateTime)\n job_id = Column(Integer, ForeignKey(\"job.id\"), nullable=False)\n department_id = Column(Integer, ForeignKey(\"department.id\"), nullable=False)\n job = relationship(\n \"Job\",\n back_populates=\"users\",\n primaryjoin=\"and_(Job.id==User.job_id, Job.is_active==True)\",\n )\n department = relationship(\n \"Department\",\n back_populates=\"users\",\n primaryjoin=\"and_(Department.id==User.department_id, Department.is_active==True)\",\n )\n\n __table_args__ = (UniqueConstraint(\"name\", \"job_id\", \"department_id\"),)\n","repo_name":"jose1522/data_pipeline","sub_path":"data_api/db/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18334658751","text":"import sys\nimport math\nimport bisect\nfrom heapq import heapify, heappop, heappush\nfrom collections import deque, defaultdict, Counter\nfrom functools import lru_cache\nfrom itertools import accumulate, combinations, permutations\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nMOD99 = 998244353\n\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\nSMI = lambda: input().split()\nSLI = lambda: list(SMI())\n\n\ndef adjlist(n, edges, directed=False, in_origin=1):\n if len(edges) == 0:\n return [[] for _ in range(n)]\n\n weighted = True if len(edges[0]) > 2 else False\n if in_origin == 1:\n if weighted:\n edges = [[x - 1, y - 1, w] for x, y, w in edges]\n else:\n edges = [[x - 1, y - 1] for x, y in edges]\n\n res = [[] for _ in range(n)]\n\n if weighted:\n for u, v, c in edges:\n res[u].append([v, c])\n if not directed:\n res[v].append([u, c])\n\n else:\n for u, v in edges:\n res[u].append(v)\n if not directed:\n res[v].append(u)\n\n return res\n\n\ndef main():\n N, T = NMI()\n T -= 1\n AB = [NLI() for _ in range(N-1)]\n\n G = adjlist(N, AB)\n R = [0] * (N)\n\n def dfs(now, par):\n r = 0\n for goto in G[now]:\n if goto == par:\n continue\n r = max(r, dfs(goto, now)+1)\n R[now] = r\n return r\n\n dfs(T, -1)\n\n print(*R)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"tessoku-book/B65.py","file_name":"B65.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30767706777","text":"# Перегрузите магический метод __str__ у всех классов.\n# У проверяющих он должен выводить информацию в следующем виде:\n# print(some_reviewer)\n# Имя: Some\n# Фамилия: Buddy\n\n# У лекторов:\n# print(some_lecturer)\n# Имя: Some\n# Фамилия: Buddy\n# Средняя оценка за лекции: 9.9\n\n# У студентов :\n# print(some_student)\n# Имя: Ruoy\n# Фамилия: Eman\n# Средняя оценка за домашние задания: 9.9\n# Курсы в процессе изучения: Python, Git\n# Завершенные курсы: Введение в программирование\n\n# 2) Реализуйте возможность сравнивать (через операторы сравнения)\n# между собой лекторов по средней оценке за лекции и студентов по\n# средней оценке за домашние задания.\n\nclass Mentor:\n def __init__(self,name,surname):\n self.name = name\n self.surname = surname\n self.courses_attached = [] # 'courses_attached'-'подключенные курсы'\n self.grades_dict_lecturer = {} # словарь с оценками лекторам от студентов\n self.average_grade = 0 # Средняя оценка за лекции\n\n\nclass Student:\n def __init__(self,name,surname,gender,):\n self.name=name\n self.surname = surname\n self.gender=gender\n self.finished_courses = []\n self.courses_in_progress = [] # 'изучаемые на данный момент курсы'\n self.grades_dict_student = {} # словарь с оценками студентов от проверяющих\n self.average_grade = 0 # Средняя оценка за ДЗ\n\n # Метод для добавления пройденных курсов\n def add_courses (self,course_name):\n self.finished_courses.append(course_name)\n\n # Метод вычисления средней оценки за ДЗ :\n def average_grade_student(self):\n grade_list=[]\n for val in self.grades_dict_student.values():\n grade_list.extend(val)\n # Подсчитаем сумму оценок:\n sum_=sum(grade_list)\n # Подсчитаем среднее значение всех оценок\n self.average_grade = round(sum_/len(grade_list),2)\n return self.average_grade\n\n # Метод выставления оценок студентами лекторам\n # student_1.add_grades_lecturer(lecturer_1, 'Python', 9)\n def add_grades_lecturer(self, lecturer, course, grades):\n # Если лектор - экземпляр класса Lecturer , курс входит в список курсов ,которые\n # ведёт лектор и курс входит в список текущих курсов студента :\n if isinstance(lecturer, Lecturer) and course in lecturer.courses_attached \\\n and course in self.courses_in_progress:\n if course in lecturer.grades_dict_lecturer:\n lecturer.grades_dict_lecturer[course] += [grades]\n else:\n lecturer.grades_dict_lecturer[course] = [grades]\n else:\n print(f'Ошибка. Проверте , является ли {lecturer.name} {lecturer.surname} экземпляром'\n f' класса Student , входит ли \"{course}\" в список курсов , которые на данный момент'\n f' изучает студент {lecturer.name} {lecturer.surname} '\n f' и является ли \"{course}\" - курсом , на котором преподаёт'\n f' {self.name} {self.surname}')\n\n # метод __str__ для Student :\n def __str__(self):\n res = f'Имя: {self.name}\\nФамилия : {self.surname}\\nСредняя ' \\\n f'оценка за ДЗ: {self.average_grade_student()}\\n' \\\n f'Курсы в процессе изучения: {\", \".join(self.courses_in_progress)}\\n' \\\n f'Завершенные курсы: {\", \".join(self.finished_courses)}'\n return res\n\n # Метод сравнения средних оценок студентов:\n def __lt__(self, other):\n if not isinstance(other, Student):\n print('Not a Student!')\n return\n return self.average_grade < other.average_grade\n\n\n# Дочерние для класса Mentor классы Lecturer (лекторы) и Reviewer(проверяющие)\nclass Lecturer(Mentor):\n\n # Метод вычисления средней оценки за лекции :\n def average_grade_lectures(self):\n grade_list=[]\n for val in self.grades_dict_lecturer.values():\n grade_list.extend(val)\n # Подсчитаем сумму оценок:\n sum_=sum(grade_list)\n # Подсчитаем среднее значение всех оценок\n self.average_grade = round(sum_/len(grade_list),2)\n return self.average_grade\n\n # Метод сравнения средних оценок лекторов :\n def __lt__(self, other):\n if not isinstance(other, Lecturer):\n print('Not a Lecturer!')\n return\n return self.average_grade < other.average_grade\n\n\n # метод __str__ для Lecturer :\n def __str__(self):\n res = f'Имя: {self.name}\\nФамилия : {self.surname}\\nСредняя ' \\\n f'оценка за лекции: {self.average_grade_lectures()}'\n return res\n\n\nclass Reviewer(Mentor):\n # Метод , который позволяет проверяющему добавить оценку в словарь студента\n # по названию курса\n def add_grades_student(self, student, course, grades):\n if isinstance(student, Student) and course in self.courses_attached \\\n and course in student.courses_in_progress:\n if course in student.grades_dict_student:\n student.grades_dict_student[course] += [grades]\n else:\n student.grades_dict_student[course] = [grades]\n else:\n print(f'Ошибка. Проверте , является ли {student.name} {student.surname} экземпляром'\n f' класса Student , входит ли \"{course}\" в список курсов , которые на данный момент'\n f' изучает студент {student.name} {student.surname} '\n f' и является ли \"{course}\" - курсом , на котором преподаёт'\n f' {self.name} {self.surname}')\n # метод __str__ для Reviewer :\n def __str__(self):\n res = f'Имя: {self.name}\\nФамилия = {self.surname}'\n return res\n# Проверка метода __str__ для проверяющих:\nreviewer_1 = Reviewer ('Иван','Петров')\nprint(reviewer_1) # Имя: Иван\n # Фамилия = Петров\n# Инициализация лекторов :\n(lecturer_1) = Lecturer('Егор','Седов')\nlecturer_1.courses_attached.append('Python')\nlecturer_2 = Lecturer('Анна','Федотова')\nlecturer_2.courses_attached.append('Python')\n# Инициализация студентов :\nstudent_1= Student('Вася','Пупкин','пацан')\nstudent_1.courses_in_progress.append('Python')\nstudent_2= Student('Ольга','Зуева','девка')\nstudent_2.courses_in_progress.append('Python')\nstudent_3= Student('Елена','Белая','девка')\nstudent_3.courses_in_progress.append('Python')\n# Выставим оценки лектору-1\nstudent_1.add_grades_lecturer(lecturer_1, 'Python', 9)\nstudent_2.add_grades_lecturer(lecturer_1, 'Python', 9)\nstudent_3.add_grades_lecturer(lecturer_1, 'Python', 10)\n# Выставим оценки лектору-2\nstudent_1.add_grades_lecturer(lecturer_2, 'Python', 9)\nstudent_2.add_grades_lecturer(lecturer_2, 'Python', 8)\nstudent_3.add_grades_lecturer(lecturer_2, 'Python', 8)\n# Посмотрим оценки обоих лекторов(проверка):\nprint(lecturer_1.grades_dict_lecturer) # {'Python': [9, 9, 10]}\nprint(lecturer_2.grades_dict_lecturer) # {'Python': [9, 8, 8]}\n# Вызов метода вычисления средней оценки лектора_1\nprint(lecturer_1.average_grade_lectures()) # 9.33\n# Проверка переопределения метода __str__ для лекторов:\nprint(lecturer_1) # Имя: Егор\n # Фамилия : Седов\n # Средняя оценка за лекции: 9.33\n# Инициализируем проверяющих\nreviewer_1 = Reviewer ('Иван','Петров') # Проверяющий_1\nreviewer_1.courses_attached.append('Python') # Добавили курс 'Python' в список пров��ряемых курсов\n# Зададим выставление оценок проверяющими (reviewer) студентам соответствующих курсов\nreviewer_1.add_grades_student(student_1, 'Python', 10) # Добавили оценку -1 студенту-1\nreviewer_1.add_grades_student(student_1, 'Python', 9) # Добавили оценку -2 студенту-1\nreviewer_1.add_grades_student(student_1, 'Python', 10) # Добавили оценку -3 студенту-1\nprint(f'Оценки для student_1 - {student_1.grades_dict_student }') # {'Python': [10, 9, 10]}\nreviewer_1.add_grades_student(student_2, 'Python', 8) # Добавили оценку -1 студенту-1\nreviewer_1.add_grades_student(student_2, 'Python', 9) # Добавили оценку -2 студенту-1\nreviewer_1.add_grades_student(student_2, 'Python', 9) # Добавили оценку -3 студенту-1\nprint(f'Оценки для student_2 - {student_2.grades_dict_student }') # {'Python': [10, 9, 10]\n# Проверка - Средняя оценка для student_1:\nprint(student_1.average_grade_student()) # 9.67\n# Добавим курс в список текущих курсов\nstudent_1.courses_in_progress.append('Git')\nprint(student_1.courses_in_progress)\n# Добавим курс в список оконченных курсов :\nstudent_1.finished_courses.append('Введение в программирование')\n# Проверка переопределения метода __str__ для студентов:\nprint(student_1) # Имя: Вася\n # Фамилия : Пупкин\n # Средняя оценка за ДЗ: 9.67\n # Курсы в процессе изучения: Python, Git\n # Завершенные курсы: Введение в программирование\n\n\n# 2) Реализуйте возможность сравнивать (через операторы сравнения)\n# между собой лекторов по средней оценке за лекции и студентов по\n# средней оценке за домашние задания.\n\n# Проверим словари с оценками у лекторов:\nprint (lecturer_1.grades_dict_lecturer) # {'Python': [9, 9, 10]}\nprint (lecturer_2.grades_dict_lecturer) # {'Python': [9, 8, 8]}\n# Средние оценки лекторов 1 и 2 :\nlecturer_1.average_grade = lecturer_1.average_grade_lectures()\nlecturer_2.average_grade = lecturer_2.average_grade_lectures()\nprint(lecturer_1.average_grade,lecturer_2.average_grade) # 9.33 8.33\n# Производим сравнение лекторов по средним оценкам за лекции\nprint(lecturer_1 < lecturer_2) # False\n\n# Средние оценки студентов 1 и 2 :\nstudent_1.average_grade = student_1.average_grade_student()\nstudent_2.average_grade = student_2.average_grade_student()\nprint(student_1.average_grade,student_2.average_grade) # 9.67 8.67\n# Сравним студентов по средней оценке по ДЗ :\nprint(student_1 > student_2) # True\n\n","repo_name":"Greon68/DZ-OOP","sub_path":"ДЗ-3.Полиморфизм и магические методы.py","file_name":"ДЗ-3.Полиморфизм и магические методы.py","file_ext":"py","file_size_in_byte":12464,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28602391354","text":"#! /usr/bin/python\nfrom __future__ import print_function;\nimport sys\nimport json;\nsys.path.insert(0, '../NaaradServer/NewServer');\nimport serverinfo\n\nfrom mySock import mysocket;\nimport time;\n\nclass MyException(Exception):\n pass;\n\ndef decnotify(argv):\n \"\"\"\n De-register continuous notification associated with the given UUID.\n \"\"\"\n if (len(sys.argv) < 2):\n\n print(\"\\nUsage: \"+sys.argv[0]+\" UUID\\n\");\n print(decnotify.__doc__);\n else:\n try:\n uuid=sys.argv[1];\n\n FULLCMD=\"abortcnotify \"+str(sys.argv[1]);\n print(FULLCMD);\n\n naaradSoc=mysocket();\n naaradSoc.connect(serverinfo.SERVER,serverinfo.PORT);time.sleep(0.1);\n naaradSoc.send(\"De-cnotify App\");\n naaradSoc.send(FULLCMD); \n naaradSoc.send(\"done\"); \n except MyException as e:\n print(str(e));\n\nif __name__ == \"__main__\":\n decnotify(sys.argv)\n","repo_name":"sanbee/Naarad","sub_path":"RPi/Apps/decnotify.py","file_name":"decnotify.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70536103762","text":"import random\nimport simpy\nimport statistics\nimport numpy\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Define the parameters of the simulation\nSIM_TIME = ['Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct'] # Simulation time in month\nINIT_BEES = 50000 # Initial number of bees in the colony\nINFECTION_RATE = 0.5 # Probability of a bee getting infected each month\nMORTALITY_RATE = 0.02 # Probability of an infected bee dying each month\n\n# Initialize the colony\nbees = [{'status': 'healthy'} for i in range(INIT_BEES)]\n\n# Initialize the counters\nnum_infected = 0\nnum_dead = 0\nnum_healthy = INIT_BEES\nnum_infected_lastmonth = 0\nnum_dead_lastmonth = 0\nnum_healthy_lastmonth = INIT_BEES\nnum_reproduced = 0\n\n# Run the simulation\ninfected_bees = 1\ninfected_bees_per_month = []\ndead_bees_per_month = []\n\nfor month in SIM_TIME:\n if month == \"Apr or month May and Jun\":\n infection_rate = 0.025\n\n\nfor month in SIM_TIME:\n if month == \"Jul\":\n infection_rate = 0.05\n\nfor month in SIM_TIME:\n if month == \"Jul or month Aug\":\n infection_rate = 0.1\n\nfor month in SIM_TIME:\n if month == \"Aug or month Sept\":\n infection_rate = 0.75\n\nfor month in SIM_TIME:\n if month == \"Sept or month Oct\":\n infection_rate = 0.50 \n \n\n\n # Infect some bees\n num_reproduced += 1000\n for bee in bees:\n \tif bee['status'] == 'healthy' and random.random() < INFECTION_RATE:\n bee['status'] = 'infected'\n num_infected += 1\n num_healthy -= 1\n # Kill some infected bees\n dead_bees = [bee for bee in bees if bee['status'] == 'infected' and\n \t random.random() < MORTALITY_RATE]\n for bee in dead_bees:\n \tbees.remove(bee)\n \tnum_dead += 1\n \tnum_infected -= 1\n infected_bees_per_month.append(num_infected)\n dead_bees_per_month.append(num_dead)\n\n\n # Print the number of infected, dead, and healthy bees\n print(\"Month {}: {} bees infected (+{}), {} bees dead (+{}), {} bees healthy (-{})\".format(month,\n num_infected, num_infected\n - num_infected_lastmonth,\n num_dead, num_dead - num_dead_lastmonth,\n num_healthy, num_healthy_lastmonth - num_healthy))\n # Update the counters for the next day\n num_infected_lastmonth = num_infected\n num_dead_lastmonth = num_dead\n num_healthy_lastmonth = num_healthy\n \n# Print the total number of dead, infected, and healthy bees\nprint(\"Total number of infected bees: {}\".format(num_infected))\nprint(\"Total number of dead bees: {}\".format(num_dead))\nprint(\"Total number of healthy bees: {}\".format(num_healthy))\n\n\n# infected bees graph\nplt.plot(infected_bees_per_month)\nplt.xlabel('Month (April-October)')\nplt.ylabel('Infected bees')\nplt.title('Number of infected bees over time')\nplt.show()\n\n# dead bees graph\nplt.plot(dead_bees_per_month)\nplt.xlabel('Month (April-October)')\nplt.ylabel('Dead bees')\nplt.title('Number of deaths in bees over time')\nplt.show()\n","repo_name":"Julia-almeida/mitesandbees","sub_path":"bees_3.py","file_name":"bees_3.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25335381060","text":"# Runtime: 32 ms, faster than 92.40% of Python3 online submissions for Fraction to Recurring Decimal.\r\n# Memory Usage: 13.3 MB, less than 19.36% of Python3 online submissions for Fraction to Recurring Decimal.\r\nclass Solution:\r\n def fractionToDecimal(self, numerator: int, denominator: int) -> str:\r\n if numerator == 0:\r\n return \"0\"\r\n if denominator == 0:\r\n return \"\" \r\n if (numerator > 0 and denominator > 0) or (numerator < 0 and denominator < 0):\r\n res = \"\"\r\n else:\r\n res = \"-\"\r\n numerator, denominator = abs(numerator), abs(denominator)\r\n if numerator % denominator == 0:\r\n res += str(int(numerator / denominator))\r\n else:\r\n res += str(int(numerator / denominator)) + '.'\r\n remain = numerator % denominator # 余数\r\n idx = len(res)\r\n remain_dict = {}\r\n while remain and remain not in remain_dict:\r\n remain_dict[remain] = idx\r\n idx += 1\r\n remain *= 10\r\n res += str(int(remain / denominator))\r\n remain %= denominator\r\n if remain: # 是否是循环\r\n res = res[:remain_dict[remain]] + '(' + res[remain_dict[remain]:]\r\n res += ')'\r\n return res\r\n","repo_name":"daidai21/Leetcode","sub_path":"Algorithms/Python3.x/166-Fraction_to_Recurring_Decimal.py","file_name":"166-Fraction_to_Recurring_Decimal.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"5263714631","text":"from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.transaction import atomic\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.utils.translation import ugettext as _\n\nfrom misago.acl import add_acl\nfrom misago.core.errorpages import not_allowed\n\nfrom misago.threads import permissions, moderation, goto\nfrom misago.threads.forms.report import ReportPostForm\nfrom misago.threads.reports import user_has_reported_post, report_post\nfrom misago.threads.views.generic.base import ViewBase\n\n\n__all__ = [\n 'QuotePostView',\n 'ApprovePostView',\n 'UnhidePostView',\n 'HidePostView',\n 'DeletePostView',\n 'ReportPostView'\n]\n\n\nclass PostView(ViewBase):\n is_atomic = True\n require_post = True\n\n def dispatch(self, request, *args, **kwargs):\n if self.require_post and request.method != \"POST\":\n return not_allowed(request)\n\n post = None\n response = None\n\n if self.is_atomic:\n with atomic():\n post = self.get_post(request, True, **kwargs)\n response = self.real_dispatch(request, post)\n else:\n post = self.get_post(request, **kwargs)\n response = self.real_dispatch(request, post)\n\n if response:\n return response\n else:\n return self.redirect_to_post(request.user, post)\n\n def real_dispatch(self, request, post):\n raise NotImplementedError(\n \"post views have to override real_dispatch method\")\n\n def redirect_to_post(self, user, post):\n posts_qs = self.exclude_invisible_posts(post.thread.post_set,\n user,\n post.forum,\n post.thread)\n return redirect(goto.post(post.thread, posts_qs, post))\n\n\nclass QuotePostView(PostView):\n is_atomic = False\n require_post = False\n\n def real_dispatch(self, request, post):\n quote_tpl = u'[quote=\"%s, post:%s, topic:%s\"]\\n%s\\n[/quote]'\n formats = (post.poster_name, post.pk, post.thread_id, post.original)\n return JsonResponse({\n 'quote': quote_tpl % formats\n })\n\n\nclass ApprovePostView(PostView):\n def real_dispatch(self, request, post):\n if not post.acl['can_approve']:\n raise PermissionDenied(_(\"You can't approve this post.\"))\n\n if post.id == post.thread.first_post_id:\n moderation.approve_thread(request.user, post.thread)\n messages.success(request, _(\"Thread has been approved.\"))\n else:\n moderation.approve_post(request.user, post)\n messages.success(request, _(\"Post has been approved.\"))\n\n post.thread.synchronize()\n post.thread.save()\n post.forum.synchronize()\n post.forum.save()\n\n\nclass UnhidePostView(PostView):\n is_atomic = False\n\n def real_dispatch(self, request, post):\n permissions.allow_unhide_post(request.user, post)\n moderation.unhide_post(request.user, post)\n messages.success(request, _(\"Post has been made visible.\"))\n\n\nclass HidePostView(PostView):\n is_atomic = False\n\n def real_dispatch(self, request, post):\n permissions.allow_hide_post(request.user, post)\n moderation.hide_post(request.user, post)\n messages.success(request, _(\"Post has been hidden.\"))\n\n\nclass DeletePostView(PostView):\n def real_dispatch(self, request, post):\n post_id = post.id\n\n permissions.allow_delete_post(request.user, post)\n moderation.delete_post(request.user, post)\n\n post.thread.synchronize()\n post.thread.save()\n post.forum.synchronize()\n post.forum.save()\n\n posts_qs = self.exclude_invisible_posts(post.thread.post_set,\n request.user,\n post.forum,\n post.thread)\n posts_qs = posts_qs.select_related('thread', 'forum')\n\n if post_id < post.thread.last_post_id:\n target_post = posts_qs.order_by('id').filter(id__gt=post_id)\n else:\n target_post = posts_qs.order_by('-id').filter(id__lt=post_id)\n\n target_post = target_post[:1][0]\n target_post.thread.forum = target_post.forum\n\n add_acl(request.user, target_post.forum)\n add_acl(request.user, target_post.thread)\n add_acl(request.user, target_post)\n\n messages.success(request, _(\"Post has been deleted.\"))\n return self.redirect_to_post(request.user, target_post)\n\n\nclass ReportPostView(PostView):\n require_post = False\n\n template = 'misago/thread/report_modal.html'\n alerts_template = 'misago/thread/post_alerts.html'\n\n def dispatch(self, request, *args, **kwargs):\n if not request.is_ajax():\n return not_allowed(request)\n\n return super(ReportPostView, self).dispatch(request, *args, **kwargs)\n\n def real_dispatch(self, request, post):\n if not post.acl['can_report']:\n raise PermissionDenied(_(\"You can't report posts.\"))\n\n if user_has_reported_post(request.user, post):\n return JsonResponse({\n 'is_reported': True,\n 'message': _(\"You have already reported this post.\")})\n\n form = ReportPostForm()\n if request.method == 'POST':\n form = ReportPostForm(request.POST)\n if form.is_valid():\n report_post(request,\n post,\n form.cleaned_data['report_message'])\n\n message = _(\"%(user)s's post has been \"\n \"reported to moderators.\")\n message = message % {'user': post.poster_name}\n return JsonResponse({\n 'message': message,\n 'label': _(\"Reported\"),\n 'alerts': self.render_alerts(request, post)\n })\n else:\n field_errors = form.errors.get('report_message')\n if field_errors:\n field_error = field_errors[0]\n else:\n field_error = _(\"Error reporting post.\")\n\n return JsonResponse({'is_error': True, 'message': field_error})\n\n return self.render(request, {'form': form})\n\n def render_alerts(self, request, post):\n return render(request, self.alerts_template, {\n 'forum': post.forum,\n 'thread': post.thread,\n 'post': post\n }).content\n\n\n","repo_name":"xuzhao1211/OnlineExam","sub_path":"misago/threads/views/generic/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9990219481","text":"import numpy as np\nfrom beautifultable import BeautifulTable\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score\n\nfrom core.common import PRINT\n\n\ndef remove_ignore_labels(preds, label_ids, ignored_labels):\n final_preds = []\n final_label_ids = []\n for pred_t, label_id_t in zip(preds, label_ids):\n if not (pred_t == label_id_t and pred_t in ignored_labels):\n final_preds.append(pred_t)\n final_label_ids.append(label_id_t)\n\n return final_preds, final_label_ids\n\n\nclass FScorer:\n def __init__(self, beta=1.0):\n self._beta = beta\n self.clear()\n\n @property\n def precision(self):\n if self._true_positive + self._false_positive:\n return self._true_positive / (self._true_positive + self._false_positive)\n return 0.0\n\n @property\n def recall(self):\n if self._true_positive + self._false_negative:\n return self._true_positive / (self._true_positive + self._false_negative)\n return 0.0\n\n @property\n def fscore(self):\n numerator = (1 + self._beta ** 2) * self.precision * self.recall\n denominator = self._beta ** 2 * self.precision + self.recall\n if denominator:\n return numerator / denominator\n return 0.0\n\n def clear(self):\n self._true_positive = 0\n self._true_negative = 0\n self._false_positive = 0\n self._false_negative = 0\n\n def __call__(self, predicts, golds):\n raise NotImplementedError()\n\n\nclass SpanClassificationFscore(FScorer):\n def __init__(self, labels=[], beta=1.0):\n super().__init__(beta)\n self._labels = labels\n\n def __call__(self, predicts, golds):\n predicted_mentions = set(predicts)\n gold_mentions = set(golds)\n\n padded_predicted_mentions, padded_gold_mentions = [], []\n\n mentions = predicted_mentions | gold_mentions\n\n for mention in mentions:\n *_, mention_label = mention\n\n padded_predicted_mentions.append(\n mention in predicted_mentions and mention_label in self._labels\n )\n padded_gold_mentions.append(\n mention in gold_mentions and mention_label in self._labels\n )\n\n matcher = list(zip(padded_predicted_mentions, padded_gold_mentions))\n\n self._true_positive += matcher.count((True, True))\n self._true_negative += matcher.count((False, False))\n self._false_positive += matcher.count((True, False))\n self._false_negative += matcher.count((False, True))\n\n\nclass Metrics(object):\n\n def compute(self, preds, golds):\n raise NotImplementedError()\n\n\nclass AccAndF1Metrics(Metrics):\n def __init__(self, average='micro', acc_only=False):\n self.average = average\n self.acc_only = acc_only\n\n def compute(self, preds, golds):\n _preds = None\n _golds = None\n for batch_predicts, batch_golds in zip(preds, golds):\n if _preds is None:\n _preds = batch_predicts\n _golds = batch_golds\n else:\n _preds = np.append(batch_predicts, _preds, axis=0)\n _golds = np.append(batch_golds, _golds, axis=0)\n\n acc = AccAndF1Metrics.simple_accuracy(_preds, _golds)\n if not self.acc_only:\n f1 = f1_score(y_true=_golds, y_pred=_preds, average=self.average)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n else:\n return {\"acc\": acc}\n\n @staticmethod\n def simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\nclass PearsonAndSpearman(Metrics):\n\n def _to_table(self, pearson_corr, spearman_corr):\n table = BeautifulTable()\n\n table.column_headers = [\n \"Pearson\",\n \"Spearmanr\",\n \"Corr\",\n ]\n\n table.column_alignments[\"Pearson\"] = BeautifulTable.ALIGN_RIGHT\n table.column_alignments[\"Spearmanr\"] = BeautifulTable.ALIGN_RIGHT\n table.column_alignments[\"Corr\"] = BeautifulTable.ALIGN_RIGHT\n\n table.set_style(BeautifulTable.STYLE_COMPACT)\n table.append_row((pearson_corr, spearman_corr, (pearson_corr + spearman_corr) / 2))\n\n return table\n\n def compute(self, preds, golds):\n _preds = None\n _golds = None\n for batch_predicts, batch_golds in zip(preds, golds):\n if _preds is None:\n _preds = batch_predicts\n _golds = batch_golds\n else:\n _preds = np.append(batch_predicts, _preds, axis=0)\n _golds = np.append(batch_golds, _golds, axis=0)\n\n pearson_corr = pearsonr(_preds, _golds)[0]\n spearman_corr = spearmanr(_preds, _golds)[0]\n to_print = '\\n' + str(self._to_table(pearson_corr, spearman_corr))\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n PRINT: to_print\n }\n\n\nclass MatthewsCorrcoef(Metrics):\n\n def compute(self, preds, golds):\n _preds = None\n _golds = None\n for batch_predicts, batch_golds in zip(preds, golds):\n if _preds is None:\n _preds = batch_predicts\n _golds = batch_golds\n else:\n _preds = np.append(batch_predicts, _preds, axis=0)\n _golds = np.append(batch_golds, _golds, axis=0)\n\n return {\n \"mcc\": matthews_corrcoef(_golds, _preds)\n }\n\n\nclass SpanClassificationMetrics(Metrics):\n def __init__(self, labels=[], beta=1.0):\n self._labels = labels\n self.single_scores = {\n label: SpanClassificationFscore(labels=[label], beta=beta) for label in labels\n }\n self.overall_score = SpanClassificationFscore(labels=labels, beta=beta)\n\n def compute(self, preds, golds):\n # Clear last results:\n for label in self._labels:\n self.single_scores[label].clear()\n self.overall_score.clear()\n\n for batch_predicts, batch_golds in zip(preds, golds):\n for sentence_index in range(len(batch_golds)):\n for label in self._labels:\n self.single_scores[label](\n batch_predicts[sentence_index], batch_golds[sentence_index]\n )\n\n self.overall_score(\n batch_predicts[sentence_index], batch_golds[sentence_index]\n )\n\n return {\n \"precision\": self.overall_score.precision,\n \"recall\": self.overall_score.recall,\n \"f1\": self.overall_score.fscore\n }\n","repo_name":"anhnt170489/jamin_nlp","sub_path":"core/eval/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2358116444","text":"from django.shortcuts import render\n\nfrom rest_framework import generics\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\n\nfrom movies.models import *\nfrom movies.serializers import *\n\ncity_param = openapi.Parameter('cityID', openapi.IN_QUERY, description=\"Name of the city\", type=openapi.TYPE_STRING)\nmovie_param = openapi.Parameter('movieID', openapi.IN_QUERY, description=\"Name of the movie\", type=openapi.TYPE_STRING)\nshow_param = openapi.Parameter('showID', openapi.IN_QUERY, description=\"ID of the show\", type=openapi.TYPE_INTEGER)\ntheater_param = openapi.Parameter('theater', openapi.IN_QUERY, description=\"ID of the theater\", type=openapi.TYPE_INTEGER)\ntheaterid_body_param = openapi.Parameter('theaterID', openapi.IN_BODY, description=\"ID of the theater\", type=openapi.TYPE_INTEGER)\ntheater_body_param = openapi.Parameter('theater_name', openapi.IN_BODY, description=\"Name of the theater\", type=openapi.TYPE_INTEGER)\ncity_body_param = openapi.Parameter('city', openapi.IN_BODY, description=\"Name of the city\", type=openapi.TYPE_STRING)\naddress_param = openapi.Parameter('address', openapi.IN_BODY, description=\"ID of the theater\", type=openapi.TYPE_INTEGER)\nseattype_body_param = openapi.Parameter('seatType', openapi.IN_BODY, description=\"Type of the seat\", type=openapi.TYPE_STRING)\nseatnumber_body_param = openapi.Parameter('seatNumber', openapi.IN_BODY, description=\"Number of the seat\", type=openapi.TYPE_STRING)\nmovie_body_param = openapi.Parameter('movie', openapi.IN_BODY, description=\"Name of the movie\", type=openapi.TYPE_STRING)\nshowtime_body_param = openapi.Parameter('showtime', openapi.IN_BODY, description=\"Date and Time of the show\", type=openapi.TYPE_STRING)\n\n# Create your views here.\nclass CityList(generics.ListCreateAPIView):\n '''\n description: This API Lists and Creates City informations.\n parameters:\n - name: city\n type: string enum\n required: true\n location: body\n - name: state\n type: string\n required: true\n location: body\n - name: zipcode\n type: int\n required: true\n location: body\n '''\n\n queryset = City.objects.all()\n serializer_class = CitySerializer\n\nclass TheaterList(APIView):\n '''\n description: This API Lists and Creates Theater informations. \n The list can be filtered using optional query paramaters like city name and movie name\n '''\n\n @swagger_auto_schema(manual_parameters=[city_param, movie_param],)\n def get(self, request, format=None):\n '''\n description: This API Lists and Creates Theater informations. \n The list can be filtered using optional query paramaters like city name and movie name\n parameters:\n - name: city\n description: name of the city\n type: string enum\n required: true\n location: query\n - name: movie\n description: name of the movie\n type: string\n required: true\n location: query\n '''\n\n theater = self.get_queryset()\n serializer = TheaterSerializer(theater, many=True)\n return Response(serializer.data)\n\n @swagger_auto_schema(request_body=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n required=['version'],\n properties={\n 'theater': theater_body_param,\n 'city': city_body_param,\n 'address': address_param,\n },\n ),\n operation_description='Add a theater')\n def post(self, request, format=None):\n '''\n description: This API Creates Theater informations.\n parameters:\n - name: name\n description: name of the theater\n type: string \n required: true\n location: body\n - name: city\n description: name of the city\n type: string\n required: true\n location: body\n - name: address\n description: address of the theater\n type: string\n required: true\n location: body\n '''\n\n try:\n if(request.data['name'] and request.data['city'] and request.data['address']):\n try:\n city = City.objects.get(city=request.data['city'])\n except City.DoesNotExist:\n return Response('City Not Found', status=status.HTTP_404_NOT_FOUND)\n\n theater = Theater.objects.create(city=city, name=request.data['name'], address=request.data['address'])\n serializer = TheaterSerializer(theater)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response('Inavlid parameters', status=status.HTTP_400_BAD_REQUEST)\n\n # Filters the queryset based on optional parameters provided\n def get_queryset(self):\n queryset = Theater.objects.all()\n cityID = self.request.query_params.get('city')\n movieID = self.request.query_params.get('movie')\n\n # Filter theater based on city name\n if cityID:\n queryset = queryset.filter(city__city=cityID)\n # Filter theater based on movie name\n if movieID:\n\n #Finds the reverse FK mapping from shows and lists the theaters down.\n showqueryset = Show.objects.all()\n showqueryset = showqueryset.filter(movie__name=movieID).values_list('theater__id', flat=True)\n queryset = queryset.filter(id__in=showqueryset)\n \n return queryset\n\nclass TheaterSeatList(APIView):\n '''\n description: This API Lists and Creates Theater Seating informations of a particular theater. \n The list can be filtered using optional query paramaters theaterID\n\n '''\n @swagger_auto_schema(manual_parameters=[theater_param],)\n def get(self, request, format=None):\n '''\n description: This API Lists Theater Seating informations. \n parameters:\n - name: theater\n description: name of the theater\n type: string \n required: true\n location: query\n '''\n theaterseat = self.get_queryset()\n serializer = TheaterSeatSerializer(theaterseat, many=True)\n return Response(serializer.data)\n\n @swagger_auto_schema(request_body=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n required=['version'],\n properties={\n 'theater': theaterid_body_param,\n 'seatNumber': seatnumber_body_param,\n 'seatType': seattype_body_param,\n },\n ),\n operation_description='Add a Seat in Theater')\n def post(self, request, format=None):\n '''\n description: This API Creates Theater Seating informations.\n parameters:\n - name: theater\n description: ID of the theater\n type: string \n required: true\n location: body\n - name: seatNumber\n description: seat number of the theater seat\n type: string\n required: true\n location: body\n - name: seatType\n description: type of the seat from premium/gold/front \n type: string enum\n required: true\n location: body\n '''\n try:\n if(request.data['seatNumber'] and request.data['seatType'] and request.data['theater']):\n try:\n theater = Theater.objects.get(id=request.data['theater'])\n except Theater.DoesNotExist:\n return Response('Theater Not Found', status=status.HTTP_404_NOT_FOUND)\n\n theaterseat = TheaterSeat.objects.create(seatNumber=request.data['seatNumber'], theater=theater, seatType=request.data['seatType'])\n serializer = TheaterSeatSerializer(theaterseat)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response('Inavlid parameters', status=status.HTTP_400_BAD_REQUEST)\n \n # Filters the queryset based on optional parameters provided\n def get_queryset(self):\n queryset = TheaterSeat.objects.all()\n theaterID = self.request.query_params.get('theater')\n\n # Filter theater based on theater id\n if theaterID:\n queryset = queryset.filter(theater__id=theaterID)\n \n return queryset\n\nclass MovieList(generics.ListCreateAPIView):\n '''\n description: This API Lists and Creates Movie informations.\n parameters:\n - name: name\n type: string\n required: true\n location: body\n - name: cast\n type: string\n required: false\n location: body\n - name: director\n type: string\n required: false\n location: body\n - name: language\n type: string enum\n required: false\n location: body\n - name: run_length\n type: string\n required: false\n location: body\n - name: certificate\n type: string enum\n required: false\n location: body\n - name: image\n type: file\n required: false\n location: body\n '''\n\n queryset = Movie.objects.all()\n serializer_class = MovieSerializer\n\nclass ShowList(APIView):\n '''\n description: This API Lists and Creates Shows informations. \n The list can be filtered using optional query paramaters like city name, theater name and movie name\n '''\n\n @swagger_auto_schema(manual_parameters=[city_param, movie_param, theater_param],)\n def get(self, request, format=None):\n '''\n description: This API Lists Shows informations. \n The list can be filtered using optional query paramaters like city name, theater name and movie name\n parameters:\n - name: city\n description: name of the city\n type: string enum\n required: false\n location: query\n - name: movie\n description: name of the movie\n type: string\n required: false\n location: query\n - name: theater\n description: name of the theater\n type: string\n required: false\n location: query\n '''\n show = self.get_queryset()\n serializer = ShowSerializer(show, many=True)\n return Response(serializer.data)\n\n @swagger_auto_schema(request_body=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n required=['version'],\n properties={\n 'theater': theater_body_param,\n 'movie': movie_body_param,\n 'showtime': showtime_body_param,\n },\n ),\n operation_description='Add a Show in Theater')\n def post(self, request, format=None):\n '''\n description: This API Creates Show informations.\n parameters:\n - name: theater\n description: name of the theater\n type: string \n required: true\n location: body\n - name: movie\n description: name of the movie\n type: string\n required: true\n location: body\n - name: showtime\n description: date and time of the show\n type: datetime\n required: true\n location: body\n '''\n\n try:\n if(request.data['movie'] and request.data['theater'] and request.data['showtime']):\n try:\n movie = Movie.objects.get(name=request.data['movie'])\n except Movie.DoesNotExist:\n return Response('Movie Not Found', status=status.HTTP_404_NOT_FOUND)\n\n try:\n theater = Theater.objects.get(name=request.data['theater'])\n except Theater.DoesNotExist:\n return Response('Theater Not Found', status=status.HTTP_404_NOT_FOUND)\n\n show = Show.objects.create(movie=movie, theater=theater, show_time=request.data['showtime'])\n serializer = ShowSerializer(show)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response('Inavlid parameters', status=status.HTTP_400_BAD_REQUEST)\n\n # Filters the queryset based on optional parameters provided\n def get_queryset(self):\n queryset = Show.objects.all()\n cityID = self.request.query_params.get('city')\n theaterID = self.request.query_params.get('theater')\n movieID = self.request.query_params.get('movie')\n\n if movieID:\n queryset = queryset.filter(movie__name=movieID)\n if theaterID:\n queryset = queryset.filter(theater=theaterID)\n if cityID:\n queryset = queryset.filter(theater__city__city=cityID)\n \n return queryset\n\n","repo_name":"asadahmedtech/bookbox","sub_path":"mrs/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22606844039","text":"from sqlalchemy import Table, or_, distinct\nfrom common.database import db_connect\nfrom app.config.config import config\nfrom app.settings import env\nfrom model.collection import Collection\nfrom model.comment import Comment\nfrom model.concern import Concern\nfrom model.praise import Praise\nfrom model.user import User\nfrom sqlalchemy.sql.functions import sum, count\n\nengine, db_session, Base = db_connect()\n\n\nclass Article(Base):\n __table__ = Table('article', Base.metadata, autoload_with=engine)\n\n def calc_search_total_page(self, keyword):\n\n count = config[env].page_count\n\n if keyword:\n search_total_rows = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid).filter(\n or_(\n Article.title.like('%' + keyword + '%'),\n Article.article_content.like('%' + keyword + '%')\n ),\n Article.is_valid == 1,\n Article.drafted == 1\n ).all()\n\n search_total_page = len(search_total_rows) // count\n # print('search rows:', keyword, len(search_total_rows))\n else:\n search_total_rows = []\n search_total_page = 0\n\n return search_total_page + 1, search_total_rows\n\n def calc_total_page(self, article_type):\n\n if article_type == 'recommend':\n total_rows = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid\n ).filter(\n Article.drafted == 1,\n Article.is_valid == 1).all()\n total_page = len(total_rows) // config[env].page_count\n else:\n total_rows = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid\n ).filter(\n Article.label_name == article_type,\n Article.drafted == 1,\n Article.is_valid == 1).all()\n\n total_page = len(total_rows) // config[env].page_count\n\n return total_page + 1, total_rows\n\n def find_article(self, page, article_type='recommend'):\n if page < 1:\n page = 1\n # page = int(page)\n # count = page * config[env].page_count\n count = config[env].page_count\n\n if article_type == 'recommend':\n result = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid\n ).filter(\n # Article.label_name == 'java',\n Article.drafted == 1,\n Article.is_valid == 1).order_by(\n Article.create_time.desc()\n ).offset((page - 1) * count).limit(count).all()\n # ).limit(count).all()\n else:\n result = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid).filter(\n Article.label_name == article_type,\n Article.is_valid == 1,\n Article.drafted == 1\n ).order_by(\n Article.browse_num.desc()\n ).offset((page - 1) * count).limit(count).all()\n # ).limit(count).all()\n\n return result\n\n def search_article(self, page, keyword):\n if page < 1:\n page = 1\n count = config[env].page_count\n\n result = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid).filter(\n or_(\n Article.title.like('%' + keyword + '%'),\n Article.article_content.like('%' + keyword + '%')\n ),\n Article.is_valid == 1,\n Article.drafted == 1\n ).order_by(\n Article.browse_num.desc()\n ).offset((page - 1) * count).limit(count).all()\n\n return result\n\n def get_article_detail(self, aid):\n row = db_session.query(Article).filter_by(aid=aid, drafted=1, is_valid=1).first()\n if row:\n row.browse_num += 1\n db_session.commit()\n return row\n\n def get_article_image(self, aid):\n row = db_session.query(Article).filter_by(aid=aid, drafted=1, is_valid=1).first()\n if not row.article_image.startswith('/images/'):\n row.article_image = '/images/article/header/' + row.article_image\n return row.article_image\n\n # def get_relation_articles(self, label_name):\n # result = db_session.query(Article).filter_by(label_name=label_name).order_by(\n # Article.browse_num.desc()\n # ).limit(5).all()\n # return result\n\n def get_relation_articles(self, tag_list):\n result_list = set()\n\n rows = db_session.query(Article).filter_by(\n drafted=1,\n is_valid=1\n ).order_by(\n Article.browse_num.desc()\n ).all()\n\n for row in rows:\n if row.article_tag.split(','):\n row_tag = row.article_tag.split(',')\n for tag_name in tag_list:\n if tag_name in row_tag:\n result_list.add(row)\n\n return list(result_list)[:5]\n\n def get_user_articles(self, uid):\n user_articles = db_session.query(count(Article.uid)).filter_by(uid=uid, drafted=1, is_valid=1).first()\n # print(user_articles)\n if not user_articles:\n return 0\n # print(user_articles)\n return user_articles[0]\n\n def get_collection_and_praise(self, uid):\n collection = db_session.query(count(Article.uid)).join(\n Collection, Article.aid == Collection.aid).filter(\n Article.uid == uid,\n Article.drafted == 1,\n Article.is_valid == 1,\n Collection.collected == 1,\n Collection.is_valid == 1\n ).first()\n\n praise = db_session.query(count(Article.uid)).join(\n Praise, Article.aid == Praise.aid).filter(\n Article.uid == uid,\n Article.drafted == 1,\n Article.is_valid == 1,\n Praise.praised == 1,\n Praise.is_valid == 1\n ).first()\n\n collection_result = 0\n praise_result = 0\n\n if collection:\n collection_result = collection[0] or 0\n\n if praise:\n praise_result = praise[0] or 0\n\n result = collection_result + praise_result\n\n return result\n\n def insert_article(self, uid, title, content, drafted):\n article = Article(\n uid=uid,\n title=title,\n article_content=content,\n drafted=drafted\n )\n\n db_session.add(article)\n db_session.commit()\n\n return article.aid\n\n def update_article(self,\n aid,\n title,\n content,\n drafted,\n label_name='',\n article_tag='',\n article_type=''\n ):\n\n row = db_session.query(Article).filter_by(aid=aid).first()\n row.title = title\n row.article_content = content\n row.drafted = drafted\n row.label_name = label_name\n row.article_tag = article_tag\n row.article_type = article_type\n\n db_session.commit()\n\n return aid\n\n def update_article_header_img(self, aid, filename):\n row = db_session.query(Article).filter_by(aid=aid, is_valid=1).first()\n if row:\n row.article_image = filename\n db_session.commit()\n\n def get_all_drafted(self, uid):\n result = db_session.query(Article).filter_by(uid=uid, drafted=0, is_valid=1).all()\n return result\n\n def get_one_drafted(self, aid):\n result = db_session.query(Article).filter_by(aid=aid, drafted=0, is_valid=1).first()\n return result\n\n def remove_one_drafted(self, aid):\n row = db_session.query(Article).filter_by(aid=aid, drafted=0, is_valid=1).first()\n row.is_valid = 0\n db_session.commit()\n\n def get_article_by_uid(self, uid):\n # 當前用戶寫了哪些文章\n rows = db_session.query(Article).filter_by(\n uid=uid,\n drafted=1,\n is_valid=1\n ).order_by(\n Article.create_time.desc()\n ).all()\n\n return self.add_article_image_path(rows)\n\n def get_collection_article_by_uid(self, uid):\n # 當前用戶,收藏了哪些文章\n rows = db_session.query(Article).join(\n Collection, Article.aid == Collection.aid\n ).filter(\n Collection.uid == uid,\n Collection.collected == 1,\n Collection.is_valid == 1,\n Article.is_valid == 1\n ).order_by(\n Collection.create_time.desc()\n ).all()\n\n return self.add_article_image_path(rows)\n\n def get_comment_article_by_uid(self, uid):\n # 當前用戶,評論了哪些文章 我的评论\n rows = db_session.query(Article).join(\n Comment, Article.aid == Comment.aid\n ).filter(\n Comment.uid == uid,\n Comment.floor_number != 0,\n Comment.is_valid == 1,\n Article.is_valid == 1\n ).order_by(\n Comment.create_time.desc()\n ).all()\n\n return self.add_article_image_path(rows)\n\n def get_praise_article_by_uid(self, uid):\n # 我的推荐\n rows = db_session.query(Article).join(\n Praise, Article.aid == Praise.aid\n ).filter(\n Praise.uid == uid,\n Praise.praised == 1,\n Praise.is_valid == 1,\n Article.is_valid == 1\n ).order_by(\n Praise.create_time.desc()\n ).all()\n\n return self.add_article_image_path(rows)\n\n def add_article_image_path(self, rows):\n if not rows:\n return None\n\n for row in rows:\n if not row.article_image:\n row.article_image = '1.jpg'\n row.article_image = config[env].article_header_image_path + row.article_image\n\n return rows\n\n # 關注\n def get_concern_num_by_uid(self, uid):\n concern_num = db_session.query(Concern).filter(\n Concern.fid == uid,\n Concern.is_valid == 1\n ).count()\n\n return concern_num\n\n # 粉絲數\n def get_fans_num_by_uid(self, uid):\n fans_num = db_session.query(Concern).filter(\n Concern.tid == uid,\n Concern.is_valid == 1\n ).count()\n\n return fans_num\n\n # 績分\n def get_article_score_by_uid(self, uid):\n score = db_session.query(Article).filter(\n Article.uid == uid,\n Article.is_valid == 1,\n Article.drafted == 1\n ).count()\n\n return score\n\n def remove_article(self, uid, aid):\n row = db_session.query(Article).filter(\n Article.uid == uid,\n Article.aid == aid,\n Article.is_valid == 1,\n Article.drafted == 1\n ).first()\n\n if row:\n praise_row = db_session.query(Praise).filter(\n Praise.aid == aid,\n Praise.praised == 1\n ).first()\n\n collection_row = db_session.query(Collection).filter(\n Collection.aid == aid,\n Collection.collected == 1\n ).first()\n\n if praise_row:\n praise_row.is_valid = 0\n\n if collection_row:\n collection_row.collected = 0\n\n row.is_valid = 0\n db_session.commit()\n return True\n return False\n\n def get_article_list_by_uid(self, uid):\n rows = db_session.query(Article).filter(\n Article.uid == uid,\n Article.drafted == 1,\n Article.is_valid == 1\n ).order_by(\n Article.create_time.desc()\n ).all()\n\n return rows\n\n def get_band_data(self):\n row = db_session.query(Article, User.nickname).join(\n User, User.uid == Article.uid).filter(\n Article.label_name == 'band',\n Article.is_valid == 1,\n Article.drafted == 1\n ).order_by(\n Article.create_time.desc()\n ).limit(1).first()\n\n return row\n\n def get_praised_by_author(self, uid):\n praised_num = db_session.query(count(Article.uid)).join(\n Praise, Article.aid == Praise.aid).filter(\n Article.uid == uid,\n Article.drafted == 1,\n Article.is_valid == 1,\n Praise.praised == 1,\n Praise.is_valid == 1\n ).first()\n\n return praised_num[0] or 0\n\n def get_article_num_by_author(self, uid):\n # 當前用戶寫的文章數\n rows = db_session.query(Article).filter_by(\n uid=uid,\n drafted=1,\n is_valid=1\n ).all()\n\n if not rows:\n return 0\n\n return len(rows)\n\n def get_author_data(self, uid):\n row = db_session.query(User).filter_by(\n uid=uid,\n is_valid=1\n ).first()\n # row = db_session.query(User).join(\n # Article,\n # User.uid == Article.uid\n # ).filter(\n # User.uid == uid\n # ).distinct().first()\n\n avatar = '/images/headers/' + row.avatar\n gender = row.gender\n job = row.job\n slogan = row.slogan\n\n return avatar, gender, job, slogan\n\n def get_uid_by_aid(self, aid):\n row = db_session.query(Article).filter_by(\n aid=aid\n ).first()\n\n return row.uid\n\n","repo_name":"ziliang-wang/buhuman","sub_path":"model/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":13481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26000781987","text":"import os\nimport tqdm\nimport argparse\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\nfrom dataset.dataset import SocialIQaDataset\n\nimport torch\nimport numpy as np\nfrom transformers import AutoModelForMultipleChoice, AutoTokenizer\nfrom transformers import AdamW\nimport datasets\n\n# setup args\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\n '--model',\n type=str,\n default='roberta-base'\n)\narg_parser.add_argument(\n '--gpu',\n type=int,\n default=0,\n help=f'Specify which gpu to use'\n)\narg_parser.add_argument(\n '-e', '--epoch',\n type=int,\n default=5,\n help=f'Specify number of training epochs'\n)\narg_parser.add_argument(\n '-b', '--batch',\n type=int,\n default=1,\n help=f'Specify batch size'\n)\narg_parser.add_argument(\n '--seed',\n type=int,\n default=0,\n help=f'Specify random seed'\n)\nargs = arg_parser.parse_args()\n\nos.chdir('../')\n\n'''\nhyper-parameter \n'''\nMODEL = args.model\nDEVICE_ID = args.gpu # adjust this to use an unoccupied GPU\nBATCH_SIZE = args.batch\nNUM_EPOCH = args.epoch\nSEED = args.seed\n'''\ncontrol and logging\n'''\n# control randomness\ntorch.manual_seed(SEED)\nnp.random.seed(SEED)\n# model saving and logging paths\nos.makedirs(os.path.dirname('model_weights' + '/'), exist_ok=True)\nMODEL_NAME = f'{MODEL}-socialiqa_bsz_{BATCH_SIZE}_seed_{SEED}'\nlog_file = open(os.path.join('model_weights', f'{MODEL_NAME}.log'), 'w')\n\n'''\nmodel and tokenizer\n'''\n# CUDA settings\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nif device == 'cuda':\n torch.cuda.set_device(DEVICE_ID) # use an unoccupied GPU\n# load model\nmodel = AutoModelForMultipleChoice.from_pretrained(MODEL).to(device)\n# load tokenizer\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\n\n# optimizer\nno_decay = ['bias', 'LayerNorm.weight']\noptimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\noptimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)\n\n\n\n# record these for every epoch\nloss_record = []\n\nprint(f'Training {MODEL} for {NUM_EPOCH} epochs, with batch size {BATCH_SIZE}')\n\nfor epo in range(NUM_EPOCH):\n model.train()\n total_loss = 0\n \n '''\n DataLoader\n '''\n dataset = SocialIQaDataset(data='train')\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n\n # training\n train_iterator_with_progress = tqdm.tqdm(data_loader)\n idx = 0\n for batch in train_iterator_with_progress:\n # prepare input\n batch_answerA = batch['answerA']\n batch_answerB = batch['answerB']\n batch_answerC = batch['answerC']\n batch_label = (batch['label'] - 1).to(device)\n batch_prompt = []\n for b in range(BATCH_SIZE):\n batch_prompt.append(batch['context'][b] + '[UNUSED]' + batch['question'][b])\n\n # input encoding\n tokenizer.add_tokens(['[UNUSED]'])\n model.resize_token_embeddings(len(tokenizer))\n input_encoding = tokenizer(batch_prompt * 3, batch_answerA + batch_answerB + batch_answerC, return_tensors='pt', padding=True).to(device)\n\n # zero-out gradient\n optimizer.zero_grad()\n outputs = model(**{k: v.unsqueeze(0) for k,v in input_encoding.items()}, labels=batch_label)\n\n # compute loss and perform a step\n loss = outputs.loss\n loss.backward()\n optimizer.step()\n\n idx += 1\n\n total_loss += float(loss)\n train_iterator_with_progress.set_description(f'Epoch {epo}')\n train_iterator_with_progress.set_postfix({'Loss': loss.item()})\n\n loss_record.append(total_loss)\n print(f'Loss in epoch {epo}: {total_loss}')\n log_file.write(f'Epoch:{epo} ')\n log_file.write(f'Loss:{total_loss} ')\n\n # evaluation\n model.eval()\n with torch.no_grad():\n '''\n DataLoader\n '''\n valid_dataset = SocialIQaDataset(data='dev')\n valid_data_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=True)\n \n accuracy = datasets.load_metric('accuracy')\n\n for batch in valid_data_loader:\n # prepare input\n batch_answerA = batch['answerA']\n batch_answerB = batch['answerB']\n batch_answerC = batch['answerC']\n batch_label = torch.tensor(batch['label']).to(device) - 1\n batch_prompt = []\n for b in range(BATCH_SIZE):\n batch_prompt.append(batch['context'][b] + '[UNUSED]' + batch['question'][b])\n\n # input encoding\n tokenizer.add_tokens(['[UNUSED]'])\n model.resize_token_embeddings(len(tokenizer))\n input_encoding = tokenizer(batch_prompt * 3, batch_answerA + batch_answerB + batch_answerC, return_tensors='pt', padding=True).to(device)\n\n # forward\n outputs = model(**{k: v.unsqueeze(0) for k,v in input_encoding.items()}, labels=batch_label)\n predictions = torch.argmax(outputs.logits, dim=1)\n\n accuracy.add_batch(predictions=predictions, references=batch_label)\n\n acc = accuracy.compute()\n print(f'acc in epoch {epo}: {acc}')\n log_file.write(f'acc:{acc} ')\n\n SAVE_PATH = os.path.join('model_weights', f'{MODEL_NAME}_epoch_{epo+1}.pt')\n torch.save(model.state_dict(), SAVE_PATH)","repo_name":"ziyuanca/AITA","sub_path":"train/train_SocialIQa.py","file_name":"train_SocialIQa.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7891769315","text":"import uuid\nfrom typing import Literal\n\nfrom chaosiqagent.types import Job\n\n\ndef create_job(target_type: Literal[\"experiment\", \"verification\"] = None):\n target_id = uuid.uuid4()\n if not target_type:\n target_type = \"experiment\"\n return Job(\n id=uuid.uuid4(),\n agent_id=uuid.uuid4(),\n org_id=uuid.uuid4(),\n team_id=uuid.uuid4(),\n target_id=target_id,\n target_type=target_type,\n target_url=f\"https://console.example.com/assets/{target_type}s/{target_id}\",\n access_token=\"azerty1234\",\n payload={},\n )\n","repo_name":"chaosiq/chaosiq-agent","sub_path":"tests/fixtures/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36465541111","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User, Permission\n\nfrom majora2 import models\nfrom majora2 import util\nfrom tatl import models as tmodels\nfrom django.utils import timezone\n\nclass Command(BaseCommand):\n help = \"Add a DigitalResourceNode\"\n def add_arguments(self, parser):\n parser.add_argument('name')\n\n def handle(self, *args, **options):\n su = User.objects.get(is_superuser=True)\n\n node, created = util.mkroot(options[\"name\"])\n if created:\n print(\"Node %s created\" % node.unique_name)\n treq = tmodels.TatlPermFlex(\n user = su,\n substitute_user = None,\n used_permission = \"majora2.management.commands.mkroot\",\n timestamp = timezone.now(),\n content_object = node,\n )\n treq.save()\n else:\n print(\"Node %s already exists\" % node.unique_name)\n","repo_name":"SamStudio8/majora2","sub_path":"majora2/management/commands/mknode.py","file_name":"mknode.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"27644907372","text":"# GREG Client\nimport zmq\nimport zmq.utils.monitor\nimport chess\nimport sys\nimport http.client\nimport json\nimport time\nimport signal\nimport os\nimport subprocess\nimport socket\n\n# Globals\n\nGameOver = False\nResult = \"\"\nNumMoves = 0\n\n# Functions\n\ndef usage(status):\n '''Prints Usage message'''\n print(f\"Usage: ./GREGSimulator.py [options]\")\n print(f\" -b DEPTH Depth of searches for black (depth = 1)\")\n print(f\" -w DEPTH Depth of searches for white (depth = 1)\")\n print(f\" -n NAME Add unique name\")\n print(f\" -g GAMES Number of Games to play\")\n print(f\" -c COUNT Number of workers (Only for use with WorkerManager)\")\n print(f\" -s H P Server Host and Port (Only for use with WorkerManager)\")\n print(f\" -d Debug\")\n print(f\" -h help\")\n exit(status)\n\ndef spawn(num, port, host):\n '''Asks Worker Manager to spawn Workers'''\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.sendto(json.dumps({\"req\":\"spawn\",\"numWorkers\":f\"{num}\"}).encode(), (host, port))\n\ndef kill(port, host):\n '''Asks Worker Manager to kill Workers'''\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.sendto(json.dumps({\"req\":\"kill\"}).encode(), (host, port))\n\ndef get_move(proc, turn, debug):\n '''Gets move from a process'''\n global GameOver\n global Result\n global NumMoves\n\n # if game is over, dont do anything\n if GameOver:\n return\n\n # get the move\n if debug:\n print(f\"getting move\")\n move = proc.stdout.readline().decode().split()\n if debug:\n print(f\"getting move for {turn}:{move}|\")\n \n # add to total moves made\n NumMoves += 1\n\n # check of game ended\n if move[0] == \"game\":\n Result = \" \".join(move)\n GameOver = True\n return\n\n return move[2] + \"\\n\"\n\ndef make_move(proc, move, turn, debug):\n '''Makes a move to a process'''\n global GameOver\n\n # if game is over, dont do anything\n if GameOver:\n return\n\n if debug:\n print(f\"making move for {turn}:{move}\", end=\"\")\n \n # read \"make move\" or possible \"game over\"\n proc.stdout.readline().decode()\n\n # write the move\n proc.stdin.write(move.encode())\n proc.stdin.flush()\n return\n\ndef play_game(black_depth=1, white_depth=1, name=\"\", debug=False):\n ''' \n plays a game of CPU vs CPU\n 1. \"black\" gets the whites cpu move\n 2. We then use \"white\" to send this move in and get the move black should play\n 3. \"white\" then reads this move and \"black\" writes this.\n\n So, the \"black\" game is the main board, and black uses \"white\" to find out its own move.\n I know, kinda dumb and confusing but not really sure how else to phrase. Maybe \"main\" and \"secondary\"?\n '''\n \n # start players\n black = subprocess.Popen([\"python\", \"./GREGClient.py\", \"-d\", white_depth, \"-n\", name, \"-b\", \"-s\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n white = subprocess.Popen([\"python\", \"./GREGClient.py\", \"-d\", black_depth, \"-n\", name, \"-s\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n # play game\n while not GameOver:\n # get move from white cpu\n white_move = get_move(black, \"black\", debug)\n\n # send that move to white to get see what black should play\n make_move(white, white_move, \"white\", debug)\n\n # read blacks response from white\n black_move = get_move(white, \"white\", debug)\n\n # write blacks move to white\n make_move(black, black_move, \"black\", debug)\n\n # collect\n white.kill()\n white.wait()\n black.kill()\n black.wait()\n\n return\n\n# Main Execution\n\ndef main():\n global GameOver\n global Result\n global NumMoves\n\n # options\n black_depth = \"1\"\n white_depth = \"1\"\n debug = False\n name = \"test\"\n num_games = 1\n num_workers = 1\n argind = 1\n host = \"student10.cse.nd.edu\"\n port = 7777\n \n # parse command args\n while argind < len(sys.argv):\n arg = sys.argv[argind]\n if arg == \"-d\":\n debug = True\n elif arg == \"-n\":\n argind += 1\n name = sys.argv[argind]\n elif arg == \"-g\":\n argind += 1\n num_games = int(sys.argv[argind])\n elif arg == \"-c\":\n argind += 1\n num_workers = int(sys.argv[argind])\n elif arg == \"-s\":\n argind += 1\n host = sys.argv[argind]\n argind += 1\n port = int(sys.argv[argind])\n elif arg == \"-b\":\n argind += 1\n black_depth = sys.argv[argind]\n elif arg == \"-w\":\n argind += 1\n white_depth = sys.argv[argind]\n elif arg == \"-h\":\n usage(0)\n else:\n usage(1)\n argind += 1\n\n # spawn workers\n spawn(num_workers, port, host)\n\n # run tests\n white_wins = 0\n black_wins = 0\n draws = 0\n total_time = 0\n times = []\n moves = []\n games = []\n for i in range(num_games):\n # reset variables\n start = time.time_ns()\n GameOver = False\n Result = \"\"\n NumMoves = 0\n\n # play the game\n play_game(black_depth, white_depth, name, debug)\n \n # end\n total_time = time.time_ns() - start\n \n # collect data\n if \"1-0\" in Result:\n white_wins += 1\n elif \"0-1\"in Result:\n black_wins += 1\n elif \"1/2-1/2\" in Result:\n draws += 1\n\n games.append(Result)\n times.append(total_time)\n moves.append(NumMoves)\n\n # collect workers\n kill(port, host)\n\n # print data\n print(f\"Workers : {num_workers}\")\n print(f\"White depth: {white_depth}\")\n print(f\"White Wins : {white_wins}\")\n print(f\"Black depth: {black_depth}\")\n print(f\"Black Wins : {black_wins}\")\n print(f\"Draws : {draws}\")\n print(f\"Moves/sec : {sum(moves)/sum(times) * 1000000000}\")\n print(f\"All games :\")\n for game in games:\n print(f\"{game.split()[2]}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bwiseman77/GREG","sub_path":"GREGSimulator.py","file_name":"GREGSimulator.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"16011857044","text":"import nltk\nfrom nltk.tag import pos_tag\nfrom nltk.tokenize import word_tokenize\nimport os\nfrom textblob import TextBlob\nfrom nltk.stem.snowball import EnglishStemmer\n\nstemmer = EnglishStemmer()\n\noutput_path = \"C:/Users/ShravanJagadish/Desktop/Search/Final Project/Output/14_Noun Extraction/\"\npath = \"C:/Users/ShravanJagadish/Desktop/Search/Final Project/Output/14/\"\n\n\nif not os.path.exists(output_path):\n \t\tos.makedirs(output_path)\n\nnoun_list_positive = \"\"\n\nfor file_name in os.listdir(path):\n\tnoun_list_negative = \"\"\t\n\twith open(path+file_name, 'r') as file:\n\t\t#month = file_name.rsplit(\"_\")\n\t\tmy_file = file_name.rsplit(\".txt\")\n\t\tfor line in file:\n\t\t\tblob = TextBlob(line)\n\t\t\tfor sentence in blob.sentences:\n\t\t\t\t# if sentence.sentiment.polarity < 0:\n\t\t\t\t\tfor nword in sentence.noun_phrases:\n\t\t\t\t\t\tnlist = word_tokenize(nword)\n\t\t\t\t\t\t#print nlist\n\t\t\t\t\t\t# if month[1] == 'positive':\n\t\t\t\t\t\t# \t\tnoun_list_positive+=\"\\\"\"\n\t\t\t\t\t\t# else:\n\t\t\t\t\t\t# \t\tnoun_list_negative+=\"\\\"\"\n\t\t\t\t\t\tfor word in nlist:\n\t\t\t\t\t\t\t# if word not in [\"amelie\",\"airport\",\"charlotte\"]:\n\t\t\t\t\t\t\t# if month[1] == 'positive':\n\t\t\t\t\t\t\t# \tnoun_list_positive+=stemmer.stem(word) + \"_\"\n\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t\tnoun_list_negative+=stemmer.stem(word.lower()) + \"_\"\n\t\t\t\t\t\t# if month[1] == 'positive':\n\t\t\t\t\t\t# \t\tnoun_list_positive=noun_list_positive[:-1]\n\t\t\t\t\t\t# \t\tnoun_list_positive+=\"|\"\n\t\t\t\t\t\t# else:\n\t\t\t\t\t\tnoun_list_negative=noun_list_negative[:-1]\n\t\t\t\t\t\tnoun_list_negative+=\" \"\n\n\twith open(output_path+file_name,'w') as writefile:\n\t\twritefile.write(noun_list_negative)\n# with open(output_path+\"sh-2012.txt\",'w') as writefile:\n# \twritefile.write(noun_list_negative)\n","repo_name":"sagarkrkv/Yelp-Dataset-Challenge","sub_path":"Other Trials/noun_phrase_extraction.py","file_name":"noun_phrase_extraction.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18164537686","text":"# Luke Psyhogios, March 28th 2022, Section 001, Problem #0, Challenge 3: Simple Functions\n\n\n# Function: simple_sort_version\n# Input: num1 (number), num2 (number), num3 (number)\n# Processing: Compares three numbers and returns them in ascending order\n# Output: numbers in ascending ordern\ndef simple_sort_version(num1, num2, num3):\n if num1>num2 and num1>num2:\n if num2>num3:\n return num3, num2, num1\n if num3>num2:\n return num2, num3, num1\n if num2>num1 and num2>num3:\n if num1>num3:\n return num3, num1, num2\n if num3>num1:\n return num1, num3, num2\n if num3>num1 and num3>num2:\n if num1>num2:\n return num2, num1, num3\n if num2>num1:\n return num1, num2, num3\n if num1 == num2:\n if num3>num1:\n return num3, num1, num2\n if num1>num3:\n return num3, num2, num1\n if num1 == num3:\n if num2>num1:\n return num3, num1, num2\n if num1>num2:\n return num2, num3, num1\n if num2 == num3:\n if num1>num2:\n return num3, num2, num1\n if num2>num1:\n return num1, num3, num2\n\n# Test code\na,b,c = simple_sort_version(10,20,30)\nprint (a,b,c) # 10 20 30\na,b,c = simple_sort_version(10,30,20)\nprint (a,b,c) # 10 20 30\na,b,c = simple_sort_version(30,20,10)\nprint (a,b,c) # 10 20 30\na,b,c = simple_sort_version(30,20,20)\nprint (a,b,c) # 20 20 30\n\n","repo_name":"LukePsyh/Python-Projects","sub_path":"simple_sort_function.py","file_name":"simple_sort_function.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18283013352","text":"'''\nAuthor: fghpdf\nDate: 2021-10-06 09:06:33\nLastEditTime: 2021-10-06 09:20:00\nLastEditors: fghpdf\n'''\nfrom typing import List\nimport unittest\n\nclass Solution:\n def reverseWords(self, s: str) -> str:\n splitStrings = s.split(\" \")\n for index in range(len(splitStrings)):\n splitStr = splitStrings[index]\n splitStrings[index] = splitStr[::-1]\n\n return \" \".join(splitStrings)\n\n\nclass TestReverseWords(unittest.TestCase):\n def test_reverse_words(self):\n sol = Solution()\n self.assertEqual(sol.reverseWords(\"Let's take LeetCode contest\"), \"s'teL ekat edoCteeL tsetnoc\")\n self.assertEqual(sol.reverseWords(\"God Ding\"), \"doG gniD\")\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"fghpdf/leetcode","sub_path":"py/reverse_words_in_a_string_III/reverse_words_in_a_string_III.py","file_name":"reverse_words_in_a_string_III.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"26004085383","text":"\"\"\" 1. Create a lambda function named salary_with_bonus that calculates employee’s salary with the bonus,\n\na) where both employee's salary and bonus are passed in as parameters\n\nb) where both employee's salary and bonus are passed in as parameters but bonus is fixed as keyword argument (“kwarg”)\n\nc) where only employee's salary is passed in as a parameter and bonus is fixed (hard-coded)\n\nAssume bonus is 5%. Example: for the salary of 25000 the lambda function should return 26250.0\n\"\"\"\n\nsalary_with_bonus_a = lambda salary, bonus: salary + (salary*bonus)/100\nprint(salary_with_bonus_a(25000,5))\n\nsalary_with_bonus_b = lambda salary, bonus = 0.05: salary + salary * bonus\nprint(salary_with_bonus_b(25000))\n\nsalary_with_bonus_c = lambda salary : salary +salary*0.05\nprint(salary_with_bonus_c(25000))\n\n\"\"\"\na) Write an ordinary function to create the biggest number by rearranging the digits of a given number. Example: for the input 213 the function should return 321\n\nb) Use method chaining to create a one-line version of the above function.\n\nc) Create a lambda function from the one-line version of the above function\n\"\"\"\n\ndef rearranging_no(no):\n lst = []\n while(no):\n lst.append(int(no%10))\n no = int(no/10)\n lst.sort(reverse=True)\n result = 0\n for i in lst:\n result = result * 10 + i\n return result\n\nprint(rearranging_no(231))\n\nclass rearrange():\n def no_separate(self, no):\n lst = []\n while (no):\n lst.append(int(no % 10))\n no = int(no / 10)\n lst.sort(reverse=True)\n return(lst)\n\n def return_no(self,lst):\n result = 0\n for i in lst:\n result = result * 10 + i\n return result\n\ntest = rearrange()\nprint(test.return_no(test.no_separate(231)))\n\ndef rearrange_nikola(no):\n lst=list(str(no))\n numbers_desc = sorted(lst, reverse=True)\n biggest_num =''.join(numbers_desc)\n return(biggest_num)\n\ndef rearrange_nikola2(no):\n print(int(''.join(sorted(list(str(no)),reverse = True))))\n\nrearrange_nikola3 = lambda no: ''.join(sorted(list(str(no)),reverse = True))\nprint(f'rearrange nikola 3: {rearrange_nikola3(4567)}')\n\n\n\"\"\"\nWrite a lambda function named cap_string that capitalises the first letter of a string.\n\"\"\"\ncap_str = lambda str : str[:1].upper() +str[1:]\nprint(cap_str('asta e stringul meu '))\n\n","repo_name":"IoanaCojo/FDM-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73527117201","text":"# программа для вычисления площади и длины окружности\nimport math\n\nr = 10 # ведите величину радиуса окружности\n\ns = math.pi*r**2 # расчет площади\nl = 2*math.pi*r # расчет длины окружности\n\nprint('Площадь круга = ', round(s, 3))\nprint('Длина окружности = ', round(l, 3))\n\n","repo_name":"Zyoger/My-First-Repository","sub_path":"Python/UDEMI lesson/Exercise7.py","file_name":"Exercise7.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11995477297","text":"# -*- coding: utf-8 -*-\nfrom textwrap import dedent as s\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nfrom tools import merge\n\nstyles = {\n 'underline': {\n 'border-bottom': 'thin lightgrey solid',\n 'margin-top': '50px'\n }\n}\n\n\ndef Chapter(name, href=None, caption=None):\n linkComponent = html.A if href.startswith('http') else dcc.Link\n return html.Div([\n html.Li(\n linkComponent(\n name,\n href=href,\n style={'paddingLeft': 0},\n id=href\n )\n ),\n html.Small(dcc.Markdown(s(caption or '')), style={\n 'display': 'block',\n 'marginTop': '-10px' if caption else ''\n }) if caption else None\n ])\n\n\ndef Section(title, links, description=None, headerStyle={}):\n return html.Div([\n html.H2(title, style=merge(styles['underline'], headerStyle)),\n (\n html.Div(description)\n if description is not None else None\n ),\n html.Ul(links)\n ])\n\n\nlayout = html.Div(className='toc', children=[\n html.H1('Dash User Guide'),\n\n Section(\"What's Dash?\", [\n Chapter('Introduction', '/dash/introduction'),\n Chapter('Announcement', 'https://medium.com/@plotlygraphs/introducing-dash-5ecf7191b503'),\n Chapter('Dash App Gallery', '/dash/gallery'),\n Chapter('Winter 2018 Workshops', 'https://plotcon.plot.ly/workshops'),\n ]),\n\n Section('Dash Tutorial', [\n Chapter('Part 1. Installation', '/dash/installation'),\n Chapter(\n 'Part 2. The Dash Layout',\n '/dash/getting-started',\n '''The Dash `layout` describes what your app will\n look like and is composed of a set of declarative Dash components.\n '''),\n Chapter(\n 'Part 3. Basic Callbacks',\n '/dash/getting-started-part-2',\n '''Dash apps are made interactive through Dash Callbacks:\n Python functions that are automatically called whenever an input\n component's property changes. Callbacks can be chained, allowing\n one update in the UI to trigger several updates across the app.'''\n ),\n Chapter(\n 'Part 4. Callbacks With State',\n '/dash/state',\n '''Basic callbacks are fired whenever the values change. Use\n Dash `State` with Dash `Inputs` to pass in extra values whenever\n the `Inputs` change. `State` is useful for UIs that contain\n forms or buttons.'''\n ),\n Chapter(\n 'Part 5. Interactive Graphing and Crossfiltering',\n '/dash/interactive-graphing',\n '''Bind interactivity to the Dash `Graph` component whenever you\n hover, click, or select points on your chart.'''\n ),\n Chapter(\n 'Part 6. Sharing Data Between Callbacks',\n '/dash/sharing-data-between-callbacks',\n '''`global` variables will break your Dash apps. However, there\n are other ways to share data between callbacks. This chapter is\n useful for callbacks that run expensive data processing tasks or\n process large data.\n '''\n )\n ]),\n\n Section('Component Libraries', [\n Chapter('Dash Core Components', '/dash/dash-core-components', '''\n The Dash Core Component library contains a set of higher-level\n components like sliders, graphs, dropdowns, tables, and more.\n '''),\n Chapter('Dash HTML Components', '/dash/dash-html-components', '''\n Dash provides all of the available HTML tags as user-friendly\n Python classes. This chapter explains how this works and the few\n important key differences between Dash HTML components and standard\n html.\n '''),\n Chapter('Build Your Own Components', '/dash/plugins', '''\n Dash components are built with [React.js](https://reactjs.org/).\n Dash provides a React → Dash toolchain that generates a\n Dash-compatible interface to these components in Python.\n ''')\n ]),\n\n Section('Advanced Usage', [\n Chapter('Performance', '/dash/performance', '''\n There are two main ways to speed up dash apps: caching and using\n WebGL chart types.\n '''),\n Chapter('Live Updates', '/dash/live-updates', '''\n Update your apps on page load or on a predefined interval\n (e.g. every 30 seconds).\n '''),\n Chapter('External CSS and JS', '/dash/external-resources', '''\n By default, Dash loads CSS and JS assets from a fast, global CDN -\n but you can optionally these resources locally,\n making your apps completely self contained (no internet access required!).\n Also, learn how to append your own CSS styleseets or JS scripts to\n your apps.\n '''),\n Chapter('URL Routing and Multiple Apps', '/dash/urls', '''\n Dash provides two components (`dcc.Link` and `dcc.Location`) that allow\n you to easily make fast multipage apps using its own\n \"Single Page App (SPA)\" design pattern.\n ''')\n ]),\n\n Section('Production', [\n Chapter('Authentication', '/dash/authentication'),\n Chapter('Deployment', '/dash/deployment'),\n ]),\n\n Section('Getting Help', [\n Chapter('FAQ', 'https://community.plot.ly/c/dash'),\n Chapter('Support and Contact', href='/dash/support')\n ]),\n\n Section('Plotly On-Premises', [\n Chapter(\n 'About Plotly On-Premises',\n 'https://plot.ly/products/on-premise'\n ),\n Chapter(\n 'Deploying Dash Apps on Plotly On-Premises',\n '/dash/deployment/on-premise'\n )],\n description=\"\"\"Plotly On-Premises is Plotly's commercial offering for\n hosting and sharing Dash apps.\"\"\",\n headerStyle={'color': '#0D76BF'}\n )\n])\n","repo_name":"p16i/dash-docs","sub_path":"tutorial/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"7681548592","text":"from flectra.exceptions import UserError\r\n\r\nfrom flectra import fields, models\r\nclass AdraExpensesGroupedReports(models.TransientModel):\r\n _name = 'adra.expenses.grouped.reports'\r\n _description = 'Generador de Reportes de Gastos Agrupados.'\r\n\r\n x_account_analytic_account_id = fields.Many2one('account.analytic.account', string='Seleccione un proyecto')\r\n x_date_from = fields.Date(string='Desde')\r\n x_date_to = fields.Date(string='Hasta')\r\n\r\n def generate_excel_report(self):\r\n if not self.x_account_analytic_account_id:\r\n raise UserError(\"Por favor, asegúrese de seleccionar un proyecto para el informe.\")\r\n if not self.x_date_from and not self.x_date_to:\r\n raise UserError(\"Por favor, asegúrese de seleccionar tanto la fecha de inicio como la fecha de término para el informe.\")\r\n if not self.x_date_from:\r\n raise UserError(\"Por favor, asegúrese de seleccionar la fecha de inicio para el informe.\")\r\n if not self.x_date_to:\r\n raise UserError(\"Por favor, asegúrese de seleccionar la fecha de término para el informe.\")\r\n data = {\r\n 'x_account_analytic_account_id': self.x_account_analytic_account_id.id,\r\n 'x_project': self.x_account_analytic_account_id.name,\r\n 'x_date_from': self.x_date_from,\r\n 'x_date_to': self.x_date_to\r\n }\r\n report = self.env.ref('adra_account_extended.report_xlsx_expenses_grouped')\r\n return report.report_action(self, data=data)","repo_name":"cv2310/flectra20-adra-programas","sub_path":"wizards/adra_expenses_grouped_reports.py","file_name":"adra_expenses_grouped_reports.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"870231712","text":"n, m = map(int, input().split())\nm = min(m, n-m)\n\ndef count_two(n):\n two = 0\n while n:\n n //= 2\n two += n\n return two\n\ndef count_five(n):\n five = 0\n while n:\n n //= 5\n five += n\n return five\n\n# nCm = n!/(m!(n-m)!)\ntwo_cnt = count_two(n) - count_two(m) - count_two(n-m)\nfive_cnt = count_five(n) - count_five(m) - count_five(n-m)\nprint(min(two_cnt, five_cnt))","repo_name":"Sunrin-Algorithm-Study/Algorithm_study","sub_path":"sources/cometj03_정해성/step/14 number_combi/2004.py","file_name":"2004.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72484359761","text":"def wsi_prediction(model, img):\r\n from patchify import patchify, unpatchify\r\n import numpy as np\r\n\r\n max_sum = np.sum(np.ones((256,256,3)))\r\n img_patches = patchify(img/255, (256, 256, 3), step=256)\r\n x,y,z = img_patches.shape[0],img_patches.shape[1],img_patches.shape[2]\r\n out = np.zeros((x*256, y*256, 1))\r\n mask_patches = patchify(out, (256, 256, 1), step=256)\r\n\r\n for i in range(x):\r\n for j in range(y):\r\n for k in range(z):\r\n temp = img_patches[i][j][k]\r\n if np.sum(temp) < 0.9*max_sum:\r\n pred = model.predict(np.expand_dims(temp, axis = 0))[0]\r\n mask_patches[i][j][k] = 255*((pred > 0.5).astype(np.uint8))\r\n\r\n return unpatchify(mask_patches, out.shape)","repo_name":"itsazibfarooq/Lung_cancer_Detection","sub_path":"Helper_Code/wsi_prediction.py","file_name":"wsi_prediction.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19904072003","text":"import os\nimport sys\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nfrom src import utils\n\nplt.rcdefaults()\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\ntitle = None\nxlabel = None\nylabel = None\n\n\ndef show_class_balance(data, train_data=True):\n y_pos = np.arange(len(data['classes']))\n countImg = Counter(data['y_train']).values()\n if (train_data == False):\n countImg = Counter(data['y_test']).values()\n\n plt.bar(y_pos, countImg, align='center', alpha=0.5, color='blue')\n plt.xticks(y_pos, data['classes'])\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n\n plt.show()\n\n\ndef visualize_loss(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Valid'], loc='upper left')\n plt.show()\n\n\ndef visualize_acc(history):\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Valid'], loc='upper left')\n plt.show()\n\n\ndef show_single_image(image):\n plt.imshow(image)\n\n\ndef show_many_images(images, row=4, col=4):\n f, axarr = plt.subplots(row, col)\n for i in range(row):\n for j in range(col):\n axarr[i, j].imshow(images[i * row + j])\n\n\ndef plot_pca(features, labels, classes, n_components=10):\n pca = PCA(n_components=n_components)\n pca.fit(features.T)\n\n fig = plt.figure(figsize = (8,8))\n ax = fig.add_subplot(1,1,1) \n ax.set_title('PCA', fontsize = 20)\n\n targets = list(range(len(classes)))\n colors = ['r', 'b']\n for target, color in zip(targets, colors):\n indicesToKeep = utils.where_equal(labels, target)\n ax.scatter(pca.components_[0, indicesToKeep],\n pca.components_[1, indicesToKeep],\n c = color,\n s = 10)\n ax.legend(classes)\n ax.grid()\n","repo_name":"VictorNM/triplet-cnn-cbir","sub_path":"src/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28578324015","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nRetrieve all log files for each test case and gather the results.\r\n\r\nCreated on Tue Jun 15 14:38:00 2021\r\n\r\n@author: Alexandre Bohyn - alexandre dot bohyn [at] kuleuven dot be\r\n\"\"\"\r\nimport glob\r\nimport re\r\nfrom collections import defaultdict\r\n\r\nimport pandas as pd\r\n\r\n\r\ndef get_log_info(log_filename: str) -> (int, int, int, str):\r\n \"\"\"Function to extract info from log file name\"\"\"\r\n n_val = int(re.search(r\"N(\\d+)\", log_filename).group(1))\r\n m_val = int(re.search(r\"m(\\d+)\", log_filename).group(1))\r\n r_val = int(re.search(r\"r(\\d+)\", log_filename).group(1))\r\n method_str = re.search(r\"_([A-Z]+)\\.txt\", log_filename).group(1)\r\n return n_val, m_val, r_val, method_str\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # List the dirs of all the methods\r\n log_files = glob.glob(\"logs/log_*.txt\")\r\n\r\n col_names = (\"n\", \"parents\", \"candidates\", \"representatives\", \"time\")\r\n df_dict = defaultdict(pd.DataFrame)\r\n for log in log_files:\r\n # Metadata\r\n N, m, r, method = get_log_info(log)\r\n\r\n # Create dataframe from the log file\r\n df = pd.DataFrame()\r\n with open(log, \"r\") as f:\r\n for line in f.readlines():\r\n temp_data = dict()\r\n match = re.finditer(r\":\\s+(\\d+\\.*\\d*)\", line)\r\n for idx, val in enumerate(match):\r\n temp_data[col_names[idx]] = [float(val.group(1))]\r\n temp_data[\"method\"] = method\r\n temp_data_dict = pd.DataFrame.from_dict(temp_data)\r\n\r\n df = pd.concat([df, temp_data_dict], ignore_index=True)\r\n\r\n # Append df to dictionnary of df\r\n key = f\"N{N}_m{m}_r{r}\"\r\n df_dict[key] = pd.concat([df_dict[key], df], ignore_index=True)\r\n\r\n # Make each entry of the dict of df wide\r\n with pd.ExcelWriter(\r\n \"results/global_results.xlsx\"\r\n ) as writer: # pylint: disable=abstract-class-instantiated\r\n for key in df_dict.keys():\r\n or_df = df_dict[key]\r\n or_df.to_excel(f\"results/{key}.xlsx\", index=False)\r\n wide_df = or_df.pivot_table(\r\n index=\"n\",\r\n values=[\"parents\", \"candidates\", \"representatives\", \"time\"],\r\n columns=\"method\",\r\n )\r\n # Write it to the excel file\r\n wide_df.to_excel(writer, sheet_name=key)\r\n wide_df.to_excel(writer, sheet_name=key)\r\n","repo_name":"ABohynDOE/enumeration_fatld","sub_path":"log2csv.py","file_name":"log2csv.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"402817311","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\n\n#Load in the data and disperse respectively\ngrad_2030 = pd.read_csv(\"UCLA_EE_grad_2030_0.csv\",header=None)\n\nGPA_2030 = grad_2030[0].tolist()\nGRE_2030 = grad_2030[1].tolist()\nlabels_2030 = grad_2030[2].tolist()\n\ndataCount = len(labels_2030)\n\nGPA_2030_y = []\nGPA_2030_n = []\nGRE_2030_y = []\nGRE_2030_n = []\nyesCount = 0\nnoCount = 0\n\nfor i in range (dataCount):\n if(labels_2030[i] == 0):\n GPA_2030_n.append(GPA_2030[i])\n GRE_2030_n.append(GRE_2030[i])\n noCount += 1\n else:\n GPA_2030_y.append(GPA_2030[i])\n GRE_2030_y.append(GRE_2030[i])\n yesCount += 1\n\n#Calculate parameters\n#Priors based on Bernoulli\npy_0 = noCount/dataCount\n\n#Mean is simply mean of each group (Confirmed)\nu0_gpa = np.mean(GPA_2030_n)\nu0_gre = np.mean(GRE_2030_n)\nu1_gpa = np.mean(GPA_2030_y)\nu1_gre = np.mean(GRE_2030_y)\n\n#Unequal Variances\nsigGPAy = np.var(GPA_2030_y)\nsigGREy = np.var(GRE_2030_y)\nsigGPAn = np.var(GPA_2030_n)\nsigGREn = np.var(GRE_2030_n)\n\n#Equal variances is weighted sum of sign and sigy\nsigEqualGPA = (noCount/dataCount)*sigGPAn+(yesCount/dataCount)*sigGPAy\nsigEqualGRE = (noCount/dataCount)*sigGREn+(yesCount/dataCount)*sigGREy\n\n#Decision Boundaries\n#Equal variances\n#Following w1x + w0 = 0, we can solve by saying x = -w0/w1 -> Used quadratic equations but updated sigma\nw1 = [-2*(u0_gpa/sigEqualGPA-u1_gpa/sigEqualGPA),-2*(u0_gre/sigEqualGRE-u1_gre/sigEqualGRE)]\nw0 = [(u0_gpa**2/sigEqualGPA-u1_gpa**2/sigEqualGPA)+np.log((1-py_0)*np.sqrt(sigEqualGPA/sigEqualGPA)/(py_0)),\n (u0_gre**2/sigEqualGRE-u1_gre**2/sigEqualGRE)+np.log((1-py_0)*np.sqrt(sigEqualGRE/sigEqualGRE)/(py_0))]\n\nb = np.zeros(2)\n\nfor i in range (2):\n b[i] = -w0[i]/w1[i]\n\nbGPAequal = b[0]\nbGREequal = b[1]\n\nprint(\"GPA2030 Statistics\")\nprint(\"Prior Probability of Non-Attending =\",py_0)\nprint(\"u_o =\",u0_gpa)\nprint(\"u_1 =\",u1_gpa)\nprint(\"Equal Variance =\",sigEqualGPA)\nprint(\"Variance of Admitted =\",sigGPAy)\nprint(\"Variance of Non-Admitted =\",sigGPAn)\nprint(\"Equal Decision Boundary, b =\",bGPAequal)\nprint()\nprint(\"__________________\")\nprint(\"GRE2030 Statistics\")\nprint(\"Prior Probability of Non-Attending =\",py_0)\nprint(\"u_o =\",u0_gre)\nprint(\"u_1 =\",u1_gre)\nprint(\"Equal Variance =\",sigEqualGRE)\nprint(\"Variance of Admitted =\",sigGREy)\nprint(\"Variance of Non-Admitted =\",sigGREn)\nprint(\"Equal Decision Boundary, b =\",bGREequal)\n\nprint()\nprint(\"______________________________________________\")\nprint(\"Testing Equal Variance Boundary Conditions\")\n\n#If our x > b, then it's in class 1 (yes). If our x < b, then it's in class 0 (no)\naccuracyGPAEqual = 0\naccuracyGREEqual = 0\nfor i in range (yesCount):\n if(GPA_2030_y[i] >= bGPAequal):\n accuracyGPAEqual += 1\n\n if(GRE_2030_y[i] >= bGREequal):\n accuracyGREEqual += 1\n\nfor i in range (noCount):\n if(GPA_2030_n[i] < bGPAequal):\n accuracyGPAEqual += 1\n \n if(GRE_2030_n[i] < bGREequal):\n accuracyGREEqual += 1\n\naccuracyGPAEqual *= 100/dataCount\naccuracyGREEqual *= 100/dataCount\n\nprint(\"Equal Variance GPA Accuracy =\",accuracyGPAEqual)\nprint(\"Equal Variance GRE Accuracy =\",accuracyGREEqual)\n\n\nprint()\nprint(\"______________________________________________\")\nprint(\"Testing Unequal Variance Boundary Conditions\")\n\na = [1/sigGPAn-1/sigGPAy,1/sigGREn-1/sigGREy]\nb = [-2*(u0_gpa/sigGPAn-u1_gpa/sigGPAy),-2*(u0_gre/sigGREn-u1_gre/sigGREy)]\nc = [(u0_gpa**2/sigGPAn-u1_gpa**2/sigGPAy)+np.log((1-py_0)*np.sqrt(sigGPAn/sigGPAy)/(py_0)),(u0_gre**2/sigGREn-u1_gre**2/sigGREy)+np.log((1-py_0)*np.sqrt(sigGREn/sigGREy)/(py_0))]\n\nroot1 = np.zeros(2)\nroot2 = np.zeros(2)\n\nfor i in range(2):\n #Calculate our roots\n d = (b[i]**2) - (4*a[i]*c[i])\n\n # find two solutions\n root1[i] = (-b[i]-np.sqrt(d))/(2*a[i])\n root2[i] = (-b[i]+np.sqrt(d))/(2*a[i])\n\nprint(\"The roots for our GPA are:\",root1[0],\"and\",root2[0])\nprint(\"The roots for our GRE are:\",root1[1],\"and\",root2[1])\n\n#To test the accuracies of these\n#If our x > root2, then it's in class 1 (yes). If our x < root2, then it's in class 0 (no)\naccuracyGPAUnequal = 0\naccuracyGREUnequal = 0\n\n#Because our roots have 2 outside the range (>4 or <0), we can focus on root 1\nfor i in range (yesCount):\n if(GPA_2030_y[i] >= root2[0]):\n accuracyGPAUnequal += 1\n\n if(GRE_2030_y[i] >= root2[1]):\n accuracyGREUnequal += 1\n\nfor i in range (noCount):\n if(GPA_2030_n[i] < root2[0]):\n accuracyGPAUnequal += 1\n \n if(GRE_2030_n[i] < root2[1]):\n accuracyGREUnequal += 1\n\naccuracyGPAUnequal *= 100/dataCount\naccuracyGREUnequal *= 100/dataCount\n\nprint(\"Unequal Variance GPA Accuracy =\",accuracyGPAUnequal)\nprint(\"Unequal Variance GRE Accuracy =\",accuracyGREUnequal)\n\n#Visualizations\n#Equal GPA visualization\nfor i in range (yesCount):\n marker = 'o'\n color = 'r'\n plt.scatter(GPA_2030_y[i],0,color=color,marker=marker)\nfor i in range (noCount):\n marker = 'x'\n color = 'b'\n plt.scatter(GPA_2030_n[i],0,color=color,marker=marker)\n\nyBound = np.linspace(-1,1,num=2)\nxBound = [bGPAequal,bGPAequal]\nplt.plot(xBound,yBound,color='y')\n\nx0 = np.linspace(u0_gpa - 3*np.sqrt(sigEqualGPA), u0_gpa + 3*np.sqrt(sigEqualGPA), 100)\ny0 = py_0/np.sqrt(2*np.pi*sigEqualGPA)*np.exp(-(x0-u0_gpa)**2/sigEqualGPA)\nplt.plot(x0, y0)\n\nx1 = np.linspace(u1_gpa - 3*np.sqrt(sigEqualGPA), u1_gpa + 3*np.sqrt(sigEqualGPA), 100)\ny1 = (1-py_0)/np.sqrt(2*np.pi*sigEqualGPA)*np.exp(-(x1-u1_gpa)**2/sigEqualGPA)\nplt.plot(x1, y1)\n\nplt.xlabel(\"GPA\")\nplt.title(\"GPA2030 Equal Variances\")\nplt.show()\n\n#Equal GRE visualization\nfor i in range (yesCount):\n marker = 'o'\n color = 'r'\n plt.scatter(GRE_2030_y[i],0,color=color,marker=marker)\nfor i in range (noCount):\n marker = 'x'\n color = 'b'\n plt.scatter(GRE_2030_n[i],0,color=color,marker=marker)\n\nyBound = np.linspace(-1,1,num=2)\nxBound = [bGREequal,bGREequal]\nplt.plot(xBound,yBound,color='y')\n\nx0 = np.linspace(u0_gre - 3*np.sqrt(sigEqualGRE), u0_gre + 3*np.sqrt(sigEqualGRE), 100)\ny0 = py_0/np.sqrt(2*np.pi*sigEqualGRE)*np.exp(-(x0-u0_gre)**2/sigEqualGRE)\nplt.plot(x0, y0)\n\nx1 = np.linspace(u1_gre - 3*np.sqrt(sigEqualGRE), u1_gre + 3*np.sqrt(sigEqualGRE), 100)\ny1 = (1-py_0)/np.sqrt(2*np.pi*sigEqualGRE)*np.exp(-(x1-u1_gre)**2/sigEqualGRE)\nplt.plot(x1, y1)\n\nplt.xlabel(\"GRE\")\nplt.title(\"GRE2030 Equal Variances\")\nplt.show()\n\n#Unequal GPA variances\nfor i in range (yesCount):\n marker = 'o'\n color = 'r'\n plt.scatter(GPA_2030_y[i],0,color=color,marker=marker)\nfor i in range (noCount):\n marker = 'x'\n color = 'b'\n plt.scatter(GPA_2030_n[i],0,color=color,marker=marker)\n\nxBound = np.linspace(2.5,3,num=25)\nyBound = (xBound**2)*a[0]+xBound*b[0]+c[0]\nplt.plot(xBound,yBound,color='k')\n\nyBound = np.linspace(-1,1,num=2)\nxBound = [root2[0],root2[0]]\nplt.plot(xBound,yBound,color='y')\n\nx0 = np.linspace(u0_gpa - 3*np.sqrt(sigGPAn), u0_gpa + 3*np.sqrt(sigGPAn), 100)\ny0 = py_0/np.sqrt(2*np.pi*sigGPAn)*np.exp(-(x0-u0_gpa)**2/sigGPAn)\nplt.plot(x0, y0)\n\nx1 = np.linspace(u1_gpa - 3*np.sqrt(sigGPAy), u1_gpa + 3*np.sqrt(sigGPAy), 100)\ny1 = (1-py_0)/np.sqrt(2*np.pi*sigGPAy)*np.exp(-(x1-u1_gpa)**2/sigGPAy)\nplt.plot(x1, y1)\n\nplt.xlabel(\"GPA\")\nplt.title(\"GPA2030 Unequal Variances\")\nplt.show()\n\n#Unequal GRE variances\nfor i in range (yesCount):\n marker = 'o'\n color = 'r'\n plt.scatter(GRE_2030_y[i],0,color=color,marker=marker)\nfor i in range (noCount):\n marker = 'x'\n color = 'b'\n plt.scatter(GRE_2030_n[i],0,color=color,marker=marker)\n\nxBound = np.linspace(2.5,3,num=25)\nyBound = (xBound**2)*a[1]+xBound*b[1]+c[1]\nplt.plot(xBound,yBound,color='k')\n\nyBound = np.linspace(-1,1,num=2)\nxBound = [root2[1],root2[1]]\nplt.plot(xBound,yBound,color='y')\n\nx0 = np.linspace(u0_gre - 3*np.sqrt(sigGREn), u0_gre + 3*np.sqrt(sigGREn), 100)\ny0 = py_0/np.sqrt(2*np.pi*sigGREn)*np.exp(-(x0-u0_gre)**2/sigGREn)\nplt.plot(x0, y0)\n\nx1 = np.linspace(u1_gre - 3*np.sqrt(sigGREy), u1_gre + 3*np.sqrt(sigGREy), 100)\ny1 = (1-py_0)/np.sqrt(2*np.pi*sigGREy)*np.exp(-(x1-u1_gre)**2/sigGREy)\nplt.plot(x1, y1)\n\nplt.xlabel(\"GRE\")\nplt.title(\"GRE2030 Unequal Variances\")\nplt.show()","repo_name":"markgeha8/ECEM146","sub_path":"Homework 5/gda.py","file_name":"gda.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9165069904","text":"import pickle\nimport numpy as np\nimport quaternion as npq\nfrom preprocessing.preproc_utils import reject_outliers\nimport argparse\nfrom preprocessing.preproc_utils import data_folder\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-task_name\", type=str, default=\"shelf\", help=\"String name of the task\"\n)\nparser.add_argument(\"-task_id\", type=int, default=0, help=\"Int task id\")\nparser.add_argument(\n \"-calib_object\", type=str, default=\"ycbv_02\", help=\"Object used in the calibration\"\n)\nparser.add_argument(\n \"-calib_path\",\n type=str,\n default=data_folder.joinpath(\"calibration.pkl\"),\n help=\"Path to pddl lib\",\n)\nparser.add_argument(\n \"-known_pose_path\",\n type=str,\n default=data_folder.joinpath(\"world_object.npy\"),\n help=\"Path to file to save results\",\n)\nparser.add_argument(\"--verbose\", action=\"store_true\")\nargs = parser.parse_args()\n\ncosy_calib_file = args.calib_path\ncalib_preds = pickle.load(open(cosy_calib_file, \"rb\"))\ncosypose_calib_obj_id = args.calib_object\ntask_name = args.task_name\ntask_id = args.task_id\n\nworld_obj = np.load(args.known_pose_path)\nt_array = np.array([x[:3, 3] for x in calib_preds[args.calib_object]])\navg_t = np.mean(reject_outliers(t_array), axis=0)\n\nrot_array = np.array(\n [\n npq.as_rotation_vector(npq.from_rotation_matrix(x[:3, :3]))\n for x in calib_preds[cosypose_calib_obj_id]\n ]\n)\navg_rot = np.mean(reject_outliers(rot_array), axis=0)\n\ncamera_obj = np.eye(4)\ncamera_obj[:3, 3] = avg_t\ncamera_obj[:3, :3] = npq.as_rotation_matrix(npq.from_rotation_vector(avg_rot))\n\nif not data_folder.exists():\n data_folder.mkdir(exist_ok=True, parents=True)\nnp.save(\n data_folder.joinpath(f\"calibration_{task_name}_{task_id}\"),\n world_obj.dot(np.linalg.inv(camera_obj)),\n)\n","repo_name":"agimus-project/video_guided_tamp_planner","sub_path":"preprocessing/01_create_calib_matrix.py","file_name":"01_create_calib_matrix.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20674891174","text":"# type: ignore\nimport pytest\nfrom aat.common import _in_cpp\nfrom aat.core import Order, Instrument, ExchangeType\n\n_INSTRUMENT = Instrument(\"TE.ST\")\n\n\nclass TestOrder:\n def test_stop_order_validation(self):\n if _in_cpp():\n return\n\n with pytest.raises(AssertionError):\n Order(\n volume=0.0,\n price=5.0,\n side=Order.Sides.SELL,\n exchange=ExchangeType(\"\"),\n order_type=Order.Types.STOP,\n stop_target=Order(\n volume=0.5,\n price=5.0,\n side=Order.Sides.SELL,\n exchange=ExchangeType(\"\"),\n order_type=Order.Types.STOP,\n instrument=_INSTRUMENT,\n ),\n instrument=_INSTRUMENT,\n )\n","repo_name":"AsyncAlgoTrading/aat","sub_path":"aat/tests/core/models/test_order.py","file_name":"test_order.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":528,"dataset":"github-code","pt":"22"} +{"seq_id":"23429300538","text":"'''Get rid of recursion in the following function (using iteration)\nIn a nutshell:\n\n1. Study the function.\n2. Convert all recursive calls into tail calls. (If you can’t, stop. Try another method.)\n3. Introduce a one-shot loop around the function body.\n4. Convert tail calls into continue statements.\n5. Tidy up.\n\nRead more:\nhttp://blog.moertel.com/posts/2013-05-11-recursive-to-iterative.html\n'''\n\n\ndef find_val_or_next_smallest(bst, x):\n '''Get the greatest value <= x in a binary search tree (bst).\n Returns None if no such value can be found.\n https://github.com/tmoertel/recursion-to-iteration\n '''\n if bst is None:\n return None\n elif bst.val == x:\n return x\n elif bst.val > x:\n return find_val_or_next_smallest(bst.left, x)\n else:\n right_best = find_val_or_next_smallest(bst.right, x)\n if right_best is None:\n return bst.val\n return right_best\n","repo_name":"beenorgone-notebook/python-notebook","sub_path":"py-practice/recur_to_iter.py","file_name":"recur_to_iter.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42848703745","text":"import torch\ntorch_device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nfrom transformers import CLIPTextModel, CLIPTokenizer\nfrom diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler\n\n# 1. Load the autoencoder model which will be used to decode the latents into image space. \nvae = AutoencoderKL.from_pretrained(\"CompVis/stable-diffusion-v1-4\", subfolder=\"vae\", use_auth_token=True)\n\n# 2. Load the tokenizer and text encoder to tokenize and encode the text. \ntokenizer = CLIPTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\")\ntext_encoder = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n\n# 3. The UNet model for generating the latents.\nunet = UNet2DConditionModel.from_pretrained(\"CompVis/stable-diffusion-v1-4\", subfolder=\"unet\", use_auth_token=True)\n\nfrom diffusers import DDIMScheduler\n\nscheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False, set_alpha_to_one=False)\n\nfrom torch import autocast\nfrom tqdm.auto import tqdm\nfrom PIL import Image\n\nvae = vae.to(torch_device)\ntext_encoder = text_encoder.to(torch_device)\nunet = unet.to(torch_device) \n\nembedded_text_prompts = {}\n\ndef text_prompt_embed(t):\n with torch.no_grad():\n if t in embedded_text_prompts:\n return embedded_text_prompts[t]\n else: \n text_input = tokenizer([t], padding=\"max_length\", max_length=tokenizer.model_max_length, truncation=True, return_tensors=\"pt\")\n text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]\n embedded_text_prompts[t] = text_embeddings\n return text_embeddings\n\n\n\nfrom flask_socketio import SocketIO, emit\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom random import gauss, random\n# from threading import Thread, Event\nfrom time import sleep\nimport time\n\nimport numpy as np\nfrom PIL import Image\nimport base64\nfrom io import BytesIO\nimport torch\n# from gevent.pywsgi import WSGIServer\n# from geventwebsocket.handler import WebSocketHandler\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\n\nsocketio = SocketIO(app, cors_allowed_origins='*')\nCORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n@app.route('/hello')\ndef hello():\n return \"Hello World!\"\n\n\n# Handle the webapp connecting to the websocket\n@socketio.on('connect')\ndef test_connect():\n print('someone connected to websocket:', request.sid)\n emit('connect', {'data': 'Connected! ayy'})\n \n@socketio.on('disconnect')\ndef test_connect():\n print(request.sid)\n print('someone disconnected to websocket')\n emit('disconnect', {'data': 'Disconnected! ayy'})\n\n# Handle the webapp connecting to the websocket, including namespace for testing\n@socketio.on('connect', namespace='/devices')\ndef test_connect2():\n print('someone connected to websocket!')\n emit('responseMessage', {'data': 'Connected devices! ayy'})\n\n\ndef preprocess_mask(mask):\n mask = mask.convert(\"L\")\n w, h = mask.size\n if w < h:\n w = 512\n h = int(h*(512/w))\n else:\n h = 512\n w = int(w*(512/h))\n\n # if w > 512:\n # h = int(h * (512/w))\n # w = 512\n # if h > 512:\n # w = int(w*(512/h))\n # h = 512\n \n w, h = map(lambda x: x - x % 64, (w, h)) \n w //= 8\n h //= 8\n\n mask = mask.resize((w, h), resample=Image.LANCZOS)\n\n mask = np.array(mask).astype(np.float32) / 255.0\n mask = np.tile(mask, (4,1,1))\n mask = mask[None].transpose(0,1,2,3)\n mask[np.where(mask !=0.0)]=1.0\n mask = torch.from_numpy(mask)\n return mask\n\ndef preprocess(image):\n image = image.convert('RGB')\n w, h = image.size\n if w < h:\n w = 512\n h = int(h*(512/w))\n else:\n h = 512\n w = int(w*(512/h))\n # if w > 512:\n # h = int(h * (512/w))\n # w = 512\n # if h > 512:\n # w = int(w*(512/h))\n # h = 512\n w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64, 32 can sometimes result in tensor mismatch errors\n\n image = image.resize((w, h), resample=Image.LANCZOS)\n print(image.size)\n image = np.array(image).astype(np.float32) / 255.0\n image = image[None].transpose(0, 3, 1, 2)\n image = torch.from_numpy(image)\n return 2.0 * image - 1.0\n\ndef numpy_to_pil(images):\n \"\"\"\n Convert a numpy image or a batch of images to a PIL image.\n \"\"\"\n if images.ndim == 3:\n images = images[None, ...]\n images = (images * 255).round().astype(\"uint8\")\n pil_images = [Image.fromarray(image).convert('RGBA') for image in images]\n return pil_images\n\n@socketio.on('test')\ndef handle_message_t(message):\n print(message)\n\n@socketio.on('gen_step')\n@torch.no_grad()\ndef handle_message(message):\n # print(request.sid)\n # print('someone sent to the websocket', message)\n # print(message.keys())\n # print('gen??')\n now = time.time()\n\n guidance_scale = message['guidance_scale']\n text_prompts = message['text_prompts']\n text_prompt_weights = message['text_prompt_weights']\n directional_prompts = message['directional_prompts']\n\n layer_img_o = Image.open(BytesIO(base64.b64decode(message['layer_img'].split(\",\",1)[1])))\n area_img = Image.new(\"RGBA\", layer_img_o.size, \"WHITE\")\n area_img_o = Image.open(BytesIO(base64.b64decode(message['area_img'].split(\",\",1)[1])))\n area_img.paste(area_img_o, (0,0), area_img_o)\n \n\n\n overcoat_ratio = message['overcoat_ratio']\n seed = message['seed']\n generator = torch.Generator(device='cuda')\n generator.manual_seed(seed) \n\n # set mask from area_img\n area_mask = preprocess_mask(area_img)\n area_mask = area_mask.to(torch_device)\n\n # add black or white background to the layer image\n layer_img_back = Image.new(\"RGBA\", layer_img_o.size, \"WHITE\")\n layer_img_back.paste(layer_img_o, (0, 0), layer_img_o)\n layer_img_back.convert('RGB')\n layer_img = preprocess(layer_img_back)\n # layer_img = layer_img_o.convert('RGB')\n # layer_img = preprocess(layer_img)\n init_latents = vae.encode(layer_img.to(torch_device)).sample()\n init_latents = 0.18215 * init_latents\n\n noise = torch.randn(init_latents.shape, generator=generator, device=torch_device)\n\n \n num_inference_steps = message['steps']\n t = scheduler.timesteps[message['gen_tick']]\n print(message['gen_tick'], t)\n\n if message['gen_tick'] < int((1-overcoat_ratio)*num_inference_steps):\n layer_array = np.copy(np.asarray(layer_img_o))\n\n alphas = np.ones(layer_array[:,:,3,None].shape)*255\n layer_array = np.concatenate((layer_array[:,:,3,None], layer_array[:,:,3,None], layer_array[:,:,3,None], alphas), axis = 2)\n layer_array = np.array(layer_array, dtype = np.uint8)\n \n layer_img_mask = Image.fromarray(layer_array)\n layer_mask = preprocess_mask(layer_img_mask)\n layer_mask = layer_mask.to(torch_device)\n\n\n \n if message['gen_tick']==0: \n \n # latents = noise # start from the purse noise\n\n latents = (1-area_mask)* (1-layer_mask) * noise + (1-(1-area_mask)* (1-layer_mask)) * init_latents # In / Out\n latents = (0.1) * latents + 0.9 * noise\n \n # scheduler.add_noise(latents, noise, t-1)\n else:\n # print('stored latent is used')\n latents = torch.Tensor(message['latents'])\n latents = latents.to(torch_device)\n\n\n # print(latents.size())\n\n\n print(time.time()-now)\n\n # set directional prompt embeddings\n directional_vector = None\n for directional_prompt in directional_prompts:\n if directional_vector == None:\n directional_vector = float(directional_prompt['value'])/100.0 * (text_prompt_embed(directional_prompt['promptB'])-text_prompt_embed(directional_prompt['promptA']))\n else:\n directional_vector = directional_vector + float(directional_prompt['value'])/100.0 * (text_prompt_embed(directional_prompt['promptB'])-text_prompt_embed(directional_prompt['promptA']))\n\n # set prompts\n # text_input = tokenizer(text_prompts, padding=\"max_length\", max_length=tokenizer.model_max_length, truncation=True, return_tensors=\"pt\")\n \n # with torch.no_grad():\n # text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]\n # prompt_weights = torch.Tensor(text_prompt_weights).to(torch_device)\n # text_embeddings = torch.sum(text_embeddings * prompt_weights[:, None, None], dim=0)/torch.sum(prompt_weights)\n\n\n # handle text input\n # text_input = tokenizer(text_prompts, padding=\"max_length\", max_length=tokenizer.model_max_length, truncation=True, return_tensors=\"pt\")\n \n # with torch.no_grad():\n # text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]\n # prompt_weights = None\n # if prompt_weights == None:\n # text_embeddings = torch.mean(text_embeddings, dim=0)\n # else:\n # prompt_weights = torch.Tensor(prompt_weights).to(torch_device)\n # text_embeddings = torch.sum(text_embeddings * prompt_weights[:, None, None], dim=0)/torch.sum(prompt_weights)\n # text_embeddings = text_embeddings.reshape(1, text_embeddings.shape[0], text_embeddings.shape[1])\n\n # max_length = text_input.input_ids.shape[-1]\n # uncond_input = tokenizer(\n # [\"\"], padding=\"max_length\", max_length=max_length, return_tensors=\"pt\"\n # )\n # with torch.no_grad():\n # uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] \n\n # text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n \n text_prompt_embedding = None\n tpw = 0\n for tp_idx, text_prompt in enumerate(text_prompts):\n # print('inloop1', time.time()-now)\n cur_embedding = text_prompt_embed(text_prompt)\n # print('inloop2', time.time()-now)\n if text_prompt_embedding==None:\n text_prompt_embedding = cur_embedding*text_prompt_weights[tp_idx]\n else:\n text_prompt_embedding = text_prompt_embedding + cur_embedding*text_prompt_weights[tp_idx]\n tpw = tpw + text_prompt_weights[tp_idx]\n # print('inloop3', time.time()-now)\n text_prompt_embedding = text_prompt_embedding/tpw\n\n uncond_embeddings = text_prompt_embed('')\n text_embeddings = torch.cat([uncond_embeddings, text_prompt_embedding])\n # print(uncond_embeddings.size(), text_prompt_embedding.size())\n\n print(time.time()-now)\n\n \n scheduler.set_timesteps(num_inference_steps)\n # Do something about generation\n latent_model_input = torch.cat([latents] * 2)\n # predict the noise residual\n with torch.no_grad():\n noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)[\"sample\"]\n\n # perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n \n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n latents = scheduler.step(noise_pred, t, latents)[\"prev_sample\"]\n\n # t_noise = torch.randn(latents.shape, device=torch_device)\n if t > 1:\n # when over overcoat ratio\n if message['gen_tick'] >= int((1-overcoat_ratio)*num_inference_steps):\n init_latents_proper = scheduler.add_noise(init_latents, noise, t-1)\n latents = init_latents_proper * area_mask + latents * (1-area_mask)\n else:\n init_latents_proper = scheduler.add_noise(init_latents, noise, t-1)\n # latents = init_latents_proper * area_mask + latents * (1-area_mask)\n latents = (1-area_mask)* (1-layer_mask) * latents + (1-(1-area_mask)* (1-layer_mask)) * init_latents_proper # In / Out\n # latents = (1-area_mask)* (1-layer_mask) * latents + area_mask * init_latents_proper + (layer_mask*(1-area_mask)*(latents+init_latents_proper)*0.5) # In / Out\n \n # when below overcoat ratio\n else:\n latents = init_latents * area_mask + latents * (1-area_mask)\n\n # latents = (1-area_mask) * latents + area_mask * init_latents\n\n\n\n\n print(time.time()-now)\n # mask \n # latents = area_mask * init_latents + (1-area_mask) * latents\n latents_rt = latents.cpu().detach().numpy().tolist()\n\n latents = 1 / 0.18215 * latents\n \n output_img = vae.decode(latents)\n print('within', time.time()-now)\n output_img = (output_img / 2 + 0.5).clamp(0, 1)\n print('within2-1', time.time()-now)\n output_img = output_img.permute(0, 2, 3, 1)\n print('within2-1-1', time.time()-now)\n torch.cuda.synchronize()\n now2 = time.time()\n output_img = output_img.cpu()\n print('within2-1-2', time.time()-now, time.time()-now2)\n output_img = output_img.numpy()\n # output_img = output_img.cpu().permute(0, 2, 3, 1).numpy()\n print('within2-2', time.time()-now)\n output_img = numpy_to_pil(output_img)[0]\n print('within2-3', time.time()-now)\n output_img = output_img.resize((layer_img_o.size[0], layer_img_o.size[1]), resample=Image.LANCZOS)\n print('within2', time.time()-now)\n output_array = np.asarray(output_img)\n output_array = np.copy(output_array)\n # print('within3', time.time()-now)\n\n area_array = np.asarray(area_img_o)\n area_array = np.where(area_array==255, 255, 0)\n # print(area_array.shape, gaussian.shape)\n output_array[:,:,3] = area_array[:,:,3]\n # print('within4', time.time()-now)\n output_img = Image.fromarray(output_array)\n # print('within5', time.time()-now)\n \n # print('here?')\n buffered = BytesIO()\n output_img.save(buffered, format=\"PNG\")\n output_img_send = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n\n print(time.time()-now, time.time())\n print('---------------------')\n emit('gen_done', {'data':message['area_img'].split(\",\",1)[0]+','+output_img_send, 'stroke_id': message['stroke_id'], 'latents':latents_rt})\n\n\n# Handle the webapp sending a message to the websocket, including namespace for testing\n@socketio.on('message', namespace='/devices')\ndef handle_message2():\n print('someone sent to the websocket!')\n\n\n@socketio.on_error_default # handles all namespaces without an explicit error handler\ndef default_error_handler(e):\n print('An error occured:')\n print(e)\n socketio.stop()\n\n\n# if __name__ == '__main__':\nsocketio.run(app, host='0.0.0.0', debug=False, port=5001)\n # http_server = WSGIServer(('',5001), app, handler_class=WebSocketHandler)\n # http_server.serve_forever()\n","repo_name":"johnr0/PromptPaint","sub_path":"ml_server.py","file_name":"ml_server.py","file_ext":"py","file_size_in_byte":14137,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"13294222422","text":"import pygame\nfrom pygame.sprite import Sprite\nfrom pathlib import Path\n\nclass Ship(Sprite):\n \"\"\"Controls the ship in the sideways_shooter\"\"\"\n def __init__(self, ai_game):\n \"\"\"Initialize image size and location\"\"\"\n super().__init__()\n self.screen = ai_game.screen\n self.screen_rect = ai_game.screen.get_rect()\n self.settings = ai_game.settings\n\n # Load image file into game\n self.image = Path(__file__).parent / \"images/ship.bmp\"\n self.image = pygame.image.load(self.image)\n self.rect = self.image.get_rect()\n self.rect.midbottom = self.screen_rect.midbottom\n\n # Decimal placeholder for ship's position\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n \n # Movement Flags\n self.move_left = False\n self.move_right = False\n self.move_up = False\n self.move_down = False\n\n def update_movement(self):\n \"\"\"Moves the ship by rect x or rect y\"\"\"\n if self.move_left and self.rect.left > 0:\n self.x -= self.settings.ship_speed\n if self.move_right and self.rect.right < self.screen_rect.right:\n self.x += self.settings.ship_speed\n if self.move_up and self.rect.top > self.screen_rect.top:\n self.y -= self.settings.ship_speed\n if self.move_down and self.rect.bottom < self.screen_rect.bottom:\n self.y += self.settings.ship_speed\n # Adds non-integer value of x/y coordinate to ship's speed\n self.rect.x = self.x\n self.rect.y = self.y\n\n def center_ship(self):\n \"\"\"Center the ship to its original position\"\"\"\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n\n def blitme(self):\n \"\"\"Loads in the ship with the designated coordinates\"\"\"\n self.screen.blit(self.image, self.rect)","repo_name":"TylerPrice7/Alien_Invasion","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2046966577","text":"# Passing arguments to function by assigment\n# 1/Argumentele sun varaibila de tip immutable (str, int, float, bool) la assigment in interiorul functiei se crreeaza o varabila locala\n# Daca vrem sa modificam valaorea argumentelor, folosim return si assigment pt a reflecta modificarile in program\n# 2/ Argumentele sunt mutable(schimbabile in-place, se pot rescrie items, de exp list) - la assigment in interiorul functiei se creeaza tot variabila locala\n# 2.1/ Argumentele mutable se pot \"modifica\" daca le returnam si asignam noua valoare intoarsa, similiar cu 1/\n# 2.2/ Argumentele mutable, daca se modifica in-place(append, assigment pe baza de index, insert, etc), daca suprascriem un item sau inserma unul(operatii de mutare)\n# se realizaeza \"pass by assigment\" in sensul ca modificarea va fi reflectata in program fara a se folosii return\n\n# Pass by assigment inseamna ca argumentele se vor comporta diferit in functie de tipul lor\n# Cand folosim argumente mutable si suprascriem elemente (schimbam/mutam argumentul in interiorul functiei) - schimbarea se reflecta dupa finalizarea functiei in program\n# Mecanismul este similar cu \"Pass by reference\" din alte limbaje de programare\n# Un al doilea mecanims de transmitere a argumentelor este \"Pass by value\" relativ simulat cand realizam assigment in interiorul functiei (atat pt mutable cat si pt immutable data)\n# Pass by assigment functioneaza pt ca Python creeaza variabile ca referinte pt obiecte(=reprezentari concrete ale unor tipuri de date)\n# Este esential daca acele obiecte sunt sau nu mutable pt a determina daca modificarile(mutarile) se reflecta in afara functiei asupra argumentelor transmise\n\n# Modificarile elemtelor listelor transmise ca argumente functilor,se reflecta in afar functiei fara sa fie necesar return\n# Modificarile oricarui tip de argumente se pot reflecta prin return si assigment\n\nx = 7\ngl = ['old', 6, 20]\n\ndef double(number):\n \"\"\"Dubleaza valaorea parametrului number\"\"\"\n number *=2 # se creeaza o variabila locala\n return number\n\ndef f(my_list):\n my_list = [3, 5] # se creeaza o variabila locala\n print(f'Lista din interiorul fucntiei f este {my_list}')\n return my_list\n\ndef g(lst):\n lst[0] = 'new'\n lst[1]+=1\n lst.append(77)\n print(f'Lista din interiorul functiei {lst}')\n\nx = double(x)\nprint(f'Dublul lui x este {x}')\n\n\nl =[6, 7]\nprint(f'Lista l inainte de executia functiei f: {l}')\nf(1)\nprint(f'Lista l dupa executia functiei f: {l}')\n\nprint()\nprint(f'Lista gl inaitne de executia functiei g {gl}')\ng(gl)\nprint(f'Lista gl dupa executia functiei g: {gl}')","repo_name":"IamSoAngryz/exercitii.py","sub_path":"pass_by_assigment.py","file_name":"pass_by_assigment.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22641687449","text":"import unittest\n\nimport day7\nimport utils\n\nPART_1_EXAMPLE = \"\"\"\n$ cd /\n$ ls\ndir a\n14848514 b.txt\n8504156 c.dat\ndir d\n$ cd a\n$ ls\ndir e\n29116 f\n2557 g\n62596 h.lst\n$ cd e\n$ ls\n584 i\n$ cd ..\n$ cd ..\n$ cd d\n$ ls\n4060174 j\n8033020 d.log\n5626152 d.ext\n7214296 k\n\"\"\"\n\n\nclass Part1Tests(unittest.TestCase):\n\n def test_example(self):\n self.assertEqual(95437, day7.part_1_answer(utils.to_lines(PART_1_EXAMPLE)))\n\n def test_with_input(self):\n self.assertEqual(1297159, day7.part_1_answer(utils.read_input_lines(7)))\n\n\nclass Part2Tests(unittest.TestCase):\n\n def test_example(self):\n self.assertEqual(24933642, day7.part_2_answer(utils.to_lines(PART_1_EXAMPLE)))\n\n def test_with_input(self):\n self.assertEqual(3866390, day7.part_2_answer(utils.read_input_lines(7)))\n","repo_name":"richardfearn/advent-of-code-2022","sub_path":"day7/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37235889535","text":"import pandas as pd\n\n# Note: GridSearchVC description from sklearn website:\n# Exhaustive search over specified parameter values for an estimator.\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\n\ndf = pd.read_csv('../data/movie_data.csv')\n\n\ndef tokenizer(text):\n return text.split()\n\n\ndef tokenizer_porter(text):\n return [porter.stem(word) for word in text.split()]\n\n\n# Divide the data into 25k for training and 25k for testing\nX_train = df.loc[:25000, 'review'].values\ny_train = df.loc[:25000, 'sentiment'].values\n\nX_test = df.loc[25000:, 'review'].values\ny_test = df.loc[25000:, 'sentiment'].values\n\n# Term-Frequency Inverse-Document Frequency (tfidf)\ntfidf = TfidfVectorizer(strip_accents=None,\n lowercase=False,\n preprocessor=None)\n\n# Word stemmer from before\nporter = PorterStemmer()\n\n# This grid contains the parameters that should be ran\n# in the GridSearchCV\nparam_grid = [{\n 'vect__ngram_range': [(1, 1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer,\n tokenizer_porter],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]\n },\n {\n 'vect__ngram_range': [(1, 1)],\n 'vect__stop_words': [stop, None],\n 'vect__tokenizer': [tokenizer,\n tokenizer_porter],\n 'vect__use_idf': [False],\n 'vect__norm': [None],\n 'clf__penalty': ['l1', 'l2'],\n 'clf__C': [1.0, 10.0, 100.0]\n }\n ]\n\nlr_tfidf = Pipeline(\n [\n ('vect', tfidf),\n ('clf', LogisticRegression(random_state=0))\n ]\n)\n\n# GridSearch Logistic Regression for tfidf\ngs_lr_tfidf = GridSearchCV(lr_tfidf,\n param_grid,\n scoring='accuracy',\n cv=5,\n verbose=1,\n n_jobs=-1)\n\ngs_lr_tfidf.fit(X_train, y_train)\n\nprint('Best parameter set: %s ' % gs_lr_tfidf.best_params_)\n\nprint('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_)\n\nclf = gs_lr_tfidf.best_estimator_\n\nprint('Test Accuracy: %.3f' % clf.score(X_test, y_test))","repo_name":"CSwithJC/PythonMachineLearning","sub_path":"chapter_08_sentiment_analysis/07_find_parameters_for_movie_review_classification.py","file_name":"07_find_parameters_for_movie_review_classification.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41905955671","text":"# this is an example code on using the environment. \nimport numpy as np\nfrom StarShip import StarShipGame\nimport Utilities\n# import the env class\n\n# create an object of env class\nenv = StarShipGame(True)\nnp.random.seed(0)\nlog_data =[]\ndef random_policy(episode):\n\n action_space = 6\n state_space =66\n max_steps = 1000\n\n for e in range(episode):\n state = env.reset()\n score = 0\n\n for i in range(max_steps):\n action = np.random.randint(action_space)\n reward, next_state, done = env.step(action)\n score += reward\n state = next_state\n \n if done:\n log_data.append(score)\n Utilities.PlotData(\"epsiode vs loss\",log_data)\n print(\"episode: {}/{}, score: {}\".format(e, episode, score))\n break\n\n\nif __name__ == '__main__':\n\n random_policy(1000)\n","repo_name":"I-Alpha/StarShip_D3QN","sub_path":"random_policy.py","file_name":"random_policy.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71091745657","text":"#!/usr/bin/python\n# @Time : 2/1/20\n# @Author: liyong\nfrom logzero import logger\nfrom web.models.databases import User, Company, CompanyUser, RoleTypeEnum\nfrom web.apps.base.status import StatusCode\nfrom web.utils.date2json import to_json\nfrom web.apps.user.libs.utils import save_pic, check_user_exist\nfrom web.apps.user.libs.response import auth_failed, permission_deny, validate_error, param_missing,\\\n openid_null, not_found\n\n\nasync def get_user(self, page=1, page_size=10, enterprise_id=None):\n \"\"\"查询用户\"\"\"\n user = self.current_user\n if not user:\n return auth_failed()\n if not enterprise_id:\n rows = user\n rows = [rows] if rows else []\n else:\n company_user = CompanyUser.by_company_user_id(user.id, enterprise_id)\n if not company_user or company_user.role_type == RoleTypeEnum.member: # 该企业下的管理员可查看\n return permission_deny()\n rows = User.by_enterprise_id(enterprise_id, page=page, page_size=page_size) # 查询该企业下的所有用户\n return {\"status\": True, \"code\": StatusCode.success.value, \"msg\": \"获取成功\", \"data\": to_json(rows)}\n\n\nasync def check_user_admin(self):\n \"\"\"查询用户是否是企业联系人\"\"\"\n user = self.current_user\n if not user:\n return auth_failed()\n\n return {\"status\": True, \"code\": StatusCode.success.value, \"msg\": \"获取成功\", \"data\": {'is_admin': user.is_admin}}\n\n\nasync def add_user(self, **kwargs):\n \"\"\"员工注册\"\"\"\n keys = ['userName', 'userPhone', 'enterpriseId']\n val = validate_error(keys, **kwargs)\n if not val.get('status'):\n return val\n if not self.openid:\n return openid_null()\n try:\n company = Company.by_id(kwargs.get('enterpriseId'))\n if not company:\n return {'status': False, 'msg': '该企业不存在', \"code\": StatusCode.not_found_error.value}\n user = self.current_user\n query = check_user_exist(user)\n if not query.get('status'):\n return query\n name = kwargs.get('userName').strip()\n phone = kwargs.get('userPhone').strip()\n instance = User.by_name_phone(name, phone)\n if instance:\n if not user or (user and instance.id != int(user.id)):\n return {'status': False, 'msg': '当前姓名和手机号已占用', \"code\": StatusCode.exist_error.value}\n employee_id = kwargs.get('employeeId')\n if employee_id:\n employee_id = employee_id.strip()\n avatar_pic = kwargs.get('avatarPic')\n if avatar_pic:\n avatar_pic = await save_pic(avatar_pic.strip(), 'avatar', f'avatar_{self.openid}')\n if not avatar_pic:\n return {'status': False, 'msg': '头像保存失败,请重试', \"code\": StatusCode.file_save_error.value}\n if not query.get('existed'): # 如果当前用户不存在,则注册用户时需要记录openid\n user = User.add(\n userName=name,\n employeeId=employee_id,\n userPhone=phone,\n avatarPic=avatar_pic,\n openid=self.openid\n )\n else:\n user = user.update(\n userName=name,\n employeeId=employee_id,\n userPhone=phone,\n avatarPic=avatar_pic\n )\n company_user = CompanyUser.by_company_user_id(user.id, company.id)\n if not company_user:\n CompanyUser.add(company_id=company.id, user_id=user.id)\n return {'status': True, 'msg': '注册成功', \"code\": StatusCode.success.value,\n \"data\": {\"userId\": user.id, \"avatarPic\": user.avatarPic, 'openid': user.openid}}\n except Exception as e:\n logger.error(f\"add user In Error: {str(e)}\")\n self.db.rollback()\n return {'status': False, 'msg': '注册失败', \"code\": StatusCode.db_error.value}\n\n\nasync def update_user(self, enterprise_id=None, user_id=None, **kwargs):\n \"\"\"编辑用户信息\"\"\"\n user = self.current_user\n if not user:\n return auth_failed()\n if not enterprise_id:\n return param_missing('enterpriseId')\n userid = user.id\n if user_id:\n userid = user_id\n user = User.by_id(userid)\n if not user:\n return not_found()\n company_user = CompanyUser.by_company_user_id(userid, enterprise_id)\n if not company_user or (not user_id and company_user.role_type != RoleTypeEnum.admin_rw):\n return permission_deny()\n try:\n keys = ['id', 'openid', 'is_admin', 'company', 'createTime', 'updateTime', 'enterpriseId']\n for key in keys:\n if key in kwargs:\n kwargs.pop(key) # 排除这些key\n name = kwargs.get('userName').strip()\n phone = kwargs.get('userPhone').strip()\n instance = User.by_name_phone(name, phone)\n if instance and instance.id != int(userid):\n return {'status': False, 'msg': '当前姓名和手机号已占用', \"code\": StatusCode.exist_error.value}\n avatar_pic = kwargs.get('avatarPic')\n if avatar_pic:\n avatar_pic = await save_pic(avatar_pic.strip(), 'avatar', f'avatar_{user.openid}')\n if not avatar_pic:\n return {'status': False, 'msg': '头像保存失败,请重试', \"code\": StatusCode.file_save_error.value}\n kwargs['avatarPic'] = avatar_pic\n user = user.update(**kwargs)\n return {'status': True, 'msg': '更新用户成功', \"code\": StatusCode.success.value,\n \"data\": {\"userId\": user.id, \"avatarPic\": user.avatarPic, 'openid': user.openid}}\n except Exception as e:\n logger.error(f\"update user In Error: {str(e)}\")\n self.db.rollback()\n return {'status': False, 'msg': '更新用户失败', \"code\": StatusCode.db_error.value}\n\n\nasync def delete_user(self, enterprise_id=None, user_id=None):\n user = self.current_user\n if not user:\n return auth_failed()\n if not enterprise_id or not user_id:\n return param_missing('enterpriseId或userId')\n company_user = CompanyUser.by_company_user_id(user.id, enterprise_id)\n if not company_user or company_user.role_type != RoleTypeEnum.admin_rw or user.id == int(user_id):\n return permission_deny()\n deleted_instance = CompanyUser.by_company_user_id(user_id, enterprise_id)\n if not deleted_instance:\n return not_found()\n try:\n deleted_instance.delete()\n User.by_id(user_id).delete()\n return {'status': True, 'msg': '删除用户成功', \"code\": StatusCode.success.value}\n except Exception as e:\n logger.error(f\"delete user In Error: {str(e)}\")\n self.db.rollback()\n return {'status': False, 'msg': '删除用户失败', \"code\": StatusCode.db_error.value}\n","repo_name":"Anti2019-nCoV/Anti2019-nCoV","sub_path":"web/apps/user/libs/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"40295137939","text":"import unittest\nfrom p7 import Stack\n\nclass P7TestCase(unittest.TestCase):\n def test_push_data(self):\n obj = Stack(4)\n obj.push(1)\n obj.push(5)\n self.assertEqual(obj.push(6),3)\n\n def test_pop_data(self):\n obj = Stack(4)\n obj.push(1)\n obj.push(5)\n self.assertEqual(obj.pop(),5)\n\n def test_size(self):\n obj = Stack(4)\n obj.push(1)\n obj.push(5)\n self.assertEqual(obj.stack_size(),2)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Nipun2016/Core-Python-Training","sub_path":"Exercises/Urvi/Class and Object/test_p7.py","file_name":"test_p7.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15968682370","text":"import torch\nimport scipy.misc as misc\nimport random\nimport numpy as np\n# import cv2 as cv\n# from matplotlib import transforms\nfrom torchvision import transforms\n\n\ndef resize(image, newsize):\n methods = ['bicubic', 'lanczos', 'bilinear', 'bicubic', 'cubic']\n scale = [4, 3, 2, 1.5, 0.75, 0.5, 0.25]\n value = 0\n if (len(image.shape) == 3):\n w, h, d = image.shape\n else:\n w, h = image.shape\n # s = int(value * 1000) % 7\n # if s == 0:\n # image = image + np.random.random_sample(image.shape) * (int(value * 100) % 50)\n # elif s == 1:\n # image = cv.blur(image, (5, 5))\n # elif s == 2:\n # image = cv.GaussianBlur(image, (5, 5), 1)\n # elif s == 3:\n # image = cv.medianBlur(image, 5)\n # elif s == 4:\n # cv.bilateralFilter(image, 9, 75, 75)\n\n image = misc.imresize(image, newsize, methods[int(value * 10) % 5])\n for i in range(int(value * 100) % 5):\n image = misc.imresize(image, (int(newsize[0] * scale[int(value * (10 ** (1 + i))) % 7]),\n int(newsize[1] * scale[int(value * (10 ** (1 + i))) % 7])),\n methods[int(value * (10 ** (4 + i)) % 5)])\n image = misc.imresize(image, newsize, methods[int(value * (10 ** (4 + i)) % 10 - 5)])\n return image\n\n\ndef newDownscale(image, scale):\n w, h, d = image.shape\n w_new = int(w / scale)\n h_new = int(h / scale)\n image = resize(image, (w_new, h_new))\n return image\n\n\ndef get_downsimple_Tensor(images):\n res = []\n high = ((images + 1) * 255 / 2).numpy().astype(np.uint8).transpose(0, 2, 3, 1)\n for img in high:\n dows = newDownscale(img, 4)\n dows = transforms.ToTensor()(np.array(dows))\n dows = transforms.Normalize((.5, .5, .5), (.5, .5, .5))(dows)\n res.append(dows.numpy())\n return torch.Tensor(res)\n\n","repo_name":"maxCodeVector/InnovativeSRGAN","sub_path":"downSample.py","file_name":"downSample.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10416124339","text":"limit = input(\"Enter the maximum level you want your Fibonacci sequence calculated to: \")\n\na=0\nb=1\ni=0\n\nwhile i < int(limit)-1:\n c = a + b\n a = b\n b = c\n i+=1\n d = i+1\n\nprint(\"{0}: {1}\".format(str(d),b))","repo_name":"TheKicker/CLI-Toolbox","sub_path":"maths/fibonacci-instant.py","file_name":"fibonacci-instant.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"8675597326","text":"#75 sort colors\nclass Solution(object):\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n for i in nums:\n if i == 1:\n nums.remove(i)\n nums.append(i)\n for i in nums:\n if i == 2:\n nums.remove(i)\n nums.append(i)\n","repo_name":"spencerzhang91/LeetCode","sub_path":"075#sortColors_20150925.py","file_name":"075#sortColors_20150925.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"75144583736","text":"import bcolz\nimport numpy as np\n\ndef load_training_data(path = 'data/', seed=42):\n \"\"\"Load the training already splitted into training and validation.\n\n path - string (optional)\n Path where the data is stored.\n\n seed - integer (optional)\n Seed for the random splitting (if not given 42 is used).\n\n return\n ------\n train_ft - Array with the training data.\n train_target - Array with the training target.\n valid_ft - Array with the validation data.\n valid_target - Array with the validation target.\n \"\"\"\n trn_features = load_array(path + 'train_features.bc')\n trn_targets = load_array(path + 'targets.bc')\n \n myrandom = np.random.RandomState(seed) # This is to make the analysis reproducible\n msk = myrandom.rand(len(trn_features)) > 0.2\n train_ft = trn_features[msk]\n train_targets = trn_targets[msk]\n valid_ft = trn_features[~msk]\n valid_targets = trn_targets[~msk]\n \n return train_ft, train_targets, valid_ft, valid_targets\n\ndef save_array(path, array):\n \"\"\"Save an array as a bcolz array\"\"\"\n c = bcolz.carray(array, rootdir=path, mode='w')\n c.flush()\n \ndef load_array(path):\n \"\"\"Load a bcolz array as a numpy array\"\"\"\n return bcolz.open(path)[:]\n\n\n","repo_name":"baheredia/ml_experiments","sub_path":"portoseguro/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2940893933","text":"\r\n\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport sys\r\n# from shutil import copyfile\r\n# # Importing all necessary libraries \r\n# from keras.preprocessing.image import ImageDataGenerator \r\n# from keras.models import Sequential \r\n# from keras.layers import Conv2D, MaxPooling2D \r\n# from keras.layers import Activation, Dropout, Flatten, Dense \r\n# from keras import backend as K \r\nimport matplotlib.pyplot as plt\r\n\r\n# Define Model here\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator \r\nfrom keras.utils import to_categorical\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\r\nfrom keras.optimizers import Adam\r\nfrom keras.optimizers import SGD\r\n# snippet of using the LearningRateScheduler callback\r\nfrom keras.callbacks import LearningRateScheduler\r\nimport keras_video\r\nfrom keras import backend as K \r\n\r\n\r\nimg_width, img_height = 224, 224\r\n\r\nimport DeepFakeDetection as model\r\n\r\ndef main():\r\n # str1 = \"/train/manipulated\"\r\n # fileDirectory = \"new_data\" + str1\r\n # destination = \"faces_new_data\"+ str1\r\n \r\n # videos = [f for f in os.listdir(fileDirectory) if (\"mp4\") in f]\r\n # print(videos)\r\n # model.extract_features(fileDirectory,destination,videos,True,show_results =False,frame_rate = 50)\r\n # # model.pad_images(\"original_features/\")\r\n\r\n # originals = [( f ,0) for f in os.listdir('original_features')if (\"jpg\") in f]\r\n # manipulateds = [( f,1) for f in os.listdir('manipulated_features')if (\"jpg\") in f]\r\n # np.random.shuffle(originals)\r\n # np.random.shuffle(manipulateds)\r\n # train_data = []\r\n # val_data = []\r\n # test_data = []\r\n # train_data.extend(originals[0: int(len(originals)*0.6)])\r\n # train_data.extend(manipulateds[0: int(len(manipulateds)*0.6)])\r\n # np.random.shuffle(train_data)\r\n # val_data.extend(originals[int(len(originals)*0.6):int(len(originals)*0.8)])\r\n # val_data.extend(manipulateds[int(len(manipulateds)*0.6):int(len(manipulateds)*0.8)])\r\n # np.random.shuffle(val_data)\r\n # test_data.extend(originals[int(len(originals)*0.8):int(len(originals))])\r\n # test_data.extend(manipulateds[int(len(manipulateds)*0.8):int(len(manipulateds))])\r\n # np.random.shuffle(test_data)\r\n \r\n \r\n # for (filename,label) in train_data:\r\n # if(label == 0):\r\n # copyfile(\"original_features/\" + filename,\"data/train/original/\"+filename)\r\n # else:\r\n # copyfile(\"manipulated_features/\" + filename,\"data/train/manipulated/\"+filename)\r\n\r\n # for (filename,label) in val_data:\r\n # if(label == 0):\r\n # copyfile(\"original_features/\" + filename,\"data/val/original/\"+filename)\r\n # else:\r\n # copyfile(\"manipulated_features/\" + filename,\"data/val/manipulated/\"+filename)\r\n\r\n # for (filename,label) in test_data:\r\n # if(label == 0):\r\n # copyfile(\"original_features/\" + filename,\"data/test/original/\"+filename)\r\n # else:\r\n # copyfile(\"manipulated_features/\" + filename,\"data/test/manipulated/\"+filename)\r\n # print(\"Train_data size: \",len(train_data))\r\n # print(\"Val data size: \",len(val_data))\r\n # print(\"Test data size: \",len(test_data))\r\n\r\n # nb_train_samples = len(train_data) \r\n # nb_validation_samples = len(val_data)\r\n epochs = 24\r\n batch_size = 128\r\n img_width, img_height = 224, 224\r\n\r\n if K.image_data_format() == 'channels_first': \r\n input_shape = (3, img_width, img_height) \r\n else: \r\n input_shape = (img_width, img_height, 3) \r\n\r\n def lr_scheduler(epoch, lr):\r\n decay_rate = 0.1\r\n decay_step = 90\r\n if epoch % decay_step == 0 and epoch:\r\n return lr * decay_rate\r\n return lr\r\n \r\n \r\n learning_rate = 0.1\r\n decay_rate = 5e-2\r\n momentum = 0.9\r\n opt = Adam(lr=learning_rate, decay=decay_rate)\r\n #opt = SGD(lr=learning_rate,momentum=momentum, decay=decay_rate, nesterov=False)\r\n vgg16_model = VGG16(\r\n include_top=True,\r\n input_tensor=None,\r\n input_shape= input_shape,\r\n weights= None,\r\n pooling='max',\r\n classes=2)\r\n\r\n vgg16_model.compile(loss='categorical_crossentropy',optimizer=opt, metrics=['accuracy'])\r\n vgg16_model.summary()\r\n\r\n\r\n\r\n # this is the augmentation configuration we will use for training\r\n train_datagen = ImageDataGenerator()\r\n\r\n # this is the augmentation configuration we will use for testing:\r\n # only rescaling\r\n test_datagen = ImageDataGenerator()\r\n\r\n\r\n\r\n train_generator = train_datagen.flow_from_directory(\r\n 'faces_new_data/train', # this is the target directory\r\n target_size=(224, 224), # all images will be resized to 150x150\r\n batch_size=batch_size,\r\n class_mode=\"categorical\") # since we use binary_crossentropy loss, we need binary labels\r\n\r\n # this is a similar generator, for validation data\r\n validation_generator = test_datagen.flow_from_directory(\r\n 'faces_new_data/val',\r\n target_size=(224, 224),\r\n batch_size=batch_size,\r\n class_mode=\"categorical\")\r\n # this is a similar generator, for validation data\r\n test_generator = test_datagen.flow_from_directory(\r\n 'faces_new_data/test',\r\n target_size=(224, 224),\r\n batch_size=batch_size,\r\n class_mode=\"categorical\")\r\n # this is a generator that will read pictures found in\r\n # subfolers of 'data/train', and indefinitely generate\r\n # batches of augmented image data\r\n \r\n # this is a similar generator, for validation data\r\n \r\n\r\n history = vgg16_model.fit_generator( \r\n train_generator, \r\n steps_per_epoch=20, \r\n epochs=epochs, \r\n validation_data=validation_generator, \r\n validation_steps=20,\r\n verbose=1) \r\n predictions = model.predict(test_generator)\r\n vgg16_model.save_weights('second_try.h5') \r\n print(history.history.keys())\r\n # summarize history for accuracy\r\n plt.plot(history.history['accuracy'])\r\n plt.plot(history.history['val_accuracy'])\r\n plt.title('model accuracy')\r\n plt.ylabel('accuracy')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n plt.show()\r\n # summarize history for loss\r\n plt.plot(history.history['loss'])\r\n plt.plot(history.history['val_loss'])\r\n plt.title('model loss')\r\n plt.ylabel('loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\nmain()\r\n\r\n","repo_name":"IrmakTurkoz/DeepFakeDetectionVoronoi","sub_path":"face_regions_model/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":6995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24683636860","text":"#using CoinGeckoAPI\n#Donation ETH/Erc20 address: 0x172aa811d2F81b71D7c774a96295A9001242F815\n\n\nfrom pycoingecko import CoinGeckoAPI\nimport time\nfrom datetime import datetime\n\ncg = CoinGeckoAPI()\n\nholding = int(input(\"How much Statera tokens do you own: \"))\ndelay = int(input('Choose update delay (in seconds, recomended 60): '))\nprint('------')\n\nwhile True:\n\tprice = cg.get_price(ids='statera', vs_currencies='usd')\n\tvalue = list((list(price.values())[0]).values())[0]\n\tnow = datetime.now()\n\tcurrent_time = now.strftime(\"%H:%M:%S\")\n\tprint('STA Price: $',value)\n\tprint('Your STA is valued at: $',value*holding)\n\tprint(\"Last Updated =\", current_time)\n\tprint('------')\n\ttime.sleep(delay)\n","repo_name":"IronProjects/StateraPriceUpdater","sub_path":"StaPriceUpdater.py","file_name":"StaPriceUpdater.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18604271656","text":"# stdlib\r\nimport os\r\n\r\n# third party\r\nimport click\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# constants\r\n\r\n\r\ndef readlines(path):\r\n with open(path, \"r\") as handle:\r\n return handle.read().splitlines()\r\n\r\n\r\ndef process(files):\r\n return [[len(line) for line in lines if len(line) != 0] for lines in files]\r\n\r\n\r\ndef group(lists):\r\n series = pd.Series([val for lst in lists for val in lst])\r\n bin_n = int(len(series.unique()) // len(series.unique()) ** (1 / 2))\r\n\r\n bins = pd.cut(series, bin_n)\r\n print(f\"Created {bin_n} bins\")\r\n return series.groupby(bins).mean().sort_values(ascending=False)\r\n\r\n\r\ndef getpaths(directory, extensions):\r\n\r\n return [\r\n os.path.join(r, file)\r\n for r, d, f in os.walk(directory)\r\n for file in f\r\n if os.path.splitext(file)[1] in extensions\r\n ]\r\n\r\n\r\ndef plot(values):\r\n def plt_circle(ax, r, maximum, color):\r\n plot = lambda: ax.add_artist(plt.Circle((0, r - maximum), r, color=color))\r\n if r < maximum * 0.5:\r\n plot()\r\n else:\r\n ax.add_artist(plt.Circle((0, r - maximum), r * 1.03, color=\"#ffffff\"))\r\n plot()\r\n\r\n def gen_color(i, r, maximum, l):\r\n return (min(1, i / l * 1.5), min(1, (i / l)), min(1, (i / l) * 0.9))\r\n\r\n # matplotlib\r\n fig, ax = plt.subplots(1)\r\n mx = max(values)\r\n l = len(values)\r\n limits = mx * 1.1\r\n plt.xlim(-limits, limits)\r\n plt.ylim(-limits, limits)\r\n ax.set_aspect(1)\r\n\r\n # Can do with arrays instead\r\n # plotting\r\n for (i, r) in enumerate(values):\r\n color = gen_color(i, r, mx, l)\r\n plt_circle(ax, r, mx, color)\r\n\r\n plt.grid(False)\r\n plt.show()\r\n\r\n\r\n@click.command()\r\n@click.argument(\"extensions\", nargs=-1)\r\n@click.argument(\"directory\", required=True)\r\ndef main(extensions, directory=\".\"):\r\n if not extensions:\r\n extensions = [\".py\"]\r\n\r\n print(f\"Working with the following extensions: {extensions}\")\r\n\r\n plot(group(process(map(readlines, getpaths(directory, extensions)))))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"finnkauski/liner","sub_path":"liner/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14305603235","text":"\nfrom functions import *\n\nclass LoadingTest:\n\n def test_get_csv(self):\n filename = 'v10'\n file = get_csv(filename)\n assert type(file.columns) == list\n\n\n\n def test_get_video(self):\n video_name = 'v1/2_0.mp4'\n video = get_video(video_name)\n s, img = video.read()\n assert (s==True)\n\n","repo_name":"Dlb97/P_UAH_PREVENTION","sub_path":"lstm/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"4490129575","text":"import tensorflow as tf\nfrom tensorflow.keras import layers\n\n\n# Food 101 数据集\n\ndef _parse_function(filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_jpeg(image_string)\n image_resized = tf.image.resize_images(image_decoded, [224, 224, 3])\n return image_resized, label\n\n\ndef vgg16(input_tensor, shape=(None, 224, 224, 3)):\n inputs = layers.Input(tensor=input_tensor, shape=shape)\n\n x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', name='conv1_1')(inputs)\n x = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', name='conv1_2')(x)\n x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool1')(x)\n x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', name='conv2_1')(x)\n x = layers.Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', name='conv2_2')(x)\n x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool2')(x)\n x = layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', name='conv3_1')(x)\n x = layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', name='conv3_2')(x)\n x = layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', name='conv3_3')(x)\n x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool3')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv4_1')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv4_2')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv4_3')(x)\n x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool4')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv5_1')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv5_2')(x)\n x = layers.Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', name='conv5_3')(x)\n x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid', name='pool5')(x)\n x = layers.Flatten(name='flatten')(x)\n\n x = layers.Dense(units=4096, activation='relu', name='fc6')(x)\n x = layers.Dropout(0.5, name='drop6')(x)\n\n x = layers.Dense(units=4096, activation='relu', name='fc7')(x)\n x = layers.Dropout(0.5, name='drop7')(x)\n\n output = layers.Dense(units=1000, activation='softmax', name='fc8_prob')(x)\n model = tf.keras.Model(inputs=inputs, outputs=output, name='vgg16')\n return model\n\n\ndef train():\n filenames = [\"/media/data/oldcopy/PythonProject/Food101/TFRecord/train.tfrecords\"]\n trainSet = tf.data.TFRecordDataset(filenames)\n trainSet = trainSet.map(_parse_function)\n trainSet = trainSet.repeat()\n trainSet = trainSet.batch(32)\n\n filenames = [\"/media/data/oldcopy/PythonProject/Food101/TFRecord/test.tfrecords\"]\n testSet = tf.data.TFRecordDataset(filenames)\n testSet = testSet.map(_parse_function)\n testSet = testSet.repeat()\n testSet = testSet.batch(32)\n\n model = vgg16(trainSet)\n model.fit(trainSet, epochs=10, batch_size=32, validation_data=testSet)\n\n loss, accuracy = model.evaluate(testSet)\n print(\"loss:%f, accuracy:%f\" % (loss, accuracy))\n","repo_name":"wmpscc/CNN-Series-Getting-Started-and-PyTorch-Implementation","sub_path":"TF2/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"22"} +{"seq_id":"2416494744","text":"from collections import defaultdict\r\n\r\nclass Solution:\r\n def maxPoints(self, points: List[List[int]]) -> int:\r\n\r\n if len(points) <= 1:\r\n return len(points)\r\n\r\n result = 1\r\n\r\n # Compute the slope between all pairs of points.\r\n # Most frequent slope will form the line.\r\n for i in range(len(points)):\r\n # Create a new dictionary for each point!\r\n frequency = defaultdict(int)\r\n for j in range(i+1, len(points)):\r\n if points[i][0] != points[j][0]:\r\n slope = (points[i][1]-points[j][1]) / (points[i][0]-points[j][0])\r\n frequency[slope] += 1\r\n result = max(frequency[slope], result)\r\n # In case of infinite slope (Or vertical line)\r\n else:\r\n frequency[math.inf] += 1\r\n result = max(frequency[math.inf], result)\r\n\r\n return result+1","repo_name":"riehseun/riehseun.github.io","sub_path":"leet150/leet149.py","file_name":"leet149.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"1256181987","text":"from simulator import Environment\nimport numpy as np\n\nclass WorkspaceEnvironment(Environment):\n def __init__(self, T):\n Environment.__init__(self,\n input_dimension=2,\n output_dimension=3,\n s0=np.array([0,1]),\n T=T)\n def init_m(self, m0):\n return np.array([m0 for i in range(self.T)])\n def init_rho(self, rho0):\n return np.array([rho0 for i in range(self.T)])\n\n def transition(self, s, a, hyperParam):\n '''\n MDP transition.\n Args:\n s: current state\n a: selected action\n Output:\n s_next: next action\n '''\n # [0,1] is 1, [1,0] is 0\n #rho=1,m=1,k=1,beta=0.5,gamma=0.6,theta=2,c=0.5\n if s[0] == 0:\n prob = 1.0 / (1 + hyperParam.k * np.exp(0-a))\n else:\n prob = 1.0 / (hyperParam.theta + 100 * np.exp(0-a))\n\n x = self.reward_gen(1, prob)\n\n if x == 1:\n s_next = [0,1]\n else:\n s_next = [1,0]\n\n s_next = np.array(s_next)\n return s_next\n\n def get_reward(self, s, a, s_next, hyperParam, t):\n '''\n Get the immediate reward.\n Args:\n s: state\n a: selected action\n s_next: next state\n t:time\n Output:\n r: immediate reward\n '''\n # [0,1] is 1, [1,0] is 0\n if s[0] == 0 and a == 0:\n r = 0\n elif s[0] == 0 and a > 0:\n r = 1\n elif s[0] == 1:\n r = -a * hyperParam.c\n return r\n\n def evaluateEach(self, result):\n '''\n Get the interested goal of each individual behavior. for DoE.\n Args:\n result: the Result class\n Output:\n goal: How to define it depends. This should give the Y value in DoE. The goal could be avg_act,std_act, avg_state\n std_state, qvalss0, qvalss1 ,etc. Will test with avg_act for the time being.\n\n '''\n\n state0 = 0\n state1 = 0\n actions0 = []\n actions1 = []\n payment=0.0\n result = result.resultActState\n for i in range(len(result)):\n if result[i][0][0] == 0:\n if result[i][1] >0 :\n payment = payment + 1\n state1 += 1\n actions1.append(result[i][1])\n elif result[i][0][0] == 1:\n state0 += 1\n actions0.append(result[i][1])\n #payrate=float(payment)/len(result)\n avg_act_state1 = np.std(actions1, axis=0)\n return avg_act_state1\n #return payrate\n","repo_name":"yuesOctober/DRL-DOE","sub_path":"workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14958205391","text":"try :\n hours = input(\"Enter Hours: \")\n hours = float(hours)\n rate = input(\"Enter Rate: \")\n rate = float(rate)\n if hours > 40 : \n\n pay = ((hours - 40) * rate * 1.5) + 40 * rate\n print(pay)\n else : \n pay = hours * rate \n print(pay)\nexcept : \n print(\"Wrong data type\")\n\n","repo_name":"Plafig/origin","sub_path":"pay_new.py","file_name":"pay_new.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15930678051","text":"import os\n\nfrom GIBSDownloader.coordinate_utils import Rectangle\nfrom GIBSDownloader.product import Product\n\nclass TileMetadata():\n \"\"\"\n Extracts metadata from a tile's file name\n\n Attributes:\n date (str): Date of the tile\n region (Rectangle): Coordinate region of the tile\n \"\"\"\n def __init__(self, tile_path):\n filename = os.path.basename(tile_path)\n components = filename.split('_')\n date_str = components[0] \n region_str = os.path.splitext(components[1])[0]\n region = Rectangle.from_str(region_str)\n self.date = date_str\n self.region = region\n\nclass TiffMetadata():\n \"\"\"\n Extracts metadata from the original downloaded image's file name \n\n Attributes:\n name (str): base file name without the path\n date (str): date of the image\n product_name (str): name of the downloaded imagery product\n \"\"\"\n def __init__(self, tiff_path):\n filename = os.path.basename(tiff_path)\n components = filename.split('_')\n date = os.path.splitext(components[1])[0]\n self.name = filename\n self.date = date\n self.product_name = components[0]\n\nclass IntermediateMetadata():\n \"\"\"\n Extracts metadata from an intermediate image's file name\n\n The extracted metadata is useful in determining which intermediate image\n contains a given tile based on the intermediate image's pixel coordinates in\n the original image. The metadata stores these pixel coordinates.\n\n Attributes:\n start_x (int): x-pixel coordinate of the top left of the intermediate\n start_y (int): y-pixel coordinate of the top left of the intermediate\n end_x (int): x-pixel coordinate of the bottom right of the intermediate\n end_y (int): y-pixel coordinate of the bottom right of the intermediate\n \"\"\"\n def __init__(self, inter_path):\n filename = os.path.basename(inter_path)\n components = filename.split(\"_\")\n self.name = filename\n self.start_x = int(components[1])\n self.start_y = int(components[2])\n self.end_x = int(components[3])\n self.end_y = int(os.path.splitext(components[4])[0])","repo_name":"spaceml-org/GIBS-Downloader","sub_path":"GIBSDownloader/file_metadata.py","file_name":"file_metadata.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"22"} +{"seq_id":"12744582870","text":"import asyncio\nfrom asyncio import StreamReader\nfrom asyncio import StreamWriter\n\n\nasync def server(reader: StreamReader, writer: StreamWriter) -> None:\n \"\"\"\n Простой TCP эхо-сервер.\n :param reader: читатель потока.\n :param writer: писатель в поток.\n :return: None\n \"\"\"\n\n # Ожидать данные. Прочитать 100 байт при поступлении данных.\n # Если количество байт не указано, то прочить до EOF.\n data = await reader.read(100)\n # Декодировать байты.\n message = data.decode()\n # Получить доступ к дополнительной информации о транспорте.\n # https://docs.python.org/3.8/library/asyncio-protocol.html#asyncio.BaseTransport.get_extra_info\n addr = writer.get_extra_info('peername')\n print(f\"Received {message!r} from {addr!r}\")\n\n # Сформировать сообщение клиенту.\n response = f\"Server handled: {message}\"\n # Декодировать сообщение в байты.\n data = response.encode('utf-8')\n print(f\"Sending: {response}\")\n # Записать в поток байты сообщения.\n writer.write(data)\n # Ожидать, пока не станет возможным возобновить запись в поток.\n await writer.drain()\n print(\"Close writer stream\")\n # Закрыть поток записи.\n writer.close()\n\n\nasync def runner(server):\n # Запустить сервер сокетов и получить coroutine.\n server = await asyncio.start_server(server, '127.0.0.1', 8888)\n # Получить имя первого сокета. Возвращает: ('127.0.0.1', 8888)\n addr = server.sockets[0].getsockname()\n print(f'Serving on {addr}')\n\n async with server:\n # Начать принимать соединения, пока coroutine не будет отменена.\n # Сервер будет закрыт при coroutine.cancel().\n await server.serve_forever()\n\nasyncio.run(runner(server))\n","repo_name":"multiscripter/python","sub_path":"std_lib/asyncio/p02_streams/tcp_echo_server.py","file_name":"tcp_echo_server.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28667833344","text":"\"\"\"มีตัวอักษรพิเศษไหมคะ\"\"\"\r\n\r\n\r\ndef find_special():\r\n \"\"\"ถ้ามีเป็นno ถ้าไม่มีเป็นyes\"\"\"\r\n\r\n text = input()\r\n if text.isalnum():\r\n print(\"Yes, it is.\")\r\n else:\r\n print(\"No, it's not.\")\r\n\r\nfind_special()\r\n","repo_name":"bonnibelz13/pscp-lab","sub_path":"prepro/yes or no.py","file_name":"yes or no.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39838152582","text":"lis = [1,4,9,15,19,23,32]\nval = 23\nindex = -1\nstart = 0\nend = len(lis)\n\n\nwhile start<=end:\n mid = (start + end)//2\n if lis[mid] == val:\n index = mid\n break\n elif val>lis[mid]:\n start = mid + 1\n else:\n end = mid - 1\n \n\nif index == -1:\n print(\"Value not found\")\n\nelse:\n print(\"Value found at index\", index)\n","repo_name":"NamamiShanker/Python-Work","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9549142553","text":"import pygame\nimport numpy as np\nfrom grid import Grid\nfrom ui import Button, TextUI\n# from ml import Model\nfrom ml import predict\nfrom scipy import ndimage\nfrom joblib import load\nimport tensorflow as tf\n\n\nclass Window:\n \"\"\"Window renderer class\"\"\"\n def __init__(self, model=None):\n pygame.init()\n self.WIDTH = 1920\n self.HEIGHT = 1080\n self.RESOLUTION = (self.WIDTH, self.HEIGHT)\n self.screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)\n self.clock = pygame.time.Clock()\n self.FPS = 40\n self.running = True\n self.model=model\n self.sigma = 0.75\n \n \n def handle_events(self):\n \"\"\"handles the input events\"\"\"\n if pygame.mouse.get_pressed()[0]:\n x_pos, y_pos = pygame.mouse.get_pos()\n self.grid.update_array(x_pos, y_pos - 100)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n self.running = False\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n self.running = False \n \n def run(self):\n \"\"\"Runs the main loop of the app.\"\"\"\n self.grid = Grid()\n self.reset_button = Button(150, 850, 150, 50, \"Reset Grid\")\n self.gaussian_blur_button = Button(270, 800, 200, 50, \"Gaussian Blur\")\n self.predict_button = Button(800, 300, 100, 50, \"Predict\")\n self.gaussianSigma_plus_button = Button(1000, 800, 60, 60, \"+\")\n self.gaussianSigma_minus_button = Button(1080, 800, 60, 60, \"-\")\n self.predicted_text = TextUI(\"Prediction: \", (950,300), (255,255,255))\n self.sigma_value_text = TextUI(\"Sigma: \", (960,750), (255,255,255))\n self.app_name = TextUI(\"Digit Predictor GUI, v_1.0\", (700, 50), (255,255,255))\n self.app_name.fontSize = 40\n self.sigma_value_text.fontSize = 40\n self.predicted_text.fontSize = 100\n self.gaussianSigma_plus_button.font_size = 40\n self.gaussianSigma_minus_button.font_size = 40 \n \n while self.running:\n self.screen.fill((60,60,90))\n self.clock.tick(self.FPS)\n self.app_name.render(self.screen)\n self.grid.render(screen=self.screen, filter=True, sigma=self.sigma)\n if self.reset_button.render(self.screen):\n self.grid.reset_array()\n \n \n # # Prediction only after clicking on 'Predict' Button\n # if self.predict_button.render(self.screen):\n # final_array = ndimage.gaussian_filter(self.grid.grid_array, sigma=1)\n # self.predicted_text.render(self.screen, str(self.model.predict_input(final_array)[0]))\n \n \n # # Everytime Prediction except if sum of the matrix is zero\n if self.grid.grid_array.sum() != 0:\n final_array = self.grid.get_gaussianBlurerd_array(sigma=self.sigma)\n # prediction = str(self.model.predict_input(final_array)[0])\n # prediction = str(predict(final_array, self.model)[0])\n prediction = str(predict(final_array, self.model))\n self.predicted_text.render(self.screen, prediction)\n \n # if self.gaussian_blur_button.render(self.screen):\n # print(\"Hiiiii\")\n # if self.gaussian_blur_button.Update() == False:\n # print(\"Hurray......\")\n # self.grid.gaussian_blur()\n \n \n # Customize and render Gaussian Blur Sigma\n self.sigma_value_text.render(self.screen, str(np.round(self.sigma, 2)))\n if self.gaussianSigma_plus_button.render(self.screen):\n self.sigma += 0.01\n elif self.gaussianSigma_minus_button.render(self.screen):\n self.sigma -= 0.01\n \n \n pygame.display.flip()\n self.handle_events()\n \n\nif __name__ == \"__main__\":\n # # This is for creating and training a model\n # model = Model()\n # model.save_model()\n # # This is loading already trained and saved model\n # model = load('sgd_clf.joblib')\n # tensorflow model\n model = tf.keras.models.load_model('tf_keras/digit_predictor.model')\n \n app = Window(model=model)\n app.run()","repo_name":"nepalsandesh/Digit_Predictor_GUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19247128035","text":"from meeting import Meeting\nfrom cal import Calendar\nfrom datetime import datetime\nfrom json import dump, load\n\n\ncalendar = Calendar()\n\nwith open(\"Meetings.json\",encoding=\"utf8\") as file:\n data = load(file)\n for item in data:\n meeting = Meeting(\n datetime.strptime(item['date'], '%Y-%m-%d %H:%M'),\n item['title'])\n calendar.add_meeting(meeting)\n\nif __name__ == '__main__':\n while True:\n option = input('Co chcesz zrobić - L - lista, D - dodaj, Q - wyjdź ')\n if option in ('L','l'):\n for _, meeting in calendar.meetings.items():\n print(f'{meeting.meeting_date} : {meeting.title}')\n elif option in('D', 'd'):\n title = input('Tytuł: ')\n meeting_date = datetime.strptime(input('Data spotkania yyyy-mm-dd hh:mm '), '%Y-%m-%d %H:%M')\n calendar.add_meeting(Meeting(meeting_date, title))\n\n with open('Meetings.json', 'w') as file:\n data = []\n for meeting in calendar.meetings.values():\n data.append({\n 'title': meeting.title,\n 'date': datetime.strftime(meeting_date,'%Y-%m-%d %H:%M')})\n dump(data, file)\n\n elif option in ('Q', 'q'):\n break\n else:\n print('Nie rozumiem')\n\n\n","repo_name":"elblackmountain/Calendar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23497782470","text":"\n\n#1 - Inicialização\nidade = int( input(\"Digite uma idade ou 0 para sair\") )\nquantidade = 0\nsoma = 0\n\n#2- Condição\nwhile idade != 0:\n quantidade+=1\n soma += idade\n #3- modificação\n idade = int(input(\"Digite uma idade ou 0 para sair\"))\n\nmedia = soma / quantidade\n\nprint(f\"A média de idade é de {media} anos e foram recebidas {quantidade} idades\")\n\n","repo_name":"thiagomiranda84/programacao-aplicada","sub_path":"aula7/exemplo3.py","file_name":"exemplo3.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27702534916","text":"import sys, os, inspect\n\nimport toolspath\nfrom testing import Xv6Test, Xv6Build\n\ncurdir = os.path.realpath(os.path.dirname(inspect.getfile(inspect.currentframe())))\ndef get_description(name):\n cfile = os.path.join(curdir, 'tests', name+'.c')\n with open(cfile, 'r') as f:\n desc = f.readline()\n desc = desc.strip()\n desc = desc[2:]\n if desc[-2:] == '*/':\n desc = desc[:-2]\n return desc.strip()\n\nall_tests = []\nbuild_test = Xv6Build\nfor testname in '''null null2\n stack stack2 stack3 stack4\n heap heap2\n bounds bounds2 bounds3 bounds4\n '''.split():\n members = {\n 'name': testname,\n 'tester': 'tests/' + testname + '.c',\n 'description': get_description(testname)\n }\n newclass = type(testname, (Xv6Test,), members)\n all_tests.append(newclass)\n setattr(sys.modules[__name__], testname, newclass)\n\nclass usertests(Xv6Test):\n name = 'usertests'\n tester = 'tests/usertests.c'\n description = get_description('usertests')\n timeout = 240\n\n#all_tests.append(usertests)\n\nfrom testing.runtests import main\nmain(build_test, all_tests)\n","repo_name":"sd-omkar/cs537","sub_path":"p3/tests/3b/project3b.py","file_name":"project3b.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"22"} +{"seq_id":"43514451762","text":"from itertools import combinations\r\nimport numpy as np\r\n\r\ndef subsets(collection):\r\n '''construct iterator over all subsets in collection'''\r\n for i in range(len(collection)+1):\r\n it = combinations(collection, i)\r\n try:\r\n while True:\r\n yield(list(next(it)))\r\n except StopIteration:\r\n pass\r\n raise StopIteration\r\n\r\ndef multidim_list(n_list):\r\n '''create multidimensional list of size n_list with elements None'''\r\n ret = None\r\n for n in reversed(n_list):\r\n ret = [ret] * n\r\n return ret\r\n\r\ndef len_multidim_list(multidim_list):\r\n '''return dimensions of multidimensional list (assuming hypercube)'''\r\n mlist = multidim_list\r\n ret = []\r\n\r\n while type(mlist) is list:\r\n ret.append(len(mlist))\r\n mlist = mlist[0]\r\n\r\n return ret\r\n\r\ndef prod(l):\r\n '''compute product of all elements in list l'''\r\n ret = 1\r\n for el in l:\r\n ret *= el\r\n return ret\r\n\r\ndef fold(unfolded_tensor, mode, shape):\r\n '''Fold a tensor (taken from tensorly)'''\r\n full_shape = list(shape)\r\n mode_dim = full_shape.pop(mode)\r\n full_shape.insert(0, mode_dim)\r\n return np.moveaxis(np.reshape(unfolded_tensor, full_shape), 0, mode)\r\n\r\ndef unfold(tensor, mode):\r\n '''Unfold a tensor in mode (taken from tensorly)'''\r\n return np.reshape(np.moveaxis(tensor, mode, 0), (tensor.shape[mode], -1))\r\n\r\ndef sparse_tensordot(a, b, dim):\r\n # multiplication of sparse matrix a with tensor b along dimension dim:\r\n # c(i1, ...ij, ..., in) = \\sum_{ij'} a(ij,ij') b(i1, ..., ij', ..., in)\r\n\r\n assert a.shape[1] == b.shape[dim]\r\n\r\n new_shape = list(b.shape)\r\n new_shape[dim] = a.shape[0]\r\n\r\n return fold(a.dot(unfold(b, dim)), dim, new_shape)\r\n","repo_name":"pettni/mdp_network","sub_path":"best/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"35000768541","text":"\"\"\"movies URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('adminmovies/',views.admin),\r\n path('getenddate/',views.getenddate),\r\n path('upmovies/',views.upmovie),\r\n path('upload/',views.uploads),\r\n path('movieenter/',views.enter),\r\n path('updatescreen/',views.updatescreen),\r\n path('',views.index),\r\n path('modify/register.html',views.register),\r\n \t\t path('checkdata/',views.checkdata),\r\n \t\t path('checkgoogle/',views.google),\r\n \t\t path('signup/',views.signup), \r\n\t path('signin/',views.signin), \r\n path('profile/',views.profile),\r\n path('changemobile/',views.changemobile),\r\n path('deleteotp/',views.deleteotp),\r\n path('validateotp/',views.validateotp),\r\n path('sendsms/',views.sendSMS),\r\n \tpath('getscreening/',views.getscreening),\r\n \tpath('getscreendetails/',views.getscreendetails),\r\n \tpath('updatepsw/',views.updatepsw),\r\n \tpath('checkseats/',views.checkseats),\r\n \tpath('sendseattomob/',views.sendseattomob),\r\n \t\tpath('getmovies/',views.getmovies),\r\n \tpath('sendemail/',views.sendemail),\r\n \t path('LOGOUT/',views.logout) \r\n]\r\n","repo_name":"hitman26422/quickbookdemo","sub_path":"movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29381743213","text":"import pickle\nimport numpy as np\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n d = pickle.load(fo, encoding='bytes')\n return d\n\ndef get_training_data(index):\n if index < 1 or index > 5:\n index = 1\n d = unpickle('cifar-10-batches-py/data_batch_{}'.format(index))\n x = d[b'data']\n # y = [[1 if i == label else 0 for i in range(10)] for label in d[b'labels']]\n y = d[b'labels']\n return (x, np.array(y))\n\ndef get_test_data():\n d = unpickle('cifar-10-batches-py/test_batch')\n x = d[b'data']\n # y = [[1 if i == label else 0 for i in range(10)] for label in d[b'labels']]\n y = d[b'labels']\n return (x, np.array(y))\n\ndef map_label_name(label):\n names = unpickle('cifar-10-batches-py/batches.meta')\n return names[b'label_names'][label].decode()\n","repo_name":"rhidra/tc-cours","sub_path":"4tc/pre/tests/fromscratch/cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12326852859","text":"'''\nExample class to demonstrate automatic testing and Jenkins reports\nusing Python.\n\n@author Csaba Sulyok\n'''\n\nimport logging\nimport sys\n\n\nclass ExampleClass(object):\n \n theWord = 'bird'\n \n def whatsTheWord(self):\n return self.theWord\n \n def isTheWord(self, referenceWord):\n return referenceWord.strip().lower() == self.theWord\n\n\n\n'''\nBuild console logger.\n'''\ndef buildLogger(name = 'example'):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n \n consoleLogger = logging.StreamHandler(stream=sys.stdout)\n consoleLogger.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n logger.addHandler(consoleLogger)\n return logger\n\n\n\n'''\nMain entry point of distributable.\n'''\nif __name__ == '__main__':\n logger = buildLogger()\n logger.warn('Entry point')\n \n example = ExampleClass()\n logger.info(\"The %s is the word!\" %(example.whatsTheWord()))","repo_name":"KajtarAttila/ubbse-2015","sub_path":"python/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33107726852","text":"import streamlit as st\nfrom PIL import Image\nimport pyparsing\nfrom gsheetsdb import connect\nimport time\nimport random\n\n# Create a connection object.\nconn = connect()\n\n# Perform SQL query on the Google Sheet with caching\n@st.cache(ttl=600, allow_output_mutation=True)\ndef run_query(query):\n rows = conn.execute(query, headers=1)\n return rows.fetchall()\n\n# Get data from Google Sheet\nsheet_url = st.secrets[\"public_gsheets_url\"]\ndf = run_query(f'SELECT * FROM \"{sheet_url}\"')\n\n# Create dictionary of sushi rolls and their ingredients\nsushi_rolls = {}\ningredients = set()\nfor row in df:\n roll_name = row[0]\n roll_ingredients = [ingredient.strip().strip('\"').strip() for ingredient in row[1].split(',')]\n if roll_name not in sushi_rolls:\n sushi_rolls[roll_name] = roll_ingredients\n else:\n sushi_rolls[roll_name].extend(roll_ingredients)\n ingredients.update(roll_ingredients)\ningredients = list(ingredients)\n\n# Function to get list of rolls that can be made with selected ingredients\ndef get_rolls(ingredients):\n available_rolls = []\n for roll, roll_ingredients in sushi_rolls.items():\n if set(roll_ingredients).issubset(ingredients):\n available_rolls.append(roll)\n return available_rolls\n\n# creating list of sushi funfacts\nfunfacts = [\n \"Sushi originated in Southeast Asia and was introduced to Japan in the 8th century.\",\n \"The word sushi means 'sour-tasting' and refers to the vinegared rice used in the dish.\",\n \"In Japan, it's considered bad manners to dip the rice side of sushi into the soy sauce.\",\n \"Tuna is the most popular fish used in sushi.\",\n \"The first sushi bar in the United States opened in 1966 in Los Angeles.\",\n \"The most popular type of sushi in the world is the California Roll, which was created in Los Angeles in the 1970s.\",\n \"In Japan, it's customary to eat sushi with chopsticks.\",\n \"Raw salmon is not traditionally used in Japanese sushi and is a more recent addition.\",\n \"Gari, or pickled ginger, is a common condiment served with sushi.\",\n \"The fastest sushi eater in the world can eat 14 pounds of sushi in 8 minutes.\"\n]\n\n# Streamlit app\nst.title(\"🍣 Sushi Roll Maker\")\n\nwith st.expander(\"Get more information about the app!\"):\n st.write(\"Welcome to the Sushi Roll Maker! 🎉\")\n st.write(\"Are you a fan of sushi and want to make some at home? This app is here to help you.\")\n st.write(\"You can find a variety of sushi roll recipes in our database, and all you need to do is select the ingredients you have on hand. The app will then suggest which rolls you can make.\")\n st.write(\"The recipe database is connected to a Google Drive sheet and is updated automatically every 10 minutes, so you can always find the latest and greatest recipes.\")\n st.write(\"You don't see your favorite sushi roll? No problem! Feel free to add your own recipes to our database by contributing to the [Google Drive Sheet](https://docs.google.com/spreadsheets/d/1LIaTr9CqhJjCCv_V5sdJa490VBqKXhAE_HjL1o-rxcI/edit?usp=sharing). We would love to see what unique creations you come up with.\")\n\nwith st.empty():\n st.write(\" \")\n st.write(\" \")\n st.write(\" \")\n \nst.write(\"Let's start making some sushi! Please select the ingredients you have:\")\nselected_ingredients = [] \n\n# Divide ingredients into three columns for display\ncol1, col2, col3 = st.columns(3)\n\n# Display ingredients and add to selected_ingredients when checked\nwith col1:\n st.markdown(\"
    \", unsafe_allow_html=True)\n for ingredient in ingredients[:len(ingredients)//3]:\n try:\n img = Image.open(f\"images/{ingredient}.png\")\n if st.checkbox(f\"{ingredient}\", key=ingredient):\n selected_ingredients.append(ingredient)\n st.image(img, width=50, use_column_width=True)\n except:\n if st.checkbox(f\"{ingredient}\", key=ingredient):\n selected_ingredients.append(ingredient)\n img = Image.open(\"images/nopic.png\")\n st.image(img, width=50, use_column_width=True)\n st.markdown(\"
    \", unsafe_allow_html=True)\n\nwith col2:\n st.markdown(\"
    \", unsafe_allow_html=True)\n for ingredient in ingredients[len(ingredients)//3:2*len(ingredients)//3]:\n try:\n img = Image.open(f\"images/{ingredient}.png\")\n selected = ingredient in selected_ingredients\n if st.checkbox(f\"{ingredient}\", key=ingredient, value=selected):\n if selected:\n selected_ingredients.remove(ingredient)\n else:\n selected_ingredients.append(ingredient)\n st.image(img, width=50, use_column_width=True)\n except:\n selected = ingredient in selected_ingredients\n if st.checkbox(f\"{ingredient}\", key=ingredient, value=selected):\n if selected:\n selected_ingredients.remove(ingredient)\n else:\n selected_ingredients.append(ingredient)\n img = Image.open(\"images/nopic.png\")\n st.image(img, width=50, use_column_width=True)\n st.markdown(\"
    \", unsafe_allow_html=True)\n\nwith col3:\n st.markdown(\"
    \", unsafe_allow_html=True)\n for ingredient in ingredients[2*len(ingredients)//3:]:\n try:\n img = Image.open(f\"images/{ingredient}.png\")\n selected = ingredient in selected_ingredients\n if st.checkbox(f\"{ingredient}\", key=ingredient, value=selected):\n if selected:\n selected_ingredients.remove(ingredient)\n else:\n selected_ingredients.append(ingredient)\n st.image(img, width=50, use_column_width=True)\n except:\n selected = ingredient in selected_ingredients\n if st.checkbox(f\"{ingredient}\", key=ingredient, value=selected):\n if selected:\n selected_ingredients.remove(ingredient)\n else:\n selected_ingredients.append(ingredient)\n img = Image.open(\"images/nopic.png\")\n st.image(img, width=50, use_column_width=True)\n st.markdown(\"
    \", unsafe_allow_html=True)\n\nif st.button(\"Make sushi 🔪\"):\n st.markdown(\"
    \", unsafe_allow_html=True)\n\n if selected_ingredients:\n bar = st.progress(0)\n status_text = st.empty()\n\n bar.progress(10)\n status_text.text(\"🍣 Deciding what type of sushi to make...\")\n time.sleep(2)\n\n bar.progress(30)\n status_text.text(\"🍣 Looking for inspiration from Jiro Ono sensei...\")\n time.sleep(2)\n\n bar.progress(50)\n status_text.text(\"🍣 Finding the freshest fish at the Tsukiji fish market...\")\n time.sleep(2)\n\n bar.progress(70)\n status_text.text(\"🍣 Preparing the rice with a secret family recipe...\")\n time.sleep(2)\n\n bar.progress(90)\n status_text.text(\"🍣 Perfectly rolling the sushi with years of practice...\")\n time.sleep(2)\n\n if len(selected_ingredients) == len(ingredients):\n st.write(\"Wow! You have selected all possible ingredients. Looks like you're either really hungry or extremely interested in sushi making!\")\n\n st.write(\"You can make the following rolls:\")\n rolls = get_rolls(selected_ingredients)\n\n if rolls:\n for roll in rolls:\n st.write(\"---\")\n st.write(roll)\n\n try:\n roll_img = Image.open(f\"images/{roll}.png\")\n st.image(roll_img, width=100)\n except FileNotFoundError:\n roll_img = Image.open(\"images/nopic.png\")\n st.image(roll_img, width=100)\n\n st.write(f\"Ingredients: {', '.join(sushi_rolls[roll])}\")\n\n bar.progress(100)\n status_text.text(\"🍣 Presenting the stunning sushi creations!\")\n else:\n st.write(\"You cannot make any sushi rolls. Please select more ingredients.\")\n roll_img = Image.open(\"images/nopic.png\")\n st.image(roll_img, width=100)\n else:\n st.write(\"Please select ingredients.\")\n \n st.write(\"---\")\n \n # Select a random funfact\n random_index = random.randint(0, len(funfacts) - 1)\n selected_funfact = funfacts[random_index]\n\n # Display the selected funfact\n st.write(\"Did you know:\")\n st.write(selected_funfact)\n \n st.write(\"---\")\n","repo_name":"DominikDawiec/Sushi-Roll-App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14899221681","text":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport datetime\nimport os\nimport sys\npath = os.path.abspath('../..')\nsys.path.insert(0, path)\n\n\n# -- pyvista configuration ---------------------------------------------------\nimport pyvista\nimport numpy as np\n# Manage errors\npyvista.set_error_output_file('errors.txt')\n# Ensure that offscreen rendering is used for docs generation\npyvista.OFF_SCREEN = True # Not necessary - simply an insurance policy\npyvista.BUILDING_GALLERY = True # necessary when building the sphinx gallery\n# Preferred plotting style for documentation\npyvista.set_plot_theme('document')\npyvista.rcParams['window_size'] = np.array([1024, 768]) * 2\n\n# -- Automatic Doc Pages Generation ------------------------------------------\n\n\nimport omfvista # for documenting\nsys.path.insert(0, '/Users/bane/Documents/OpenGeoVis/Software/gendocs/')\nfrom gendocs import Generator\n\n\nappend_material = \"\"\"\n\n.. toctree::\n :maxdepth: 2\n :caption: Examples\n :hidden:\n\n examples/index\n\n\"\"\"\n\n# Automatically generate documentaion pages\nGenerator().DocumentPackages([omfvista],\n '../../README.rst',\n showprivate=True,\n notify=False,\n append_material=append_material,\n )\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'omfvista'\nyear = datetime.date.today().year\ncopyright = \"2019-{:d}, Bane Sullivan\".format(year)\nauthor = 'Bane Sullivan'\n\n# The short X.Y version\nversion = omfvista.__version__\n# The full version, including alpha/beta/rc tags\nrelease = omfvista.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinxcontrib.napoleon',\n 'sphinx_copybutton',\n 'notfound.extension',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.extlinks',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gendocsdoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'omfvista.tex', 'omfvista Documentation',\n 'Bane Sullivan', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'omfvista', 'omfvista Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'omfvista', 'omfvista Documentation',\n author, 'omfvista', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None,\n 'https://docs.pyvista.org': None,\n 'https://omf.readthedocs.io/en/latest/': None,\n }\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Sphinx Gallery Options\nfrom sphinx_gallery.sorting import FileNameSortKey\n\nsphinx_gallery_conf = {\n # path to your examples scripts\n \"examples_dirs\": [\n \"../../examples/\",\n ],\n # path where to save gallery generated examples\n \"gallery_dirs\": [\"examples\"],\n # Patter to search for example files\n \"filename_pattern\": r\"\\.py\",\n # Remove the \"Download all examples\" button from the top level gallery\n \"download_all_examples\": False,\n # Sort gallery example by file name instead of number of lines (default)\n \"within_subsection_order\": FileNameSortKey,\n # directory where function granular galleries are stored\n \"backreferences_dir\": None,\n # Modules for which function level galleries are created. In\n \"doc_module\": \"omfvista\",\n \"image_scrapers\": (pyvista.Scraper(), 'matplotlib'),\n \"thumbnail_size\": (350, 350),\n}\n","repo_name":"OpenGeoVis/omfvista","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"22"} +{"seq_id":"20476818406","text":"wallBlocks = {};\r\nstairBlocks = {};\r\ndoorBlocks = {};\r\nsupportBlocks ={}\r\nfenceBlocks = {}\r\nslabBlocks = {}\r\n\r\ndef init():\r\n\r\n #Tree\r\n wallBlocks[17] = {};\r\n wallBlocks[17][0] = [5,0]\r\n wallBlocks[17][1] = [5,1]\r\n wallBlocks[17][2] = [5,2]\r\n wallBlocks[17][3] = [5,3]\r\n wallBlocks[162] = {};\r\n wallBlocks[162][0] = [5,4]\r\n wallBlocks[162][1] =[5,5];\r\n\r\n supportBlocks[17] = {};\r\n supportBlocks[17][0] = [17,0]\r\n supportBlocks[17][1] = [17,1]\r\n supportBlocks[17][2] = [17,2]\r\n supportBlocks[17][3] = [17,3]\r\n supportBlocks[162] = {};\r\n supportBlocks[162][0] = [160,0]\r\n supportBlocks[162][1] =[161,1];\r\n\r\n fenceBlocks[17] = {};\r\n fenceBlocks[17][0] = [85,0]\r\n fenceBlocks[17][1] = [188,0]\r\n fenceBlocks[17][2] = [189,0]\r\n fenceBlocks[17][3] = [190,0]\r\n fenceBlocks[162] = {};\r\n fenceBlocks[162][0] = [191,0]\r\n fenceBlocks[162][1] =[192,0];\r\n\r\n stairBlocks[17]={};\r\n stairBlocks[17][0] = 53\r\n stairBlocks[17][1] = 134\r\n stairBlocks[17][2] = 135\r\n stairBlocks[17][3] = 136\r\n stairBlocks[162] = {};\r\n stairBlocks[162][0] = 163\r\n stairBlocks[162][1] =164;\r\n\r\n doorBlocks[17] = {};\r\n doorBlocks[17][0] = 64\r\n doorBlocks[17][1] = 193\r\n doorBlocks[17][2] = 194\r\n doorBlocks[17][3] = 195\r\n doorBlocks[162] = {};\r\n doorBlocks[162][0] = 196\r\n doorBlocks[162][1] = 197;\r\n\r\n #stone\r\n wallBlocks[1] = {};\r\n wallBlocks[1][0] = [43,5]\r\n wallBlocks[4] = {}\r\n wallBlocks[4][0] = [43,3]\r\n wallBlocks[24] = {};\r\n wallBlocks[24][0] = [24,2]\r\n\r\n supportBlocks[1] = {};\r\n supportBlocks[1][0] = [1,0]\r\n supportBlocks[4] = {}\r\n supportBlocks[4][0] = [4,0]\r\n supportBlocks[24] = {};\r\n supportBlocks[24][0] = [24,1] \r\n\r\n stairBlocks[1] = {};\r\n stairBlocks[1][0] = 109\r\n stairBlocks[4] = {}\r\n stairBlocks[4][0] = 67\r\n stairBlocks[24] = {};\r\n stairBlocks[24][0] = 128\r\n\r\n slabBlocks[1] = {};\r\n slabBlocks[1][0] = [44,0]\r\n slabBlocks[4] = {}\r\n slabBlocks[4][0] = [44,3]\r\n slabBlocks[24] = {};\r\n slabBlocks[24][0] = [44,1]\r\n\r\ndef mainHouseBox(width, length,material1,material2):\r\n\r\n (m1,d1)=material1;\r\n (m2,d2)=material2;\r\n wallId = wallBlocks[m1][d1][0];\r\n wallValue = wallBlocks[m1][d1][1];\r\n supportId = supportBlocks[m2][d2][0];\r\n supportValue = supportBlocks[m2][d2][1]\r\n stair = stairBlocks[m2][d2];\r\n door = doorBlocks[m1][d1];\r\n \r\n lv = int((max(width,length)-2) / 5);\r\n\r\n lv = min(lv,3);\r\n \r\n house = [];\r\n for i in range(width):\r\n house.append([]);\r\n for j in range(length):\r\n house[i].append([]);\r\n for k in range(lv):\r\n \r\n house[i][j].append([0,0]);\r\n house[i][j].append([0,0]);\r\n house[i][j].append([0,0]);\r\n house[i][j].append([0,0]);\r\n house[i][j].append([0,0]);\r\n \r\n subWidth = 5;\r\n subLength = (length - 1)/2;\r\n\r\n w1 = subWidth + 1;\r\n w2 = width - subWidth - 1\r\n\r\n l1 = length - subLength;\r\n\r\n #Ground\r\n for x in xrange(1, width - 1):\r\n for z in xrange( 1, length - 1):\r\n if z > l1:\r\n if w1 <= x < w2:\r\n continue\r\n house[x][z][0][0]=supportId;\r\n house[x][z][0][1]=supportValue;\r\n \r\n table = ((1,1),(1,length-2),(width-2,1),(width-2,length-2),\r\n (w1-1,length-2),(w2,length-2),(w1-1,l1),(w2,l1));\r\n\r\n\r\n for l in range(lv):\r\n #eight support\r\n for (x,z) in table:\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=supportId;\r\n house[x][z][l*4+he][1]=supportValue;\r\n \r\n #wall\r\n for x in xrange(2, width - 2):\r\n z = 1\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if x % 2 == 1:\r\n house[x][z][l*4+2][0]=20\r\n\r\n for x in (1,width-2):\r\n for z in xrange(2,length-2):\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if z % 2 == 1:\r\n house[x][z][l*4+2][0]=20;\r\n \r\n for x in xrange(2, w1-1):\r\n z = length - 2;\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if x % 2 == 1:\r\n house[x][z][l*4+2][0]=20;\r\n\r\n \r\n for x in xrange(w2+1, width - 2):\r\n z = length - 2;\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if x % 2 == 1:\r\n house[x][z][l*4+2][0]=20;\r\n \r\n for x in xrange(w1,w2):\r\n z = l1\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if x % 2 == 1:\r\n house[x][z][l*4+2][0]=20;\r\n\r\n for z in xrange(l1+1, length - 2):\r\n for x in (w1-1,w2):\r\n for he in xrange(1,4):\r\n house[x][z][l*4+he][0]=wallId;\r\n house[x][z][l*4+he][1]=wallValue\r\n if z % 2 == 1:\r\n house[x][z][l*4+2][0]=20;\r\n \r\n #floor\r\n for x in xrange(1, width - 1):\r\n for z in xrange(1, length - 1):\r\n if z > l1:\r\n if w1 <= x < w2:\r\n continue\r\n house[x][z][l*4+4][0]=supportId;\r\n house[x][z][l*4+4][1]=supportValue;\r\n\r\n for x in xrange(1, width-1):\r\n z = 0;\r\n house[x][z][l*4+4][0]=stair;\r\n house[x][z][l*4+4][1]=0;\r\n z = 0 + length - 1;\r\n if x == w1:\r\n continue;\r\n if x == w2-1:\r\n continue;\r\n if w1 <= x < w2:\r\n house[x][l1+1][l*4+4][0]=stair;\r\n house[x][l1+1][l*4+4][1]=2;\r\n else:\r\n house[x][length-1][l*4+4][0]=stair;\r\n house[x][length-1][l*4+4][1]=2;\r\n for z in xrange(1, length - 1):\r\n x = 0;\r\n house[x][z][l*4+4][0]=stair;\r\n house[x][z][l*4+4][1]=3;\r\n x = width - 1;\r\n house[x][z][l*4+4][0]=stair;\r\n house[x][z][l*4+4][1]=1;\r\n\r\n for z in xrange(l1+1, length - 1):\r\n x = w1;\r\n house[x][z][l*4+4][0]=stair;\r\n house[x][z][l*4+4][1]=1;\r\n x = w2-1;\r\n house[x][z][l*4+4][0]=stair;\r\n house[x][z][l*4+4][1]=3;\r\n \r\n \r\n #door\r\n x = (width -1)/2;\r\n z = l1;\r\n house[x][z][1][0]=door;\r\n house[x][z][1][1]=0;\r\n house[x][z][2][0]=door;\r\n house[x][z][2][1]=8;\r\n \r\n house[x][z+1][0][0]=stair;\r\n house[x][z+1][0][1]=2;\r\n\r\n z=z+2;\r\n while (z0 else -1)","repo_name":"shyjnnn/2022-codingtest-study","sub_path":"jiyoung풀이/boj_2644촌수계산.py","file_name":"boj_2644촌수계산.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73812566135","text":"score = []\nfor _ in range(10):\n score.append(int(input()))\n \ntotal = 0\nidx = 9\nfor i in range(10):\n total += score[i]\n if total >= 100:\n idx = i\n break\n\ndiff_bf = abs(total - score[idx] - 100)\ndiff_af = abs(total - 100)\nif diff_bf == diff_af:\n result = total\nelif diff_af < diff_bf:\n result = total\nelse:\n result = total - score[idx]\n\nprint(result)","repo_name":"lepetitprinz/coding-challenge-auto-push","sub_path":"백준/Bronze/2851. 슈퍼 마리오/슈퍼 마리오.py","file_name":"슈퍼 마리오.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"254888667","text":"import pygame as pg \nimport random\n\n#define some color\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\ngray = (105, 105, 105)\ndarkgray = (55, 55, 55)\nred = (255, 0, 0)\ndarkred = (180, 0, 0)\ngreen = (0, 255, 0)\ndarkgreen = (0, 180, 0)\nblue = (0, 0, 255)\ndarkblue = (0, 0, 180)\n\n#width and heigh for window is almost always 800* 600\ndisplay_width = 800\ndisplay_height = 600\n\nclass Hero(pg.sprite.Sprite):\n def __init__(self):\n pg.sprite.Sprite.__init__(self)\n self.center = (800 // 2, 600 // 2)\n self.radius = 30\n self.destination = None\n\n def setDest(self,destination):\n self.destination = destination\n\n def move(self):\n # self.center\n pass\n\n \n\n\ndef main():\n pg.init()\n\n gameOver = False\n pg.display.set_caption('Dota 2 by Anh')\n windowSurface = pg.display.set_mode((display_width, display_height))\n\n mirana = Hero()\n\n while not gameOver:\n windowSurface.fill(gray)\n\n for event in pg.event.get():\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_x:#press x always quit the game\n gameOver = True\n\n if event.type == pg.MOUSEBUTTONDOWN:\n \n if event.button == 3:\n mousePos = pg.mouse.get_pos()\n # print(mirana.destination, mirana.center)\n mirana.setDest(mousePos)\n \n print(mirana.destination, mirana.center)\n\n\n # while(mirana.center != mirana.destination):\n # mirana.move()\n\n\n pg.draw.circle(windowSurface, blue, mirana.center, mirana.radius)\n \n\n\n pg.display.update()\n\n\nif __name__ == '__main__':\n main()","repo_name":"anhdtran95/Python-Learning","sub_path":"pygame/Dota 2/Hero Model.py","file_name":"Hero Model.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72357324856","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nfrom ml_helpers import evaluate_model\n\nfrom svm_author_id import best_svc\nfrom dt_author_id import best_dt\nfrom adaboost import best_adaboost\nfrom random_forest import best_random_forest\nfrom sklearn.naive_bayes import GaussianNB\n\n\n\ntop_accuracy = 0\nbest_model = {}\n\n# Gaussian Naive Bayes\nnb_model = GaussianNB()\nprint()\nprint(\"GAUSSIAN NAIVE BAYES CLASSIFIER\")\nprint()\ntraining_time, predict_time, nb_accuracy = evaluate_model(nb_model, features_train, features_test, labels_train, labels_test)\nprint(f\"Training time: {round(training_time, 3)}s\")\nprint(f\"Prediction time: {round(predict_time, 3)}s\")\nprint(f\"Accuracy: {nb_accuracy}\")\n\n# Support Vector Machine\nkernels = [\"linear\", \"rbf\"]\nCs = [0.01, 0.1, 1, 10, 100, 1000, 10000]\ngammas = [\"scale\", \"auto\", 0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10]\nsvc, svc_params = best_svc(kernels, Cs, gammas, features_train, features_test, labels_train, labels_test)\n\n# Decision Tree\nmin_splits = [2, 10, 20, 40, 50, 100]\nrandom_state = 25\ndt, dt_params = best_dt(min_splits, random_state, features_train, features_test, labels_train, labels_test)\n\n# Random Forest\nn_estimators_list = [5, 10, 20, 50, 80, 100, 200, 500, 1000]\nmin_samples_split = [2, 10, 20, 40, 50, 100]\nrandom_state = 25\nrand_forest, rand_forest_params = best_random_forest(n_estimators_list, min_samples_split, random_state, features_train, features_test, labels_train, labels_test)\n\n# AdaBoost\nn_estimators_list = [5, 10, 20, 50, 80, 100, 200, 500, 1000]\nlearning_rates = [0.001, 0.01, 0.05, 0.1, 0.25, 0.5, 1]\nrandom_state = 25\nadaboost, adaboost_params = best_adaboost(n_estimators_list, learning_rates, random_state, features_train, features_test, labels_train, labels_test)\n\nmodels = { adaboost: adaboost_params,\n rand_forest: rand_forest_params,\n dt: dt_params,\n svc: svc_params,\n nb_model: {\"accuracy\": nb_accuracy}}\n\nfor model, parameters in models.items():\n if parameters[\"accuracy\"] > top_accuracy:\n top_accuracy = parameters[\"accuracy\"]\n best_model = {\"model\": model, \"params\": parameters}\n\nprint(\"Best model used: \")\nprint(f\"Model = {best_model['model']}\")\nprint(f\"Parameters = {best_model['params']}\")\ntraining_time, predict_time, accuracy = evaluate_model(best_model[\"model\"], features_train, features_test, labels_train, labels_test)\nprint(\"-------------------------------------------------\")\nprint(f\"Training time: {round(training_time, 3)}s\")\nprint(f\"Prediction time: {round(predict_time, 3)}s\")\nprint(f\"Accuracy: {accuracy}\")\nprint()\n\nclf = best_model[\"model\"]","repo_name":"eds106/hackathon","sub_path":"comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29796143261","text":"import numpy as np\nimport torch\nimport os\n\n\ndef compute_accuracy(prediction, gtruth):\n assert len(prediction) == len(gtruth)\n correct = np.sum(np.array(prediction) == np.array(gtruth))\n return correct / len(prediction)\n\n\ndef save_model(model, output_dir, ep_num):\n model_to_save = (\n model.module if hasattr(model, 'module') else model\n )\n model_name = 'model_' + str(ep_num) + '.bin'\n torch.save(model_to_save.state_dict(), os.path.join(output_dir, model_name))\n","repo_name":"salesforce/ConvSumm","sub_path":"CODS/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"22"} +{"seq_id":"18904884709","text":"import torch\nimport spconv.pytorch as spconv\nimport os\nimport sys\nfrom copy import deepcopy\np = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))\nif p not in sys.path:\n sys.path.append(p)\n\nfrom modules.netvlad import NetVLADLoupe\n\n\ndef attention(query, key, value):\n dim = query.shape[1]\n scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5\n prob = torch.nn.functional.softmax(scores, dim=-1)\n return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob\n\n\ndef MLP(channels: list, do_bn=True):\n \"\"\" Multi-layer perceptron \"\"\"\n n = len(channels)\n layers = []\n for i in range(1, n):\n layers.append(\n torch.nn.Conv1d(channels[i - 1],\n channels[i],\n kernel_size=1,\n bias=True))\n if i < (n - 1):\n if do_bn:\n layers.append(torch.nn.InstanceNorm1d(channels[i]))\n layers.append(torch.nn.ReLU())\n return torch.nn.Sequential(*layers)\n\n\nclass MultiHeadedAttention(torch.nn.Module):\n \"\"\" Multi-head attention to increase model expressivitiy \"\"\"\n\n def __init__(self, num_heads: int, d_model: int):\n super().__init__()\n assert d_model % num_heads == 0\n self.dim = d_model // num_heads\n self.num_heads = num_heads\n self.merge = torch.nn.Conv1d(d_model, d_model, kernel_size=1)\n self.proj = torch.nn.ModuleList(\n [deepcopy(self.merge) for _ in range(3)])\n\n def forward(self, query, key, value):\n batch_dim = query.size(0)\n query, key, value = [\n l(x).view(batch_dim, self.dim, self.num_heads, -1)\n for l, x in zip(self.proj, (query, key, value))\n ]\n x, _ = attention(query, key, value)\n return self.merge(x.contiguous().view(batch_dim,\n self.dim * self.num_heads, -1))\n\n\nclass AttentionalPropagation(torch.nn.Module):\n\n def __init__(self, feature_dim: int, num_heads: int):\n super().__init__()\n self.attn = MultiHeadedAttention(num_heads, feature_dim)\n self.mlp = MLP([feature_dim * 2, feature_dim * 2, feature_dim])\n torch.nn.init.constant_(self.mlp[-1].bias, 0.0)\n\n def forward(self, x, source):\n message = self.attn(x, source, source)\n return self.mlp(torch.cat([x, message], dim=1))\n\n\nclass BottleneckSparse2D(torch.nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size) -> None:\n super(BottleneckSparse2D, self).__init__()\n self.conv = spconv.SparseSequential(\n spconv.SubMConv2d(in_channels, out_channels // 4, 1),\n torch.nn.BatchNorm1d(out_channels // 4), torch.nn.ReLU(),\n spconv.SubMConv2d(out_channels // 4, out_channels // 4,\n kernel_size),\n torch.nn.BatchNorm1d(out_channels // 4), torch.nn.ReLU(),\n spconv.SubMConv2d(out_channels // 4, out_channels, 1),\n torch.nn.BatchNorm1d(out_channels))\n self.shotcut_conv = spconv.SparseSequential(\n spconv.SubMConv2d(in_channels, out_channels, 1),\n torch.nn.BatchNorm1d(out_channels))\n self.relu = spconv.SparseSequential(torch.nn.ReLU())\n\n def forward(self, x):\n y = self.conv(x)\n shortcut = self.shotcut_conv(x)\n y = spconv.functional.sparse_add(y, shortcut)\n return self.relu(y)\n\n\nclass FeatureFuse(torch.nn.Module):\n\n def __init__(self, feature_dim, num_heads=1) -> None:\n super(FeatureFuse, self).__init__()\n self.mutihead_attention = AttentionalPropagation(\n feature_dim, num_heads)\n def forward(self, x, source):\n return (x + self.mutihead_attention(x, source))\n\n\nclass backbone(torch.nn.Module):\n def __init__(self, inchannels=64) -> None:\n super(backbone, self).__init__()\n self.dconv_down1 = BottleneckSparse2D(inchannels, inchannels * 2, 11)\n self.dconv_down1_1 = BottleneckSparse2D(inchannels * 2, inchannels * 2,\n 11)\n self.dconv_down2 = BottleneckSparse2D(inchannels * 2, inchannels * 4,\n 7)\n self.dconv_down2_1 = BottleneckSparse2D(inchannels * 4, inchannels * 4,\n 7)\n self.dconv_down3 = BottleneckSparse2D(inchannels * 4, inchannels * 8,\n 5)\n self.dconv_down3_1 = BottleneckSparse2D(inchannels * 8, inchannels * 8,\n 5)\n self.dconv_down4 = spconv.SubMConv2d(inchannels * 8,\n inchannels * 16,\n 3,\n bias=True)\n self.maxpool1 = spconv.SparseMaxPool2d(3, 2, 1, indice_key='up1')\n self.maxpool2 = spconv.SparseMaxPool2d(3, 2, 1, indice_key='up2')\n self.maxpool3 = spconv.SparseMaxPool2d(3, 2, 1, indice_key='up3')\n\n def forward(self, x):\n x = spconv.SparseConvTensor.from_dense(x)\n conv1 = self.dconv_down1(x)\n x = self.maxpool1(conv1)\n x = self.dconv_down1_1(x)\n conv2 = self.dconv_down2(x)\n x = self.maxpool2(conv2)\n x = self.dconv_down2_1(x)\n conv3 = self.dconv_down3(x)\n x = self.maxpool3(conv3)\n x = self.dconv_down3_1(x)\n x = self.dconv_down4(x)\n return x.dense()\n\n\nclass vlad_head(torch.nn.Module):\n def __init__(self) -> None:\n super(vlad_head, self).__init__()\n self.conv = torch.nn.Sequential(torch.nn.Conv2d(512, 512, 3, 1, 1),\n torch.nn.ReLU6(),\n torch.nn.Conv2d(512, 512, 3, 1, 1))\n self.vlad = NetVLADLoupe(\n feature_size=512,\n max_samples=1024,\n cluster_size=32,\n output_dim=1024,\n gating=True,\n add_batch_norm=True,\n is_training=True)\n\n def forward(self, x):\n x = self.conv(x)\n return self.vlad(x.reshape(x.shape[0], x.shape[1], -1, 1))\n\n\nclass overlap_head(torch.nn.Module):\n def __init__(self, inchannels=64) -> None:\n super(overlap_head, self).__init__()\n self.fusenet16 = FeatureFuse(inchannels * 16)\n self.last_conv16 = spconv.SparseSequential(\n spconv.SubMConv2d(inchannels * 16, inchannels * 8, 3, bias=True),\n torch.nn.BatchNorm1d(inchannels * 8), torch.nn.ReLU(),\n spconv.SubMConv2d(inchannels * 8, 1, 3, bias=True))\n\n def forward(self, x):\n x40 = spconv.SparseConvTensor.from_dense(x)\n feature = x40.features\n mask0 = (x40.indices[:, 0] == 0)\n mask1 = (x40.indices[:, 0] == 1)\n fea1 = self.fusenet16(feature[mask0].permute(1, 0).unsqueeze(0),\n feature[mask1].permute(\n 1, 0).unsqueeze(0)).squeeze(0).permute(1, 0)\n fea2 = self.fusenet16(feature[mask1].permute(1, 0).unsqueeze(0),\n feature[mask0].permute(\n 1, 0).unsqueeze(0)).squeeze(0).permute(1, 0)\n x40 = x40.replace_feature(torch.cat([fea1, fea2], dim=0))\n out4 = self.last_conv16(x40)\n out4 = out4.replace_feature(torch.sigmoid(out4.features))\n score0 = out4.features[mask0]\n score1 = out4.features[mask1]\n\n # im0 = torch.zeros(x40.spatial_shape).float().to(fea1.device)\n # indi0 = x40.indices[mask0].long()\n # im0[indi0[:,1], indi0[:,2]] = score0.reshape(-1)\n # plt.subplot(1, 2, 1)\n # plt.imshow(im0.detach().cpu().numpy(), cmap = \"Reds\")\n # # plt.show()\n\n # im1 = torch.zeros(x40.spatial_shape).float().to(fea1.device)\n # indi1 = x40.indices[mask1].long()\n # im1[indi1[:,1], indi1[:,2]] = score1.reshape(-1)\n # plt.subplot(1, 2, 2)\n # plt.imshow(im1.detach().cpu().numpy(), cmap = \"Reds\")\n # plt.show()\n\n score_sum0 = torch.sum(score0) / len(score0)\n score_sum1 = torch.sum(score1) / len(score1)\n return (score_sum0 + score_sum1) / 2., out4\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"fcchit/OverlapNetVLAD","sub_path":"modules/overlapnetvlad.py","file_name":"overlapnetvlad.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"22"} +{"seq_id":"71212932857","text":"import gym\nimport numpy as np\nfrom gym import spaces\nimport gym_hexario.game_module as game_module\n\n\nLOCAL = 'local'\nGLOBAL = 'global'\nDISCRETE = 'discrete'\nCONTINUOUS = 'continuous'\nFEATURES = 'features'\nPIXEL = 'pixels'\nFULL = 'full'\n\n\nclass HexarioEnv(gym.Env):\n def __init__(self,\n n_agents=1,\n n_random_agents=0,\n map_radius=10,\n max_steps=500,\n perspective=LOCAL,\n observation_space=FEATURES,\n observation_radius=5,\n px_observation_dims=(84, 84),\n action_space=DISCRETE,\n velocity=0.1,\n reward_timestep=0,\n reward_capture=1,\n reward_claim=0,\n reward_kill=100):\n \"\"\"\n OpenAI gym environment for the game Hexar.io\n\n :param n_agents: number of agents\n :param n_random_agents: number of random agents. These are environment internal and cannot be\n controlled\n :param map_radius: size of the arena\n :param max_steps: maximum number of steps the agents are allowed to take in one epoch\n :param perspective [local|global]\n :param observation_space: [features|pixel]\n :param observation_radius:\n :param px_observation_dims:\n :param action_space: [discrete|continuous]\n :param reward_timestep:\n :param reward_capture:\n :param reward_claim:\n :param reward_kill:\n \"\"\"\n\n # do some integrity checks\n assert n_agents > 0 and isinstance(\n n_agents, int), f'invalid value for parameter n_agents: {n_agents}'\n assert n_random_agents >= 0 and isinstance(\n n_random_agents, int), f'invalid value for parameter n_random_agents: {n_random_agents}'\n assert map_radius > 2 and isinstance(\n map_radius, int), f'invalid value for parameter map_radius: {map_radius}'\n assert max_steps > 0 and isinstance(\n max_steps, int), f'invalid value for parameter max_steps: {max_steps}'\n assert observation_radius == FULL or (observation_radius > 1 and isinstance(observation_radius, int)), \\\n f'invalid argument observation_radius: {observation_radius}'\n assert perspective in (\n LOCAL, GLOBAL), f'invalid value for parameter perspective: {perspective}'\n assert observation_space in (FEATURES, PIXEL), \\\n f'invalid value for parameter observation_space: {observation_space}'\n assert action_space in (\n DISCRETE, CONTINUOUS), f'invalid value for parameter action_space: {action_space}'\n assert isinstance(px_observation_dims, tuple) and len(px_observation_dims) == 2, \\\n f'invalid value for parameter pixel_observation_dimensions: {px_observation_dims}'\n assert not (observation_space == FEATURES and action_space == CONTINUOUS), \\\n f'invalid parameters: feature observation space and continuous action space are incompatible'\n\n # TODO maybe find a nice way to implement this\n # TODO the question is when to reset the agents\n assert n_agents == 1 or n_random_agents == 0, \\\n f'currently, multiple agents with multiple random agents is not supported!'\n\n assert isinstance(velocity, float) and velocity > 0 and velocity <= 1, \\\n f'invalid parameter velocity: {velocity}'\n\n self.n_agents = n_agents\n self.n_random_agents = n_random_agents\n self.map_radius = map_radius\n self.max_steps = max_steps\n self.perspective = perspective\n self.observation_radius = 2 * self.map_radius if observation_radius == FULL else observation_radius\n self.px_observation_dims = px_observation_dims\n self.velocity = velocity\n\n # these variable names should not be used because they are for gym en\n self.observation_type = observation_space\n self.action_type = action_space\n\n self.reward_timestep = reward_timestep\n self.reward_capture = reward_capture\n self.reward_claim = reward_claim\n self.reward_kill = reward_kill\n\n self.max_capture = self.__get_num_tiles()\n # keep track of the number of steps the agents already\n # have taken. Reset an agent when it has reached the max_steps\n self.__taken_steps = [0 for _ in range(n_agents)]\n\n # check the action spaces\n if self.action_type == DISCRETE:\n self.action_space = spaces.Discrete(6)\n else:\n self.action_space = spaces.Box(low=0., high=2 * np.pi, shape=(1,))\n\n # check the obs space\n if self.observation_type == FEATURES:\n if self.perspective == LOCAL:\n self.obs_size = 1 + 3 * self.observation_radius \\\n * (self.observation_radius - 1)\n else:\n self.obs_size = 1 + 3 * self.map_radius \\\n * (self.map_radius - 1)\n\n self.observation_space = spaces.Box(low=0., high=10.,\n shape=(self.obs_size,),\n dtype=np.float32)\n # override own private method\n self.__get_observation = self.__get_discrete_observation\n else:\n # screen pixels obs\n self.obs_size = self.px_observation_dims[0] * \\\n self.px_observation_dims[1] * 3\n self.observation_space = spaces.Box(low=0, high=255, dtype=np.float32,\n shape=(*self.px_observation_dims, 3))\n # override own private method\n self.__get_observation = self.__get_pixel_observation\n\n # check number of agents and set reset and step accordingly\n if self.n_agents == 1:\n self.reset = self.__reset_single_agent\n self.step = self.__step_single_agent\n else:\n self.reset = self.__reset_multi_agent\n self.step = self.__step_multi_agent\n\n # access layer for the c++ implementation\n self.__game = game_module.setup(self.map_radius,\n self.n_agents + self.n_random_agents,\n self.perspective == LOCAL,\n self.observation_type == FEATURES,\n self.action_type == DISCRETE,\n self.observation_radius,\n self.px_observation_dims[0],\n self.px_observation_dims[1],\n self.velocity)\n\n def __del__(self):\n del self.__game\n\n def render(self, mode='human'):\n \"\"\"displays the whole board on screen\"\"\"\n return game_module.show(self.__game)\n\n def reset(self, agents=None):\n \"\"\" gets overridden \"\"\"\n raise NotImplemented\n\n def step(self, action):\n \"\"\" gets overridden \"\"\"\n raise NotImplemented\n\n def __get_num_tiles(self):\n return 1 + 3 * self.map_radius * (self.map_radius - 1)\n\n def __get_observation(self, agent_id):\n \"\"\" gets overridden \"\"\"\n raise NotImplementedError\n\n def __reset_single_agent(self):\n \"\"\" \"\"\"\n self.__taken_steps[0] = 0\n game_module.reset_player(self.__game, 0)\n\n # reset the random agents\n for agent_id in range(1, self.n_random_agents + 1):\n game_module.reset_player(self.__game, agent_id)\n\n return self.__get_observation(0)\n\n def __reset_multi_agent(self, agents=()):\n \"\"\" \"\"\"\n n_obs = []\n\n if agents == 'all':\n agents = [i for i in range(self.n_agents)]\n\n for agent_id in agents:\n self.__taken_steps[agent_id] = 0\n game_module.reset_player(self.__game, agent_id)\n\n # TODO what happens to the random agents?\n\n for agent_id in agents:\n n_obs.append(self.__get_observation(agent_id))\n\n return n_obs\n\n def __step_single_agent(self, action):\n \"\"\" \"\"\"\n actions = np.array([action], dtype=np.float32).flatten()\n assert len(actions) == 1\n\n reward = self.reward_timestep\n\n captures_before = game_module.get_num_captures(self.__game, 0)\n claims_before = game_module.get_num_claims(self.__game, 0)\n kills_before = game_module.get_num_kills(self.__game, 0)\n\n if self.n_random_agents > 0:\n # decide the actions for the random agents\n actions = np.concatenate([actions,\n [self.__get_random_agent_action(i) for i in range(1, self.n_random_agents + 1)]])\n actions = actions.flatten().astype(np.float32)\n\n game_module.take_actions(self.__game, actions)\n self.__taken_steps[0] += 1\n\n # check which players are still in the game\n dones = [game_module.is_dead(self.__game, i) for i in range(1 + self.n_random_agents)]\n is_dead = dones[0]\n is_winner = game_module.is_winner(self.__game, 0)\n captures_after = game_module.get_num_captures(self.__game, 0)\n claims_after = game_module.get_num_claims(self.__game, 0)\n kills_after = game_module.get_num_kills(self.__game, 0)\n\n # check if the agent is the last one alive\n all_others_dead = True\n if self.n_random_agents > 0:\n all_others_dead = False not in dones[1:]\n\n if not is_dead:\n reward += self.reward_capture * (captures_after - captures_before)\n reward += self.reward_claim * max(claims_after - claims_before, 0)\n reward += self.reward_kill * max(kills_after - kills_before, 0)\n\n info = dict(\n num_kills=kills_after,\n num_captures=captures_after,\n num_claims=claims_after,\n coverage=captures_after / self.max_capture,\n success=is_winner,\n distances=game_module.get_distances(self.__game, 0),\n all_others_dead=all_others_dead\n )\n\n done = is_dead or is_winner or (\n self.__taken_steps[0] == self.max_steps)\n observation = self.__get_observation(0)\n\n return observation, reward, done, info\n\n def __step_multi_agent(self, actions):\n \"\"\" an action must be passed for every agent \"\"\"\n assert len(actions) == self.n_agents, \"Error: number of \\\n given actions doesn't match number of agents in this game\"\n\n if isinstance(actions, list):\n actions = np.array(actions, dtype=np.float32)\n\n assert isinstance(actions, np.ndarray), \"Error: given actions \\\n are in the wrong format\"\n\n actions = actions.flatten()\n\n n_obs = []\n n_kills = []\n n_claims = []\n n_captures = []\n n_dones = []\n n_rewards = []\n n_infos = []\n\n # cache all the kills and captures\n for agent_id in range(self.n_agents):\n n_kills.append(game_module.get_num_kills(self.__game, agent_id))\n n_claims.append(game_module.get_num_claims(self.__game, agent_id))\n n_captures.append(\n game_module.get_num_captures(self.__game, agent_id))\n\n # take all the actions\n game_module.take_actions(self.__game, actions)\n\n # get the dones, kills, observations\n for agent_id in range(self.n_agents):\n self.__taken_steps[agent_id] += 1\n reward = self.reward_timestep\n\n is_dead = game_module.is_dead(self.__game, agent_id)\n is_winner = game_module.is_winner(self.__game, 0)\n\n kills = game_module.get_num_kills(self.__game, agent_id)\n claims = game_module.get_num_claims(self.__game, agent_id)\n captures = game_module.get_num_captures(self.__game, agent_id)\n\n if not is_dead:\n reward += self.reward_capture * \\\n (captures - n_captures[agent_id])\n reward += self.reward_claim * \\\n (claims - n_claims[agent_id])\n reward += self.reward_kill * (kills - n_kills[agent_id])\n\n info = dict(\n num_kills=kills,\n num_captures=captures,\n num_claims=claims,\n coverage=captures / self.max_capture,\n success=is_winner,\n distances=game_module.get_distances(self.__game, agent_id)\n )\n\n done = is_dead or is_winner or (\n self.__taken_steps[agent_id] == self.max_steps)\n obs = game_module.get_observation(self.__game, agent_id)\n\n n_dones.append(done)\n n_rewards.append(reward)\n n_obs.append(obs)\n n_infos.append(info)\n\n return n_obs, n_rewards, n_dones, n_infos\n\n def __get_discrete_observation(self, agent_id):\n \"\"\" \"\"\"\n return game_module.get_observation(self.__game, agent_id)\n\n def __get_pixel_observation(self, agent_id):\n \"\"\" \"\"\"\n observation = game_module.get_observation(self.__game, agent_id)\n\n return observation.reshape(\n self.px_observation_dims[1],\n self.px_observation_dims[0], 3)\n\n def __get_random_agent_action(self, random_agent_id):\n\n distances = game_module.get_distances(self.__game, random_agent_id)\n probs = np.array([1 if d > 1 else 0 for d in distances], dtype=np.float32)\n probs = probs / sum(probs)\n action = np.random.choice([0, 1, 2, 3, 4, 5], 1, p=probs)\n return action[0]\n","repo_name":"dhansmair/gym-hexario","sub_path":"src/gym_hexario/envs/hexario_env.py","file_name":"hexario_env.py","file_ext":"py","file_size_in_byte":13765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"32057002094","text":"import glob, os, random, sys\nimport threading, numpy as np\nimport datetime, random\nfrom rsync import ls\nimport select, util\n\ndir = ''\n\nif len(sys.argv) > 1 and sys.argv[1] == \"pi\":\n dir = \"/media/pi/Seagate Backup Plus Drive1/shows\"\n print ('pi')\nelse:\n dir = \"/media/burak/Seagate Backup Plus Drive/shows\"\n print ('acer')\n \ndirs,flist = ls(dir)\n\ndef fin(s,l): return np.any([x in l for x in s])\n\nplaylist = [f[0] for f in flist if fin(['.mp4','.mkv','.avi'], f[0])]\n\nprint ('count', len(playlist))\n\nplaylist = [f for f in playlist if 'SG-1' not in f]\nplaylist = [f for f in playlist if 'DS9' not in f]\n#playlist = [f for f in playlist if 'VOYAGER' not in f]\n#playlist = [f for f in playlist if 'Atlantis' not in f]\nplaylist = [f for f in playlist if 'The Next Generation' not in f]\nplaylist = [f for f in playlist if 'Enterprise' not in f]\nplaylist = [f for f in playlist if 'Lost' not in f]\nplaylist = [f for f in playlist if '/TLS/' not in f]\nplaylist = [f for f in playlist if 'BSG' not in f]\nplaylist = [f for f in playlist if 'Doctor' not in f]\n\nprint ('filterd count', len(playlist))\n\nidx = util.my_random(len(playlist))\n\nf = playlist[idx]\n\nprint (\"show idx selected\", idx)\n\ncmd = \"vlc '%s' -f \" % f\n\nprint (cmd)\n\nos.system(cmd)\n\n","repo_name":"osmanatam/kod","sub_path":"vidplay.py","file_name":"vidplay.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"74706013174","text":"class Solution(object):\n def maxNumberOfBalloons(self, text):\n mylistt = [0,0,0,0,0]\n for char in text:\n if char == \"b\":\n mylistt[0] += 1\n elif char == \"a\":\n mylistt[1] += 1\n elif char == \"l\":\n mylistt[2] += 0.5\n elif char == \"o\":\n mylistt[3] += 0.5\n elif char == \"n\":\n mylistt[4] += 1\n return int(min(mylistt))\n \n ","repo_name":"AnaniyaT/ananas","sub_path":"1189-maximum-number-of-balloons/1189-maximum-number-of-balloons.py","file_name":"1189-maximum-number-of-balloons.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43617139908","text":"A = input()\nN = len(A)\nS = [0] * 26\nfor a in A:\n S[ord(a) - ord('a')] += 1\nmod = 998244353\n\nfact = [1, 1]\nfactinv = [1, 1]\ninv = [0, 1]\n\n\ndef cmb(n, r):\n if (r < 0) or (n < r):\n return 0\n r = min(r, n - r)\n return (fact[n] * factinv[r] % mod) * factinv[n - r] % mod\n\n\nfor i in range(2, N + 1):\n fact.append((fact[-1] * i) % mod)\n inv.append((-inv[mod % i] * (mod // i)) % mod)\n factinv.append((factinv[-1] * inv[-1]) % mod)\n\n\nDP = [0] * (N + 1)\nDP[0] = 1\nfor i, value in enumerate(S, 1):\n dp = [0] * (N + 1)\n for j in range(N + 1):\n for k in range(min(value + 1, j + 1)):\n dp[j] += DP[j - k] * cmb(j, k)\n dp[j] %= mod\n DP = dp\nprint(sum(DP[1:]) % mod)","repo_name":"hirofumi999/AtCoderProject","sub_path":"done/ABC234F.py","file_name":"ABC234F.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20636184674","text":"from flask import Flask, render_template\nfrom datetime import timedelta\nimport os\n\nfrom api.upload import api_upload\n\napp = Flask(__name__)\napp.config.from_object(\"config\")\n\napp.register_blueprint(api_upload, url_prefix = \"/api\")\n\napp.config[\"JSON_SORT_KEYS\"] = False\napp.config[\"JSON_AS_ASCII\"] = False\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\napp.config[\"SECRET_KEY\"] = os.urandom(24)\napp.config[\"PERMANENT_SESSION_LIFETIME\"] = timedelta(days = 1)\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n\tapp.run(host = \"0.0.0.0\", port = 5000, debug = True)\n","repo_name":"zxcvbn848/StageIII-WK1-2-AWS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42814801592","text":"\"\"\" Author: Haris Hasic, Phd Student @ Ishida Lab, Department of Computer Science, Tokyo Institute of Technology \"\"\"\n\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import List, Tuple, Union\n\nfrom rdkit.Chem.AllChem import Atom, Mol, RWMol\n\nfrom chemical_reactions.general_utils import ReactionRepresentationConversionUtils\nfrom chemical_compounds.general_utils import CompoundRepresentationConversionUtils\nfrom chemical_compounds.analysis_utils import CompoundStructureUtils\nfrom chemical_compounds.editing_utils import EditableStructureUtils\n\n\n# noinspection PyArgumentList\nclass ReactionCoreUtils:\n \"\"\" Description: Group of methods for the handling of chemical reaction cores. \"\"\"\n\n @staticmethod\n def __comp(node, neigh, visited, vis):\n \"\"\" Description: Help merging sub-lists that have common elements. \"\"\"\n\n nodes = {node}\n next_node = nodes.pop\n\n while nodes:\n node = next_node()\n vis(node)\n nodes |= neigh[node] - visited\n\n yield node\n\n @staticmethod\n def __merge_common(lists):\n \"\"\" Description: Merge all sub-list that have common elements. \"\"\"\n\n neigh = defaultdict(set)\n visited = set()\n\n for each in lists:\n for item in each:\n neigh[item].update(each)\n\n for node in neigh:\n if node not in visited:\n yield sorted(ReactionCoreUtils.__comp(node, neigh, visited, visited.add))\n\n @staticmethod\n def __atom_in_core(mol_atom_ind: int, reaction_cores: Union[List, Tuple]) -> bool:\n \"\"\" Description: Check if a specific atom is in any of the lists of core atoms. \"\"\"\n\n for reaction_core in reaction_cores:\n if mol_atom_ind in reaction_core:\n return True\n\n return False\n\n @staticmethod\n def __compound_is_mapped(compound: Union[str, Mol]) -> bool:\n \"\"\" Description: Check if a compound contains at least one mapped atom. \"\"\"\n\n if isinstance(compound, str):\n return \":\" in compound\n\n else:\n for atom in compound.GetAtoms():\n if atom.GetAtomMapNum() != 0:\n return True\n\n return False\n\n @staticmethod\n def same_neighbourhood_size(compound_a: Mol, mol_atom_a: Union[Atom, int], compound_b: Mol,\n mol_atom_b: Union[Atom, int]) -> bool:\n \"\"\" Description: Check whether the same atoms in two different molecules have the same neighbourhood size. \"\"\"\n\n if isinstance(mol_atom_a, int):\n mol_atom_a = compound_a.GetAtomWithIdx(mol_atom_a)\n if isinstance(mol_atom_b, int):\n mol_atom_b = compound_b.GetAtomWithIdx(mol_atom_b)\n\n if len(mol_atom_a.GetNeighbors()) != len(mol_atom_b.GetNeighbors()):\n return False\n\n return True\n\n @staticmethod\n def same_neighbour_atoms(compound_a: Mol, mol_atom_a: Union[Atom, int], compound_b: Mol,\n mol_atom_b: Union[Atom, int]) -> bool:\n \"\"\" Description: Check whether the same atoms in two different molecules have retained the same atoms and atom\n attributes in their immediate neighbourhood according to reaction mapping numbers. \"\"\"\n\n if isinstance(mol_atom_a, int):\n mol_atom_a = compound_a.GetAtomWithIdx(mol_atom_a)\n if isinstance(mol_atom_b, int):\n mol_atom_b = compound_b.GetAtomWithIdx(mol_atom_b)\n\n neighbourhood_a = [(mol_atom.GetAtomMapNum(), mol_atom.GetSymbol(), mol_atom.GetFormalCharge(),\n mol_atom.GetNumRadicalElectrons(), mol_atom.GetTotalValence())\n for mol_atom in mol_atom_a.GetNeighbors()]\n\n neighbourhood_b = [(mol_atom.GetAtomMapNum(), mol_atom.GetSymbol(), mol_atom.GetFormalCharge(),\n mol_atom.GetNumRadicalElectrons(), mol_atom.GetTotalValence())\n for mol_atom in mol_atom_b.GetNeighbors()]\n\n return sorted(neighbourhood_a) == sorted(neighbourhood_b)\n\n @staticmethod\n def same_neighbour_bonds(compound_a: Mol, mol_atom_a: Union[Atom, int], compound_b: Mol,\n mol_atom_b: Union[Atom, int]) -> bool:\n \"\"\" Description: Check whether the same atoms in two different molecules have retained the same bonds and bond\n attributes amongst each other in their immediate neighbourhood. \"\"\"\n\n if isinstance(mol_atom_a, int):\n mol_atom_a_ind = mol_atom_a\n mol_atom_a = compound_a.GetAtomWithIdx(mol_atom_a)\n else:\n mol_atom_a_ind = mol_atom_a.GetIdx()\n\n if isinstance(mol_atom_b, int):\n mol_atom_b_ind = mol_atom_b\n mol_atom_b = compound_b.GetAtomWithIdx(mol_atom_b)\n else:\n mol_atom_b_ind = mol_atom_b.GetIdx()\n\n neighbourhood_1 = [(atom_ind.GetAtomMapNum(),\n str(compound_a.GetBondBetweenAtoms(mol_atom_a_ind, atom_ind.GetIdx()).GetBondType()))\n for atom_ind in mol_atom_a.GetNeighbors()]\n\n neighbourhood_2 = [(atom_ind.GetAtomMapNum(),\n str(compound_b.GetBondBetweenAtoms(mol_atom_b_ind, atom_ind.GetIdx()).GetBondType()))\n for atom_ind in mol_atom_b.GetNeighbors()]\n\n return sorted(neighbourhood_1) == sorted(neighbourhood_2)\n\n @staticmethod\n def get_reaction_core_atoms(reaction_smiles: str) -> Tuple[List, List]:\n \"\"\" Description: Get the indices of atoms that participate in the reaction for each molecule in the reaction.\n If the molecule does not contain such atoms, return an empty list. This method is based on the\n assumption that the mapping is correct and done in a 'complete' fashion. This means that all of\n the atoms in the reactants are mapped, and the ones that persist in the product have the same\n mapping number. \"\"\"\n\n reactants, _, products = ReactionRepresentationConversionUtils.parse_roles_from_reaction_smiles(\n reaction_smiles=reaction_smiles,\n as_what=\"mol\"\n )\n\n reactants_core_atoms = [set() for _ in range(len(reactants))]\n products_core_atoms = [set() for _ in range(len(products))]\n\n for p_ind, product in enumerate(products):\n # Only proceed to investigate products that are atom mapped.\n if ReactionCoreUtils.__compound_is_mapped(product):\n for r_ind, reactant in enumerate(reactants):\n # Only proceed to investigate reactants that are atom mapped.\n if ReactionCoreUtils.__compound_is_mapped(reactant):\n\n for p_atom in product.GetAtoms():\n # If there are atoms in the product that are not mapped, add them to the core.\n if p_atom.GetAtomMapNum() <= 0:\n products_core_atoms[p_ind].add(p_atom.GetIdx())\n continue\n\n for r_atom in reactant.GetAtoms():\n # If there are atoms in the reactant that are not mapped, add them to the core.\n if r_atom.GetAtomMapNum() <= 0:\n reactants_core_atoms[r_ind].add(r_atom.GetIdx())\n continue\n\n # If there are atoms in the reactant and product that have the same atom map number,\n # but different chemical surroundings, add them to the core.\n if p_atom.GetAtomMapNum() == r_atom.GetAtomMapNum():\n if not ReactionCoreUtils.same_neighbourhood_size(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()) or \\\n not ReactionCoreUtils.same_neighbour_atoms(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()) or \\\n not ReactionCoreUtils.same_neighbour_bonds(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()):\n reactants_core_atoms[r_ind].add(r_atom.GetIdx())\n products_core_atoms[p_ind].add(p_atom.GetIdx())\n\n return reactants_core_atoms, products_core_atoms\n\n @staticmethod\n def get_reaction_non_core_atoms(reaction_smiles: str) -> Tuple[List, List]:\n \"\"\" Description: Get the atoms of the molecule which are not included in the specified reaction cores. This\n method is just the inverse of the 'get_reaction_core_atoms' method, and all of the same\n restrictions apply. \"\"\"\n\n reactants, _, products = ReactionRepresentationConversionUtils.parse_roles_from_reaction_smiles(\n reaction_smiles=reaction_smiles,\n as_what=\"mol\"\n )\n\n reactants_non_core_atoms = [set() for _ in range(len(reactants))]\n products_non_core_atoms = [set() for _ in range(len(products))]\n\n for p_ind, product in enumerate(products):\n for r_ind, reactant in enumerate(reactants):\n for p_atom in product.GetAtoms():\n\n # If there are products that are not mapped, add all of their atoms to the non-core.\n if not ReactionCoreUtils.__compound_is_mapped(product):\n products_non_core_atoms[p_ind].add(p_atom.GetIdx())\n continue\n\n for r_atom in reactant.GetAtoms():\n\n # If there are reactants that are not mapped, add all of their atoms to the non-core.\n if not ReactionCoreUtils.__compound_is_mapped(reactant):\n reactants_non_core_atoms[r_ind].add(r_atom.GetIdx())\n continue\n\n # If there are atoms in the reactant and product that have the same atom map number,\n # and same chemical surroundings, add them to the core.\n if p_atom.GetAtomMapNum() == r_atom.GetAtomMapNum():\n if ReactionCoreUtils.same_neighbourhood_size(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()) and \\\n ReactionCoreUtils.same_neighbour_atoms(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()) and \\\n ReactionCoreUtils.same_neighbour_bonds(product, p_atom.GetIdx(),\n reactant, r_atom.GetIdx()):\n reactants_non_core_atoms[r_ind].add(r_atom.GetIdx())\n products_non_core_atoms[p_ind].add(p_atom.GetIdx())\n\n return reactants_non_core_atoms, products_non_core_atoms\n\n @staticmethod\n def get_inverse_atoms(reaction_smiles: str, marked_atoms: Tuple[List, List]) -> Tuple[List, List]:\n \"\"\" Description: Return the inverse from the marked atoms for each of the reaction roles. \"\"\"\n\n reactants, _, products = ReactionRepresentationConversionUtils.parse_roles_from_reaction_smiles(\n reaction_smiles=reaction_smiles,\n as_what=\"mol_no_maps\"\n )\n\n reaction_roles = [reactants, products]\n reverse_cores = ([], [])\n\n for role_ind, reaction_role in enumerate(reaction_roles):\n for mol_ind, mol in enumerate(reaction_role):\n local_reverse = set()\n for atom in mol.GetAtoms():\n if atom.GetIdx() not in marked_atoms[role_ind][mol_ind]:\n local_reverse.add(atom.GetIdx())\n\n reverse_cores[role_ind].append(local_reverse)\n\n return reverse_cores\n\n @staticmethod\n def get_connected_index_groups(reaction_smiles: str, reaction_cores: Tuple[List, List]):\n \"\"\" Description: Get the list of grouped atom indices. This grouping adds another layer in the standard list of\n lists format. Ideally, the core indices for a single compound in the reaction should all be\n connected, but sometimes this is not the case. This function can be called to check for such\n multi-part cores, and to handle them appropriately. If applied to non-core atoms, this method\n returns the synthon indices. \"\"\"\n\n reactants, _, products = ReactionRepresentationConversionUtils.parse_roles_from_reaction_smiles(\n reaction_smiles=reaction_smiles,\n as_what=\"mol\"\n )\n\n reaction_roles = [reactants, products]\n role_connections, connected_atoms = [[], []], [[], []]\n\n # Step 1: Aggregate all of the atoms which are connected to each other.\n for rc_ind, reaction_core in enumerate(reaction_cores):\n for rr_ind, reaction_role in enumerate(reaction_core):\n atom_connections = []\n for ind_a, atom_a in enumerate(reaction_role):\n for ind_b, atom_b in enumerate(reaction_role):\n if ind_a != ind_b:\n if reaction_roles[rc_ind][rr_ind].GetBondBetweenAtoms(atom_a, atom_b) is not None:\n if [atom_a, atom_b] not in atom_connections and \\\n [atom_b, atom_a] not in atom_connections:\n atom_connections.append([atom_a, atom_b])\n role_connections[rc_ind].append(atom_connections)\n\n # Step 2: Merge all of the individual connections which share the same indices.\n for rlc_ind, role_connection in enumerate(role_connections):\n [connected_atoms[rlc_ind].append(list(ReactionCoreUtils.__merge_common(rc))) for rc in role_connection]\n\n final_connected_core_indices_groups = deepcopy(connected_atoms)\n\n # Step 3: Construct the final final connected core indices groups collection.\n for rc_ind, reaction_core in enumerate(reaction_cores):\n for rr_ind, reaction_role in enumerate(reaction_core):\n for atom in reaction_role:\n if not ReactionCoreUtils.__atom_in_core(atom, connected_atoms[rc_ind][rr_ind]):\n final_connected_core_indices_groups[rc_ind][rr_ind].append([atom])\n\n return final_connected_core_indices_groups\n","repo_name":"AspirinCode/retrochem_toolbox","sub_path":"chemical_reactions/analysis_utils.py","file_name":"analysis_utils.py","file_ext":"py","file_size_in_byte":14907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18738615726","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom treelib import Node, Tree\n\n# initialize empty list of family members\n# familia = []\nfamilia = Tree()\nexisting_personae = set()\nprint(\"Enter the URL of your base article:\")\nbase_article_url = input()\n\n# recursive function to scrape a Wikipedia article\ndef findChildren(url,generation,parent_article):\n response = requests.get(url=url)\n soup = BeautifulSoup(response.content, 'html.parser')\n title = soup.find(id=\"firstHeading\")\n\n if url not in existing_personae:\n print(title.text)\n hyperlink = title.text\n existing_personae.add(url)\n\n if parent_article == \"\":\n familia.create_node(hyperlink, url)\n else:\n familia.create_node(hyperlink, url, parent=parent_article)\n\n infoboxLabels = soup.find_all('th',{\"class\":\"infobox-label\"})\n for label in infoboxLabels:\n # matches = [\"Children\", \"Issue\"]\n # if any(x in label.text for x in matches):\n if \"Issue\" in label.text:\n children = []\n children = label.find_next(\"td\").find_all(\"a\")\n for child in children:\n # check to ensure only Wikipedia links are followed\n if child['href'].find(\"/wiki/\") == -1: \n continue\n\n # go forward one more generation\n gen = generation\n gen += 1\n findChildren(\"https://en.wikipedia.org\" + child['href'], gen, url)\n break \n\n# \"https://en.wikipedia.org/wiki/William_Rockefeller_Sr.\"\nfindChildren(base_article_url, 0,\"\")\n\nfamilia.show()\n# print(familia.depth())\nfamilia.save2file(\"familia-augustus.txt\")\n# jsonFile = open('familia.json', 'w')\n# jsonFile.write(familia.to_json(with_data=True))\n# jsonFile.close()\n# for persona in familia:\n# print(persona)","repo_name":"flxcn/arborfamilias","sub_path":"top_down_scraper.py","file_name":"top_down_scraper.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41237318509","text":"import os\nfrom pymongo import MongoClient\n\n### ------------------------------------- TO-DO --------------------------------------------\n# 1) Make \"initialize_connection\" not accessible from outside the script\n#\n\ndb_handler = None\n\ndef initialize_connection():\n global db_handler\n try:\n print('Connecting to MongoDB...')\n cluster = MongoClient(os.environ.get('MONGODB_URI'))\n db_handler = cluster[\"expense-tracker\"]\n\n except Exception as err:\n print(\"An exception occurred while trying to connect to MongoDB!\")\n print(err)\n\n else:\n print('The connection to MongoDB was successful!')\n\n\ndef connect_to_DB():\n if not (db_handler):\n initialize_connection()\n else:\n print('Using cached MongoDB connection!')\n return db_handler\n\n \n\n\n\n\n\n\n\n","repo_name":"NoamsGit/expense-tracker-angular","sub_path":"back-side/utils/DBconnection.py","file_name":"DBconnection.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71784804216","text":"from __future__ import annotations\nimport typing\nfrom solana.publickey import PublicKey\nfrom solana.transaction import TransactionInstruction, AccountMeta\nimport borsh_construct as borsh\nfrom .. import types\nfrom ..program_id import PROGRAM_ID\n\n\nclass CreateMetadataAccountV2Args(typing.TypedDict):\n create_metadata_account_args_v2: types.create_metadata_account_args_v2.CreateMetadataAccountArgsV2\n\n\nlayout = borsh.CStruct(\n \"create_metadata_account_args_v2\"\n / types.create_metadata_account_args_v2.CreateMetadataAccountArgsV2.layout\n)\n\n\nclass CreateMetadataAccountV2Accounts(typing.TypedDict):\n metadata: PublicKey\n mint: PublicKey\n mint_authority: PublicKey\n payer: PublicKey\n update_authority: PublicKey\n system_program: PublicKey\n rent: PublicKey\n\n\ndef create_metadata_account_v2(\n args: CreateMetadataAccountV2Args, accounts: CreateMetadataAccountV2Accounts\n) -> TransactionInstruction:\n keys: list[AccountMeta] = [\n AccountMeta(pubkey=accounts[\"metadata\"], is_signer=False, is_writable=True),\n AccountMeta(pubkey=accounts[\"mint\"], is_signer=False, is_writable=False),\n AccountMeta(\n pubkey=accounts[\"mint_authority\"], is_signer=True, is_writable=False\n ),\n AccountMeta(pubkey=accounts[\"payer\"], is_signer=True, is_writable=True),\n AccountMeta(\n pubkey=accounts[\"update_authority\"], is_signer=False, is_writable=False\n ),\n AccountMeta(\n pubkey=accounts[\"system_program\"], is_signer=False, is_writable=False\n ),\n AccountMeta(pubkey=accounts[\"rent\"], is_signer=False, is_writable=False),\n ]\n identifier = b\"\\x18I)\\xed,\\x8e\\xc2\\xfe\"\n encoded_args = layout.build(\n {\n \"create_metadata_account_args_v2\": args[\n \"create_metadata_account_args_v2\"\n ].to_encodable(),\n }\n )\n data = identifier + encoded_args\n return TransactionInstruction(keys, PROGRAM_ID, data)\n","repo_name":"crypt0miester/metaplex-program-library-py","sub_path":"token-metadata/python/src/instructions/create_metadata_account_v2.py","file_name":"create_metadata_account_v2.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"74165105655","text":"def total(arr):\n sum = 0\n for i in range(0, len(arr)):\n if(isPrime(i)):\n sum += arr[i]\n return sum\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while(i * i <= n):\n if (n % i == 0 or n % (i + 2) == 0):\n return False\n i = i + 6\n return True\n\n\nprint(total([1, 2, 3, 4]))\n","repo_name":"h4x0rlol/codewars","sub_path":"sumofprime-indexedelements.py","file_name":"sumofprime-indexedelements.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73890309497","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 11 16:46:22 2019\r\n\r\n@author: rj\r\n\"\"\"\r\n\r\ntry:\r\n a=int(input(\"enter the value\"))\r\n b=int(input(\"enter the value\"))\r\n print(a/b)\r\nexcept ZeroDivisionError as e1:\r\n print(\"happened exception is\",e1)\r\n \r\nexcept ValueError as e1:\r\n print(\"happened exception is\",e1)\r\nexcept:\r\n print(\"safsac\")","repo_name":"harshchoudhary1998/Pytthon","sub_path":"except2.py","file_name":"except2.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11448616109","text":"# -*- coding: utf-8 -*-\r\nimport datetime\r\nimport json\r\nimport logging\r\n\r\nfrom django.db import transaction\r\n\r\nfrom console.appstore.appstore import app_store\r\nfrom console.constants import AppConstants\r\nfrom console.models.main import RainbondCenterApp, ServiceShareRecordEvent, PluginShareRecordEvent\r\nfrom console.repositories.market_app_repo import rainbond_app_repo, app_export_record_repo\r\nfrom console.repositories.plugin import plugin_repo, app_plugin_relation_repo,service_plugin_config_repo\r\nfrom console.repositories.share_repo import share_repo\r\nfrom console.services.plugin import plugin_service\r\nfrom console.services.service_services import base_service\r\nfrom www.apiclient.marketclient import MarketOpenAPI\r\nfrom www.apiclient.regionapi import RegionInvokeApi\r\nfrom www.models import TenantServiceInfo, ServiceEvent, make_uuid\r\nfrom console.services.group_service import group_service\r\nfrom console.services.plugin import plugin_config_service\r\n\r\nlogger = logging.getLogger(\"default\")\r\n\r\nregion_api = RegionInvokeApi()\r\n\r\n\r\nclass ShareService(object):\r\n def check_service_source(self, team, team_name, group_id, region_name):\r\n service_list = share_repo.get_service_list_by_group_id(team=team, group_id=group_id)\r\n if service_list:\r\n can_publish_list = [\r\n service for service in service_list if service.service_source != \"market\"\r\n ]\r\n if not can_publish_list:\r\n data = {\"code\": 400, \"success\": False, \"msg_show\": \"此组中的应用全部来源于云市,无法发布\",\r\n \"list\": list(), \"bean\": dict()}\r\n return data\r\n else:\r\n # 批量查询应用状态\r\n service_ids = [service.service_id for service in service_list]\r\n status_list = base_service.status_multi_service(region=region_name, tenant_name=team_name,\r\n service_ids=service_ids, enterprise_id=team.enterprise_id)\r\n for status in status_list:\r\n if status[\"status\"] != \"running\":\r\n data = {\"code\": 400, \"success\": False, \"msg_show\": \"您有应用未在运行状态不能发布。\",\r\n \"list\": list(), \"bean\": dict()}\r\n return data\r\n else:\r\n data = {\"code\": 200, \"success\": True, \"msg_show\": \"您的应用都在运行中可以发布。\",\r\n \"list\": list(), \"bean\": dict()}\r\n return data\r\n else:\r\n data = {\"code\": 400, \"success\": False, \"msg_show\": \"当前组内无应用\", \"list\": list(), \"bean\": dict()}\r\n return data\r\n\r\n def check_whether_have_share_history(self, group_id):\r\n return share_repo.get_rainbond_cent_app_by_tenant_service_group_id(group_id=group_id)\r\n\r\n def get_service_ports_by_ids(self, service_ids):\r\n \"\"\"\r\n 根据多个服务ID查询服务的端口信息\r\n :param service_ids: 应用ID列表\r\n :return: {\"service_id\":TenantServicesPort[object]}\r\n \"\"\"\r\n port_list = share_repo.get_port_list_by_service_ids(service_ids=service_ids)\r\n if port_list:\r\n service_port_map = {}\r\n for port in port_list:\r\n service_id = port.service_id\r\n tmp_list = []\r\n if service_id in service_port_map.keys():\r\n tmp_list = service_port_map.get(service_id)\r\n tmp_list.append(port)\r\n service_port_map[service_id] = tmp_list\r\n return service_port_map\r\n else:\r\n return {}\r\n\r\n def get_service_dependencys_by_ids(self, service_ids):\r\n \"\"\"\r\n 根据多个服务ID查询服务的依赖服务信息\r\n :param service_ids:应用ID列表\r\n :return: {\"service_id\":TenantServiceInfo[object]}\r\n \"\"\"\r\n relation_list = share_repo.get_relation_list_by_service_ids(service_ids=service_ids)\r\n if relation_list:\r\n dep_service_map = {}\r\n for dep_service in relation_list:\r\n service_id = dep_service.service_id\r\n tmp_list = []\r\n if service_id in dep_service_map.keys():\r\n tmp_list = dep_service_map.get(service_id)\r\n dep_service_info = TenantServiceInfo.objects.filter(service_id=dep_service.dep_service_id)[0]\r\n tmp_list.append(dep_service_info)\r\n dep_service_map[service_id] = tmp_list\r\n return dep_service_map\r\n else:\r\n return {}\r\n\r\n def get_service_env_by_ids(self, service_ids):\r\n \"\"\"\r\n 获取应用env\r\n :param service_ids: 应用ID列表\r\n # :return: 可修改的环境变量service_env_change_map,不可修改的环境变量service_env_nochange_map\r\n :return: 环境变量service_env_map\r\n \"\"\"\r\n env_list = share_repo.get_env_list_by_service_ids(service_ids=service_ids)\r\n if env_list:\r\n service_env_map = {}\r\n for env in env_list:\r\n service_id = env.service_id\r\n tmp_list = []\r\n if service_id in service_env_map.keys():\r\n tmp_list = service_env_map.get(service_id)\r\n tmp_list.append(env)\r\n service_env_map[service_id] = tmp_list\r\n return service_env_map\r\n else:\r\n return {}\r\n\r\n def get_service_volume_by_ids(self, service_ids):\r\n \"\"\"\r\n 获取应用持久化目录\r\n \"\"\"\r\n volume_list = share_repo.get_volume_list_by_service_ids(service_ids=service_ids)\r\n if volume_list:\r\n service_volume_map = {}\r\n for volume in volume_list:\r\n service_id = volume.service_id\r\n tmp_list = []\r\n if service_id in service_volume_map.keys():\r\n tmp_list = service_volume_map.get(service_id)\r\n tmp_list.append(volume)\r\n service_volume_map[service_id] = tmp_list\r\n return service_volume_map\r\n else:\r\n return {}\r\n\r\n def get_service_extend_method_by_keys(self, service_keys):\r\n \"\"\"\r\n 获取应用伸缩状态\r\n \"\"\"\r\n extend_method_list = share_repo.get_service_extend_method_by_keys(service_keys=service_keys)\r\n if extend_method_list:\r\n extend_method_map = {}\r\n for extend_method in extend_method_list:\r\n service_key = extend_method.service_key\r\n tmp_list = []\r\n if service_key in extend_method_map.get(service_key):\r\n tmp_list = extend_method_map.get(service_key)\r\n tmp_list.append(extend_method)\r\n extend_method_map[service_key] = tmp_list\r\n return extend_method_map\r\n else:\r\n return {}\r\n\r\n def get_service_probes(self, service_ids):\r\n \"\"\"\r\n 获取应用健康检测探针\r\n \"\"\"\r\n probe_list = share_repo.get_probe_list_by_service_ids(service_ids=service_ids)\r\n if probe_list:\r\n service_probe_map = {}\r\n for probe in probe_list:\r\n service_id = probe.service_id\r\n tmp_list = []\r\n if service_id in service_probe_map.keys():\r\n tmp_list = service_probe_map.get(service_id)\r\n tmp_list.append(probe)\r\n service_probe_map[service_id] = tmp_list\r\n return service_probe_map\r\n else:\r\n return {}\r\n\r\n def get_team_service_deploy_version(self, region, team, service_ids):\r\n try:\r\n res, body = region_api.get_team_services_deploy_version(region, team.tenant_name, {\"service_ids\":service_ids})\r\n if res.status == 200:\r\n service_versions = {}\r\n for version in body[\"list\"]:\r\n service_versions[version[\"service_id\"]] = version[\"build_version\"]\r\n return service_versions\r\n except Exception as e:\r\n logger.exception(e)\r\n logger.debug(\"======>get services deploy version failure\")\r\n return None\r\n\r\n def query_share_service_info(self, team, group_id):\r\n service_list = share_repo.get_service_list_by_group_id(team=team, group_id=group_id)\r\n if service_list:\r\n array_ids = [x.service_id for x in service_list]\r\n deploy_versions = self.get_team_service_deploy_version(service_list[0].service_region, team, array_ids)\r\n array_keys = []\r\n for x in service_list:\r\n if x.service_key == \"application\" or x.service_key == \"0000\" or x.service_key == \"\":\r\n array_keys.append(x.service_key)\r\n # 查询服务端口信息\r\n service_port_map = self.get_service_ports_by_ids(array_ids)\r\n # 查询服务依赖\r\n dep_service_map = self.get_service_dependencys_by_ids(array_ids)\r\n # 查询服务可变参数和不可变参数\r\n # service_env_change_map, service_env_nochange_map = self.get_service_env_by_ids(array_ids)\r\n service_env_map = self.get_service_env_by_ids(array_ids)\r\n # 查询服务持久化信息\r\n service_volume_map = self.get_service_volume_by_ids(array_ids)\r\n # 查询服务伸缩方式信息\r\n extend_method_map = self.get_service_extend_method_by_keys(array_keys)\r\n # 获取应用的健康检测设置\r\n probe_map = self.get_service_probes(array_ids)\r\n\r\n\r\n all_data_map = {}\r\n for service in service_list:\r\n data = dict()\r\n data['service_id'] = service.service_id\r\n data['tenant_id'] = service.tenant_id\r\n data['service_cname'] = service.service_cname\r\n data['service_key'] = service.service_key\r\n if service.service_key == 'application' or service.service_key == '0000' or service.service_key == 'mysql':\r\n data['service_key'] = make_uuid()\r\n service.service_key = data['service_key']\r\n service.save()\r\n # data['need_share'] = True\r\n # else:\r\n # data['need_share'] = False\r\n data[\"service_share_uuid\"] = \"{0}+{1}\".format(data['service_key'], data['service_id'])\r\n data['need_share'] = True\r\n data['category'] = service.category\r\n data['language'] = service.language\r\n data['extend_method'] = service.extend_method\r\n data['version'] = service.version\r\n data['memory'] = service.min_memory\r\n data['service_type'] = service.service_type\r\n data['service_source'] = service.service_source\r\n data['deploy_version'] = deploy_versions[data['service_id']] if deploy_versions else service.deploy_version\r\n data['image'] = service.image\r\n data['service_alias'] = service.service_alias\r\n data['service_region'] = service.service_region\r\n data['creater'] = service.creater\r\n data[\"cmd\"] = service.cmd\r\n data['probes'] = [probe.to_dict() for probe in probe_map.get(service.service_id, [])]\r\n extend_method = extend_method_map.get(service.service_key)\r\n if extend_method:\r\n e_m = dict()\r\n e_m['min_node'] = service.min_node\r\n e_m['max_node'] = extend_method.max_node\r\n e_m['step_node'] = extend_method.step_node\r\n e_m['min_memory'] = service.min_memory\r\n e_m['max_memory'] = extend_method.max_memory\r\n e_m['step_memory'] = extend_method.step_memory\r\n e_m['is_restart'] = extend_method.is_restart\r\n data['extend_method_map'] = e_m\r\n else:\r\n data['extend_method_map'] = {\r\n \"min_node\": service.min_node,\r\n \"max_node\": 20,\r\n \"step_node\": 1,\r\n \"min_memory\": service.min_memory,\r\n \"max_memory\": 65536,\r\n \"step_memory\": 128,\r\n \"is_restart\": 0\r\n }\r\n data['port_map_list'] = list()\r\n if service_port_map.get(service.service_id):\r\n for port in service_port_map.get(service.service_id):\r\n p = dict()\r\n # 写需要返回的port数据\r\n p['protocol'] = port.protocol\r\n p['tenant_id'] = port.tenant_id\r\n p['port_alias'] = port.port_alias\r\n p['container_port'] = port.container_port\r\n p['is_inner_service'] = port.is_inner_service\r\n p['is_outer_service'] = port.is_outer_service\r\n data['port_map_list'].append(p)\r\n\r\n data['service_volume_map_list'] = list()\r\n if service_volume_map.get(service.service_id):\r\n for volume in service_volume_map.get(service.service_id):\r\n s_v = dict()\r\n s_v['category'] = volume.category\r\n s_v['volume_type'] = volume.volume_type\r\n s_v['volume_path'] = volume.volume_path\r\n s_v['volume_name'] = volume.volume_name\r\n data['service_volume_map_list'].append(s_v)\r\n\r\n data['service_env_map_list'] = list()\r\n data['service_connect_info_map_list'] = list()\r\n if service_env_map.get(service.service_id):\r\n for env_change in service_env_map.get(service.service_id):\r\n if env_change.container_port == 0:\r\n e_c = dict()\r\n e_c['name'] = env_change.name\r\n e_c['attr_name'] = env_change.attr_name\r\n e_c['attr_value'] = env_change.attr_value\r\n e_c['is_change'] = env_change.is_change\r\n if env_change.scope == \"outer\":\r\n e_c['container_port'] = env_change.container_port\r\n data['service_connect_info_map_list'].append(e_c)\r\n else:\r\n data['service_env_map_list'].append(e_c)\r\n\r\n data['service_related_plugin_config'] = list()\r\n # plugins_attr_list = share_repo.get_plugin_config_var_by_service_ids(service_ids=service_ids)\r\n plugins_relation_list = share_repo.get_plugins_relation_by_service_ids(service_ids=[service.service_id])\r\n for spr in plugins_relation_list:\r\n service_plugin_config_var = service_plugin_config_repo.get_service_plugin_config_var(spr.service_id,\r\n spr.plugin_id,\r\n spr.build_version)\r\n plugin_data = spr.to_dict()\r\n plugin_data[\"attr\"] = [var.to_dict() for var in service_plugin_config_var]\r\n data['service_related_plugin_config'].append(plugin_data)\r\n\r\n all_data_map[service.service_id] = data\r\n\r\n all_data = list()\r\n for service_id in all_data_map:\r\n service = all_data_map[service_id]\r\n service['dep_service_map_list'] = list()\r\n if dep_service_map.get(service['service_id']):\r\n for dep in dep_service_map.get(service['service_id']):\r\n d = dict()\r\n if all_data_map.get(dep.service_id):\r\n # 通过service_key和service_id来判断依赖关系\r\n d['dep_service_key'] = all_data_map[dep.service_id][\"service_share_uuid\"]\r\n service['dep_service_map_list'].append(d)\r\n\r\n all_data.append(service)\r\n return all_data\r\n else:\r\n return []\r\n\r\n # 查询应用组内使用的插件列表\r\n def query_group_service_plugin_list(self, team, group_id):\r\n service_list = share_repo.get_service_list_by_group_id(team=team, group_id=group_id)\r\n if service_list:\r\n service_ids = [x.service_id for x in service_list]\r\n plugins = plugin_service.get_plugins_by_service_ids(service_ids)\r\n # 默认插件分享\r\n for p in plugins:\r\n p[\"is_share\"] = True\r\n return plugins\r\n else:\r\n return []\r\n\r\n def get_group_services_used_plugins(self, group_id):\r\n service_list = group_service.get_group_services(group_id)\r\n if not service_list:\r\n return []\r\n service_ids = [x.service_id for x in service_list]\r\n sprs = app_plugin_relation_repo.get_service_plugin_relations_by_service_ids(service_ids)\r\n plugin_list = []\r\n temp_plugin_ids = []\r\n for spr in sprs:\r\n if spr.plugin_id in temp_plugin_ids:\r\n continue\r\n tenant_plugin = plugin_repo.get_plugin_by_plugin_ids([spr.plugin_id])[0]\r\n plugin_dict = tenant_plugin.to_dict()\r\n\r\n plugin_dict[\"build_version\"] = spr.build_version\r\n plugin_list.append(plugin_dict)\r\n temp_plugin_ids.append(spr.plugin_id)\r\n return plugin_list\r\n\r\n # def get_service_plugins_config(self, service_id, shared_plugin_info):\r\n # id_key_map = {}\r\n # if shared_plugin_info:\r\n # id_key_map = {i[\"plugin_id\"]: i[\"plugin_key\"] for i in shared_plugin_info}\r\n #\r\n # sprs = app_plugin_relation_repo.get_service_plugin_relation_by_service_id(service_id)\r\n # service_plugin_config_list = []\r\n # for spr in sprs:\r\n # service_plugin_config_var = service_plugin_config_repo.get_service_plugin_config_var(service_id,\r\n # spr.plugin_id,\r\n # spr.build_version)\r\n # plugin_service_config_map = dict()\r\n # for var in service_plugin_config_var:\r\n # config_var = var.to_dict()\r\n # config_var[\"plugin_key\"] = id_key_map.get(spr.plugin_id)\r\n # plugin_service_config_map[spr.plugin_id] = config_var\r\n #\r\n # service_plugin_config_list.append(plugin_service_config_map)\r\n # return service_plugin_config_list\r\n\r\n def wrapper_service_plugin_config(self,service_related_plugin_config,shared_plugin_info):\r\n \"\"\"添加plugin key信息\"\"\"\r\n id_key_map = {}\r\n if shared_plugin_info:\r\n id_key_map = {i[\"plugin_id\"]: i[\"plugin_key\"] for i in shared_plugin_info}\r\n\r\n service_plugin_config_list = []\r\n for config in service_related_plugin_config:\r\n config[\"plugin_key\"] = id_key_map.get(config[\"plugin_id\"])\r\n service_plugin_config_list.append(config)\r\n return service_plugin_config_list\r\n\r\n def create_basic_app_info(self, **kwargs):\r\n return share_repo.add_basic_app_info(**kwargs)\r\n\r\n def create_publish_event(self, record_event, user_name, event_type):\r\n import datetime\r\n event = ServiceEvent(\r\n event_id=make_uuid(),\r\n service_id=record_event.service_id,\r\n tenant_id=record_event.team_id,\r\n type=event_type,\r\n user_name=user_name,\r\n start_time=datetime.datetime.now())\r\n event.save()\r\n return event\r\n\r\n @transaction.atomic\r\n def sync_event(self, user, region_name, tenant_name, record_event):\r\n rc_apps = RainbondCenterApp.objects.filter(record_id=record_event.record_id)\r\n if not rc_apps:\r\n return 404, \"分享的应用不存在\", None\r\n rc_app = rc_apps[0]\r\n event_type = \"share-yb\"\r\n if rc_app.scope == \"goodrain\":\r\n event_type = \"share-ys\"\r\n event = self.create_publish_event(record_event, user.nick_name, event_type)\r\n record_event.event_id = event.event_id\r\n app_templetes = json.loads(rc_app.app_template)\r\n apps = app_templetes.get(\"apps\", None)\r\n if not apps:\r\n return 500, \"分享的应用信息获取失败\", None\r\n new_apps = list()\r\n sid = transaction.savepoint()\r\n try:\r\n for app in apps:\r\n # 处理事件的应用\r\n if app[\"service_key\"] == record_event.service_key:\r\n body = {\r\n \"service_key\": app[\"service_key\"],\r\n \"app_version\": rc_app.version,\r\n \"event_id\": event.event_id,\r\n \"share_user\": user.nick_name,\r\n \"share_scope\": rc_app.scope,\r\n \"image_info\": app.get(\"service_image\", None),\r\n \"slug_info\": app.get(\"service_slug\", None)\r\n }\r\n try:\r\n res, re_body = region_api.share_service(region_name, tenant_name, record_event.service_alias, body)\r\n bean = re_body.get(\"bean\")\r\n if bean:\r\n record_event.region_share_id = bean.get(\"share_id\", None)\r\n record_event.event_id = bean.get(\"event_id\", None)\r\n record_event.event_status = \"start\"\r\n record_event.update_time = datetime.datetime.now()\r\n record_event.save()\r\n image_name = bean.get(\"image_name\", None)\r\n if image_name:\r\n app[\"share_image\"] = image_name\r\n slug_path = bean.get(\"slug_path\", None)\r\n if slug_path:\r\n app[\"share_slug_path\"] = slug_path\r\n new_apps.append(app)\r\n else:\r\n transaction.savepoint_rollback(sid)\r\n return 400, \"数据中心分享错误\", None\r\n except Exception as e:\r\n logger.exception(e)\r\n transaction.savepoint_rollback(sid)\r\n if re_body:\r\n logger.error(re_body)\r\n return 500, \"数据中心分享错误\", None\r\n else:\r\n new_apps.append(app)\r\n app_templetes[\"apps\"] = new_apps\r\n rc_app.app_template = json.dumps(app_templetes)\r\n rc_app.update_time = datetime.datetime.now()\r\n rc_app.save()\r\n transaction.savepoint_commit(sid)\r\n return 200, \"数据中心分享开始\", record_event\r\n except Exception as e:\r\n logger.exception(e)\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 500, \"应用分享介质同步发生错误\", None\r\n\r\n @transaction.atomic\r\n def sync_service_plugin_event(self, user, region_name, tenant_name, record_id, record_event):\r\n rc_apps = RainbondCenterApp.objects.filter(record_id=record_id)\r\n if not rc_apps:\r\n return 404, \"分享的应用不存在\", None\r\n rc_app = rc_apps[0]\r\n app_template = json.loads(rc_app.app_template)\r\n plugins_info = app_template[\"plugins\"]\r\n plugin_list = []\r\n for plugin in plugins_info:\r\n if record_event.plugin_id == plugin[\"plugin_id\"]:\r\n event_id = make_uuid()\r\n body = {\r\n \"plugin_id\": plugin[\"plugin_id\"],\r\n \"plugin_version\": plugin[\"build_version\"],\r\n \"plugin_key\": plugin[\"plugin_key\"],\r\n \"event_id\": event_id,\r\n \"share_user\": user.nick_name,\r\n \"share_scope\": rc_app.scope,\r\n \"image_info\": plugin.get(\"plugin_image\") if plugin.get(\"plugin_image\") else \"\",\r\n }\r\n\r\n try:\r\n res, body = region_api.share_plugin(region_name, tenant_name, plugin[\"plugin_id\"], body)\r\n data = body.get(\"bean\")\r\n sid = transaction.savepoint()\r\n if not data:\r\n transaction.savepoint_rollback(sid)\r\n return 400, \"数据中心分享错误\", None\r\n\r\n record_event.region_share_id = data.get(\"share_id\", None)\r\n record_event.event_id = data.get(\"event_id\", None)\r\n record_event.event_status = \"start\"\r\n record_event.update_time = datetime.datetime.now()\r\n record_event.save()\r\n image_name = data.get(\"image_name\", None)\r\n if image_name:\r\n plugin[\"share_image\"] = image_name\r\n\r\n transaction.savepoint_commit(sid)\r\n except Exception as e:\r\n logger.exception(e)\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 500, \"插件分享事件同步发生错误\", None\r\n\r\n plugin_list.append(plugin)\r\n app_template[\"plugins\"] = plugin_list\r\n rc_app.app_template = json.dumps(app_template)\r\n rc_app.save()\r\n return 200, \"success\", record_event\r\n\r\n def get_sync_plugin_events(self, region_name, tenant_name, record_event):\r\n res, body = region_api.share_plugin_result(\r\n region_name, tenant_name, record_event.plugin_id, record_event.region_share_id\r\n )\r\n ret = body.get('bean')\r\n if ret and ret.get('status'):\r\n record_event.event_status = ret.get(\"status\")\r\n record_event.save()\r\n return record_event\r\n\r\n def get_sync_event_result(self, region_name, tenant_name, record_event):\r\n res, re_body = region_api.share_service_result(region_name, tenant_name, record_event.service_alias, record_event.region_share_id)\r\n bean = re_body.get(\"bean\")\r\n if bean and bean.get(\"status\", None):\r\n record_event.event_status = bean.get(\"status\", None)\r\n record_event.save()\r\n return record_event\r\n\r\n def get_app_by_app_id(self, app_id):\r\n app = share_repo.get_app_by_app_id(app_id=app_id)\r\n if app:\r\n return 200, \"应用包获取成功\", app[0]\r\n else:\r\n return 400, '应用包不存在', None\r\n\r\n def get_app_by_key(self, key):\r\n app = share_repo.get_app_by_key(key)\r\n if app:\r\n return app[0]\r\n else:\r\n return None\r\n\r\n def delete_app(self, app):\r\n app.delete()\r\n\r\n def delete_record(self, record):\r\n record.delete()\r\n\r\n def upate_app_complete_by_app_id(self, app_id, data):\r\n app = share_repo.get_app_by_app_id(app_id=app_id)\r\n is_complete = False\r\n if app.scope == 'goodrain':\r\n app.scope = data[\"GRYS\"]\r\n is_complete = True\r\n elif app.scope == 'team' or app.scope == 'enterprise':\r\n app.scope = data[\"GRYB\"]\r\n is_complete = True\r\n app.is_complete = is_complete\r\n app.save()\r\n\r\n def create_service(self, **kwargs):\r\n return share_repo.create_service(**kwargs)\r\n\r\n def create_tenant_service(self, **kwargs):\r\n return share_repo.create_tenant_service(**kwargs)\r\n\r\n def create_tenant_service_port(self, **kwargs):\r\n return share_repo.create_tenant_service_port(**kwargs)\r\n\r\n def create_tenant_service_env_var(self, **kwargs):\r\n return share_repo.create_tenant_service_env_var(**kwargs)\r\n\r\n def create_tenant_service_volume(self, **kwargs):\r\n return share_repo.create_tenant_service_volume(**kwargs)\r\n\r\n def create_tenant_service_relation(self, **kwargs):\r\n return share_repo.create_tenant_service_relation(**kwargs)\r\n\r\n def create_tenant_service_plugin(self, **kwargs):\r\n return share_repo.create_tenant_service_plugin(**kwargs)\r\n\r\n def create_tenant_service_plugin_relation(self, **kwargs):\r\n return share_repo.create_tenant_service_plugin_relation(**kwargs)\r\n\r\n def create_tenant_service_extend_method(self, **kwargs):\r\n return share_repo.create_tenant_service_extend_method(**kwargs)\r\n\r\n def create_service_share_record(self, **kwargs):\r\n return share_repo.create_service_share_record(**kwargs)\r\n\r\n def get_service_share_record(self, group_share_id):\r\n return share_repo.get_service_share_record(group_share_id=group_share_id)\r\n\r\n def get_service_share_record_by_ID(self, ID, team_name):\r\n return share_repo.get_service_share_record_by_ID(ID=ID, team_name=team_name)\r\n\r\n def get_service_share_record_by_group_id(self, group_id):\r\n return share_repo.get_service_share_record_by_groupid(group_id=group_id)\r\n\r\n def get_plugins_group_items(self, plugins):\r\n rt_list = []\r\n for p in plugins:\r\n config_group_list = plugin_config_service.get_config_details(p[\"plugin_id\"], p[\"build_version\"])\r\n p[\"config_groups\"] = config_group_list\r\n if p[\"origin_share_id\"] == \"new_create\":\r\n p[\"plugin_key\"] = make_uuid()\r\n else:\r\n p[\"plugin_key\"] = p[\"origin_share_id\"]\r\n rt_list.append(p)\r\n return rt_list\r\n\r\n # 创建应用分享记录\r\n # 创建应用记录\r\n # 创建介质同步记录\r\n @transaction.atomic\r\n def create_share_info(self, share_record, share_team, share_user, share_info):\r\n # 开启事务\r\n sid = transaction.savepoint()\r\n try:\r\n # 删除历史数据\r\n ServiceShareRecordEvent.objects.filter(record_id=share_record.ID).delete()\r\n RainbondCenterApp.objects.filter(record_id=share_record.ID).delete()\r\n app_templete = {}\r\n # 处理基本信息\r\n try:\r\n app_templete[\"template_version\"] = \"v2\"\r\n group_info = share_info[\"share_group_info\"]\r\n app_templete[\"group_key\"] = group_info[\"group_key\"]\r\n app_templete[\"group_name\"] = group_info[\"group_name\"]\r\n app_templete[\"group_version\"] = group_info[\"version\"]\r\n except Exception as e:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n logger.exception(e)\r\n return 500, \"基本信息处理错误\", None\r\n try:\r\n # 确定分享的插件ID\r\n plugins = share_info.get(\"share_plugin_list\", None)\r\n shared_plugin_info = None\r\n if plugins:\r\n\r\n share_image_info = app_store.get_image_connection_info(\r\n group_info[\"scope\"], share_team.tenant_name\r\n )\r\n for plugin_info in plugins:\r\n plugin_info[\"plugin_image\"] = share_image_info\r\n event = PluginShareRecordEvent(\r\n record_id=share_record.ID,\r\n team_name=share_team.tenant_name,\r\n team_id=share_team.tenant_id,\r\n plugin_id=plugin_info['plugin_id'],\r\n plugin_name=plugin_info['plugin_alias'],\r\n event_status='not_start'\r\n )\r\n event.save()\r\n\r\n shared_plugin_info = self.get_plugins_group_items(plugins)\r\n app_templete[\"plugins\"] = shared_plugin_info\r\n except Exception as e:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n logger.exception(e)\r\n return 500, \"插件处理发生错误\", None\r\n # 处理应用相关\r\n try:\r\n services = share_info[\"share_service_list\"]\r\n if services:\r\n new_services = list()\r\n service_ids = [s[\"service_id\"] for s in services]\r\n version_list = base_service.get_apps_deploy_versions(services[0][\"service_region\"], share_team.tenant_name, service_ids)\r\n delivered_type_map = {v[\"ServiceID\"]: v[\"DeliveredType\"] for v in version_list}\r\n for service in services:\r\n image = service[\"image\"]\r\n # slug应用\r\n # if image.startswith(\"goodrain.me/runner\") and service[\"language\"] != \"dockerfile\":\r\n if delivered_type_map[service['service_id']] == \"slug\":\r\n service['service_slug'] = app_store.get_slug_connection_info(group_info[\"scope\"], share_team.tenant_name)\r\n service[\"share_type\"] = \"slug\"\r\n if not service['service_slug']:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 400, \"获取源码包上传地址错误\", None\r\n else:\r\n service[\"service_image\"] = app_store.get_image_connection_info(group_info[\"scope\"], share_team.tenant_name)\r\n service[\"share_type\"] = \"image\"\r\n if not service[\"service_image\"]:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 400, \"获取镜像上传地址错误\", None\r\n\r\n service[\"service_related_plugin_config\"] = self.wrapper_service_plugin_config(service[\"service_related_plugin_config\"], shared_plugin_info)\r\n\r\n if service.get(\"need_share\", None):\r\n ssre = ServiceShareRecordEvent(\r\n team_id=share_team.tenant_id,\r\n service_key=service[\"service_key\"],\r\n service_id=service[\"service_id\"],\r\n service_name=service[\"service_cname\"],\r\n service_alias=service[\"service_alias\"],\r\n record_id=share_record.ID,\r\n team_name=share_team.tenant_name,\r\n event_status=\"not_start\")\r\n ssre.save()\r\n new_services.append(service)\r\n app_templete[\"apps\"] = new_services\r\n else:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 400, \"分享的应用信息不能为空\", None\r\n except Exception as e:\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n logger.exception(e)\r\n return 500, \"应用信息处理发生错误\", None\r\n # 删除同个应用组分享的相同版本\r\n RainbondCenterApp.objects.filter(version=group_info[\"version\"], tenant_service_group_id=share_record.group_id).delete()\r\n # 新增加\r\n app = RainbondCenterApp(\r\n group_key=app_templete[\"group_key\"],\r\n group_name=app_templete[\"group_name\"],\r\n share_user=share_user.user_id,\r\n share_team=share_team.tenant_name,\r\n tenant_service_group_id=share_record.group_id,\r\n pic=group_info.get(\"pic\",\"\"),\r\n source=\"local\",\r\n record_id=share_record.ID,\r\n version=group_info[\"version\"],\r\n enterprise_id=share_team.enterprise_id,\r\n scope=group_info[\"scope\"],\r\n describe=group_info[\"describe\"],\r\n details=group_info.get(\"details\", \"\"),\r\n app_template=json.dumps(app_templete))\r\n app.save()\r\n share_record.step = 2\r\n share_record.update_time = datetime.datetime.now()\r\n share_record.save()\r\n # 提交事务\r\n if sid:\r\n transaction.savepoint_commit(sid)\r\n return 200, \"分享信息处理成功\", share_record.to_dict()\r\n except Exception as e:\r\n logger.exception(e)\r\n if sid:\r\n transaction.savepoint_rollback(sid)\r\n return 500, \"应用分享处理发生错误\", None\r\n\r\n def complete(self, tenant, user, share_record):\r\n app = rainbond_app_repo.get_rainbond_app_by_record_id(share_record.ID)\r\n app_market_url = None\r\n if app:\r\n # 分享到云市\r\n if app.scope == \"goodrain\":\r\n app_market_url = self.publish_app_to_public_market(tenant, user.nick_name, app)\r\n app.is_complete = True\r\n app.update_time = datetime.datetime.now()\r\n app.save()\r\n share_record.is_success = True\r\n share_record.step = 3\r\n share_record.update_time = datetime.datetime.now()\r\n share_record.save()\r\n # 应用有更新,删除导出记录\r\n app_export_record_repo.delete_by_key_and_version(app.group_key, app.version)\r\n return app_market_url\r\n\r\n def publish_app_to_public_market(self, tenant, user_name, app):\r\n market_api = MarketOpenAPI()\r\n data = dict()\r\n data[\"tenant_id\"] = tenant.tenant_id\r\n data[\"group_key\"] = app.group_key\r\n data[\"group_version\"] = app.version\r\n data[\"template_version\"] = app.template_version\r\n data[\"publish_user\"] = user_name\r\n data[\"publish_team\"] = tenant.tenant_alias\r\n data[\"update_note\"] = app.describe\r\n data[\"group_template\"] = app.app_template\r\n data[\"group_share_alias\"] = app.group_name\r\n data[\"logo\"] = app.pic\r\n data[\"details\"] = app.details\r\n result = market_api.publish_v2_template_group_data(tenant.tenant_id, data)\r\n # 云市url\r\n app_url = result[\"app_url\"]\r\n return app_url\r\n\r\n\r\nshare_service = ShareService()\r\n","repo_name":"hctwgl/rainbond-console","sub_path":"console/services/share_services.py","file_name":"share_services.py","file_ext":"py","file_size_in_byte":38864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16642753425","text":"class BankAccount:\n \n account_info = []\n\n def __init__(self, int_rate, balance): \n self.int_rate = int_rate\n self.balance = balance\n BankAccount.account_info.append(self)\n \n def current_balance(self):\n return self.balance\n\n def deposit(self, amount):\n self.balance += amount\n return self\n\n def withdraw(self, amount):\n self.amount = amount\n if self.balance >= self.amount:\n self.balance -= amount\n return self\n else:\n print(\"Insufficient funds: Charging a $5 fee\")\n self.balance -= 5\n\n def display_account_info(self):\n print(f\"Balance: ${self.balance} Interest rate: {self.int_rate}%\")\n\n def yield_interest(self):\n if self.balance > 0:\n self.balance *= self.int_rate\n else:\n print(\"No interest\")\n return self.int_rate\n @classmethod\n def log_account_info(cls):\n sum = 0\n for val in cls.account_info:\n sum += val.balance\n return sum\n\nclass User:\n def __init__(self, name, email, account):\n self.name = name\n self.email = email\n self.account = account\t# added this line\n\n def example_method(self):\n # we can call the BankAccount instance's methods\n self.account.deposit(100)\n # or access its attributes\n print(f\"{self.name}'s Balance: ${self.account.balance}\")\n\naccount1 = BankAccount(0.05, 100)\naccount2 = BankAccount(0.005, 1000)\n\naccount1.deposit(500).deposit(5).deposit(50).withdraw(700)\naccount2.deposit(600).deposit(60).withdraw(6).withdraw(54).withdraw(100).withdraw(200)\n\naccount1.display_account_info()\naccount2.display_account_info()\nprint(f\"Total: {BankAccount.log_account_info()}\")\n\nuser1 = User(\"Jonathan Smith\",\"jonathan.smith722@gmail.com\", account1)\nuser2 = User(\"Meghan Smith\", \"meghan.smith227@gmail.com\", account2)\nuser1.example_method()\nuser2.example_method()","repo_name":"JonDPhysics/Python","sub_path":"Fundamentals/OOP/association_between_classes.py","file_name":"association_between_classes.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72956401655","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport os\nimport re\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2\n\nimport numpy as np\nimport pandas as pd\n\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.utils import *\nfrom keras.callbacks import *\n\nfrom keras import backend as K\nfrom keras.applications.densenet import DenseNet121, preprocess_input\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import fbeta_score\n\nfrom tqdm import tqdm\n\n\n\n\ntrain_images = os.listdir(\"../input/imet-2019-fgvc6/train/\")\ntest_images = os.listdir(\"../input/imet-2019-fgvc6/test/\")\n\nprint(\"number of train images: \", len(train_images))\nprint(\"number of test images: \", len(test_images))\n\n\n\n\ntrain = pd.read_csv(\"../input/imet-2019-fgvc6/train.csv\")\ntrain.head()\n\n\n\n\nlabels = pd.read_csv(\"../input/imet-2019-fgvc6/labels.csv\")\nlabels.head()\n\n\n\n\nlabels.tail()\n\n\n\n\ncultures = [x for x in labels.attribute_name.values if x.startswith(\"culture\")]\ntags = [x for x in labels.attribute_name.values if x.startswith(\"tag\")]\n\n\n\n\nlen(cultures), len(tags)\n\n\n\n\ndef split_culture_tag(x):\n cultures_ = list()\n tags_ = list()\n for i in x.split(\" \"):\n if int(i) <= len(cultures):\n cultures_.append(i)\n else:\n tags_.append(str(int(i) - len(cultures)))\n if not cultures_:\n cultures_.append(str(len(cultures)))\n if not tags_:\n tags_.append(str(len(tags)))\n return \" \".join(cultures_), \" \".join(tags_)\n\n\n\n\nculture_ids = list()\ntag_ids = list()\n\nfor v in tqdm(train.attribute_ids.values):\n c, t = split_culture_tag(v)\n culture_ids.append(c)\n tag_ids.append(t)\n\n\n\n\ntrain[\"culture_ids\"] = culture_ids\ntrain[\"tag_ids\"] = tag_ids\n\ntrain.head()\n\n\n\n\nnum_classes_c = len(cultures) + 1\nnum_classes_t = len(tags) + 1\n\nprint(num_classes_c, num_classes_t)\n\n\n\n\nlabels_map = {v:i for i, v in zip(labels.attribute_id.values, labels.attribute_name.values)}\nlabels_map_rev = {i:v for i, v in zip(labels.attribute_id.values, labels.attribute_name.values)}\n\nnum_classes = len(labels_map)\nprint(\"{} categories\".format(num_classes))\n\n\n\n\nsubmission = pd.read_csv(\"../input/imet-2019-fgvc6/sample_submission.csv\")\nsubmission.head()\n\n\n\n\ndef obtain_y_c(ids):\n y = np.zeros(num_classes_c)\n for idx in ids.split(\" \"):\n y[int(idx)] = 1\n return y\n\ndef obtain_y_t(ids):\n y = np.zeros(num_classes_t)\n for idx in ids.split(\" \"):\n y[int(idx)] = 1\n return y\n\n\n\n\npaths = [\"../input/imet-2019-fgvc6/train/{}.png\".format(x) for x in train.id.values]\n\ntargets_c = np.array([obtain_y_c(y) for y in train.culture_ids.values])\ntargets_t = np.array([obtain_y_t(y) for y in train.tag_ids.values])\n\n\n\n\nclass ImageGenerator(Sequence):\n \n def __init__(self, paths, targets_c, targets_t, batch_size, shape, augment=False):\n self.paths = paths\n self.targets_c = targets_c\n self.targets_t = targets_t\n self.batch_size = batch_size\n self.shape = shape\n self.augment = augment\n \n def __len__(self):\n return int(np.ceil(len(self.paths) / float(self.batch_size)))\n \n def __getitem__(self, idx):\n batch_paths = self.paths[idx * self.batch_size : (idx + 1) * self.batch_size]\n x = np.zeros((len(batch_paths), self.shape[0], self.shape[1], self.shape[2]), dtype=np.float32)\n y = np.zeros((self.batch_size, num_classes, 1))\n for i, path in enumerate(batch_paths):\n x[i] = self.__load_image(path)\n y_c = self.targets_c[idx * self.batch_size : (idx + 1) * self.batch_size]\n y_t = self.targets_t[idx * self.batch_size : (idx + 1) * self.batch_size]\n return x, [y_c, y_t]\n \n def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item\n \n def __load_image(self, path):\n image = cv2.imread(path)\n image = cv2.resize(image, (self.shape[0], self.shape[1]))\n image = preprocess_input(image)\n if self.augment:\n seq = iaa.Sequential([\n iaa.OneOf([\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n iaa.CropAndPad(percent=(-0.25, 0.25)),\n iaa.Crop(percent=(0, 0.1)),\n iaa.Sometimes(0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-180, 180),\n shear=(-8, 8)\n )\n ])\n ], random_order=True)\n image = seq.augment_image(image)\n return image\n\n\n\n\nbatch_size = 64\n\ntrain_paths, val_paths, train_targets_c, val_targets_c, train_targets_t, val_targets_t = train_test_split(paths, \n targets_c,\n targets_t,\n test_size=0.1, \n random_state=1029)\n\ntrain_gen = ImageGenerator(train_paths, train_targets_c, train_targets_t, batch_size=batch_size, shape=(224,224,3), augment=False)\nval_gen = ImageGenerator(val_paths, val_targets_c, val_targets_t, batch_size=batch_size, shape=(224,224,3), augment=False)\n\n\n\n\ninp = Input((224, 224, 3))\nbackbone = DenseNet121(input_tensor=inp,\n weights=\"../input/densenet-keras/DenseNet-BC-121-32-no-top.h5\",\n include_top=False)\nx = backbone.output\nx = GlobalAveragePooling2D()(x)\n\ny_c = Dense(1024, activation=\"relu\")(x)\ny_c = Dropout(0.5)(y_c)\ny_c = Dense(num_classes_c, activation=\"sigmoid\", name=\"cultures_out\")(y_c)\n\ny_t = Dense(2048, activation=\"relu\")(x)\ny_t = Dropout(0.5)(y_t)\ny_t = Dense(num_classes_t, activation=\"sigmoid\", name=\"tags_out\")(y_t)\n\n\nmodel = Model(inp, [y_c, y_t])\n\n\n\n\nlosses = {\n \"cultures_out\": 'binary_crossentropy',\n \"tags_out\": 'binary_crossentropy'\n}\n \nloss_weights = {\n \"cultures_out\": 1.0,\n \"tags_out\": 4.0\n}\n\n\n\n\ndef f_score(y_true, y_pred, threshold=0.1, beta=2):\n tp = tp_score(y_true, y_pred, threshold)\n fp = fp_score(y_true, y_pred, threshold)\n fn = fn_score(y_true, y_pred, threshold)\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n return (1+beta**2) * ((precision * recall) / ((beta**2)*precision + recall))\n\n\ndef tp_score(y_true, y_pred, threshold=0.1):\n tp_3d = K.concatenate(\n [\n K.cast(K.expand_dims(K.flatten(y_true)), 'bool'),\n K.cast(K.expand_dims(K.flatten(K.greater(y_pred, K.constant(threshold)))), 'bool'),\n K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')\n ], axis=1\n )\n tp = K.sum(K.cast(K.all(tp_3d, axis=1), 'int32'))\n return tp\n\n\ndef fp_score(y_true, y_pred, threshold=0.1):\n fp_3d = K.concatenate(\n [\n K.cast(K.expand_dims(K.flatten(K.abs(y_true - K.ones_like(y_true)))), 'bool'),\n K.cast(K.expand_dims(K.flatten(K.greater(y_pred, K.constant(threshold)))), 'bool'),\n K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')\n ], axis=-1\n )\n fp = K.sum(K.cast(K.all(fp_3d, axis=1), 'int32'))\n return fp\n\n\ndef fn_score(y_true, y_pred, threshold=0.1):\n fn_3d = K.concatenate(\n [\n K.cast(K.expand_dims(K.flatten(y_true)), 'bool'),\n K.cast(K.expand_dims(K.flatten(K.abs(K.cast(K.greater(y_pred, K.constant(threshold)), 'float') - K.ones_like(y_pred)))), 'bool'),\n K.cast(K.ones_like(K.expand_dims(K.flatten(y_pred))), 'bool')\n ], axis=1\n )\n fn = K.sum(K.cast(K.all(fn_3d, axis=1), 'int32'))\n return fn\n\n\ndef precision_score(y_true, y_pred, threshold=0.1):\n tp = tp_score(y_true, y_pred, threshold)\n fp = fp_score(y_true, y_pred, threshold)\n return tp / (tp + fp)\n\n\ndef recall_score(y_true, y_pred, threshold=0.1):\n tp = tp_score(y_true, y_pred, threshold)\n fn = fn_score(y_true, y_pred, threshold)\n return tp / (tp + fn)\n\n\n\n\ncheckpoint = ModelCheckpoint('model.h5', \n monitor='val_tags_out_f_score', \n verbose=1, \n save_best_only=True, \n mode='max', \n save_weights_only=True)\n\nreduce_lr = ReduceLROnPlateau(monitor='val_tags_out_f_score', factor=0.2,\n patience=1, verbose=1, mode='max',\n min_delta=0.0001, cooldown=2, min_lr=1e-7)\n\nearly_stop = EarlyStopping(monitor=\"val_tags_out_f_score\", mode=\"max\", patience=5)\n\n\n\n\nmodel.compile(\n loss=losses,\n loss_weights=loss_weights,\n optimizer=Adam(1e-03),\n metrics=['acc', f_score])\n\n\n\n\nhistory = model.fit_generator(generator=train_gen, \n steps_per_epoch=len(train_gen), \n validation_data=val_gen, \n validation_steps=len(val_gen),\n epochs=20,\n callbacks=[checkpoint, reduce_lr, early_stop])\n\n\n\n\nplt.rcParams['figure.figsize'] = (6,6)\n\nc_fscore = history.history['cultures_out_f_score']\nval_c_fscore = history.history['val_cultures_out_f_score']\nt_fscore = history.history['tags_out_f_score']\nval_t_fscore = history.history['val_tags_out_f_score']\n\nepochs = range(1, len(c_fscore) + 1)\n\nplt.title('Training and validation culture f2 score')\nplt.plot(epochs, c_fscore, 'red', label='Training f_score')\nplt.plot(epochs, val_c_fscore, 'blue', label='Validation f_score')\nplt.legend()\n\nplt.title('Training and validation tag f2 score')\nplt.plot(epochs, t_fscore, 'red', label='Training f_score')\nplt.plot(epochs, val_t_fscore, 'blue', label='Validation f_score')\nplt.legend()\n\nplt.show()\n\n\n\n\nmodel.load_weights(\"./model.h5\")\n\n\n\n\nclass TestImageGenerator(Sequence):\n \n def __init__(self, paths, batch_size, shape):\n self.paths = paths\n self.batch_size = batch_size\n self.shape = shape\n \n def __len__(self):\n return int(np.ceil(len(self.paths) / float(self.batch_size)))\n \n def __getitem__(self, idx):\n batch_paths = self.paths[idx * self.batch_size : (idx + 1) * self.batch_size]\n x = np.zeros((len(batch_paths), self.shape[0], self.shape[1], self.shape[2]), dtype=np.float32)\n for i, path in enumerate(batch_paths):\n x[i] = self.__load_image(path)\n return x\n \n def __iter__(self):\n for item in (self[i] for i in range(len(self))):\n yield item\n \n def __load_image(self, path):\n image = cv2.imread(path)\n image = cv2.resize(image, (self.shape[0], self.shape[1]))\n image = preprocess_input(image)\n return image\n\n\n\n\ntest_paths = [\"../input/imet-2019-fgvc6/test/{}.png\".format(x) for x in submission.id.values]\ntest_gen = TestImageGenerator(test_paths, batch_size=batch_size, shape=(224,224,3))\n\npredicts = model.predict_generator(test_gen, verbose=1)\n\n\n\n\npredicts[0].shape, predicts[1].shape\n\n\n\n\nval_predicts = model.predict_generator(val_gen, verbose=1)\n\n\n\n\nbest_threshold_c = 0.\nbest_score_c = 0.\n\nfor threshold in tqdm(np.arange(0, 0.5, 0.01)):\n f2_score = fbeta_score(val_targets_c, np.array(val_predicts[0]) > threshold, beta=2, average='samples')\n if f2_score > best_score_c:\n best_score_c = f2_score\n best_threshold_c = threshold\n\n\n\n\nbest_threshold_t = 0.\nbest_score_t = 0.\n\nfor threshold in tqdm(np.arange(0, 0.5, 0.01)):\n f2_score = fbeta_score(val_targets_t, np.array(val_predicts[1]) > threshold, beta=2, average='samples')\n if f2_score > best_score_t:\n best_score_t = f2_score\n best_threshold_t = threshold\n\n\n\n\nprint(\"culture classifier: best threshold: {} best score: {}\".format(best_threshold_c, best_score_c))\nprint(\"tag classifier: best threshold: {} best score: {}\".format(best_threshold_t, best_score_t))\n\n\n\n\ndef classifier(probs, th_c, th_t):\n c = list()\n \n # culture classifier\n a = np.array(probs[0] > th_c, dtype=np.int8)\n b = np.where(a == 1)[0]\n for idx in b.tolist():\n if idx != len(cultures):\n c.append(str(idx))\n \n # tag classifier\n a = np.array(probs[1] > th_t, dtype=np.int8)\n b = np.where(a == 1)[0]\n for idx in b.tolist():\n if idx != len(cultures) + len(tags):\n c.append(str(idx + len(cultures)))\n\n return \" \".join(c)\n\n\n\n\npredictions = list()\n\nfor probs in tqdm(zip(predicts[0], predicts[1])):\n predictions.append(classifier(probs, best_threshold_c, best_threshold_t))\n\n\n\n\nlen(predictions)\n\n\n\n\nn = 6\n\nimg = cv2.imread(test_paths[n])\nplt.imshow(img)\n\na = np.array(predicts[0][n]>best_score_c, dtype=np.int8)\nb = np.where(a==1)[0]\nfor idx in b.tolist():\n if idx != len(cultures):\n print(labels_map_rev[idx])\n \na = np.array(predicts[1][n]>best_score_t, dtype=np.int8)\nb = np.where(a==1)[0]\nfor idx in b.tolist():\n if idx != len(cultures) + len(tags):\n print(labels_map_rev[idx + len(cultures)])\n\n\n\n\nsubmission[\"attribute_ids\"] = np.array(predictions)\nsubmission.head()\n\n\n\n\nsubmission.to_csv('submission.csv', index=False)\n\n\n\n\nsubmission.shape\n\n\n\n\nget_ipython().system('head submission.csv')\n\n\n\n\nsubmission_df = submission.copy()\nsubmission_df.n_cate = submission.attribute_ids.apply(lambda x: len(x.split(\" \")))\n_ = submission_df.n_cate.value_counts().sort_index().plot.bar()\n\n","repo_name":"aorursy/lost-nb","sub_path":"hengzheng_imet-densenet-and-two-weighted-outputs-model.py","file_name":"hengzheng_imet-densenet-and-two-weighted-outputs-model.py","file_ext":"py","file_size_in_byte":13705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29417461024","text":"from random import shuffle\r\nreverse_name = lambda string: string[::-1]\r\nvowels_count = lambda string: consonant_count(string, True)\r\ndef consonant_count(string, vowel=False):\r\n frequencies = {}\r\n letters = 'bcdfghjklmnpqrstvwxz' if not vowel else 'aeiou'\r\n for char in string: \r\n if char in letters:\r\n if char in frequencies:\r\n frequencies[char] += 1\r\n else:\r\n frequencies[char] = 1\r\n return frequencies\r\nTitled = lambda string: bool([i for i in ('Dr.', \"Sir\", \"Esq\", \"Phd.\") if i in string])\r\nhyphen_count = lambda name: bool(name.count('-')) \r\ndef Names(List):\r\n splitList = List\r\n first = splitList[0]\r\n last = splitList[-1] if len(splitList) > 1 else 0\r\n try:\r\n del splitList[0], splitList[-1]\r\n middle = splitList \r\n except IndexError: middle = 0\r\n return first, middle, last\r\ntoUp = lambda string: ''.join([chr(ord(a) - 32) if ord(a) not in range(65, 90) else a for a in string])\r\ntoLow = lambda string: ''.join([chr(ord(a) + 32) if ord(a) not in range(97, 122) else a for a in string])\r\nIsPalidromd = lambda string: True if reverse_name(string) == string else False\r\ninitials = lambda split: [a[0] for a in split if a]\r\ndef shuffle_name(name):\r\n new = list(name)\r\n shuffle(new)\r\n return ''.join(new)","repo_name":"abraham-milgram/CS1-Cracker-Edition","sub_path":"WhatsInAName/PrimaryFuns.py","file_name":"PrimaryFuns.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34927538667","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom datetime import datetime, timedelta\nfrom sqlalchemy.orm import sessionmaker\n\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\nBase = declarative_base()\n\n\nclass Table(Base):\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='default_value')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return self.task\n\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\ndef add_new_task():\n new_task = input(\"Enter task\\n\")\n deadline = input(\"Enter deadline\\n\")\n new_row = Table(task=f'{new_task}', deadline=datetime.strptime(deadline,'%Y-%m-%d'))\n session.add(new_row)\n session.commit()\n print(\"The task has been added!\\n\")\n\n\ndef print_today_tasks():\n records = session.query(Table).all()\n print(f\"Today {datetime.today().day} {datetime.today().strftime('%b')}:\")\n if records:\n for i in range(len(records)):\n print(f'{i + 1}. {records[i]}')\n else:\n print(\"Nothing to do!\\n\")\n\n\ndef print_week_tasks():\n for day in range(7):\n next_day = (datetime.today() + timedelta(days=day)).date()\n print(f\"\\n{next_day.strftime ('%A')} {next_day.day} {next_day.strftime ('%b')}:\")\n records = session.query(Table).filter(Table.deadline == next_day).all()\n if records:\n for i in range(len(records)):\n print(f'{i + 1}. {records[i]}')\n else:\n print(\"Nothing to do!\")\n\n\ndef print_all_tasks():\n all_tasks = session.query(Table).order_by(Table.deadline).all()\n deadlines = session.query(Table.deadline).order_by(Table.deadline).all()\n if all_tasks:\n for i in range(len(all_tasks)):\n print(f'{i+1}. {all_tasks[i]}. {deadlines[i].deadline.day} {deadlines[i].deadline.strftime(\"%b\")}')\n else:\n print (\"Nothing to do!\")\n\n\ndef missed_tasks():\n\n rows = session.query(Table).filter(Table.deadline < datetime.today().date()).order_by(Table.deadline).all()\n deadlines = session.query(Table.deadline).filter(Table.deadline < datetime.today().date()).order_by(Table.deadline).all()\n if rows == []:\n print(\"Nothing is missed!\")\n else:\n for i in range(len(rows)):\n print(f\"{i+1}. {rows[i]}. {deadlines[i].deadline.day} {deadlines[i].deadline.strftime('%b')}\")\n\n\ndef delete_task():\n print(\"Choose the number of the task you want to delete:\\n\")\n missed_tasks()\n action = int(input())\n rows = session.query(Table).filter(Table.deadline <= datetime.today().date()).order_by(Table.deadline).all()\n if rows != []:\n session.delete(rows[action-1])\n session.commit()\n print(\"The task has been deleted!\")\n\n\nwhile True:\n action = input(\"\\n1) Today's tasks\\n2) Week's tasks\\n3) All tasks\\n\"\n \"4) Missed tasks\\n5) Add task\\n6) Delete task\\n0) Exit\\n\")\n if action == \"1\":\n print_today_tasks()\n elif action == \"2\":\n print_week_tasks()\n elif action == \"3\":\n print_all_tasks()\n elif action == \"4\":\n print (\"Missed tasks:\")\n missed_tasks()\n elif action == \"5\":\n add_new_task()\n elif action == \"6\":\n delete_task()\n elif action == \"0\":\n print(\"Bye!\")\n break\n\n","repo_name":"Lyasinkovska/PycharmProjects","sub_path":"To-Do List/To-Do List/task/todolist/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12845635498","text":"import subprocess\nfrom flask import Flask, render_template, url_for, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef viewer1(sequence):\n pdb_id = sequence.replace('|', '.')\n pdb_filename = f'{pdb_id.upper()}.pdb'\n pdb_url = url_for('static', filename='pdbs/' + pdb_filename)\n return render_template('complex.html', data_href=pdb_url)\n\n@app.route('/', methods=['GET', 'POST'])\ndef blast_search():\n # 1:POST요청만 처리\n if request.method == 'POST':\n sequence = request.form.get('sequence')\n result = run_blast_search(sequence)\n return render_template('main.html', blast_results=result)\n # 2:POST요청이 없거나 GET요청인 경우\n return render_template('main.html')\n# BlastSearch 수행\ndef run_blast_search(sequence):\n with open('query.fasta', 'w') as file:\n file.write(f'>query\\n{sequence}')\n subprocess.run(['blastp', '-query', 'query.fasta', '-db', 'blast_db', '-out', 'result.txt'])\n with open('result.txt', 'r') as file:\n lines = file.readlines()\n result = ''.join(lines[27:])\n start_line = 29\n end_line = start_line\n for i, line in enumerate(lines[start_line:], start=start_line):\n if line.strip() == \"\":\n end_line = i\n break\n table_lines = lines[start_line:end_line]\n remaining_lines = lines[end_line:]\n result = {\n 'table_lines': table_lines,\n 'remaining_lines': remaining_lines\n }\n\n# result = file.read()\n return result\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080)\n","repo_name":"hjee02018/mol-renderer","sub_path":"blast.py","file_name":"blast.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16510335478","text":"import threading\r\n\r\n# 定义一个全局变量\r\ng = 0\r\n# 定义两个函数 这是线程执行代码\r\ndef task():\r\n # 每次存款100元\r\n global g # 此处定义全局变量\r\n for i in range(5):\r\n g = g + 100\r\n print('存款成功',i,g)\r\n\r\n\r\ndef task2():\r\n # 每次取款100元\r\n global g # 此处定义全局变量\r\n for i in range(5):\r\n g = g - 100\r\n print('取款成功',i,g)\r\n\r\n\r\n# 创建线程对象\r\nt1 = threading.Thread(target=task)\r\nt2 = threading.Thread(target=task2)\r\n# 调用start方法 让线程开始运行\r\nt1.start()\r\nt2.start()\r\n\r\nprint(g)","repo_name":"rico233333333/Python_Learn","sub_path":"进程、线程、协程/线程/线程共享全局变量.py","file_name":"线程共享全局变量.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"33535092536","text":"#! /usr/bin/env python\n\n\"\"\"\nDaniel Gorrie\nLarge dataset sampler\n\"\"\"\n\nimport random\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\n# Constants\nINPUT_FILE = 'train.features'\nINPUT_FILE_SIZE = 8352136\nOUTPUT_FILE = 'train_small.features'\nSAMPLE_SIZE = 110000\nINPUT_LABEL_DIR = 'labels/'\nOUTPUT_LABEL_DIR = 'labels_small/'\n\n\n\ndef main():\n random.seed()\n\n # Generate array of SAMPLE_SIZE random integers in range [0, INPUT_FILE.length)\n # Iterate over the input file grabbing the\n\n indices = dict.fromkeys([random.randint(0, INPUT_FILE_SIZE) for _ in xrange(SAMPLE_SIZE)])\n while len(indices) < SAMPLE_SIZE:\n indices[random.randint(0, INPUT_FILE_SIZE)] = 0\n\n\n # Grab the proper training data\n with open(OUTPUT_FILE, 'w') as out:\n with open(INPUT_FILE, 'r') as f:\n line_count = 0\n for line in f:\n if line_count in indices:\n # append the line to the output file\n out.write(line)\n line_count += 1\n\n # Grab the label files\n label_files = [ f for f in listdir(INPUT_LABEL_DIR) if isfile(join(INPUT_LABEL_DIR,f)) ]\n\n # make a new directory\n d = os.path.dirname(OUTPUT_LABEL_DIR)\n if not os.path.exists(d):\n os.makedirs(d)\n\n # put versions of all the label files in the output directory\n\n for label_file in label_files:\n with open(INPUT_LABEL_DIR + label_file, 'r') as f:\n with open (OUTPUT_LABEL_DIR + label_file, 'w') as out:\n line_count = 0\n for line in f:\n if line_count in indices:\n # append the line to the output file\n out.write(line)\n line_count += 1\n\n\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"gorried/hexgraph","sub_path":"src/data_files/sample_data.py","file_name":"sample_data.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4079366578","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 1. What does an empty dictionary code look like?\n# Ans:- empty dictionary code look like this: {}\ndict = {}\ntype(dict)\n\n\n# 2. What is the value of a dictionary value with the key 'foo' and the value 42?\n# \n\n# In[2]:\n\n\n{'foo':42}\n\n\n# 3. What is the most significant distinction between a dictionary and a list?\n\n# significant distinction between a dictionary and a list is :-\n# \n# List - values or atoms are ordered.\n# dictionary - values or atoms in dictionary are unordered.\n\n# 4. What happens if you try to access spam['foo'] if spam is {'bar': 100}?\n# This will give us KeyError\n\n# In[3]:\n\n\nspam = {'bar':'100'}\nspam ['foo']\n\n\n# 5. If a dictionary is stored in spam, what is the difference between the expressions 'cat' in spam and 'cat' in spam.keys()?\n\n# In[4]:\n\n\n# there is no difference\nspam = {'cat':100}\n'cat' in spam\n\n\n# In[5]:\n\n\n'cat' in spam.keys()\n\n\n# 6.If a dictionary is stored in spam, what is the difference between the expressions 'cat' in spam and 'cat' in spam.values()?\n# \n# 'cat' in spam checks whether there is a 'cat' key in the dictionary\n# \n# 'cat' in spam.values() checks whether there is a value 'cat' for one of the keys in spam.\n\n# In[8]:\n\n\nspam ={'cat':100}\n'cat' in spam\n\n\n# In[9]:\n\n\nspam ={'cat':100}\n'cat' in spam.values()\n\n\n# 7. What is a shortcut for the following code?\n\n# if color not in spam:\n# \n# spam[color] = black\n\n# In[7]:\n\n\n#ans:-\nspam ={'cat':100}\nspam.setdefault('color','black')\nspam\n\n\n# 8. How do you 'pretty print' dictionary values using which module and function?\n\n# In[10]:\n\n\nimport pprint\n\n\n# In[11]:\n\n\ndct_arr = [ {'Name': 'Ranjan', 'Age': '20', 'Country': 'USA'},\n {'Name': 'Joseph', 'Age': '40', 'Country': 'China'},\n {'Name': 'Ankit', 'Age': '45', 'Country': 'Rasia'},\n {'Name': 'nakul', 'Age': '32', 'Country': 'Japan'}\n]\n\n\n# In[12]:\n\n\n# printing with pprint()\npprint.pprint(dct_arr)\n\n\n# In[13]:\n\n\n#Printing with print()\nprint(dct_arr)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Akshay417/Python-basic-assignment-no.1","sub_path":"assignment 05.py","file_name":"assignment 05.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21177431685","text":"import torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass CNNBackbone(nn.Module):\n \"\"\"\n Resnet50 or inception3 network\n \"\"\"\n def __init__(self, cnn='resnet50', ckpt=None, pool='avg', dropout=True):\n \"\"\"\n cnn that return feature maps or feature vectors (global pooling)\n :param cnn: cnn model option\n :param ckpt: pre-trained checkpoint of cnn model\n :param pool: pool the feature maps. choices: avg, max, None\n :param dropout: whether or not add dropout\n \"\"\"\n super(CNNBackbone, self).__init__()\n self.transform_input = False\n self.pool = pool\n if cnn == 'inception3':\n models = torchvision.models.inception_v3(aux_logits=False)\n if ckpt is not None:\n models.load_state_dict(torch.load(ckpt), False)\n self.transform_input = True\n models = list(models.children())[:-1]\n\n elif cnn == 'resnet50':\n models = torchvision.models.resnet50()\n if ckpt is not None:\n models.load_state_dict(torch.load(ckpt))\n models = list(models.children())[:-2]\n\n if pool is not None:\n if pool == 'avg':\n models.append(nn.AdaptiveAvgPool2d(1))\n elif pool == 'max':\n models.append(nn.AdaptiveMaxPool2d(1))\n if dropout:\n models.append(nn.Dropout())\n\n self.model = nn.Sequential(*models)\n\n def forward(self, x):\n if self.transform_input:\n x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = self.model(x)\n if self.pool is not None:\n x = x.view(x.size(0), -1)\n return x\n\n\n","repo_name":"rogeryang12/video_reid_pytorch","sub_path":"reid/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"43358983212","text":"import cv2, math\nimport numpy as np\nimport sys\nsys.path.append('/raid-dgx2/Hasnat/ImQual/')\n\nfrom skimage import feature as ced\nfrom skimage import transform\n\nfrom BRISQUEE_features import *\nfrom Hand_Crafted_Features import *\nbrisq = BRISQUE()\nhcf = Hand_Crafted_Features()\n\n#### Related to LP text ###\ndef include_delimiter_to_lp_text(my_text, delim_):\n if get_pattern(my_text) in ['CCDDDCC','CCDDCC','DDCCCDD','DDCCDD',]:\n my_text_w_sp = my_text[:2] + delim_ + my_text[2:-2] + delim_ + my_text[-2:]\n elif get_pattern(my_text) == 'DDDDCCDD':\n my_text_w_sp = my_text[:4] + delim_ + my_text[4:-2] + delim_ + my_text[-2:]\n elif get_pattern(my_text) in ['DDDDCCDD', 'DDDDCCCDD']:\n my_text_w_sp = my_text[:4] + delim_ + my_text[4:-2] + delim_ + my_text[-2:] \n elif get_pattern(my_text) in ['DDDCCCDD', 'DDDCCDD', 'CCCDDDCC']: \n my_text_w_sp = my_text[:3] + delim_ + my_text[3:-2] + delim_ + my_text[-2:]\n\n return my_text_w_sp\n\ndef isFrench(my_text):\n french_LP_patterns = ['CCDDDCC','CCDDCC','DDCCCDD','DDCCDD','DDDCCCDD','DDDDCCCDD','DDDCCDD','DDDDCCDD','CCCDDDCC']\n is_fr_lp = get_pattern(my_text) in french_LP_patterns\n return is_fr_lp\n \ndef get_pattern(plate):\n lp_type =''\n for sample in plate:\n if sample.isdigit():\n lp_type += 'D'\n elif sample.isalpha():\n lp_type += 'C'\n return lp_type\n\n\n#### Related to Image Quality ###\ndef get_iq_score_proba(img_orig, model, scaler, imRsz=(280, 120), \n feature_type='HC', verbose=False):\n _rsz_H, _rsz_W = (imRsz[1], imRsz[0])\n img_preprocessed = cv2.cvtColor(cv2.resize(img_orig, (_rsz_W, _rsz_H)), cv2.COLOR_BGR2RGB)\n\n # Extract IQ features\n if feature_type == 'HC':\n t_feature = get_hc_features_score(img_preprocessed)\n elif feature_type == 'HCBR':\n t_feature_brisque = brisq.get_feature(img_preprocessed)\n t_feature_hc = get_hc_features_score(img_preprocessed)\n t_feature = np.hstack((t_feature_brisque, t_feature_hc)) \n\n if verbose:\n print(t_feature)\n \n if np.isnan(t_feature).any():\n # means problem in features, so we return it as a bad image \n print('Nan values in IQA features ..')\n prd = [np.array([0.0, 0.0, 0.0, 1.0])]\n else:\n try:\n # Predict IQ\n Xn = scaler.transform(np.expand_dims(np.asarray(t_feature), 0))\n prd = model.predict_proba(Xn)\n except:\n # means problem in prediction, so we return it as a bad image\n print('Problem in IQA prediction ..')\n prd = [np.array([0.0, 0.0, 0.0, 1.0])]\n\n return prd[0]\n\ndef get_hc_features_score(img_orig, imRsz=(280, 120), verbose=False):\n _rsz_H, _rsz_W = (imRsz[1], imRsz[0])\n img_preprocessed = cv2.cvtColor(cv2.resize(img_orig, (_rsz_W, _rsz_H)), cv2.COLOR_BGR2RGB)\n\n # Extract IQ features\n t_feature_hc = hcf.get_hand_crafted_features_combined(img_preprocessed) \n return t_feature_hc\n\n## Related to Image Geometry\ndef get_rotation_angle(image, th_angle=45):\n ## Estimate the edges and lines\n gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n edges = ced.canny(gray_img, sigma=3) \n lines = transform.probabilistic_hough_line(edges, line_length=3,line_gap=5)\n\n ## Calculate the angle between each line and the horizontal line:\n angle = 0.0\n nb_lines = 0\n\n ## Calculate the angle between each line and the horizontal line:\n angle_all = []\n for line in lines:\n (x1,y1),(x2,y2) = line\n t_ang = np.rad2deg(math.atan2(y2*1.0 - y1*1.0, x2*1.0 - x1*1.0))\n if np.abs(t_ang) > 90:\n t_ang = 180-np.abs(t_ang)\n \n if np.abs(t_ang) <= th_angle:\n angle_all.append(t_ang)\n \n op_ang = np.mean(angle_all)\n \n return op_ang\n\ndef select_image_with_angle_iqa(all_scores, max_angle_to_normalize=20, verbose=False):\n all_scores = np.asarray(all_scores)\n all_scores[:, 0] = all_scores[:, 0] / max_angle_to_normalize\n all_scores_norm = np.abs(all_scores - np.array([0.0, 1.0]))\n if verbose:\n print(all_scores_norm)\n all_scores_comb = np.sum(all_scores_norm, axis=1)\n sel_indx = np.argmin(all_scores_comb)\n if verbose:\n print((all_scores_comb, sel_indx))\n return sel_indx","repo_name":"mhasnat/my_utils","sub_path":"my_utility_LP_image_selection.py","file_name":"my_utility_LP_image_selection.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29164060419","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras import layers\nfrom model import Classifier\nfrom model_spec import ModelSpec\nfrom model_builder import CellModel, build_arch_model\nimport numpy as np\nfrom keras.callbacks import CSVLogger\n\nbatch_size = 256\nAUTOTUNE = tf.data.AUTOTUNE\n\n\ndef prepare(ds, data_augmentation=None, shuffle=False, augment=False):\n # Resize and rescale all datasets.\n #ds = ds.map(lambda x, y: (resize_and_rescale(x), y), num_parallel_calls=AUTOTUNE)\n\n if shuffle:\n ds = ds.shuffle(1000)\n\n # Batch all datasets.\n ds = ds.batch(batch_size)\n\n # Use data augmentation only on the training set.\n if augment:\n ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y), num_parallel_calls=AUTOTUNE)\n\n # Use buffered prefetching on all datasets.\n return ds.prefetch(buffer_size=AUTOTUNE)\n\n\nclass LrCustomCallback(tf.keras.callbacks.Callback):\n def __init__(self, amount, batch_size, total_layers, optimizer):\n super(LrCustomCallback, self).__init__()\n self.global_batch = 0\n self.optimizer = optimizer\n self.total_batches = int(total_layers * 20 * amount / batch_size)\n\n def on_train_batch_end(self, batch, logs=None):\n self.global_batch += 1\n progress_fraction = self.global_batch / self.total_batches\n learning_rate = (0.5 * 0.1 * (1 + tf.cos(np.pi * progress_fraction)))\n tf.keras.backend.set_value(self.optimizer.lr, learning_rate)\n\n def on_epoch_begin(self, epoch, logs=None):\n print('Learning Rate: ', tf.keras.backend.eval(self.optimizer.lr))\n\n\nif __name__ == '__main__':\n\n (train_ds, val_ds, test_ds), metadata = tfds.load(\n 'mnist',\n split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],\n with_info=True,\n as_supervised=True,\n )\n num_classes = metadata.features['label'].num_classes\n print(num_classes)\n\n data_augmentation = tf.keras.Sequential([\n layers.Rescaling(1./255),\n layers.RandomRotation(0.2),\n layers.CenterCrop(28, 28)\n ])\n\n valid_augmentation = tf.keras.Sequential([\n layers.Rescaling(1./255),\n layers.CenterCrop(28, 28)\n ])\n\n train_ds = prepare(train_ds, data_augmentation, shuffle=True, augment=True)\n val_ds = prepare(val_ds, valid_augmentation, augment=True)\n test_ds = prepare(test_ds, valid_augmentation, augment=True)\n\n train_ds = train_ds.cache()\n val_ds = val_ds.cache()\n\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)\n optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.1, momentum=0.9, epsilon=1.0)\n\n # Create an instance of the model\n model = tf.keras.Sequential()\n matrix = np.array([[0, 0, 0, 0, 0, 1, 0], # input layer\n [0, 0, 0, 0, 0, 0, 0], # 1x1 conv\n [0, 0, 0, 0, 0, 0, 0], # 3x3 conv\n [0, 0, 0, 0, 0, 0, 0], # 5x5 conv (replaced by two 3x3's)\n [0, 0, 0, 0, 0, 0, 0], # 5x5 conv (replaced by two 3x3's)\n [0, 0, 0, 0, 0, 0, 1], # 3x3 max-pool\n [0, 0, 0, 0, 0, 0, 0]])\n\n ops = ['INPUT', 'conv1x1-bn-relu', 'conv1x1-bn-relu', 'conv1x1-bn-relu', 'conv1x1-bn-relu', 'conv1x1-bn-relu',\n 'OUTPUT']\n\n spec = ModelSpec(matrix, ops)\n model.add(build_arch_model(spec, (None, 28, 28, 1), is_training=True, num_cells=3, num_stacks=3))\n model.add(Classifier(10))\n model.build([None, 28, 28, 1])\n model.summary()\n\n model.compile(optimizer=optimizer,\n loss=loss_object,\n metrics=['accuracy'])\n\n epochs = 20 * 12\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, mode='min')\n csv_logger_callback = CSVLogger('./normal_training_log.csv', append=False, separator=',')\n lr_scheduler_callback = LrCustomCallback(metadata.splits['train'].num_examples,\n batch_size,\n 12,\n optimizer)\n history = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs,\n callbacks=[early_stopping_callback, csv_logger_callback, lr_scheduler_callback]\n )\n\n model.evaluate(test_ds, verbose=2)","repo_name":"gary9987/tensorflow-get-started","sub_path":"incremental/normal_training.py","file_name":"normal_training.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21966658766","text":"\"\"\"\n Given a sorted array nums, remove the duplicates in-place such that duplicates appeared\n at most twice and return the new length.\n\n Do not allocate extra space for another array; you must do this by modifying the input array\n in-place with O(1) extra memory.\n\"\"\"\nfrom typing import List\n\n\ndef solution(nums: List[int]) -> int:\n \"\"\"\n 1. loop over the nums array\n 2. check 2 index before and compare the value with current\n 3. if the value < than current then change it inplace and increase index\n \"\"\"\n index = 0\n for value in nums:\n if index < 2 or nums[index - 2] < value:\n nums[index] = value\n index += 1\n return len(nums[:index])\n\n\ndef solution_1(nums: List[int]) -> int:\n i = 1\n count = 2\n while i < len(nums):\n\n if nums[i] - nums[i - 1] == 0 and count < 2:\n nums[:] = nums[:i] + nums[i + 1 :]\n count += 1\n elif nums[i] - nums[i - 1] == 0:\n i += 1\n count += -1\n else:\n i += 1\n count = 2\n return len(nums)\n\n\ndef solution_2(nums: List[int]) -> int:\n i = 1\n count = 2\n while i < len(nums):\n if nums[i] == nums[i - 1] and count < 2:\n i += 1\n count += 1\n\n elif nums[i] == nums[i - 1]:\n nums = nums[:i] + nums[i + 1 :]\n count -= 1\n\n else:\n i += 1\n\n return len(nums)\n\n\nif __name__ == \"__main__\":\n result = solution_2([1, 1, 1, 1, 2, 2, 3, 3, 3])\n assert result == 6\n print(result)\n","repo_name":"mustafasencer/Algorithm-Data-Structure-With-Graphviz","sub_path":"problems/array/remove_duplicates_from_sorted_array_II.py","file_name":"remove_duplicates_from_sorted_array_II.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39488500915","text":"#!/usr/bin/env python\nfrom turtle import Turtle\nimport rospy\nimport numpy\nimport math\n\nfrom geometry_msgs.msg import Twist, Vector3, Pose\nfrom std_msgs.msg import Float64\nfrom math import pow, sqrt, acos\n\nclass ExecuteMove:\n\n def __init__(self):\n\n rospy.init_node('executing_movement', anonymous=False)\n rospy.Subscriber('filtered_pos', Vector3, self.update_pose) \n rospy.Subscriber('initial_position', Vector3, self.update_initial_pose) \n rospy.Subscriber('desired_position', Vector3, self.update_des_pos)\n\n self.velocity_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n\n self.rate = rospy.Rate(10)\n self.delta_pos = Vector3()\n self.cur_pose = Vector3()\n self.des_pos = Vector3()\n #self.des_vec = Vector3()\n self.last_pos = Vector3()\n self.initial_pos = Vector3()\n #self.turn_angle = 0.000\n #self.ang_vel = 2\n self.stop_x = True \n self.stop_y = True \n self.margin_x = 0\n self.margin_y = 0.02\n #self.rate = rospy.Rate(1) #Cycles at a rate of 1 hz\n #self.rate.sleep() \n #self.rate = rospy.Rate(1) #set rospy rate to 10 hzs\n self.last_pos.x = self.cur_pose.x \n self.last_pos.y = self.cur_pose.y\n \n self.end_y = self.initial_pos.y + 0.1\n\n # make it not go to 0,0 right off the bat\n self.des_pos.x = self.cur_pose.x \n self.des_pos.y = self.cur_pose.y\n \n self.des_pos.x = self.initial_pos.x + 5\n self.des_pos.y = self.initial_pos.y + 0.1\n\n self.vel_msg = Twist()\n self.vel_msg.linear.x = 0 \n self.vel_msg.angular.z = 0\n self.vel_msg.linear.y = 0 \n self.vel_msg.linear.z = 0 \n self.vel_msg.angular.x = 0 \n self.vel_msg.angular.y = 0\n self.velocity_publisher.publish(self.vel_msg)\n\n # des_vec is vector from current to desired\n # delta_pos is change in position from last position\n # angular velocity vector should be in radians per second\n def move(self):\n #get current direction\n self.delta_pos.x = self.cur_pose.x - self.last_pos.x \n self.delta_pos.y = self.cur_pose.y - self.last_pos.y\n vel_x = 0.0\n #if the robot is not within the margin from the desired position\n #current position - margin < desired position < current position -> stop = true\n #current position < desired position < current position + margin -> stop = true\n #else -> stop = false\n if self.check_pos_x():\n self.stop_x = False\n #determine the deriction beased on desired position\n if self.stop_x is False:\n #print(self.des_pos.x)\n self.last_pos.x = self.cur_pose.x\n if (self.des_pos.x < self.cur_pose.x) or (self.cur_pose.x > self.initial_pos.x + 5): \n vel_x = -0.06\n elif (self.des_pos.x > self.cur_pose.x) or (self.initial_pos.x >= self.cur_pose.x):\n vel_x = 0.06\n self.vel_msg.linear.x = vel_x\n #compare current and desire y position \n if self.check_pos_y():\n self.stop_y = False\n else:\n self.vel_msg.angular.z = 0\n\n #make correction only during robot is moving\n if self.stop_y is False:\n target_x = self.des_pos.x - self.cur_pose.x\n target_y = self.des_pos.y - self.cur_pose.y\n\n k = abs(target_x) * 0.1\n\n #print(self.des_pos.y)\n dist1 = math.sqrt(math.pow(target_x, 2 ) + math.pow(target_y, 2)) \n #theta = acos(abs(target_y)/abs(target_x))\n #k proportional constant\n #theta = theta * k\n theta = 0.000024\n print('des_x: ', self.des_pos.x, 'cur_x: ', self.cur_pose.x)\n print('des_y: ', self.des_pos.y, 'cur_y: ', self.cur_pose.y)\n print('angle: ', theta)\n\n #base is going to left and des_pos.y is on the right -> clockwise\n if (self.des_pos.x < self.cur_pose.x) and (self.des_pos.y < self.cur_pose.y): \n self.vel_msg.angular.z = theta\n #self.vel_msg.angular.z = 0.06\n #base is going to left and des_pos.y is on the left -> counter clockwise\n elif (self.des_pos.x < self.cur_pose.x) and (self.des_pos.y > self.cur_pose.y):\n self.vel_msg.angular.z = -theta\n #self.vel_msg.angular.z = -0.06\n #base is going to right and des_pos.y is on the left -> counter clockwise\n elif (self.des_pos.x > self.cur_pose.x) and (self.des_pos.y > self.cur_pose.y):\n self.vel_msg.angular.z = theta\n #self.vel_msg.angular.z = 0.06\n #base is going to right and des_pos.y is on the left -> clockwise\n elif (self.des_pos.x > self.cur_pose.x) and (self.des_pos.y < self.cur_pose.y):\n self.vel_msg.angular.z = -theta\n #self.vel_msg.angular.z = -0.06\n\n self.velocity_publisher.publish(self.vel_msg)\n rospy.sleep(0.5)\n self.vel_msg.angular.z = 0.000\n self.velocity_publisher.publish(self.vel_msg)\n\n #self.vel_msg.angular.z = 0\n #set back to going straingt right here or ramain still\n #self.rate = rospy.Rate(1) #Set Rospy rate to 1\n #self.turn_ang.publish(self.turn_angle)\n #rospy.spin()\n #self.rate = rospy.Rate(1) #Set Rospy Rate to 1\n\n #self.rate.sleep()\n\n # possibly need to introduce turning error\n \n # maybe want to round the numbers depending on testing results\n\n # maybe want to round the numbers depending on testing results\n \n def update_pose(self, data):\n self.cur_pose.x = data.x\n self.cur_pose.y = data.y\n\n def update_des_pos(self, data):\n # #print(\"\\ndes_posx is: \")\n # #print(data.x)\n # #print(\"\\ndes_posy is: \")\n # #print(data.y)\n self.des_pos.x = data.x\n if (self.des_pos.x < self.cur_pose.x): \n self.des_pos.y = self.initial_pos.y\n elif (self.des_pos.x > self.cur_pose.x):\n self.des_pos.y = self.end_y\n\n def update_initial_pose(self, data):\n self.initial_pos.x = data.x\n self.initial_pos.x = data.y\n\n # checks if in the acceptable margin from the desired position\n def check_pos_x(self):\n if (self.des_pos.x == self.cur_pose.x):\n return False\n elif (self.des_pos.x > self.cur_pose.x - self.margin_x) and (self.des_pos.x < self.cur_pose.x):\n #print('exit condition 1')\n return False\n elif (self.des_pos.x > self.cur_pose.x) and (self.des_pos.x < self.cur_pose.x + self.margin_x):\n #print('exit condition 2')\n return False\n else:\n #print('exit condition 3')\n return True\n\n # checks if in the acceptable margin from the desired position\n def check_pos_y(self):\n if (self.des_pos.y == self.cur_pose.y):\n return False\n elif (self.des_pos.y > self.cur_pose.y - self.margin_y) and (self.des_pos.y < self.cur_pose.y):\n #print('exit condition 1')\n return False\n elif (self.des_pos.y > self.cur_pose.y) and (self.des_pos.y < self.cur_pose.y + self.margin_y):\n #print('exit condition 2')\n return False\n else:\n #print('exit condition 3')\n return True\n\nif __name__ == '__main__':\n cl = ExecuteMove()\n while not rospy.is_shutdown():\n try:\n cl.move()\n rospy.sleep(0.1)\n except rospy.ROSInterruptException:\n pass\n","repo_name":"LightningAlchemist/Firebots2021_2022","sub_path":"src/movement/src/execute_move.py","file_name":"execute_move.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70965860216","text":"def sortPlayers(allCircles, allFigures, allPlayers):\n playerList = []\n for i, player in enumerate(allPlayers):\n playerfigures = [figure for figure in allFigures if figure.player == i]\n mannedCircles = getMannedCircles(allCircles, playerfigures)\n progress = getProgressForOnePlayer(mannedCircles, player.startfield)\n entry = (player, progress)\n playerList.append(entry)\n playerList.sort(key=lambda entry: entry[1], reverse=True)\n return playerList\n\n\ndef getProgressForOnePlayer(allCircles, startField):\n progress = 0\n for circle in allCircles:\n progress = progress + getProgessOneFigure(circle, startField)\n return progress\n\n\ndef getProgessOneFigure(circle, startField):\n if \"base\" in circle.type:\n return 0\n if \"house\" in circle.type:\n return circle.number + 40 + 1\n else:\n progress = circle.number - startField + 1\n if progress <= 0:\n return 40 - startField + circle.number + 1\n return progress\n\n\ndef getMannedCircles(allCircles, allTeamFigures):\n mannedCircles = []\n for figure in allTeamFigures:\n circle = [\n circle for circle in allCircles if circle.position == figure.position\n ][0]\n mannedCircles.append(circle)\n return mannedCircles\n","repo_name":"BeneGeb/Extream4","sub_path":"game/Helper/ListSorter.py","file_name":"ListSorter.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41314868349","text":"import numpy as np\r\nimport math\r\n\r\n\r\n\r\nclass BaseStation:\r\n def __init__(self, f_local, f_mec, p_m, p_o, p_receive, p_send, p_AP, bandwidth, time_weight, power_weight,\r\n taskList, outputRelationship, task_size):\r\n # 边缘服务器的cpu能力 上传功率 下载功率 任务队列 上传速率 等待时间 服务器的cpu能力 下载速率\r\n\r\n self.f_local = f_local # 本地边缘设备的计算能力 设置成500MHZ\r\n self.f_mec = f_mec # 边缘服务器的计算能力 设置成5000MHZ\r\n self.p_m = p_m # 本地设备计算时的功率\r\n self.p_o = p_o # 本地设备空闲时的功率\r\n self.p_receive = p_receive # 本地设备接收计算结果时的功率\r\n self.p_send = p_send # 本地设备发送中间结果时的功率\r\n self.p_AP = p_AP # 基站的发射功率\r\n self.bandwidth = bandwidth # 本地设备和基站之间的带宽\r\n self.time_weight = time_weight # 时间权重\r\n self.power_weight = power_weight # 功率权重\r\n self.taskList = taskList # 任务列表\r\n self.outputRelationship = outputRelationship # 任务之间的依赖关系\r\n self.task_size = task_size\r\n self.offload = []\r\n self.exe_time = []\r\n # 用户上传数据到基站的数据传输速率\r\n def get_up_rate(self):\r\n # print(2000000*math.log2(1+math.pow(10, 9)/math.pow(20,4)))\r\n return 8000000 # b/s 这里数据的大小用kb单位\r\n\r\n # 用户从基站下载数据的传输速率\r\n def get_down_rate(self):\r\n return 1150000 # 用户下载时的传输速率为 b/s\r\n\r\n # 判断任务是本地执行还是边缘执行,卸载决策存储在offload列表中,执行时间存储在exe_time列表中\r\n def make_decision(self, i):\r\n\r\n if i == 0:\r\n local_execute = self.taskList[i] / self.f_local # 本地执行时间\r\n tran_time = self.task_size[i] / self.get_up_rate() # 传输时间:任务传输到边缘的时间\r\n edge_execute = self.taskList[i] / self.f_mec # 任务在边缘的执行时间\r\n edge_time = tran_time + edge_execute # 任务传输到边缘和执行的时间之和\r\n if local_execute < edge_time:\r\n self.offload.append(0)\r\n self.exe_time.append(local_execute)\r\n return [local_execute, edge_time]\r\n else:\r\n self.offload.append(1)\r\n self.exe_time.append(edge_time)\r\n return [local_execute, edge_time]\r\n else:\r\n local_execute = self.taskList[i] / self.f_local # 任务i在本地执行所需要的时间\r\n tran_time = self.task_size[i] / self.get_up_rate()\r\n edge_execute = self.taskList[i] / self.f_mec\r\n edge_time = tran_time + edge_execute\r\n if self.offload[i-1] == 0:\r\n getoutput = self.outputRelationship[i] # 获取依赖的前驱节点的数据\r\n getoutput = np.array(getoutput)\r\n getoutput = getoutput[np.nonzero(getoutput)]\r\n comm_time_to_edge = max(getoutput) / self.get_up_rate() # 中间结果传输到边缘所需要的时间\r\n edge_time = edge_time + comm_time_to_edge # 任务i在边缘执行需要的时间\r\n if local_execute < edge_time:\r\n self.offload.append(0)\r\n self.exe_time.append(local_execute)\r\n return [local_execute, edge_time]\r\n else:\r\n self.offload.append(1)\r\n self.exe_time.append(edge_time)\r\n return [local_execute, edge_time]\r\n elif self.offload[i-1] == 1:\r\n getoutput = self.outputRelationship[i]\r\n getoutput = np.array(getoutput)\r\n getoutput = getoutput[np.nonzero(getoutput)]\r\n comm_time_to_local = max(getoutput) / self.get_down_rate() # 中间结果传输到本地需要的时间\r\n local_execute = local_execute + comm_time_to_local\r\n if local_execute < edge_time:\r\n self.offload.append(0)\r\n self.exe_time.append(local_execute)\r\n return [local_execute, edge_time]\r\n else:\r\n self.offload.append(1)\r\n self.exe_time.append(edge_execute)\r\n return [local_execute, edge_time]\r\n\r\n # 通过读取卸载决策offload列表和执行时间列表exe_time,计算每个任务节点的能量消耗,这里的i为任务的索引号\r\n def energy_consumption(self, i):\r\n pass\r\n def local_power(self, task):\r\n return self.local_time(task) * self.pm\r\n\r\n def edge_time(self, task):\r\n return task[0] * self.C / self.Scpu_speed + task[0] / self.TXup + \\\r\n task[1] / self.TXdown # 计算卸载时间= 传输时间+执行时间+等待时间\r\n\r\n def edge_power(self, task):\r\n return task[0] / self.TXup * self.pup + task[1] / self.TXdown * self.pdown\r\n\r\n def get_myuser_profit(self, i):\r\n local_time = self.local_time(i)\r\n local_power = self.local_power(i)\r\n edge_time = self.edge_time(i)\r\n edge_power = self.edge_power(i)\r\n user_profit = (self.time_weight * local_time + self.power_weight * local_power) - (\r\n self.time_weight * edge_time + self.power_weight * edge_power)\r\n return user_profit\r\n\r\n def make_mydecision(self, i):\r\n if self.get_myuser_profit(i) > 0:\r\n return 'edge'\r\n else:\r\n return 'local'\r\n\r\n def cost(self, i):\r\n if self.make_mydecision(i) == 'edge':\r\n return -1 * (self.time_weight * self.edge_time(i) + self.power_weight * self.edge_power(i))\r\n else:\r\n return -1 * (self.time_weight * self.local_time(i) + self.power_weight * self.local_power(i))\r\n\r\n def local_cost(self, task):\r\n if task[2] > self.cpu_speed: # 如果选择本地执行时,如果移动设备的cpu小于我们的任务需要的CPU 则设置很小的奖励作为惩罚\r\n return -1000\r\n else:\r\n return -1 * (self.time_weight * self.local_time(task) + self.power_weight * self.local_power(task))\r\n\r\n def edge_cost(self, task):\r\n return -1 * (self.time_weight * self.edge_time(task) + self.power_weight * self.edge_power(task))\r\n\r\n def local_costT(self, task):\r\n # if task[2] > self.cpu_speed: # 如果选择本地执行时,如果移动设备的cpu小于我们的任务需要的CPU 则设置很小的奖励作为惩罚\r\n # return -1000\r\n # else:\r\n return self.time_weight * self.local_time(task) + self.power_weight * self.local_power(task)\r\n\r\n def edge_costT(self, task):\r\n return self.time_weight * self.edge_time(task) + self.power_weight * self.edge_power(task)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n task_size = [120000, 160000, 3000000, 210000, 3800000, 160000, 1400000, 200000] # 这里是输入任务的大小,单位bit\r\n taskList = [60, 150, 60, 105, 190, 80, 70, 100] # 这里是每个任务需要的cpu的转数单位是Mcycle\r\n # 这里是任务依赖的前驱节点以及前驱节点输出数据大小\r\n outputRelationship = [[0, 0, 0, 0, 0, 0, 0, 0],\r\n [12000, 0, 0, 0, 0, 0, 0, 0],\r\n [12800, 0, 0, 0, 0, 0, 0, 0],\r\n [12000, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 11200, 0, 0, 0, 0, 0],\r\n [0, 0, 12800, 13000, 0, 0, 0, 0],\r\n [0, 0, 0, 14400, 0, 0, 0, 0],\r\n [0, 9600, 0, 0, 9600, 11200, 10400, 0]]\r\n taskList2 = [60, 80, 150, 100]\r\n outputRelationship2 = [[0, 0, 0, 0],\r\n [8, 0, 0, 0],\r\n [12, 0, 0, 0],\r\n [0, 12, 16, 0]]\r\n bs = BaseStation(\r\n f_local=500,\r\n f_mec=5000,\r\n p_m=0.5,\r\n p_o=0.01,\r\n p_receive=0.05,\r\n p_send=0.1,\r\n p_AP=1,\r\n bandwidth=2,\r\n time_weight=0.5,\r\n power_weight=0.5,\r\n taskList=taskList,\r\n outputRelationship=outputRelationship,\r\n task_size=task_size\r\n )\r\n for i in range(len(taskList)):\r\n print(i)\r\n print(bs.make_decision(i))\r\n print(bs.offload)\r\n print(bs.exe_time)","repo_name":"solar-os/code","sub_path":"5.2DDQN-MEMORY/MyUser.py","file_name":"MyUser.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16755840937","text":"#!/usr/bin/env python3.8\nimport bisect\nN, K = map(int, input().split())\nscores = []\nfor i in range(N):\n P = list(map(int, input().split()))\n scores.append(sum(P))\n\nthirds = list(sorted(scores))\nfor s in scores:\n fourth = s + 300\n low_rank = bisect.bisect_right(thirds, fourth)\n rank = len(scores) + 1 - low_rank\n if rank <= K:\n print('Yes')\n else:\n print('No')\n # print(rank)","repo_name":"harukaeru/CompetitiveProgramming","sub_path":"abc228/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"42334017973","text":"# Autor: Maciej Kuta\n# Data utworzenie: 9.10.2020\n# Cel: Import modułów\n\nimport games, random\n\nprint(\"Witaj w najprotszej grze na świecie\")\n\nagain = None\nwhile again != \"n\":\n players = []\n num = games.ask_number(\"Podaj liczbę graczy: \")\n for i in range(num):\n name = input(\"Nazwa gracza: \")\n score = random.randrange(100) +1\n player = games.Player(name, score)\n players.append(player)\n\n print(\"\\nOto wyniki gry\")\n for player in players:\n print(player)\n again = games.ask_yes_no(\"Czy chcesz zagrać ponownie? (t/n): \")\n\ninput(\"\\n\\nNaciśnij klawisz Enter, aby zakończyć działanie programu\")","repo_name":"crazydeveloper09/Praktyki-zawodowe","sub_path":"Python dla każdego/Chapter09/simple_game.py","file_name":"simple_game.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24476159855","text":"import queue\nimport pymumble_py3\nimport time\nfrom threading import Thread\nfrom pymumble_py3.channels import Channel\nfrom pymumble_py3.callbacks import PYMUMBLE_CLBK_SOUNDRECEIVED, PYMUMBLE_CLBK_CONNECTED, PYMUMBLE_CLBK_DISCONNECTED, PYMUMBLE_CLBK_PERMISSIONDENIED, PYMUMBLE_CLBK_CHANNELUPDATED, PYMUMBLE_CLBK_USERUPDATED\nfrom pymumble_py3.errors import UnknownChannelError, ConnectionRejectedError\nfrom .config import Config\nfrom .control import Control\nfrom .logger import getLogger\nfrom .shutdown import Shutdown\n\n\nlogger = getLogger(__name__)\n\n# The maximum duration of audio from the microphone we're \n# allowed to buffer before audio gets dropped \nSEND_BUFFER_MAX = 5 # 500ms\n\n\nclass Mumble():\n '''\n Handles staying connected to a mumble server (using pymumble) and \n sending/recieving audio to/from the mumble server.\n '''\n def __init__(self, control: Control, config: Config, shutdown: Shutdown):\n self._connected = False\n self._config = config\n self._control = control\n self._shutdown = shutdown\n \n self._mumble = None\n\n # Transmitting audio over the network is handled in a seperate thread to avoid \n # locking up the recieving audio buffer form the local microphone.\n self._transmit_queue = queue.Queue(maxsize=5)\n self._transmit_thread = None\n self._run_thread = None\n self._sound_callback = None\n self._stopping = False\n self._channel: Channel = None\n self._joined_channel = False\n\n def _onConnect(self):\n logger.info(f\"Connected to Mumble server {self._config.server}:{self._config.port} as '{self._config.nickname}'\")\n\n # If configured to do so, also join a channel after connecting.\n if self._config.channel is not None:\n try:\n self._channel: Channel = self._mumble.channels.find_by_name(self._config.channel)\n logger.info(f'Joining channel \\'{self._config.channel}\\'')\n self._channel.move_in()\n self._channel.get_users()\n except UnknownChannelError:\n logger.info(f\"Channel '{self._config.channel}' is unknown\")\n self._connected = True\n self._control._set_connected()\n\n def _onDisconnect(self):\n self._joined_channel = False\n self._connected = False\n logger.warn(\"Disconnected from mumble server\")\n self._control._set_disconnected()\n\n def _onDenied(self, data):\n logger.error(\"Permission denied\")\n\n def _channelUpdated(self, data):\n logger.info(\"Channel update\")\n logger.info(f\"{data}\")\n\n def _userUpdate(self, user, update):\n if (user.get(\"name\") == self._config.nickname and self._channel.get_id() == update.get(\"channel_id\")):\n self._joined_channel = True\n logger.info(f\"Moved into channel '{self._config.channel}'\")\n\n\n def _onSound(self, user, soundchunk):\n if self._sound_callback:\n self._sound_callback(user, soundchunk.pcm)\n\n def transmit(self, chunk):\n try:\n for sample in chunk:\n if sample > 0:\n self._transmit_queue.put(chunk, block=False)\n break\n except:\n pass\n\n def _transmit_loop(self):\n while(not self._stopping):\n try:\n chunk = self._transmit_queue.get(block=True, timeout=0.5)\n if self._connected and self._control.transmitting and self._mumble is not None:\n output = self._mumble.sound_output\n backlog = output.get_buffer_size()\n if backlog > self._config._send_buffer_latency:\n # Audio from the microphone can slowly get sent to us faster than we can \n # trasmit it. Dropping the buffer avoids a buildup of audio delay over \n # time by sacraficing some quality when it happens. It would be better \n # to compress and re-sample the audio to catch up.\n output.clear_buffer()\n logger.warn(f\"Clearing audio send buffer due to latency. Backlog: {backlog}\")\n self._mumble.sound_output.add_sound(chunk)\n else:\n time.sleep(1)\n except IndexError:\n # there wasn't any audio in the trasmit queue.\n time.sleep(0.005)\n except Exception as e:\n if isinstance(e, queue.Empty):\n # Such pythonic, so clean. wow.\n continue\n logger.printException(e)\n\n def start(self):\n '''\n Connects to the mumble server and starst sending/recieving audio. Also retrys connecting to mumble if a disconnect happens.\n '''\n self._mumble = pymumble_py3.Mumble(\n self._config.server,\n self._config.nickname,\n password=self._config.password,\n port=self._config.port,\n reconnect=True,\n certfile=self._config.cert_file,\n keyfile=self._config.key_file,\n tokens=self._config.tokens)\n # Bind to some server events so we can be notified and react accordingly.\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_SOUNDRECEIVED, self._onSound)\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_CONNECTED, self._onConnect)\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_DISCONNECTED, self._onDisconnect)\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_PERMISSIONDENIED, self._onDenied)\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_CHANNELUPDATED, self._channelUpdated)\n self._mumble.callbacks.set_callback(PYMUMBLE_CLBK_USERUPDATED, self._userUpdate)\n self._mumble.set_receive_sound(True)\n\n self._run_thread = Thread(target=self._run_mumble, name=\"Mumble Run Thread\", daemon=True)\n self._run_thread.start()\n self._stopping = False\n self._transmit_thread = Thread(target=self._transmit_loop, name=\"Transmit Thread\", daemon=True)\n self._transmit_thread.start()\n\n def _run_mumble(self):\n while True:\n sleep = 5\n try:\n logger.info(f\"Connecting to mumble server {self._config.server}:{self._config.port}\")\n self._mumble.run()\n except ConnectionRejectedError:\n logger.error(\"Mumble server rejected login\")\n # avoid mumble throttling us\n sleep = 30\n except Exception as e:\n logger.printException(e)\n sleep = 10\n if self._shutdown.shutting_down:\n return\n logger.info(f\"I'll retry in {sleep} seconds\")\n time.sleep(sleep)\n \n\n def stop(self):\n '''\n Disconnects from the mumble server and stops processing audio.\n '''\n if self._mumble is not None:\n try:\n self._mumble.stop()\n except:\n # eat the error\n pass\n self._mumble = None\n self._connected = False\n self._stopping = True\n if self._transmit_thread is not None: \n self._transmit_thread.join()\n","repo_name":"sabeechen/rpi-intercom","sub_path":"rpi_intercom/mumble.py","file_name":"mumble.py","file_ext":"py","file_size_in_byte":7282,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"20107193570","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nfrom sklearn.preprocessing import LabelBinarizer\r\nfrom sklearn import metrics\r\nfrom sklearn.datasets import fetch_20newsgroups\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer, TfidfVectorizer\r\nfrom sklearn.pipeline import Pipeline, FeatureUnion\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import precision_score, recall_score, make_scorer, accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklego.preprocessing import ColumnSelector\r\nfrom sklego.datasets import load_chicken\r\nfrom sklearn.linear_model import PassiveAggressiveClassifier\r\n\r\ncategories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']\r\ntwenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)\r\n\r\ndf = pd.read_csv('NonPullable/2Cleaned_Fake_News_Dataset.csv', index_col='index', sep=';')\r\nX, y = df.drop('label', axis=\"columns\"), df[['label']]\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)\r\n\r\nfeature_pipeline = Pipeline([\r\n (\"datagrab\", FeatureUnion([\r\n (\"discrete\", Pipeline([\r\n (\"grab\", ColumnSelector(\"polarity\")),\r\n ])),\r\n (\"continuous\", Pipeline([\r\n (\"grab\", ColumnSelector(\"tweet\")),\r\n (\"vect\", CountVectorizer(stop_words='english')),\r\n (\"tf-idf\", TfidfTransformer())\r\n ]))\r\n ]))\r\n])\r\n\r\npipe = Pipeline([\r\n (\"transform\", feature_pipeline),\r\n (\"clf\", SGDClassifier(loss='hinge', penalty='l2',\r\n alpha=1e-3, random_state=42,\r\n max_iter=5, tol=None))\r\n])\r\nprint(\"Pipe Params:\", pipe.get_params().keys())\r\n# pipe.fit(X_train, y_train)\r\n# predicted = pipe.predict(X_test)\r\n# print(np.mean(predicted == y_test))\r\n# print(metrics.classification_report(y_test, predicted))\r\n\r\nparameters = {\r\n 'transform__datagrab__continuous__vect__ngram_range': [(1, 1), (1, 2)],\r\n 'transform__datagrab__continuous__tf-idf__use_idf': (True, False),\r\n 'clf__alpha': (1e-2, 1e-3),\r\n\r\n}\r\n\r\nmod = GridSearchCV(estimator=pipe,\r\n param_grid=parameters,\r\n scoring={'precision': make_scorer(precision_score), 'recall': make_scorer(recall_score), 'accuracy': make_scorer(accuracy_score)},\r\n refit='recall',\r\n return_train_score=True,\r\n cv=3,\r\n n_jobs=-1)\r\n\r\nmod.fit(X, y)\r\nprint(pd.DataFrame(mod.cv_results_))\r\n","repo_name":"Slayer2084/Erkennen-von-Fake-News-durch-maschinelles-Lernen","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"29880315968","text":"\"\"\"\n@author:poppy\n@time:2022/4/14 18:23\n\"\"\"\nimport requests\nimport re\n\n\n\nclass TestPhpwindApi:\n\n csrf_token = \"\"\n\n php_session = requests.session()\n\n def test_phpwind_index(self):\n urls = \"http://47.107.116.139/phpwind/\"\n res = TestPhpwindApi.php_session.request(\"get\",url=urls)\n\n TestPhpwindApi.csrf_token = re.search('name=\"csrf_token\" value=\"(.*?)\"', res.text).group(1)\n print(TestPhpwindApi.csrf_token)\n\n\n def test_phpwind_login(self):\n urls = \"http://47.107.116.139/phpwind/index.php?m=u&c=login&a=dorun\"\n datas ={\n \"username\":\"poppy\",\n \"password\":\"123456\",\n \"csrf_token\":TestPhpwindApi.csrf_token,\n \"backurl\":\"http://47.107.116.139/phpwind/\",\n \"invite\":\"\"\n }\n headers = {\n \"Accept\":\"application/json,text/javascript,/;q=0.01\",\n \"X-Requested-With\":\"XMLHttpRequest\"\n }\n\n res = TestPhpwindApi.php_session.request(\"post\",url=urls,data=datas,headers=headers)\n print(res.text)\n\n\n","repo_name":"poppy726/poppy_MS","sub_path":"testcases/test_phpwind/test_phpwind_api.py","file_name":"test_phpwind_api.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19019871363","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nimport odoo.addons.decimal_precision as dp\n\nclass ProductUom(models.Model):\n _inherit = \"product.uom\"\n \n @api.one\n def _get_default_label(self):\n return self.name\n \n label = fields.Char(required=True, default=_get_default_label)\n dimensions = fields.One2many('product.uom.dimension', 'product_uom', copy=True)\n formula = fields.Char(required=True)\n\nclass ProductUomDimension(models.Model):\n _name = 'product.uom.dimension'\n name = fields.Char(required=True)\n\n product_uom = fields.Many2one('product.uom', required=True)\n","repo_name":"moltisinc/addons","sub_path":"product_uom_dimension/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"2067910922","text":"import base64\nimport json\n\nimport requests\n\nfrom mlflow.environment_variables import (\n MLFLOW_HTTP_REQUEST_BACKOFF_FACTOR,\n MLFLOW_HTTP_REQUEST_BACKOFF_JITTER,\n MLFLOW_HTTP_REQUEST_MAX_RETRIES,\n MLFLOW_HTTP_REQUEST_TIMEOUT,\n)\nfrom mlflow.exceptions import InvalidUrlException, MlflowException, RestException, get_error_code\nfrom mlflow.protos import databricks_pb2\nfrom mlflow.protos.databricks_pb2 import ENDPOINT_NOT_FOUND, INVALID_PARAMETER_VALUE, ErrorCode\nfrom mlflow.utils.proto_json_utils import parse_dict\nfrom mlflow.utils.request_utils import (\n _TRANSIENT_FAILURE_RESPONSE_CODES,\n _get_http_response_with_retries,\n augmented_raise_for_status, # noqa: F401\n cloud_storage_http_request, # noqa: F401\n)\nfrom mlflow.utils.string_utils import strip_suffix\n\nRESOURCE_DOES_NOT_EXIST = \"RESOURCE_DOES_NOT_EXIST\"\n_REST_API_PATH_PREFIX = \"/api/2.0\"\n\n\ndef http_request(\n host_creds,\n endpoint,\n method,\n max_retries=None,\n backoff_factor=None,\n backoff_jitter=None,\n extra_headers=None,\n retry_codes=_TRANSIENT_FAILURE_RESPONSE_CODES,\n timeout=None,\n raise_on_status=True,\n **kwargs,\n):\n \"\"\"\n Makes an HTTP request with the specified method to the specified hostname/endpoint. Transient\n errors such as Rate-limited (429), service unavailable (503) and internal error (500) are\n retried with an exponential back off with backoff_factor * (1, 2, 4, ... seconds).\n The function parses the API response (assumed to be JSON) into a Python object and returns it.\n\n :param host_creds: A :py:class:`mlflow.rest_utils.MlflowHostCreds` object containing\n hostname and optional authentication.\n :param endpoint: a string for service endpoint, e.g. \"/path/to/object\".\n :param method: a string indicating the method to use, e.g. \"GET\", \"POST\", \"PUT\".\n :param max_retries: maximum number of retries before throwing an exception.\n :param backoff_factor: a time factor for exponential backoff. e.g. value 5 means the HTTP\n request will be retried with interval 5, 10, 20... seconds. A value of 0 turns off the\n exponential backoff.\n :param backoff_jitter: A random jitter to add to the backoff interval.\n :param extra_headers: a dict of HTTP header name-value pairs to be included in the request.\n :param retry_codes: a list of HTTP response error codes that qualifies for retry.\n :param timeout: wait for timeout seconds for response from remote server for connect and\n read request.\n :param raise_on_status: whether to raise an exception, or return a response, if status falls\n in retry_codes range and retries have been exhausted.\n :param kwargs: Additional keyword arguments to pass to `requests.Session.request()`\n\n :return: requests.Response object.\n \"\"\"\n max_retries = MLFLOW_HTTP_REQUEST_MAX_RETRIES.get() if max_retries is None else max_retries\n backoff_factor = (\n MLFLOW_HTTP_REQUEST_BACKOFF_FACTOR.get() if backoff_factor is None else backoff_factor\n )\n backoff_jitter = (\n MLFLOW_HTTP_REQUEST_BACKOFF_JITTER.get() if backoff_jitter is None else backoff_jitter\n )\n timeout = MLFLOW_HTTP_REQUEST_TIMEOUT.get() if timeout is None else timeout\n hostname = host_creds.host\n auth_str = None\n if host_creds.username and host_creds.password:\n basic_auth_str = f\"{host_creds.username}:{host_creds.password}\".encode()\n auth_str = \"Basic \" + base64.standard_b64encode(basic_auth_str).decode(\"utf-8\")\n elif host_creds.token:\n auth_str = f\"Bearer {host_creds.token}\"\n\n from mlflow.tracking.request_header.registry import resolve_request_headers\n\n headers = dict(**resolve_request_headers())\n if extra_headers:\n headers = dict(**headers, **extra_headers)\n\n if auth_str:\n headers[\"Authorization\"] = auth_str\n\n if host_creds.client_cert_path is not None:\n kwargs[\"cert\"] = host_creds.client_cert_path\n\n if host_creds.aws_sigv4:\n # will overwrite the Authorization header\n from requests_auth_aws_sigv4 import AWSSigV4\n\n kwargs[\"auth\"] = AWSSigV4(\"execute-api\")\n elif host_creds.auth:\n from mlflow.tracking.request_auth.registry import fetch_auth\n\n kwargs[\"auth\"] = fetch_auth(host_creds.auth)\n\n cleaned_hostname = strip_suffix(hostname, \"/\")\n url = f\"{cleaned_hostname}{endpoint}\"\n try:\n return _get_http_response_with_retries(\n method,\n url,\n max_retries,\n backoff_factor,\n backoff_jitter,\n retry_codes,\n raise_on_status,\n headers=headers,\n verify=host_creds.verify,\n timeout=timeout,\n **kwargs,\n )\n except requests.exceptions.Timeout as to:\n raise MlflowException(\n f\"API request to {url} failed with timeout exception {to}.\"\n \" To increase the timeout, set the environment variable \"\n f\"{MLFLOW_HTTP_REQUEST_TIMEOUT!s} to a larger value.\"\n ) from to\n except requests.exceptions.InvalidURL as iu:\n raise InvalidUrlException(f\"Invalid url: {url}\") from iu\n except Exception as e:\n raise MlflowException(f\"API request to {url} failed with exception {e}\")\n\n\ndef _can_parse_as_json_object(string):\n try:\n return isinstance(json.loads(string), dict)\n except Exception:\n return False\n\n\ndef http_request_safe(host_creds, endpoint, method, **kwargs):\n \"\"\"\n Wrapper around ``http_request`` that also verifies that the request succeeds with code 200.\n \"\"\"\n response = http_request(host_creds=host_creds, endpoint=endpoint, method=method, **kwargs)\n return verify_rest_response(response, endpoint)\n\n\ndef verify_rest_response(response, endpoint):\n \"\"\"Verify the return code and format, raise exception if the request was not successful.\"\"\"\n if response.status_code != 200:\n if _can_parse_as_json_object(response.text):\n raise RestException(json.loads(response.text))\n else:\n base_msg = (\n f\"API request to endpoint {endpoint} \"\n f\"failed with error code {response.status_code} != 200\"\n )\n raise MlflowException(\n f\"{base_msg}. Response body: '{response.text}'\",\n error_code=get_error_code(response.status_code),\n )\n\n # Skip validation for endpoints (e.g. DBFS file-download API) which may return a non-JSON\n # response\n if endpoint.startswith(_REST_API_PATH_PREFIX) and not _can_parse_as_json_object(response.text):\n base_msg = (\n \"API request to endpoint was successful but the response body was not \"\n \"in a valid JSON format\"\n )\n raise MlflowException(f\"{base_msg}. Response body: '{response.text}'\")\n\n return response\n\n\ndef _get_path(path_prefix, endpoint_path):\n return f\"{path_prefix}{endpoint_path}\"\n\n\ndef extract_api_info_for_service(service, path_prefix):\n \"\"\"Return a dictionary mapping each API method to a tuple (path, HTTP method)\"\"\"\n service_methods = service.DESCRIPTOR.methods\n res = {}\n for service_method in service_methods:\n endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints\n endpoint = endpoints[0]\n endpoint_path = _get_path(path_prefix, endpoint.path)\n res[service().GetRequestClass(service_method)] = (endpoint_path, endpoint.method)\n return res\n\n\ndef extract_all_api_info_for_service(service, path_prefix):\n \"\"\"Return a dictionary mapping each API method to a list of tuples [(path, HTTP method)]\"\"\"\n service_methods = service.DESCRIPTOR.methods\n res = {}\n for service_method in service_methods:\n endpoints = service_method.GetOptions().Extensions[databricks_pb2.rpc].endpoints\n res[service().GetRequestClass(service_method)] = [\n (_get_path(path_prefix, endpoint.path), endpoint.method) for endpoint in endpoints\n ]\n return res\n\n\ndef call_endpoint(host_creds, endpoint, method, json_body, response_proto, extra_headers=None):\n # Convert json string to json dictionary, to pass to requests\n if json_body:\n json_body = json.loads(json_body)\n call_kwargs = {\n \"host_creds\": host_creds,\n \"endpoint\": endpoint,\n \"method\": method,\n }\n if extra_headers is not None:\n call_kwargs[\"extra_headers\"] = extra_headers\n if method == \"GET\":\n call_kwargs[\"params\"] = json_body\n response = http_request(**call_kwargs)\n else:\n call_kwargs[\"json\"] = json_body\n response = http_request(**call_kwargs)\n response = verify_rest_response(response, endpoint)\n js_dict = json.loads(response.text)\n parse_dict(js_dict=js_dict, message=response_proto)\n return response_proto\n\n\ndef call_endpoints(host_creds, endpoints, json_body, response_proto, extra_headers=None):\n # The order that the endpoints are called in is defined by the order\n # specified in ModelRegistryService in model_registry.proto\n for i, (endpoint, method) in enumerate(endpoints):\n try:\n return call_endpoint(\n host_creds, endpoint, method, json_body, response_proto, extra_headers\n )\n except RestException as e:\n if e.error_code != ErrorCode.Name(ENDPOINT_NOT_FOUND) or i == len(endpoints) - 1:\n raise e\n\n\nclass MlflowHostCreds:\n \"\"\"\n Provides a hostname and optional authentication for talking to an MLflow tracking server.\n :param host: Hostname (e.g., http://localhost:5000) to MLflow server. Required.\n :param username: Username to use with Basic authentication when talking to server.\n If this is specified, password must also be specified.\n :param password: Password to use with Basic authentication when talking to server.\n If this is specified, username must also be specified.\n :param token: Token to use with Bearer authentication when talking to server.\n If provided, user/password authentication will be ignored.\n :param aws_sigv4: If true, we will create a signature V4 to be added for any outgoing request.\n Keys for signing the request can be passed via ENV variables,\n or will be fetched via boto3 session.\n :param auth: If set, the auth will be added for any outgoing request.\n Keys for signing the request can be passed via ENV variables,\n :param ignore_tls_verification: If true, we will not verify the server's hostname or TLS\n certificate. This is useful for certain testing situations, but should never be\n true in production.\n If this is set to true ``server_cert_path`` must not be set.\n :param client_cert_path: Path to ssl client cert file (.pem).\n Sets the cert param of the ``requests.request``\n function (see https://requests.readthedocs.io/en/master/api/).\n :param server_cert_path: Path to a CA bundle to use.\n Sets the verify param of the ``requests.request``\n function (see https://requests.readthedocs.io/en/master/api/).\n If this is set ``ignore_tls_verification`` must be false.\n \"\"\"\n\n def __init__(\n self,\n host,\n username=None,\n password=None,\n token=None,\n aws_sigv4=False,\n auth=None,\n ignore_tls_verification=False,\n client_cert_path=None,\n server_cert_path=None,\n ):\n if not host:\n raise MlflowException(\n message=\"host is a required parameter for MlflowHostCreds\",\n error_code=INVALID_PARAMETER_VALUE,\n )\n if ignore_tls_verification and (server_cert_path is not None):\n raise MlflowException(\n message=(\n \"When 'ignore_tls_verification' is true then 'server_cert_path' \"\n \"must not be set! This error may have occurred because the \"\n \"'MLFLOW_TRACKING_INSECURE_TLS' and 'MLFLOW_TRACKING_SERVER_CERT_PATH' \"\n \"environment variables are both set - only one of these environment \"\n \"variables may be set.\"\n ),\n error_code=INVALID_PARAMETER_VALUE,\n )\n self.host = host\n self.username = username\n self.password = password\n self.token = token\n self.aws_sigv4 = aws_sigv4\n self.auth = auth\n self.ignore_tls_verification = ignore_tls_verification\n self.client_cert_path = client_cert_path\n self.server_cert_path = server_cert_path\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return NotImplemented\n\n @property\n def verify(self):\n if self.server_cert_path is None:\n return not self.ignore_tls_verification\n else:\n return self.server_cert_path\n","repo_name":"mlflow/mlflow","sub_path":"mlflow/utils/rest_utils.py","file_name":"rest_utils.py","file_ext":"py","file_size_in_byte":12876,"program_lang":"python","lang":"en","doc_type":"code","stars":15878,"dataset":"github-code","pt":"22"} +{"seq_id":"9235948538","text":"from typing import Optional\nimport shlex\nfrom FslBuildGen.Log import Log\nfrom FslBuildGen import IOUtil\nfrom FslBuildGen.BuildConfig.CMakeConfiguration import CMakeConfiguration\nfrom FslBuildGen.BuildConfig.UserSetVariables import UserSetVariables\nfrom FslBuildGen.BuildExternal import CMakeHelper\nfrom FslBuildGen.CMakeUtil import CMakeUtil\n#from FslBuildGen.CMakeUtil import CMakeVersion\nfrom FslBuildGen.DataTypes import BuildVariantConfig\nfrom FslBuildGen.Generator.GeneratorCMakeConfig import GeneratorCMakeConfig\nfrom FslBuildGen.Version import Version\nfrom FslBuildGen.Tool.UserCMakeConfig import UserCMakeConfig\n\ndef BuildGeneratorCMakeConfig(log: Log, toolVersion: Version, platformName: str, buildVariantConfig: BuildVariantConfig,\n userSetVariables: UserSetVariables, userCMakeConfig: Optional[UserCMakeConfig],\n cmakeConfiguration: CMakeConfiguration, defaultCompilerVersion: int,\n isCheckMode: bool) -> GeneratorCMakeConfig:\n \"\"\"\n Build the CMake config based on the supplied parameters and the default settings from the toolconfig\n \"\"\"\n\n # Setup default configuration\n buildDir = IOUtil.Join(cmakeConfiguration.DefaultBuildDir, platformName)\n generatorName = \"\"\n installPrefix = cmakeConfiguration.DefaultInstallPrefix\n\n # Give the platform a chance to override the config\n platformConfig = cmakeConfiguration.TryGetPlatformConfig(platformName)\n allowFindPackage = True\n if platformConfig is not None:\n if platformConfig.DefaultGeneratorName is not None:\n generatorName = platformConfig.DefaultGeneratorName\n if platformConfig.DefaultInstallPrefix is not None:\n installPrefix = platformConfig.DefaultInstallPrefix\n if platformConfig.AllowFindPackage is not None:\n allowFindPackage = platformConfig.AllowFindPackage\n log.LogPrintVerbose(2, \"project defined AllowFindPackage to {0}\".format(allowFindPackage))\n\n # Apply the commandline overrides (so the user gets the final say)\n buildDirSetByUser = False\n buildDirId = None # Optional[int]\n if userCMakeConfig is not None:\n if userCMakeConfig.BuildDir is not None:\n buildDir = userCMakeConfig.BuildDir\n buildDirSetByUser = True\n if userCMakeConfig.BuildDirId is not None:\n buildDirId = userCMakeConfig.BuildDirId\n if userCMakeConfig.GeneratorName is not None:\n generatorName = userCMakeConfig.GeneratorName\n if userCMakeConfig.InstallPrefix is not None:\n installPrefix = userCMakeConfig.InstallPrefix\n if userCMakeConfig.AllowFindPackage is not None:\n allowFindPackage = userCMakeConfig.AllowFindPackage\n log.LogPrintVerbose(2, \"Command line set AllowFindPackage to {0}\".format(allowFindPackage))\n\n # If we still dont have a generator name then try to select a good default\n if len(generatorName) <= 0:\n # Try to determine the default generator name for the platform\n generatorName = CMakeHelper.GetPlatformDefaultCMakeGenerator(platformName, defaultCompilerVersion)\n\n cmakeVersion = CMakeUtil.GetVersion()\n\n cmakeConfigGlobalArgs = [] if userCMakeConfig is None else shlex.split(userCMakeConfig.ConfigGlobalArgs)\n cmakeConfigAppArgs = [] if userCMakeConfig is None else shlex.split(userCMakeConfig.ConfigAppArgs)\n\n checkDir = IOUtil.Join(buildDir, 'fsl')\n if isCheckMode:\n buildDir = checkDir\n\n return GeneratorCMakeConfig(log, toolVersion, platformName, buildVariantConfig, userSetVariables, buildDir, buildDirSetByUser, buildDirId, checkDir,\n generatorName, installPrefix, cmakeVersion, cmakeConfigGlobalArgs, cmakeConfigAppArgs,\n allowFindPackage)\n","repo_name":"nxp-imx/gtec-demo-framework","sub_path":".Config/FslBuildGen/CMakeConfigUtil.py","file_name":"CMakeConfigUtil.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"22"} +{"seq_id":"23860418855","text":"from django.contrib.auth import logout, authenticate, login\nfrom django.contrib.auth.forms import UserCreationForm # 默认表单\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\n\n\n\n# Create your views here.\n\ndef logout_view(request):\n \"\"\"注销用户\"\"\"\n # 调用了函数logout()\n logout(request)\n # 重定向到主页\n return HttpResponseRedirect(reverse('learning_logs:index'))\n\n\ndef register(request):\n \"\"\"注册新用户\"\"\"\n if request.method != 'POST':\n # 显示空的注册表单,不给它提供任何初始数据\n form = UserCreationForm()\n else: # 点击提交,录入信息,切换登录状态,转到主页\n # 处理填写好的表单\n form = UserCreationForm(data=request.POST)\n if form.is_valid():\n new_user = form.save()\n authenticated_user = authenticate(username=new_user.username,password=request.POST['password1'])\n login(request,authenticated_user)\n # 重定向到主页\n return HttpResponseRedirect(reverse('learning_logs:index'))\n context = {'form':form}\n return render(request,'register.html',context)\n\n\n\n\n","repo_name":"pangxi/learning_log","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73472898617","text":"#variables\r\n\r\n#No variables with spaces\r\n#spaces = camelCase or under_scores\r\n#no capital letters because that is reserved for other functions\r\n#uppercase must be in quotes\r\n\r\nname = \"Tristian\" #is a string\r\nage = \"11\" #this is an integer an integer is a whole number\r\npie = \"3.141592653589793238462643\" #this is called a float (decimal)\r\nfavpie = \"Banana Peach\"\r\n\r\n#if you have a contraction surround it in quotes\r\n\r\nhereFirst = \"Bryce & Curt\"\r\nsaid = \"said we won't have any vocabulary\"\r\nspace = \" \"\r\n#print ( hereFirst + space + said )\r\n\r\ntext = \"Hi my name is\"\r\ntext2 = \"and I am\"\r\n\r\n#numbers need quotes\r\nprint (text + space + name + space + text2 + space + age)\r\n\r\n#or do str mask\r\n\r\nage2 = 11\r\n\r\nprint (text + space + name + space + text2 + space + str(age2))\r\n","repo_name":"compagnb/W22-IntroToPython","sub_path":"studentWork/Tristian Pagano/Variables.py","file_name":"Variables.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44416449189","text":"\"\"\"\nGiven a list of wikis emits statistics regarding shared templates, their parameters\nand value types\n\nThis script for each wiki:\n\n- takes 50 most used portable infoboxes\n- analyzes the names of template parameters\n- takes parameters for those from up to 500 articles for each infobox\n\nIt also writes values.txt file with all values taken from infoboxes.\n\ntemplates.md file will be generated as well.\n\"\"\"\nimport json\nimport logging\n\nfrom collections import Counter, defaultdict\n\nfrom mwclient.client import Site\nimport requests\n\nlogging.basicConfig(level=logging.INFO)\n\n\n# set up shared HTTP client\npool = requests.Session()\npool.proxies = {'http': 'border-http-s3:80'}\n\n\ndef get_site(wiki_domain: str):\n \"\"\"\n :type wiki_domain str\n :rtype: Site\n \"\"\"\n return Site(host=('http', wiki_domain), path='/', pool=pool)\n\n\ndef get_querypage(site: Site, page: str, limit: int = 500):\n \"\"\"\n :type site Site\n :type page str\n :type limit int\n :rtype: list[str]\n \"\"\"\n # http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Nonportableinfoboxes\n # http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=Mostlinkedtemplates\n # http://poznan.wikia.com/api.php?action=query&list=querypage&qppage=AllInfoboxes\n res = site.get(action='query', list='querypage', qppage=page, qplimit=limit)\n return [\n # (u'value', u'69'), (u'ns', 10), (u'title', u'Template:Crew TV')\n entry['title']\n for entry in res['query']['querypage']['results']\n ]\n\n\ndef get_portable_infobox_params(site: Site, template_name: str):\n \"\"\"\n Please provide Template namespace suffix in template_name\n\n :type site Site\n :type template_name str\n :rtype: list[str]\n \"\"\"\n logging.info('Getting \"%s\" infobox parameters ...', template_name)\n\n # http://poznan.wikia.com/api.php?action=templateparameters&titles=Szablon:Ulica_infobox&format=json\n res = site.get(action='templateparameters', titles=template_name)\n\n if res['pages']:\n item = next(iter(res['pages'].items()))[1]\n return list(map(str, item['params']))\n\n return []\n\n\ndef get_articles_with_infobox(site: Site, template: str, limit: int = 500):\n \"\"\"\n :type site Site\n :type template str\n :type limit int\n :rtype list[str]\n \"\"\"\n # http://poznan.wikia.com/api.php?action=query&list=embeddedin&eititle=Szablon:Ulica_infobox&eilimit=500\n res = site.get(action='query', list='embeddedin', eititle=template, eilimit=limit)\n return [\n # \n entry['title']\n for entry in res['query']['embeddedin']\n if entry['ns'] == 0 # NS_MAIN only\n ]\n\n\ndef get_infoboxes_from_article(site: Site, title: str):\n \"\"\"\n :type site Site\n :type title str\n :rtype: list[str, dict]\n \"\"\"\n logger = logging.getLogger('get_infoboxes_from_article')\n logger.info('Article: %s', title)\n\n # https://nfs.fandom.com/wikia.php?controller=TemplatesApiController&method=getMetadata&title=Ferrari_355_F1\n res = json.loads(site.raw_call(\n http_method='GET',\n script='wikia',\n data={\n 'controller': 'TemplatesApiController',\n 'method': 'getMetadata',\n 'title': title\n }\n ))\n\n return [\n # Ulica infobox -> Template:Ulica infobox\n ('Template:{}'.format(template['name']), template['parameters'])\n for template in res['templates']\n if template['type'] == 'infobox'\n ]\n\n\ndef get_portable_infoboxes(wikis):\n \"\"\"\n :type: wikis list[str]\n :rtype: list[str]\n \"\"\"\n logger = logging.getLogger('get_portable_infoboxes')\n\n # on how many wikis a template is used?\n global_templates = Counter()\n\n # how frequently is a given parameter used in this template across wikis?\n template_parameters = defaultdict(Counter)\n\n # in now many templates is given attribute used (lowercase)?\n parameter_templates = defaultdict(Counter)\n\n # raw values as we get them\n # they will be stored in \"values.txt\" file for further processing\n template_values = []\n articles_analyzed = 0\n\n # write templates.md\n templates_md = open('templates.md', 'wt')\n templates_md.write('# Templates\\n')\n templates_md.write('> Wikis below: {}\\n\\n'.format(len(wikis)))\n\n for wiki_domain in wikis:\n site = get_site(wiki_domain)\n\n logger.info('Processing <%s> wiki', site.host[1])\n\n # fetch all templates as we want to get only the top templates\n templates = get_querypage(site, 'Mostlinkedtemplates')\n all_infoboxes = get_querypage(site, 'AllInfoboxes')\n non_portable = get_querypage(site, 'Nonportableinfoboxes')\n # print(all_infoboxes, non_portable)\n\n logger.info('%s: %d infoboxes in total (%d of them are non-portable)',\n wiki_domain, len(all_infoboxes), len(non_portable))\n\n # list only portable infoboxes\n infoboxes = [\n template\n for template in templates\n if template in all_infoboxes and template not in non_portable\n ][:50]\n\n templates_md.write('\\n## {}\\n'.format(wiki_domain))\n templates_md.write('> Portable infoboxes: {}\\n\\n'.format(len(infoboxes)))\n\n # process each portable infobox\n for infobox in infoboxes:\n params = get_portable_infobox_params(site, infobox)\n # print(wiki_domain, infobox, params)\n\n templates_md.write('\\n### [{}](http://{}/wiki/{})\\n'.\n format(infobox, wiki_domain, infobox.replace(' ', '_')))\n\n templates_md.writelines([\n '* `{}`\\n'.format(param)\n for param in sorted(params)\n ])\n\n # update per template params statistics\n template_parameters[infobox].update(set(params))\n\n # update per parameter statistics of in which templates it's used\n for param in params:\n parameter_templates[param.lower()].update((infobox,))\n\n # continue # skip the process of analyzing values\n\n # now get article that use a given infobox\n articles = get_articles_with_infobox(site, infobox, 50)\n\n parameters_usage_stats = {\n \"wiki\": wiki_domain,\n \"infobox\": infobox,\n \"parameters\": {\n param: dict(\n _not_set_in=0,\n _set_in=0\n )\n for param in params\n }\n }\n\n for article in articles:\n articles_analyzed += 1\n data = get_infoboxes_from_article(site, article)\n # print(data)\n\n for template_name, template_params in data:\n # ignore data coming from other templates and infoboxes\n if template_name != infobox:\n continue\n\n template_values += [\n str(value).strip().replace(\"\\n\", '\\\\n')\n for value in template_params.values()\n ]\n\n for param in params:\n # this param is used in this template on this page\n if param in template_params:\n value = str(template_params.get(param))\n\n parameters_usage_stats['parameters'][param]['_set_in'] += 1\n parameters_usage_stats['parameters'][param][article] = value\n else:\n parameters_usage_stats['parameters'][param]['_not_set_in'] += 1\n # parameters_usage_stats['parameters'][param][article] = None\n\n # store per infobox stats regarding missing values in articles that use it\n with open('templates/{}_{}.json'.format(wiki_domain, infobox.replace('/', '_')), 'wt') as fp:\n json.dump(parameters_usage_stats, fp=fp, indent=True)\n logger.info('Saved %s', fp.name)\n\n # print(wiki_domain, \"\\n\".join(template_values)); exit(1)\n\n # print(wiki_domain, infoboxes)\n\n for infobox in infoboxes:\n global_templates.update((infobox,))\n\n templates_md.close()\n\n # templates\n logger.info('%d unique template', len(global_templates.keys()))\n\n # get info about most common ones\n for infobox, usage_count in global_templates.most_common(25):\n logger.info('Most common parameters for %s (used on %d wikis): %s',\n infobox, usage_count,\n template_parameters[infobox].most_common())\n\n # template params\n logger.info('%d unique parameters', len(parameter_templates.keys()))\n\n # get info about most common ones\n with open('parameters.md', 'wt') as fp:\n fp.write('# Parameters\\n')\n\n for template_parameters in sorted(parameter_templates.keys()):\n templates = parameter_templates[template_parameters]\n\n fp.write('\\n## `{}` parameter\\n> Used in {} templates\\n\\n'.format(template_parameters, len(templates.keys())))\n # https://docs.python.org/2/library/collections.html#collections.Counter.most_common\n fp.writelines([\n \"* `{}` ({} times)\\n\".format(template, count)\n for template, count in templates.most_common()\n ])\n\n # write collected values to a file\n with open('values.txt', 'wt') as fp:\n for value in template_values:\n fp.write(value + \"\\n\")\n\n logger.info('values.txt file written - %d lines (from %d articles)',\n len(template_values), articles_analyzed)\n\n\nif __name__ == '__main__':\n mapping = get_portable_infoboxes(\n wikis=\"\"\"\nvillains.fandom.com\nwalkingdead.fandom.com\nmemory-alpha.fandom.com\npowerrangers.fandom.com\nsteven-universe.fandom.com\nttte.fandom.com\nspongebob.fandom.com\nsupernatural.fandom.com\nmuppet.fandom.com\ntardis.fandom.com\nvampirediaries.fandom.com\nhero.fandom.com\n \"\"\".strip().split('\\n')\n )\n\n \"\"\"\n for arg, items in mapping.items():\n print('{} -> {}'.format(\n arg, items))\n \"\"\"\n","repo_name":"Wikia/core-dynks","sub_path":"infoboxes-stats/infoboxes_stats.py","file_name":"infoboxes_stats.py","file_ext":"py","file_size_in_byte":10183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33023362993","text":"from differentiate import Div, Mul, Add, Variable\n\n\nclass Graph:\n\n def __init__(self, node):\n self.node = node\n\n def simplify(self):\n names = {Mul: '*', Add: '+', Div: '/'}\n n = self.node\n\n if hasattr(n, 'a'):\n n.a = Graph(n.a).simplify()\n n.b = Graph(n.b).simplify()\n n.name = '({} {} {})'.format(n.a.name, names[type(n)], n.b.name)\n\n while hasattr(n, 'a'):\n if isinstance(n, Mul):\n if n.a.fixed and n.b.fixed:\n val = n.a.val*n.b.val\n n = Variable(name=str(val), val=val, fixed=True)\n elif n.a.fixed:\n if n.a.val == 0:\n n = Variable(name='0', val=0, fixed=True)\n elif n.a.val == 1:\n n = n.b\n elif n.b.fixed:\n if n.b.val == 0:\n n = Variable(name='0', val=0, fixed=True)\n elif n.b.val == 1:\n n = n.a\n elif isinstance(n, Add):\n if n.a.fixed and n.b.fixed:\n val = n.a.val + n.b.val\n n = Variable(name=str(val), val=val, fixed=True)\n elif n.a.fixed and n.a.val == 0:\n n = n.b\n elif n.b.fixed and n.b.val == 0:\n n = n.a\n elif isinstance(n, Div):\n if n.a.fixed and n.a.val == 0:\n n = Variable(name='0', val=0, fixed=True)\n elif n.b.fixed and n.b.val == 1:\n n = n.a\n elif n.a is n.b:\n n = Variable(name='1', val=1, fixed=True)\n break\n return n\n","repo_name":"ltricot/automatic-differentiation","sub_path":"simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"274046931","text":"Mathses = __import__(\"Euler_Lib\")\n\ndef Chal14(limit):#finds the largest colatz solution up to 'limit'\n long = [0, 0]\n for x in range(1, limit+1):\n length = 0; temp = x\n while (temp>1):\n if ((temp%2) == 1):\n temp=(temp*3)+1\n else:\n temp/=2\n length+=1\n if (length > long[0]):\n long[0] = length\n long[1] = x\n return long[1]\n\n\nprint(Chal14(1000000))\n","repo_name":"Ramit110/Boredom-Nothing-More","sub_path":"Python/Euler/Problem_14.py","file_name":"Problem_14.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33492369867","text":"import xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element, SubElement, Comment\n\ndef add_command(name,cl,id):\n\n\n # root = tree.getroot()\n # top = Element('top')\n # child = SubElement(top, 'child')\n #child.text = 'This child contains text.'\n #child_with_tail = SubElement(top, 'child_with_tail')\n #child_with_tail.text = 'This child has regular text.'\n #child_with_tail.tail = 'And \"tail\" text.'\n\n\n\n tree = ET.parse('command.xml')\n root = tree.getroot()\n newNode = ET.Element(\"Command\")\n newNodeName=ET.Element('id')\n newNodeName.text = str(id)\n newNode.append(newNodeName)\n newNodeName=ET.Element('name')\n\n newNodeName.text = str(name)\n newNode.append(newNodeName)\n newNodeName=ET.Element('content')\n newNodeName.text = str(cl)\n newNode.append(newNodeName)\n root.insert(0, newNode)\n #file_handle = open(\"command.xml.xml\",\"wb\")\n tree.write('command.xml')\n #file_handle.close()\n #ET.dump(newNodeName)\n return\ndef delete_command(name):\n tree = ET.parse('command.xml')\n root = tree.getroot()\n for country in root.findall('Command'):\n rank = (country.find('name').text)\n\n if rank ==str(name):\n\n root.remove(country)\n\n tree.write('command.xml')\n return\ndef delete_commandid(id):\n tree = ET.parse('command.xml')\n root = tree.getroot()\n for country in root.findall('Command'):\n rank = (country.find('id').text)\n\n if rank ==str(id):\n\n root.remove(country)\n\n tree.write('command.xml')\n return\n\ndef update_command(xmlfile,namee):\n tree = ET.parse(str(xmlfile))\n root = tree.getroot()\n for country in root.findall('Command'):\n rank = (country.find('name').text)\n\n if rank ==str(namee):\n idd=(country.find('id').text)\n name=(country.find('name').text)\n script=(country.find('content').text)\n return idd,name,script\n#delete_commandid(\"100\")\n#print(update_command(\"command.xml\",\"lister\")[0])\n\n#add_command(\"aaa\",\"zaa\",\"16\")\n","repo_name":"Mahmoud-Kammoun/Project","sub_path":"updatexml.py","file_name":"updatexml.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72865927095","text":"from algonaut_tests.helpers import MockApiTest\nfrom algonaut_tests.fixtures.user import user, auth_client, organization\nfrom algonaut_tests.fixtures.object_role import object_role\nfrom algonaut_tests.fixtures.algorithm import project\nfrom algonaut_tests.fixtures.dataset import dataset\n\nfrom .helpers import ObjectTest\n\n\nclass TestDatasets(MockApiTest, ObjectTest):\n\n base_url = \"/v1/datasets\"\n obj_key = \"dataset\"\n obj_create_data = {\n \"name\": \"example/algo\",\n \"data\": {\"foo\": \"bar\"},\n \"tags\": [\"my\", \"tags\"],\n }\n obj_update_data = {\n \"name\": \"another/path\",\n \"data\": {\"bar\": \"baz\"},\n \"tags\": [\"one\", \"two\"],\n }\n\n @property\n def list_url(self):\n return \"/v1/projects/{}/datasets\".format(self.project.ext_id)\n\n @property\n def create_url(self):\n return self.list_url\n\n fixtures = [\n {\"auth_client\": auth_client},\n {\"organization\": organization},\n {\n \"another_organization\": lambda test, fixtures: organization(\n test, fixtures, name=\"another one\"\n )\n },\n {\"user\": user},\n {\"project\": project},\n {\n \"another_project\": lambda test, fixtures: project(\n test, fixtures, \"another/project\", \"another_organization\"\n )\n },\n {\"dataset\": lambda test, fixtures: dataset(test, fixtures, \"example\")},\n # the next dataset is not visible to the user\n {\n \"another_dataset\": lambda test, fixtures: dataset(\n test, fixtures, \"another_example\", \"another_project\"\n )\n },\n {\n \"object_role\": lambda test, fixtures: object_role(\n test, fixtures, \"admin\", \"admin\", \"organization\", \"project\"\n )\n },\n ]\n","repo_name":"algoneer/algonaut","sub_path":"algonaut_tests/api/v1/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5264518791","text":"# Based on https://awslabs.github.io/aws-lambda-powertools-python/1.26.6/\n\nimport json\n\nfrom aws_lambda_powertools import Logger, Tracer\nfrom aws_lambda_powertools.event_handler import APIGatewayRestResolver, Response, content_types\nfrom aws_lambda_powertools.event_handler.exceptions import (\n BadRequestError,\n InternalServerError,\n NotFoundError,\n ServiceError,\n UnauthorizedError,\n)\nfrom aws_lambda_powertools.logging import correlation_paths\nfrom aws_lambda_powertools.utilities.typing import LambdaContext\n\ntracer = Tracer()\nlogger = Logger()\napp = APIGatewayRestResolver()\n\n# routers\nimport router_country\nimport router_region\nimport router_range\nimport router_mountain\n\napp.include_router(router_country.router, prefix=\"/country\")\napp.include_router(router_region.router, prefix=\"/region\")\napp.include_router(router_range.router, prefix=\"/range\")\napp.include_router(router_mountain.router, prefix=\"/mount\")\n\nfrom router_helper import build_response\n\n@app.exception_handler(ValueError)\ndef handle_invalid_limit_qs(ex: ValueError): # receives exception raised\n metadata = {\"path\": app.current_event.path, \"query_strings\": app.current_event.query_string_parameters}\n logger.error(f\"Malformed request: {ex}\", extra=metadata)\n return Response(\n status_code=400,\n content_type=content_types.APPLICATION_JSON,\n #body=json.dumps({\"data\": None, \"function\": app.current_event.path, \"params\": app.current_event.query_string_parameters, \"status\": \"BAD_REQUEST\", \"error\": f\"{ex}\")\n body=json.dumps(build_response(status = 'BAD_REQUEST', code = 400, error = f\"{ex}\", override_default = True)[0]) # skip additional error code\n )\n\n\n@app.not_found\n@tracer.capture_method\ndef handle_not_found_errors(exc: NotFoundError) -> Response:\n logger.info(f\"Not found route: {app.current_event.path}\")\n return Response(status_code=404, content_type=content_types.APPLICATION_JSON, body=exc)\n\n\n@app.get(rule=\"/bad-request-error\")\n@tracer.capture_method\ndef bad_request_error():\n raise BadRequestError(\"Missing required parameter\") # HTTP 400\n\n\n@app.get(rule=\"/unauthorized-error\")\n@tracer.capture_method\ndef unauthorized_error():\n raise UnauthorizedError(\"Unauthorized\") # HTTP 401\n\n\n@app.get(rule=\"/not-found-error\")\n@tracer.capture_method\ndef not_found_error():\n raise NotFoundError # HTTP 404\n\n\n@app.get(rule=\"/internal-server-error\")\n@tracer.capture_method\ndef internal_server_error():\n raise InternalServerError(\"Internal server error\") # HTTP 500\n\n\n@app.get(rule=\"/service-error\", cors=True)\n@tracer.capture_method\ndef service_error():\n raise ServiceError(502, \"Something went wrong!\")\n\n# Main lambda handler\n@ logger.inject_lambda_context(correlation_id_path = correlation_paths.API_GATEWAY_REST)\n@ tracer.capture_lambda_handler\ndef lambda_handler(event, context) -> dict:\n \"\"\"Sample pure Lambda function\n\n Parameters\n ----------\n event: dict, required\n API Gateway Lambda Proxy Input Format\n\n #api-gateway-simple-proxy-for-lambda-input-format\n Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html\n\n context: object, required\n Lambda Context runtime methods and attributes\n\n Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n Returns\n ------\n API Gateway Lambda Proxy Output Format: dict\n\n Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html\n \"\"\"\n\n return app.resolve(event, context)\n","repo_name":"wdoganowski/liczyrzepa_db","sub_path":"api/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39583654371","text":"#%%\nfrom datetime import datetime\nfrom geopy import Nominatim\ntry:\n from src.stardreamcatcher.tzwhere_v303 import tzwhere\nexcept Exception as e:\n print(e)\n from tzwhere import tzwhere\nfrom pytz import timezone, utc\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.patches import Circle\n\nfrom skyfield.api import Star, load, wgs84\nfrom skyfield.data import hipparcos, stellarium\nfrom skyfield.projections import build_stereographic_projection\nfrom skyfield.constants import GM_SUN_Pitjeva_2005_km3_s2 as GM_SUN\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\ndef load_data():\n # load celestial data\n # de421 shows position of earth and sun in space/\n eph = load(\"./data/de421.bsp\")\n\n # hipparcos dataset contains star location data.\n try:\n with load.open(\"./data/hip_main.dat\") as f:\n stars = hipparcos.load_dataframe(f)\n except:\n # Pull data from online, if local copy of data fails to load.\n # NOTE: This causes issues with the share.streamlit.io hosted app.\n print(\"# HIT 1st EXCEPT in load_data()#\")\n st.markdown(\"# HIT 1st EXCEPT in load_data()#\")\n with load.open(hipparcos.URL) as f:\n stars = hipparcos.load_dataframe(f)\n\n # And the constellation outlines come from Stellarium. We make a list\n # of the stars at which each edge stars, and the star at which each edge\n # ends.\n try:\n # Access local copy of [`constellationship.fab`](./data/constellationship.fab) data.\n with load.open(\"./data/constellationship.fab\") as f:\n constellations = stellarium.parse_constellations(f)\n except:\n # Pull data from online, if local copy of data fails to load.\n # NOTE: This causes issues with the share.streamlit.io hosted app\n print(\"# HIT 2nd EXCEPT in load_data()#\")\n st.markdown(\"# HIT 2nd EXCEPT in load_data()#\")\n url = (\n \"https://raw.githubusercontent.com/Stellarium/stellarium/master\"\n \"/skycultures/modern_st/constellationship.fab\"\n )\n\n with load.open(url) as f:\n constellations = stellarium.parse_constellations(f)\n\n return eph, stars, constellations\n\n\ndef load_data_cam(url_or_other_path=\"./data/cam.constellationship.fab\"):\n \"\"\"Modify load_data() for less overhead when colorizing specified constellations.\n\n Args:\n url_or_other_path (str, optional): Path to constellations to be colorized. Defaults to \"./data/cam.constellationship.fab\".\n\n Returns:\n _type_: _description_\n \"\"\"\n\n url = (url_or_other_path)\n\n with load.open(url) as f:\n constellations_cam = stellarium.parse_constellations(f)\n \n return constellations_cam\n\ndef collect_celestial_data(location, when):\n # get latitude and longitude of our location \n locator = Nominatim(user_agent='myGeocoder')\n location = locator.geocode(location)\n lat, long = location.latitude, location.longitude\n \n # convert date string into datetime object\n dt = datetime.strptime(when, '%Y-%m-%d %H:%M')\n\n # define datetime and convert to utc based on our timezone\n timezone_str = tzwhere.tzwhere().tzNameAt(lat, long)\n local = timezone(timezone_str)\n\n # get UTC from local timezone and datetime\n local_dt = local.localize(dt, is_dst=None)\n utc_dt = local_dt.astimezone(utc)\n\n # load celestial data\n # eph, stars, constellations = load_data()\n\n # find location of earth and sun and set the observer position\n sun = eph['sun']\n earth = eph['earth']\n\n # define observation time from our UTC datetime\n ts = load.timescale()\n t = ts.from_datetime(utc_dt)\n\n # define an observer using the world geodetic system data\n observer = wgs84.latlon(latitude_degrees=lat, longitude_degrees=long).at(t)\n\n # define the position in the sky where we will be looking\n position = observer.from_altaz(alt_degrees=90, az_degrees=0)\n # center the observation point in the middle of the sky\n ra, dec, distance = observer.radec()\n center_object = Star(ra=ra, dec=dec)\n\n # find where our center object is relative to earth and build a projection with 180 degree view\n center = earth.at(t).observe(center_object)\n projection = build_stereographic_projection(center)\n field_of_view_degrees = 180.0\n\n # calculate star positions and project them onto a plain space\n star_positions = earth.at(t).observe(Star.from_dataframe(stars))\n stars['x'], stars['y'] = projection(star_positions)\n \n edges = [edge for name, edges in constellations for edge in edges]\n edges_star1 = [star1 for star1, star2 in edges]\n edges_star2 = [star2 for star1, star2 in edges]\n\n \n return stars, edges_star1, edges_star2\n\ndef create_star_chart(location, when, chart_size_dim0, chart_size_dim1, max_star_size, eph, stars, constellations, lines_xy_cam, savefig=\"./figs/pythonic_star_map.png\"):\n stars, edges_star1, edges_star2 = collect_celestial_data(location, when)\n limiting_magnitude = 10\n bright_stars = (stars.magnitude <= limiting_magnitude)\n magnitude = stars['magnitude'][bright_stars]\n fig, ax = plt.subplots(figsize=(chart_size, chart_size))\n \n #use the night sky color code\n border = plt.Circle((0, 0), 1, color='#041A40', fill=True) \n ax.add_patch(border)\n\n marker_size = max_star_size * 10 ** (magnitude / -2.5)\n\n ax.scatter(stars['x'][bright_stars], stars['y'][bright_stars],\n s=marker_size, color='white', marker='.', linewidths=0,\n zorder=2)\n # Draw the constellation lines.\n xy1 = stars[['x', 'y']].loc[edges_star1].values\n xy2 = stars[['x', 'y']].loc[edges_star2].values\n lines_xy = np.rollaxis(np.array([xy1, xy2]), 1)\n\n ax.add_collection(LineCollection(lines_xy, colors='#ffff', linewidths=0.15))\n\n # if lines_xy_cam != None:\n ax.add_collection(LineCollection(lines_xy_cam, colors='#ff0000', linewidths=0.15))\n\n horizon = Circle((0, 0), radius=1, transform=ax.transData)\n for col in ax.collections:\n col.set_clip_path(horizon)\n\n # other settings\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n plt.axis('off')\n when_datetime = datetime.strptime(when, '%Y-%m-%d %H:%M')\n plt.title(f\"Observation Location: {location}, Time: {when_datetime.strftime('%Y-%m-%d %H:%M')}\", loc='right', fontsize=10)\n \n if savefig:\n plt.savefig(savefig)\n else:\n plt.show()\n return\n\nif __name__ == '__main__':\n # load celestial data\n eph, stars, constellations = load_data()\n\n location = \"Fort Lauderdale, FL\"\n when = '2019-04-25 10:01'\n chart_size=12\n max_star_size=500\n # print(constellations)\n\n # Make colored constellations.\n # TODO: Pull out into a utility function.\n stars, edges_star1, edges_star2 = collect_celestial_data(location, when)\n constellations_cam = load_data_cam(\"./data/constellationship_cam.fab\")\n edges_cam = [edge for name, edges in constellations_cam for edge in edges]\n edges_star1_cam = [star1 for star1, star2 in edges_cam]\n edges_star2_cam = [star2 for star1, star2 in edges_cam]\n\n xy1_cam = stars[['x', 'y']].loc[edges_star1_cam].values\n xy2_cam = stars[['x', 'y']].loc[edges_star2_cam].values\n lines_xy_cam = np.rollaxis(np.array([xy1_cam, xy2_cam]), 1)\n\n # Create star chart.\n # figname = location.replace(\" \", \"_\").remove(\":\")\n figname = location.replace(\" \", \"_\").replace(\",\",\"_\") + \"_\" + when.replace(\"-\", \"_\").replace(\":\", \"\").replace(\" \", \"_\")\n create_star_chart(location, when, chart_size, chart_size, max_star_size, eph, stars, constellations, lines_xy_cam=lines_xy_cam, savefig=f\"./figs/{figname.lower()}.png\")\n\n","repo_name":"cmutnik/starchart_dreamcatchers","sub_path":"src/stardreamcatcher/scripts/star_map.py","file_name":"star_map.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13459127658","text":"import os\n\nfrom PySide2 import QtCore, QtWidgets, QtGui\n\nfrom .ui_ipdialog import Ui_Dialog\nimport mycommonfunctions.basicconfig as myconf\n\nglobalvars = myconf.getGlobals()\n\nclass MyIPDialog(QtWidgets.QDialog, Ui_Dialog):\n\n\n def __init__(self, parent=None):\n super(MyIPDialog, self).__init__(parent)\n\n self.filename = globalvars['ip_file']\n\n self.setupUi(self)\n\n self.comboBox.currentIndexChanged.connect(self.indexChanged)\n\n # user_path = os.path.expanduser('~')\n # file_name = 'ipaddresses.txt'\n # self.file_path = os.path.join(user_path, file_name)\n\n if not os.path.exists(self.filename):\n self.save_ip_adresses(self.filename, ['127.0.0.1'])\n \n self.list_of_ip_adresses = self.read_ip_adresses(self.filename)\n\n self.comboBox.clear()\n self.comboBox.addItems(self.list_of_ip_adresses)\n self.comboBox.setCurrentIndex(0)\n \n self.buttonBox.accepted.connect(self.acc)\n self.buttonBox.rejected.connect(self.rej)\n\n def save_ip_adresses(self, file_path, list_of_adresses): # Only save the first five adresses\n f = open(file_path, 'w')\n counter = 0\n for each in list_of_adresses:\n f.write(each + '\\n')\n counter += 1\n if counter == 5: \n break\n f.close()\n\n def read_ip_adresses(self, file_path):\n with open(file_path,'r') as f:\n list_of_ip_adresses = [line.rstrip('\\n') for line in f]\n return list_of_ip_adresses\n\n\n def indexChanged(self):\n spin_boxes = [self.spinBox_1, self.spinBox_2, self.spinBox_3, self.spinBox_4]\n values = self.comboBox.currentText().split('.')\n\n if len(values) == 4:\n for spin_box, value in zip(spin_boxes, values):\n spin_box.setValue(int(value))\n\n\n def acc(self):\n spin_boxes = [self.spinBox_1, self.spinBox_2, self.spinBox_3, self.spinBox_4]\n values = [str(spin_box.value()) for spin_box in spin_boxes]\n\n adress = '.'.join(values)\n\n if not adress in self.list_of_ip_adresses:\n self.list_of_ip_adresses.insert(0, adress)\n self.comboBox.clear()\n self.comboBox.addItems(self.list_of_ip_adresses)\n self.comboBox.setCurrentIndex(0)\n self.save_ip_adresses(self.filename, self.list_of_ip_adresses)\n \n self.accept()\n\n\n def active_ip_adress(self):\n return self.comboBox.currentText()\n\n\n def rej(self):\n self.reject()\n\n\n\n ","repo_name":"komministern/gca_simulator","sub_path":"source/view/myipdialog.py","file_name":"myipdialog.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13015223692","text":"# File created By: Lucien Maman\n# Last updated: 12/08/2022\n# ---------------------------------------------------------------------------\n\"\"\"\nThe TBD architectures (i.e., TBD-S and TBD-T).\n\nSpecificities:\n- Requires having features computed from both individuals and the group as a whole\n- Requires having a pre-trained model dedicated to the prediction of a specific dimension\n\nDesigned by: Lucien Maman\n\nIt was presented at @ICMI2021.\nPlease cite @ICMI2021 to refer to the TBDs.\n\"\"\"\n\n# ---------------------------------------------------------------------------\n# Imports\n# ---------------------------------------------------------------------------\nimport tensorflow as tf\n\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Model\n\n# ---------------------------------------------------------------------------\n# Function returning the TBD architecture\n# ---------------------------------------------------------------------------\n\ndef create_TBD():\n\n # Define the parameters of the model. Default numbers are the ones used in the paper(s)\n nb_task = 5 # Here, 5 tasks are predicted, following the ones presented in @GAME-ON\n timesteps = 6 * nb_task # number of timesteps (here 6 segments of 20s) x the number of task (here set to 5)\n nb_indiv_features = 50 # number of features computed from each individuals\n nb_group_features = 41 # number of features computed from the group as a whole\n path_base_model = \"./path_to_pre_trained_model\" # path to the saved pre-trained model on the \"Base\" dimension only\n\n # To store all the outputs and instantiate the model later\n list_outputs = []\n\n # INPUT MODULE\n\n # Initiate individual inputs\n input_p0 = tf.keras.Input(\n shape=(timesteps, nb_indiv_features), name=\"p0\"\n )\n\n input_p1 = tf.keras.Input(\n shape=(timesteps, nb_indiv_features), name=\"p1\"\n )\n\n input_p2 = tf.keras.Input(\n shape=(timesteps, nb_indiv_features), name=\"p2\"\n )\n\n # Initiate group input\n input_group = tf.keras.Input(\n shape=(timesteps, nb_group_features), name=\"Group\"\n )\n\n # Concatenate inputs to instantiate the model later\n list_inputs = [input_p0, input_p1, input_p2, input_group]\n\n # BASE MODULE\n\n # Load the saved pre-trained model on a specific dimension (i.e., Social in TBD-T or Task in TBD-S)\n base_model = tf.keras.models.load_model(path_base_model)\n # Make the parameters trainable as a first step to integrate reciprocal impact\n base_model.trainable = True\n\n # Instantiate the pre-trained model without the layers used to make predictions\n base_model = Model(base_model.inputs, base_model.layers[-6].output, name=\"Base_model\")\n base_model = base_model(list_inputs)\n\n # TARGET MODULE\n x = Dense(16, activation='relu', name=\"Dense_group\")(base_model)\n\n # Multitask setting\n for i in range(nb_task):\n x_prev = Dense(8, activation='relu')(x)\n x_prev = Dense(4, activation='relu')(x_prev)\n\n # OUTPUT MODULE\n\n # Only one unit in the dense layer as TBDs are designed to predict only one dimension of cohesion\n output = Dense(1, activation='sigmoid', name=\"Output_t\" + str(i + 1))(x_prev)\n\n # Concatenate outputs to instantiate the model later\n list_outputs.append(output)\n\n # Create the model instance\n model = Model(\n inputs=list_inputs,\n outputs=list_outputs,\n )\n model._name = \"TBD\"\n\n return model\n","repo_name":"gvarni/GRACE_Project","sub_path":"models/TBD.py","file_name":"TBD.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74032081975","text":"import math\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nfrom torch.nn.modules.utils import _pair\n\nclass _ConvNdMtl(Module):\n \"\"\"The class for meta-transfer convolution\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride,\n padding, dilation, transposed, output_padding, groups, bias):\n super(_ConvNdMtl, self).__init__()\n if in_channels % groups != 0:\n raise ValueError('in_channels must be divisible by groups')\n if out_channels % groups != 0:\n raise ValueError('out_channels must be divisible by groups')\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.transposed = transposed\n self.output_padding = output_padding\n self.groups = groups\n if transposed:\n self.weight = Parameter(torch.Tensor(\n in_channels, out_channels // groups, *kernel_size))\n self.mtl_weight = Parameter(torch.ones(in_channels, out_channels // groups, 1, 1))\n else:\n self.weight = Parameter(torch.Tensor(\n out_channels, in_channels // groups, *kernel_size))\n self.mtl_weight = Parameter(torch.ones(out_channels, in_channels // groups, 1, 1))\n self.weight.requires_grad=False\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n self.bias.requires_grad=False\n self.mtl_bias = Parameter(torch.zeros(out_channels))\n else:\n self.register_parameter('bias', None)\n self.register_parameter('mtl_bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n n = self.in_channels\n for k in self.kernel_size:\n n *= k\n stdv = 1. / math.sqrt(n)\n self.weight.data.uniform_(-stdv, stdv)\n self.mtl_weight.data.uniform_(1, 1)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n self.mtl_bias.data.uniform_(0, 0)\n\n def extra_repr(self):\n s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.output_padding != (0,) * len(self.output_padding):\n s += ', output_padding={output_padding}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n return s.format(**self.__dict__)\n\nclass Conv2dMtl(_ConvNdMtl):\n \"\"\"The class for meta-transfer convolution\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True):\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n super(Conv2dMtl, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding, dilation,\n False, _pair(0), groups, bias)\n\n def forward(self, inp):\n new_mtl_weight = self.mtl_weight.expand(self.weight.shape)\n new_weight = self.weight.mul(new_mtl_weight)\n if self.bias is not None:\n new_bias = self.bias + self.mtl_bias\n else:\n new_bias = None\n return F.conv2d(inp, new_weight, new_bias, self.stride,\n self.padding, self.dilation, self.groups)\n","repo_name":"yaoyao-liu/meta-transfer-learning","sub_path":"pytorch/models/conv2d_mtl.py","file_name":"conv2d_mtl.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":685,"dataset":"github-code","pt":"22"} +{"seq_id":"37729058081","text":"import arcpy\nfrom datetime import datetime \t#import the datetime class from the datetime module\n\ntc = \"d:\\\\temp\\\\tc.shp\"\nfields = [\"SETDATE\", \"SETYEAR\"]\n\n## either of these clauses should work if you are testing for a field not set\nwhere = 'char_length(\"SETYEAR\") = 1'\n#where = '\"SETYEAR\"' + \" < '1900'\"\n\nwith arcpy.da.UpdateCursor(tc, fields, where) as yearcursor:\n\tfor row in yearcursor:\n\t\tset_date = row[0] \t#get the SETDATE field value\n\t\t\n\t\tset_year = datetime.strftime(set_date, \"%Y\") \t#use datetime to create string of year \n\n\t\t## add your udpate field/row functionality\n\ndel yearcursor\n\n\n##\n##with arcpy.da.UpdateCursor(tc,[\"SETDATE\",\"SETYEAR\"],'CHAR_LENGTH(\"SETYEAR\")=1') as yearcursor:\n## for row in yearcursor:\n##\n## datetime.strftime(\"%m/%d/%Y\")[-4]\n#### yearcursor.updateRow()\n##del yearcursor","repo_name":"DVRPCfreight/setdate2year","sub_path":"setyear.py","file_name":"setyear.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17224511475","text":"\"\"\"empty message\n\nRevision ID: f4d771400d28\nRevises: 8d902c508ad5\nCreate Date: 2021-01-09 03:19:04.778835\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'f4d771400d28'\ndown_revision = '8d902c508ad5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('Artist', 'genres',\n existing_type=postgresql.ARRAY(sa.VARCHAR()),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('Artist', 'genres',\n existing_type=postgresql.ARRAY(sa.VARCHAR()),\n nullable=True)\n # ### end Alembic commands ###\n","repo_name":"ahmed-gharib89/Fyyur-web-app","sub_path":"migrations/versions/f4d771400d28_.py","file_name":"f4d771400d28_.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36869519695","text":"import pygame\nimport random\nimport os\nimport threading\nimport pytweening as tween\n\nclock = pygame.time.Clock()\npygame.init()\npygame.mixer.init()\n\nSCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 720\n\n# COLORS\n\nWHITE = (255, 255, 255)\nGRAY = (120, 120, 120)\nBLACK = (0, 0, 0)\n\n# UI SETTINGS\n\ntest = False\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Fighter\")\n\nspeaker_box = pygame.image.load(\"IMAGES/DIALOGUE/speaker_box.png\")\ntext_box = pygame.image.load(\"IMAGES/DIALOGUE/rsz.png\")\n# speaker_box = pygame.transform.scale(speaker_box, (speaker_box.get_width() * 0.15, speaker_box.get_height() * 0.15))\n\nTEXT_BOX_POS = (SCREEN_WIDTH / 2, 640)\nSPEAKER_BOX_POS = (180, 100)\nADVANCE_DIALOGUE_RECT = pygame.Rect(1117, 680, 40, 30)\nTEXT_FONT_TYPE = pygame.font.Font(\"MISC/MS Gothic.ttf\", 25)\nSPEAKER_FONT_TYPE = pygame.font.Font(\"MISC/baron.otf\", 35)\nTEXT_FONT_COLOR = (200, 200, 200)\nSPEAKER_FONT_COLOR = WHITE\nCHARACTER_UNFOCUS_TRANSPARENCY = 150\n\ntext_box_rect = text_box.get_rect(center=TEXT_BOX_POS)\nspeaker_box_rect = speaker_box.get_rect(center=SPEAKER_BOX_POS)\n\ndef tween_value(current_value, goal_value, step_amount, direction):\n if direction == \"positive\":\n return current_value + (tween.easeOutSine(current_value / step_amount) * goal_value)\n elif direction == \"negative\":\n return current_value - (tween.linear(current_value / step_amount) * goal_value)\n\nclass visual_novel_system():\n def __init__(self, dialogue_events, background=None):\n pygame.sprite.Sprite.__init__(self)\n\n # CHARACTER LIST\n\n self.character_list = {\n \"left\": None,\n \"right\": None\n }\n self.focused_side = None\n self.speaker_name = None\n self.obscure_speaker = False\n\n # BACKGROUNDS\n\n self.background_dict = {}\n self.dimmed_background = False\n self.dim_current_index = 0\n self.dim_goal_index = 0\n self.blind_current_transparency = 0\n self.blind_goal_transparency = 0\n for image in os.listdir(f\"IMAGES/DIALOGUE/backgrounds\"):\n if not \".DS_Store\" in image:\n image_name = image.split(\".\")\n image_name = image_name[0]\n sprite = pygame.image.load(f\"IMAGES/DIALOGUE/backgrounds/{image}\")\n self.background_dict[image_name] = sprite\n if background != None:\n self.background = self.background_dict[background]\n self.background = pygame.transform.scale(self.background, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n # MUSIC\n\n self.music_dict = {}\n for music in os.listdir(f\"SOUND/DIALOGUE/MUSIC\"):\n if not \".DS_Store\" in music:\n music_name = music.split(\".\")\n music_name = music_name[0]\n file = pygame.mixer.Sound(f\"SOUND/DIALOGUE/MUSIC/{music}\")\n self.music_dict[music_name] = file\n\n # DIALOGUE\n test = \"test\"\n file = open(f\"MISC/DIALOGUE_TEXT/{dialogue_events}\", \"r\")\n f = file.readlines()\n self.current_text = \"\"\n self.text_lines = []\n self.MAX_CHARACTERS = 50\n self.dialogue_events = []\n self.dialogue_text_index = 0\n self.able_to_update = True\n for line in f:\n if line[-1] == \"\\n\":\n # omits last character which is /n\n self.dialogue_events.append(line[:-1])\n else:\n self.dialogue_events.append(line)\n\n def change_background(self, new_background):\n self.background = self.background_dict[new_background]\n self.background = pygame.transform.scale(self.background, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n def dim_background(self, goal_transparency, goal_ticks):\n self.dim_goal_index = goal_ticks\n self.blind_goal_transparency = goal_transparency\n if self.dimmed_background == False:\n self.dimmed_background = True\n else:\n self.dimmed_background = False\n\n def check_clicking_continue(self):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP and self.able_to_update == False and ADVANCE_DIALOGUE_RECT.collidepoint(pygame.mouse.get_pos()):\n self.able_to_update = True\n self.play_music(\"transition\", 0.3)\n\n def play_music(self, name, volume, looped=0):\n music = self.music_dict[name]\n music.set_volume(volume)\n music.play(looped)\n\n def stop_music(self, music):\n initial_volume = music.get_volume()\n FADE_SPEED = 20\n for current_tick in range(0, FADE_SPEED + 1):\n music.set_volume(initial_volume - (tween.linear(current_tick / FADE_SPEED) * initial_volume))\n if music.get_volume() <= 0:\n music.stop()\n break\n dt = clock.tick(60)\n def write(self, asdf):\n TEXT_SPEED = 30\n goal_text_list = [char for char in asdf]\n current_text_list = []\n current_text_index = 0\n self.text_lines = []\n text_offsets = {\"0\": 0}\n amount_of_lines = 1\n while current_text_index < len(goal_text_list):\n current_text_list.append(goal_text_list[current_text_index])\n if goal_text_list[current_text_index] != \" \":\n self.play_music(\"text_blip\", 0.02, 0)\n self.current_text = \"\".join(current_text_list)\n try:\n # check if text_offsets dictionary current slot is empty\n if str(amount_of_lines) in text_offsets.keys():\n if amount_of_lines > 1:\n self.text_lines[amount_of_lines - 1] = self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1) + text_offsets[str(amount_of_lines - 1)]):self.MAX_CHARACTERS * amount_of_lines + text_offsets[str(amount_of_lines)]]\n else:\n self.text_lines[amount_of_lines - 1] = self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1)):self.MAX_CHARACTERS * amount_of_lines + text_offsets[str(amount_of_lines)]]\n # print(\"current line offset: \", text_offsets[str(amount_of_lines)])\n else:\n text_offsets[str(amount_of_lines)] = 0\n print(\"current line offset: \", text_offsets[str(amount_of_lines)])\n # self.text_lines[amount_of_lines - 1] = self.current_text[self.MAX_CHARACTERS * (amount_of_lines - 1) + text_offsets[str(amount_of_lines - 1)]:self.MAX_CHARACTERS * amount_of_lines + text_offsets[str(amount_of_lines)]]\n if amount_of_lines > 1:\n self.text_lines[amount_of_lines - 1] = self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1) + text_offsets[str(amount_of_lines - 1)]):self.MAX_CHARACTERS * amount_of_lines + text_offsets[str(amount_of_lines)]]\n else:\n self.text_lines[amount_of_lines - 1] = self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1)):self.MAX_CHARACTERS * amount_of_lines + text_offsets[str(amount_of_lines)]]\n except IndexError:\n # if no current text on line, add slot\n self.text_lines.append(self.current_text)\n\n if (len(self.current_text) % self.MAX_CHARACTERS == 0 and len(self.current_text) != 0) or text_offsets[str(amount_of_lines)] > 0:\n # checking for next index\n # print(\"starting char: \", goal_text_list[current_text_index])\n try:\n next_item = goal_text_list[current_text_index + 1]\n if next_item != \" \":\n try:\n text_offsets[str(amount_of_lines)] += 1\n except IndexError:\n text_offsets[str(amount_of_lines)] = 1\n print(\"current line: \", amount_of_lines)\n print(\"adding to offset: \", text_offsets[str(amount_of_lines)])\n print(\"current char: \", goal_text_list[current_text_index])\n print(text_offsets)\n else:\n print(\"went down line\")\n print(text_offsets[str(amount_of_lines)])\n offset = text_offsets[str(amount_of_lines)]\n if amount_of_lines == 1:\n print(\"first line\")\n print(self.current_text[0:((self.MAX_CHARACTERS * amount_of_lines)) + offset])\n self.text_lines.append(self.current_text[0:(self.MAX_CHARACTERS * amount_of_lines) + offset])\n else:\n print(\"more than one line\")\n self.text_lines.append(self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1) + offset):(self.MAX_CHARACTERS * amount_of_lines)])\n amount_of_lines += 1\n except IndexError:\n # means the end of the current text\n print(\"first statement new line\")\n # self.text_lines.append(self.current_text[(self.MAX_CHARACTERS * (amount_of_lines - 1) + text_offsets[str(amount_of_lines)]):(self.MAX_CHARACTERS * amount_of_lines)])\n amount_of_lines += 1\n # self.text_lines[amount_of_lines - 1] = self.current_text[((amount_of_lines - 1) * self.MAX_CHARACTERS):amount_of_lines * self.MAX_CHARACTERS]\n current_text_index += 1\n dt = clock.tick(TEXT_SPEED)\n\n def draw(self):\n if self.background != None:\n screen.blit(self.background, (0, 0))\n if self.dimmed_background == True:\n blind = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT)) # the size of your rect\n if self.blind_goal_transparency != self.blind_current_transparency and self.dim_current_index != self.dim_goal_index:\n if self.blind_goal_transparency < self.blind_current_transparency:\n self.blind_current_transparency = 0 - tween.easeOutSine(self.dim_current_index / self.dim_goal_index) * self.blind_goal_transparency\n else:\n self.blind_current_transparency = 0 + tween.easeOutSine(self.dim_current_index / self.dim_goal_index) * self.blind_goal_transparency\n self.dim_current_index += 1\n\n blind.set_alpha(self.blind_current_transparency) # alpha level\n blind.fill((GRAY)) # this fills the entire surface\n screen.blit(blind, (0, 0)) # (0,0) are the top-left coordinates\n\n # dont ask why this code is here instead of update()\n if self.character_list[\"left\"] != None:\n self.character_list[\"left\"].update(self)\n if self.character_list[\"right\"] != None:\n # print(\"update right\")\n self.character_list[\"right\"].update(self)\n\n screen.blit(text_box, text_box_rect)\n\n for i in range(0, len(self.text_lines)):\n text = TEXT_FONT_TYPE.render(self.text_lines[i], True, TEXT_FONT_COLOR)\n screen.blit(text, (230, 625 + i * 30))\n\n if self.focused_side != None:\n if self.obscure_speaker == False and self.character_list[self.focused_side] != None:\n speaker_text = SPEAKER_FONT_TYPE.render(self.character_list[self.focused_side].char_name, True, SPEAKER_FONT_COLOR)\n else:\n speaker_text = SPEAKER_FONT_TYPE.render(\"???\", True,SPEAKER_FONT_COLOR)\n speaker_text = pygame.transform.rotate(speaker_text, 6)\n speaker_text_rect = speaker_text.get_rect()\n speaker_text_rect.center = (180, 95)\n screen.blit(speaker_box, speaker_box_rect)\n screen.blit(speaker_text, speaker_text_rect)\n\n def update(self):\n self.draw()\n self.check_clicking_continue()\n if self.able_to_update == True:\n current_line_text = self.dialogue_events[self.dialogue_text_index]\n if \"change sprite: \" in current_line_text:\n splitted = current_line_text.replace(\"change sprite: \", \"\")\n character_events_list = splitted.split(\", \")\n print(character_events_list[1], \": \", character_events_list[2])\n c = self.character_list[character_events_list[0]]\n if c == None:\n new_char = visual_novel_system.character(character_events_list[1], character_events_list[0], character_events_list[2], character_events_list[3], self)\n self.character_list[character_events_list[0]] = new_char\n else:\n c.change_sprite(character_events_list[1], character_events_list[2], character_events_list[3], self)\n elif \"toggle focus: \" in current_line_text:\n splitted = current_line_text.replace(\"toggle focus: \", \"\")\n c = self.character_list[splitted]\n c.toggle_focus(self)\n elif \"text: \" in current_line_text:\n splitted = current_line_text.replace(\"text: \", \"\")\n threading.Thread(target=self.write, args=(splitted,)).start()\n # self.write(splitted)\n self.current_text = splitted\n elif \"play music: \" in current_line_text:\n splitted = current_line_text.replace(\"play music: \", \"\")\n sound_events_list = splitted.split(\", \")\n self.play_music(sound_events_list[0], float(sound_events_list[1]), int(sound_events_list[2]))\n elif \"stop music: \" in current_line_text:\n splitted = current_line_text.replace(\"stop music: \", \"\")\n # no idea why the comma fixes it\n threading.Thread(target=self.stop_music, args=(self.music_dict[splitted],)).start()\n elif \"change background: \" in current_line_text:\n splitted = current_line_text.replace(\"change background: \", \"\")\n self.change_background(splitted)\n elif \"dim background\" in current_line_text:\n self.dim_background(100, 100)\n elif \"obscure speaker: \" in current_line_text:\n splitted = current_line_text.replace(\"obscure speaker: \", \"\")\n if splitted == \"True\":\n self.obscure_speaker = True\n elif splitted == \"False\":\n self.obscure_speaker = False\n elif \"shake: \" in current_line_text:\n splitted = current_line_text.replace(\"shake: \", \"\")\n shake_events_list = splitted.split(\", \")\n threading.Thread(target=self.character_list[shake_events_list[0]].shake, args=(self, int(shake_events_list[1]))).start()\n elif \"- - - - -\" in current_line_text:\n self.able_to_update = False\n self.dialogue_text_index += 1\n\n class character():\n def __init__(self, char_name, type, current_sprite_name, sprite_pos, system_instance):\n pygame.sprite.Sprite.__init__(self)\n self.char_name = char_name\n self.type = type\n self.in_focus = False\n self.sprite_transparency = 0\n self.visible = True\n # default config\n if self.type == \"left\":\n self.direction = False\n # print(char_name, \" is facing \", self.direction)\n if sprite_pos == \"default\":\n self.sprite_pos_x = 300\n self.sprite_pos_y = 550\n else:\n splitted = sprite_pos.split(\"|\")\n self.sprite_pos_x = int(splitted[0])\n self.sprite_pos_y = int(splitted[1])\n if self.type == \"right\":\n self.direction = True\n if sprite_pos == \"default\":\n self.sprite_pos_x = 980\n self.sprite_pos_y = 550\n else:\n splitted = sprite_pos.split(\"|\")\n self.sprite_pos_x = int(splitted[0])\n self.sprite_pos_y = int(splitted[1])\n self.sprite_dict = {}\n self.sprite_tween_index = 0\n\n # LOADING ALL CHARACTER SPRITES\n\n temp_dict = {}\n directory_contents = os.listdir(f\"IMAGES/DIALOGUE\")\n for item in directory_contents:\n dir_path = os.path.join(f\"IMAGES/DIALOGUE\", item)\n if os.path.isdir(dir_path) and item != \"backgrounds\":\n for image in os.listdir(f\"IMAGES/DIALOGUE/{item}\"):\n if \".png\" in image:\n image_name = image.split(\".\")\n image_name = image_name[0]\n sprite = pygame.image.load(f\"IMAGES/DIALOGUE/{item}/{image}\")\n temp_dict[image_name] = sprite\n # list of lists of animations\n self.sprite_dict[item] = temp_dict\n temp_dict = {}\n\n self.sprite_image = self.sprite_dict[self.char_name][current_sprite_name]\n self.sprite_rect = self.sprite_image.get_rect()\n self.sprite_rect.centerx = self.sprite_pos_x\n self.sprite_rect.centery = self.sprite_pos_y\n\n def toggle_focus(self, system_instance):\n self.in_focus = not self.in_focus\n if self.in_focus == True:\n if system_instance.obscure_speaker == False:\n self.sprite_transparency = 255\n print(\"transparency: \", self.sprite_transparency)\n else:\n self.sprite_transparency = 0\n print(\"transparency: \", self.sprite_transparency)\n if self.type == \"left\":\n threading.Thread(target=self.tween_focus, args=(10, 0, 20, \"right\", 1)).start()\n elif self.type == \"right\":\n threading.Thread(target=self.tween_focus, args=(10, 0, 20, \"left\", 1)).start()\n system_instance.focused_side = self.type\n else:\n if self.type == \"left\":\n threading.Thread(target=self.tween_focus, args=(10, 150, 20, \"left\")).start()\n elif self.type == \"right\":\n threading.Thread(target=self.tween_focus, args=(10, 150, 20, \"right\")).start()\n system_instance.focused_side = None\n\n def tween_focus(self, ticks, goal_transparency, goal_offset, pos_direction, transparency_direction = -1):\n initial_transparency = self.sprite_transparency\n initial_offset = self.sprite_rect.centerx\n for current_tick in range(0, ticks):\n self.sprite_transparency = initial_transparency + (transparency_direction * (tween.easeOutSine(current_tick / ticks) * goal_transparency))\n if pos_direction == \"left\":\n self.sprite_pos_x = initial_offset - (tween.easeOutSine(current_tick / ticks) * goal_offset)\n elif pos_direction == \"right\":\n self.sprite_pos_x = initial_offset + (tween.easeOutSine(current_tick / ticks) * goal_offset)\n dt = clock.tick(60)\n\n def shake(self, system_instance, magnitude):\n while system_instance.able_to_update == True:\n if system_instance.able_to_update == False:\n break\n dt = clock.tick(60)\n initial_pos_x = self.sprite_pos_x\n initial_pos_y = self.sprite_pos_y\n while system_instance.able_to_update == False:\n random_shake_x = random.randint(-magnitude, magnitude)\n random_shake_y = random.randint(-magnitude, magnitude)\n self.sprite_pos_x = initial_pos_x + random_shake_x\n self.sprite_pos_y = initial_pos_y + random_shake_y\n dt = clock.tick(60)\n\n def change_sprite(self, new_char_name, new_sprite_name, new_sprite_pos, system_instance):\n if new_char_name != \"REMOVE\":\n self.char_name = new_char_name\n self.sprite_image = self.sprite_dict[self.char_name][new_sprite_name]\n self.sprite_rect = self.sprite_image.get_rect()\n if new_sprite_pos != \"default\":\n splitted = new_sprite_pos.split(\"|\")\n self.sprite_pos_x = int(splitted[0])\n self.sprite_pos_y = int(splitted[1])\n self.sprite_rect.center = (int(splitted[0]), int(splitted[1]))\n else:\n if self.type == \"left\":\n self.direction = False\n self.sprite_pos_x = 300\n self.sprite_pos_y = 550\n elif self.type == \"right\":\n self.direction = True\n self.sprite_pos_x = 980\n self.sprite_pos_y = 550\n self.sprite_rect.center = (self.sprite_pos_x, self.sprite_pos_y)\n else:\n system_instance.character_list[self.type] = None\n\n def draw(self, system_instance):\n converted = self.sprite_image.convert()\n converted.set_alpha(self.sprite_transparency)\n if system_instance.obscure_speaker == True:\n if self.in_focus == False:\n # print(\"INVISIBLE: \", self.char_name)\n screen.blit(pygame.transform.flip(converted, self.direction, False), self.sprite_rect)\n else:\n # print(\"test\")\n screen.blit(pygame.transform.flip(converted, self.direction, False), self.sprite_rect)\n\n def update(self, system_instance):\n # if self.char_name == \"dio\":\n # print(\"dio: \", self.sprite_transparency)\n self.sprite_rect.centerx = self.sprite_pos_x\n self.sprite_rect.centery = self.sprite_pos_y\n if system_instance.obscure_speaker == True:\n if self.in_focus == True:\n self.sprite_transparency = 0\n self.draw(system_instance)\n else:\n self.draw(system_instance)\n\ndef draw_bg():\n screen.fill((255,255,255))\n\ntest_system = visual_novel_system(\"opening_scene\", \"black_screen\")\n# test_system.dim_background(100, 30)\n\nrun = True\n\nwhile run:\n draw_bg()\n test_system.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n run = False\n\n pygame.display.flip()\n dt = clock.tick(60)","repo_name":"nanayaGrindset/visualNovelTest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27687866436","text":"\nfrom time import time\n\nprint(\"Enter your name: \", end='')\nstart_time = time()\nname = input()\nelapsed = time() - start_time\nprint(name, \",it took you\", elapsed, \"seconds to respond\")\n\nstart = time()\nfor i in range(10000):\n print(i, end='')\nend = time()\nduration = end - start\nprint(\"duration= \", duration)\n","repo_name":"bichngoc08/PYTHON-TUTORIAL","sub_path":"Phần 5/40. Time_Clock.py","file_name":"40. Time_Clock.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19998006470","text":"#!/usr/bin/python3\n\nnums = [9,12,5,10,14,3,10]\npivot = 10\nlist1 = []\nlist2 = []\n\nfor num in nums:\n if num < pivot:\n list1.append(num)\n else:\n list2.append(num)\n\nlst = list1 + list2\nprint(lst)\n","repo_name":"Nisha-bs/Coding","sub_path":"parentheses.py","file_name":"parentheses.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10807987400","text":"from aux.protocol.transport import TCP_DEFAULT_FRAME_SIZE\nimport re\n\nclass DefaultController(object):\n\n def __init__(self, headers, transport, msg):\n self.headers = headers\n self.transport = transport\n self.msg = msg\n\n def read(self):\n raw_response = self.msg\n content_length = int(self.headers.get('Content-Length', 0))\n response = \"\"\n while 1:\n if content_length < 1:\n break\n if content_length > len(raw_response):\n raw_response += self.transport.recv()\n response += raw_response\n content_length -= len(raw_response)\n raw_response = \"\"\n return response\n\nclass NoContentController(object):\n def __init__(self, headers, transport, msg):\n self.headers = headers\n self.transport = transport\n self.msg = msg\n\n def read(self):\n return self.msg\n\n\nclass ChunkedController(object):\n\n def __init__(self, headers, transport, msg):\n self.headers = headers\n self.transport = transport\n self.msg = msg\n \n def read(self):\n re_chunk = re.compile(r'^([a-f|\\d]{1,4})\\r\\n')\n re_end_chunk = re.compile(r'^0\\r\\n\\r\\n0')\n re_single_end_chunk = re.compile(r'0\\r\\n\\r\\n')\n raw_response = self.msg\n response = \"\"\n block = 0\n chunk_cdown = 0\n i_next_chunk = 0\n while 1:\n if chunk_cdown == 0:\n next_chunk = re_chunk.findall(raw_response[0:8])\n end_chunk = re_end_chunk.findall(raw_response[0:8])\n broken_end_chunk = re_single_end_chunk.findall(raw_response[0:8])\n if len(next_chunk) > 0:\n i_next_chunk = int(next_chunk[0], 16)\n chunk_cdown = i_next_chunk\n raw_response = raw_response[len(next_chunk[0])+2:]\n if i_next_chunk == 0 or len(end_chunk) > 0:\n break\n if len(broken_end_chunk) > 0:\n break\n if i_next_chunk > len(raw_response):\n raw_response += self.transport.recv()\n if len(raw_response) <= 0:\n break\n block, nl_skip = (len(raw_response), 0) if len(raw_response) < chunk_cdown else (chunk_cdown, 1)\n response += raw_response[:block]\n raw_response = raw_response[block+nl_skip:]\n chunk_cdown -= block\n return response\n\ndef transferFactory(headers):\n content_length = headers.get('Content-Length', None)\n if content_length != None:\n if int(content_length) < 1:\n return NoContentController\n content_type = headers.get('Transfer-Encoding', None)\n if content_type != None:\n if 'chunked' in content_type.lower():\n return ChunkedController\n return DefaultController\n","repo_name":"bischjer/auxiliary","sub_path":"aux/protocol/http/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4038309105","text":"#!/usr/bin/env python3\ndescription = '''\nGet, and dump to text, the individuals table in the .trees output from a SLiM simulation.\n'''\n\nimport sys, os\nimport gzip\nimport glob\nimport re\nimport argparse\nimport struct\nimport numpy as np\n\nimport msprime\n\nparser = argparse.ArgumentParser(description=description)\nparser.add_argument(\"--tree_file\", \"-t\", type=str, nargs=\"*\", dest=\"tree_file\", \n help=\"name of file to load tree sequences from [default: .trees files in basedir.\")\nparser.add_argument(\"--basedir\", \"-o\", type=str, dest=\"basedir\", \n help=\"name of directory to save output files to.\")\nparser.add_argument(\"--indivfile\", \"-i\", type=str, nargs=\"*\", dest=\"indivfile\", \n help=\"name of output files [default: as trees but with .indiv.tsv]\")\nparser.add_argument(\"--logfile\", \"-g\", type=str, dest=\"logfile\", \n help=\"name of log file\")\n\nargs = parser.parse_args()\nargdict = vars(args)\n\nif args.basedir is None and args.indivfile is None:\n print(description)\n raise ValueError(\"Must specify at least basedir and indivfile (run with -h for help).\")\n\nif args.tree_file is None or len(args.tree_file) == 0:\n args.tree_file = glob.glob(os.path.join(args.basedir, \"*.trees\"))\n\nif args.indivfile is None or len(args.indivfile) == 0:\n args.indivfile = [os.path.join(args.basedir, re.sub(\"[.]trees$\", \"\", os.path.basename(x))) \n + \".indiv.tsv\" for x in args.tree_file]\n\nif args.logfile is None:\n args.logfile = os.path.join(args.basedir, \"get_individuals.log\")\n\nassert len(args.indivfile) == len(args.tree_file)\n\nlogfile = open(args.logfile, \"w\")\n\nclass slimIndividual(object):\n def __init__(self, table_row):\n ped_id, age, subpop, sex, flags = struct.unpack(\" list:\n images = []\n classes = []\n\n for folder in os.listdir(images_folder):\n for file in os.listdir(images_folder + folder):\n image = cv2.imread(images_folder + folder + '/' + file, cv2.COLOR_RGB2BGR)\n image = np.array(image)\n image = image.astype('float32')\n image = image/255\n images.append(image)\n classes.append(folder)\n\n return images, classes\n\n\ndef preprocess_image_for_model(path) -> np.array:\n new_image = cv2.imread(path)\n new_image = new_image.astype('float32')\n new_image = new_image/255\n corrected_image = np.expand_dims(new_image, axis=0)\n return corrected_image\n\n\n\ndef create_target_encoding(target_list) -> dict:\n \n target_dict = {target: index for index, target in enumerate(np.unique(target_list))}\n print(target_dict)\n\n numeric_targets = [target_dict[target_class] for target_class in target_list]\n\n return numeric_targets\n\n\n\ndef decode_numeric_target(numeric_class) -> str:\n \n output_dict = {'cap': 0, 'phone': 1, 'shoe': 2, 'small_box': 3, 'stuffed_toy': 4}\n\n numeric_dict = {value: key for key, value in output_dict.items()}\n\n for key, value in numeric_dict.items():\n if key == numeric_class:\n return value\n\n\ndef decode_four_target(numeric_class) -> str:\n \n output_dict = {'phone': 0, 'shoe': 1, 'small_box': 2, 'stuffed_toy': 3}\n\n numeric_dict = {value: key for key, value in output_dict.items()}\n\n for key, value in numeric_dict.items():\n if key == numeric_class:\n return value\n\n\ndef decode_first_model(numeric_class) -> str:\n \n output_dict = {'coffee_mug': 0, 'phone': 1, 'small_box': 2}\n\n numeric_dict = {value: key for key, value in output_dict.items()}\n\n for key, value in numeric_dict.items():\n if key == numeric_class:\n return value\n\n","repo_name":"padawanabhi/image_classifier_app","sub_path":"load_images.py","file_name":"load_images.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74038047096","text":"import os\r\n\r\ndef main():\r\n excelFileName = 'all_stock_data.xlsx'\r\n currentPath = str(os.path.abspath(os.getcwd()))\r\n desiredPath = currentPath.replace('src', 'US')\r\n excelFilePath = f'{desiredPath}/{excelFileName}'\r\n\r\n if os.path.exists(excelFilePath):\r\n os.remove(excelFilePath)\r\n print(f'\\nDeleted old {excelFilePath} file.') \r\n else:\r\n print('\\nNo pre-existing stock data file exists. ')\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Alex-Jarosz-1996/yf_us","sub_path":"src/RemoveAllStockData.py","file_name":"RemoveAllStockData.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7673819979","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Christian Heider Nielsen\"\n__doc__ = r\"\"\"\n\n Created on 28/07/2020\n \"\"\"\n\n__all__ = [\"orthogonal_reg\", \"RegLossWrapper\"]\n\nimport torch\nfrom torch import nn\n\n\nclass RegLossWrapper(torch.nn.Module):\n \"\"\"description\"\"\"\n\n def __init__(self, loss, model: torch.nn.Module, factor: float = 0.0005):\n super().__init__()\n self.loss = loss\n self.l1_crit = torch.nn.L1Loss()\n self.a = torch.zeros(1)\n self.factor = factor\n self.params = []\n for name, param in model.named_parameters():\n if \"bias\" not in name:\n self.params.append(param)\n\n def forward(self, *loss, **kwargs) -> torch.Tensor:\n \"\"\"description\"\"\"\n return self.loss(*loss) + self.factor * sum(\n [self.l1_crit(p, self.a) for p in self.params]\n )\n\n\ndef orthogonal_reg(model, reg: float = 1e-6) -> None:\n \"\"\"description\"\"\"\n with torch.enable_grad():\n orth_loss = torch.zeros(1)\n for name, param in model.named_parameters():\n if \"bias\" not in name:\n param_flat = param.reshape(param.shape[0], -1)\n sym = torch.mm(param_flat, torch.t(param_flat))\n sym -= torch.eye(param_flat.shape[0])\n orth_loss += reg * sym.abs().sum()\n\n\nif __name__ == \"__main__\":\n\n def bb() -> None:\n \"\"\"\n :rtype: None\n \"\"\"\n from draugr.torch_utilities.optimisation.parameters.initialisation import (\n normal_init_weights,\n )\n\n i = torch.randn(3, 5, requires_grad=True)\n model = torch.nn.Linear(5, 5)\n normal_init_weights(model)\n target = torch.empty(3, dtype=torch.long).random_(5)\n loss_fn = RegLossWrapper(nn.CrossEntropyLoss(), model)\n\n def a(m):\n \"\"\"description\"\"\"\n loss = loss_fn(m(i), target)\n print(loss)\n loss.backward()\n\n a(model)\n a(model)\n normal_init_weights(model, 1.0)\n a(model)\n\n bb()\n","repo_name":"cnheider/draugr","sub_path":"draugr/torch_utilities/optimisation/parameters/regularisation/reg_loss_wrapper.py","file_name":"reg_loss_wrapper.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"18921812795","text":"# STINGERS/TRIGGERS\r\n#\r\n# Some objects can contain a reference to another object, that plays when the game posts\r\n# a \"trigger\" (via API or calling a CAkTrigger from an event)\r\n\r\nclass CAkStinger(object):\r\n def __init__(self, node):\r\n self.ntrigger = None\r\n self.ntid = None\r\n self.tid = None\r\n self._build(node)\r\n\r\n def _build(self, node):\r\n self.ntrigger = node.find1(name='TriggerID') #idExt called from trigger action\r\n self.ntid = node.find1(name='SegmentID') #musicsegment to play (may be 0)\r\n if self.ntid:\r\n self.tid = self.ntid.value()\r\n\r\nclass CAkStingerList(object):\r\n def __init__(self, node):\r\n self.stingers = []\r\n self._build(node)\r\n\r\n def _build(self, node):\r\n nstingers = node.finds(name='CAkStinger')\r\n if not nstingers:\r\n return\r\n\r\n for nstinger in nstingers:\r\n stinger = CAkStinger(nstinger)\r\n if stinger.tid:\r\n self.stingers.append(stinger)\r\n","repo_name":"bnnm/wwiser","sub_path":"wwiser/generator/render/bnode_stinger.py","file_name":"bnode_stinger.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"22"} +{"seq_id":"32490920525","text":"from employee import Employee\n\nfile_name = 'test_employee_file.txt'\n\nwith open(file_name, 'w') as f:\n f.writelines(\n [\n 'Kevin Bacon,kbacon@example.com,CEO,555-867-5309\\n',\n 'Bruce Wayne,bwayne@example.com,President,\\n'\n ]\n )\n\nemployees = Employee.get_all(file_name)\n\nassert len(employees) == 2\n\nassert (\n Employee.get_at_line(1, file_name).__dict__ == employees[0].__dict__\n)\n\ncmo = Employee('Batty White', 'bwhite@example.com', 'CMO')\ncmo.save(file_name)\n\nassert len(Employee.get_all(file_name)) == 3\n\npresident = employees[1]\n\npresident.phone_number = '555-555-5555'\npresident.save(file_name)\n\nnew_president = Employee.get_at_line(president.identifier, file_name)\n\nassert new_president.phone_number == '555-555-5555'\n","repo_name":"smorenburg/python","sub_path":"src/old/acloudguru/test_employee.py","file_name":"test_employee.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7563096895","text":"#!/usr/bin/env python\n# coding: utf8\n\"\"\"A simple example of extracting relations between phrases and entities using\nspaCy's named entity recognizer and the dependency parse. Here, we extract\nmoney and currency values (entities labelled as MONEY) and then check the\ndependency tree to find the noun phrase they are referring to – for example:\n$9.4 million --> Net income.\n\nCompatible with: spaCy v2.0.0+\nLast tested with: v2.2.1\n\"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport spacy\n\ndef filter_spans(spans):\n # Filter a sequence of spans so they don't contain overlaps\n # For spaCy 2.1.4+: this function is available as spacy.util.filter_spans()\n get_sort_key = lambda span: (span.end - span.start, -span.start)\n sorted_spans = sorted(spans, key=get_sort_key, reverse=True)\n result = []\n seen_tokens = set()\n for span in sorted_spans:\n # Check for end - 1 here because boundaries are inclusive\n if span.start not in seen_tokens and span.end - 1 not in seen_tokens:\n result.append(span)\n seen_tokens.update(range(span.start, span.end))\n result = sorted(result, key=lambda span: span.start)\n return result\n\n\ndef extract_currency_relations(doc):\n # Merge entities and noun chunks into one token\n spans = list(doc.ents) + list(doc.noun_chunks)\n spans = filter_spans(spans)\n with doc.retokenize() as retokenizer:\n for span in spans:\n retokenizer.merge(span)\n\n relations = []\n for money in filter(lambda w: w.ent_type_ == \"MONEY\", doc):\n if money.dep_ in (\"attr\", \"dobj\"):\n subject = [w for w in money.head.lefts if w.dep_ == \"nsubj\"]\n if subject:\n subject = subject[0]\n relations.append((subject, money))\n elif money.dep_ == \"pobj\" and money.head.dep_ == \"prep\":\n relations.append((money.head.head, money))\n return relations\n\n\nif __name__ == \"__main__\":\n plac.call(main)\n\n # Expected output:\n # Net income MONEY $9.4 million\n # the prior year MONEY $2.7 million\n # Revenue MONEY twelve billion dollars\n # a loss MONEY 1b","repo_name":"k8scaleio/SpaCyServer","sub_path":"src/entity_relation.py","file_name":"entity_relation.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3977410192","text":"\nfrom datetime import datetime, date\n\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\nfrom cajas.boxes.models.box_don_juan import BoxDonJuan\nfrom cajas.boxes.models.box_partner import BoxStatus\nfrom cajas.users.models.partner import Partner, PartnerType\nfrom cajas.chains.models.user_place import UserPlace\nfrom cajas.chains.models.chain_place import ChainPlace\nfrom cajas.concepts.models.concepts import Concept, ConceptType\nfrom cajas.general_config.models.exchange import Exchange\nfrom cajas.investments.models.investment import Investment\nfrom cajas.loans.models.loan import Loan, LoanType\nfrom cajas.loans.models.loan_history import LoanHistory\nfrom cajas.movement.models.movement_partner import MovementPartner\nfrom cajas.movement.models.movement_don_juan import MovementDonJuan\nfrom cajas.movement.services.partner_service import MovementPartnerManager\nfrom cajas.webclient.views.get_ip import get_ip\nfrom cajas.webclient.views.utils import get_object_or_none, get_president_user\n\npresident = get_president_user()\n\n\nclass PartnerCloseout(APIView):\n\n def post(self, request):\n total = 0\n self.validate_chains(request)\n self.validate_investments(request)\n self.validate_loans(request)\n self.generate_closeout(request)\n partner = get_object_or_404(Partner, pk=request.data['partner'])\n if partner.box.balance > 0:\n total = partner.box.balance\n return Response(\n \"Se ha hecho la liquidación exitosamente. El valor final de la caja del socio es ${}\".format(total),\n status=status.HTTP_200_OK\n )\n\n def validate_chains(self, request):\n data = request.data\n partner = get_object_or_404(Partner, pk=data['partner'])\n partner_destiny = get_object_or_404(Partner, code=data['partner_destiny'])\n office = partner.office\n today = date.today()\n user_places = UserPlace.objects.filter(user=partner.user)\n concept1 = get_object_or_404(Concept, name=\"Devolución Pago Cadenas\")\n for place in user_places:\n chain = place.chain_place.chain\n pay_date = place.chain_place.pay_date\n if pay_date > today:\n payments = place.related_payments.all().aggregate(Sum('pay_value'))\n total = payments['pay_value__sum']\n if total:\n MovementPartner.objects.create(\n box_partner=partner.box,\n concept=concept1,\n movement_type='IN',\n value=total,\n detail='Devolución Pago puestos de la cadena {}'.format(place.chain_place.chain),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n if partner_destiny.user == president:\n MovementDonJuan.objects.create(\n box_don_juan=BoxDonJuan.objects.get(office=office),\n concept=concept1.counterpart,\n movement_type='OUT',\n value=total,\n detail='Pagos puestos de la cadena {}'.format(place.chain_place.chain),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n else:\n MovementPartner.objects.create(\n box_partner=partner_destiny.box,\n concept=concept1.counterpart,\n movement_type='OUT',\n value=total,\n detail='Pagos puestos de la cadena {}'.format(place.chain_place.chain),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n else:\n month = datetime.now().month\n chain_places = place.chain_place.chain.places\n actual_place = ChainPlace.objects.get(chain=place.chain_place.chain, pay_date__month=month)\n actual_place = actual_place.name.split(\" \")\n actual_place_number = actual_place[1]\n total_places = int(chain_places) - int(actual_place_number)\n concept2 = get_object_or_404(Concept, name=\"Pago Puesto Cadena\")\n MovementPartner.objects.create(\n box_partner=partner.box,\n concept=concept2,\n movement_type='OUT',\n value=chain.place_value * total_places,\n detail='Pagos puestos faltantes de la cadena {}'.format(place.chain_place.chain),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n MovementDonJuan.objects.create(\n box_don_juan=BoxDonJuan.objects.get(office=office),\n concept=concept2.counterpart,\n movement_type='IN',\n value=chain.place_value * total_places,\n detail='Pagos puestos de la cadena {}'.format(place.chain_place.chain),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n\n place.user = partner_destiny.user\n place.save()\n\n def validate_investments(self, request):\n data = request.data\n partner = get_object_or_404(Partner, pk=data['partner'])\n office = partner.office\n investments = Investment.objects.filter(partner=partner)\n concept = Concept.objects.get(name=\"Inversión Negocios\", concept_type=ConceptType.DOUBLE)\n for i in investments:\n if i.investment_type == Investment.BUSINESS:\n pays = i.related_pays.all().aggregate(Sum('value'))\n if pays['value__sum']:\n MovementPartner.objects.create(\n box_partner=partner.box,\n concept=concept,\n movement_type='IN',\n value=pays['value__sum'],\n detail='Devolución Pagos inversión {}'.format(i),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n\n )\n MovementDonJuan.objects.create(\n box_don_juan=BoxDonJuan.objects.get(office=office),\n concept=concept.counterpart,\n movement_type='OUT',\n value=pays['value__sum'],\n detail='Devolución Pagos inversión {} al socio {}'.format(i, partner),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n\n def validate_loans(self, request):\n movement_partner_manager = MovementPartnerManager()\n data = request.data\n partner = get_object_or_404(Partner, pk=data['partner'])\n office = partner.office\n loans = Loan.objects.filter(lender=partner.user)\n partner_balance = partner.box.balance\n exchange = get_object_or_none(\n Exchange,\n currency=office.country.currency,\n month__month=datetime.now().month,\n )\n for loan in loans:\n if loan.loan_type == LoanType.SOCIO_DIRECTO:\n if loan.balance > 0:\n concept = Concept.objects.get(name='Pago Abono préstamo socio')\n total_loan = loan.balance_cop / exchange.exchange_cop_abono\n if partner_balance >= total_loan:\n value = total_loan\n else:\n value = partner_balance\n data = {\n 'partner': partner,\n 'box': partner.box,\n 'concept': concept,\n 'movement_type': 'OUT',\n 'value': value,\n 'detail': 'Pago préstamo por ${} de socio {}'.format(value, partner),\n 'date': datetime.now(),\n 'responsible': request.user,\n 'ip': get_ip(request)\n }\n movement_partner_manager.create_double(data)\n LoanHistory.objects.create(\n loan=loan,\n value=value,\n value_cop=value * exchange.exchange_cop_abono,\n date=datetime.now(),\n history_type=LoanHistory.ABONO,\n movement_type=LoanHistory.OUT\n )\n elif loan.loan_type == LoanType.EMPLEADO:\n if loan.balance > 0:\n if (partner_balance * 3) >= loan.balance:\n value = loan.balance * 3\n else:\n value = partner_balance\n concept = Concept.objects.get(name='Pago Abono préstamo socio')\n data = {\n 'partner': partner,\n 'box': partner.box,\n 'concept': concept,\n 'movement_type': 'OUT',\n 'value': value,\n 'detail': 'Pago préstamo por ${} de socio {} (Sale como retiro de socio)'.format(value, partner),\n 'date': datetime.now(),\n 'responsible': request.user,\n 'ip': get_ip(request)\n }\n movement_partner_manager.create_double(data)\n LoanHistory.objects.create(\n loan=loan,\n value=value,\n date=datetime.now(),\n history_type=LoanHistory.ABONO,\n movement_type=LoanHistory.OUT\n )\n\n def generate_closeout(self, request):\n partner = get_object_or_404(Partner, pk=request.data['partner'])\n box = partner.box\n balance = box.balance\n if box.balance > 0:\n concept = Concept.objects.get(name=\"Liquidación Sociedad\")\n MovementPartner.objects.create(\n box_partner=partner.box,\n concept=concept,\n movement_type='OUT',\n value=(balance / 3) * 2,\n detail='Liquidación sociedad',\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n if partner.partner_type == PartnerType.DIRECTO:\n MovementDonJuan.objects.create(\n box_don_juan=BoxDonJuan.objects.get(office=partner.office),\n concept=concept.counterpart,\n movement_type='IN',\n value=(balance / 3) * 2,\n detail='Liquidación Socio {}'.format(partner),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n else:\n MovementPartner.objects.create(\n box_partner=partner.direct_partner.box,\n concept=concept.counterpart,\n movement_type='IN',\n value=(balance / 3) * 2,\n detail='Liquidación Socio {}'.format(partner),\n date=datetime.now(),\n responsible=request.user,\n ip=get_ip(request)\n )\n box.box_status = BoxStatus.EN_LIQUIDACION\n else:\n box.box_status = BoxStatus.LIQUIDADA\n partner.is_active = False\n partner.save()\n box.save()\n","repo_name":"dmontoya1/cajas","sub_path":"cajas/users/api/views/partner_closeout.py","file_name":"partner_closeout.py","file_ext":"py","file_size_in_byte":12269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41721416120","text":"from dragonfly import Clipboard\nfrom dragonfly import Key\n\n\n__rule_counter = 0\n\n\ndef get_unique_rule_name():\n global __rule_counter\n __rule_counter += 1\n return str(__rule_counter)\n\n\ndef get_selected_text():\n clipboard = Clipboard()\n previous = clipboard.get_system_text()\n clipboard.set_system_text(\"\")\n Key(\"c-c/3\").execute()\n selected = clipboard.get_system_text()\n clipboard.set_text(previous)\n clipboard.copy_to_system()\n return selected\n\n\ndef set_clipboard(text):\n clipboard = Clipboard()\n clipboard.set_text(text)\n clipboard.copy_to_system()\n","repo_name":"Milchshakee/dragonfly_environment","sub_path":"modules/util/dragonfly_utils.py","file_name":"dragonfly_utils.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10036277511","text":"\nimport random\n\n\n__version__ = '0.3.1'\n\n\nclass Seed(object):\n instance = None\n seeders = {}\n fakers = {}\n\n @classmethod\n def __new__(cls, *args, **kwargs):\n if cls.instance is None:\n cls.instance = super(Seed, cls).__new__(*args, **kwargs)\n return cls.instance\n\n def __init__(self):\n pass\n\n @staticmethod\n def codename(locale=None):\n from django.conf import settings\n locale = locale or getattr(settings, 'LANGUAGE_CODE', None)\n codename = locale or 'default'\n return codename\n\n @classmethod\n def faker(cls, locale=None, codename=None):\n code = codename or cls.codename(locale)\n if code not in cls.fakers:\n from faker import Faker\n cls.fakers[code] = Faker(locale)\n cls.fakers[code].seed_instance(random.randint(1, 10000))\n return cls.fakers[code]\n\n @classmethod\n def seeder(cls, locale=None):\n code = cls.codename(locale)\n if code not in cls.seeders:\n faker = cls.fakers.get(code, None) or cls.faker(codename=code)\n from django_seed import seeder\n cls.seeders[code] = seeder.Seeder(faker)\n\n return cls.seeders[code]\n","repo_name":"Brobin/django-seed","sub_path":"django_seed/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":651,"dataset":"github-code","pt":"22"} +{"seq_id":"9728466023","text":"\"\"\"\nThe bjective is to print contents of \nhttps://eyes.nasa.gov/dsn/dsn.html in terminal.\n\n1. fetch data from the website with requests\n2. filter the appropriate data by creating a dictionary / array\n3. print the appropriate data\n1. go through isro exercises again.\n2. to print just the antenna's details.\n - request this https://eyes.nasa.gov/dsn/config.xml - done\n - parse this to dictionary (how to parse xml?)\n - print location and corresponding antennas.\n2.5. make a tree of temp.xml\n\n\"\"\"\nimport requests\n# this library parses xml into a ET class object\nimport xml.etree.ElementTree as ET\n\n# r.text is generally in 'str' format. \n# one has to manually decide to convert itto either array, or dictionary etc\nr = requests.get(\"https://eyes.nasa.gov/dsn/config.xml\")\ndata = r.text\nprint(type(data))\n\n\n# three modes to 'open' a file, r, w, w+\n# with open(\"temp.xml\", \"w\") as ffile:\n# ffile.write(data)\n\n\"\"\"\nWhen we do json.loads, we give the json string as parameter\nx = json.loads(\"{'a': 21}\")\n\"\"\"\n# xmldata = ET.parse(\"temp.xml\")\nroot = ET.fromstring(data)\nprint(root)\n\n# we can iterate through children\n\"\"\"\nfor child in root:\n print(child.tag)\n\"\"\"\n# or directly reference it like an array\nsites = root[0]\nspacecraft_map = root[1]\n\"\"\"sites dictionary will be of the following format \nsitesDictionary = {\n \"mdcc\": [{\"name\": \"DSS63\", \"friendlyName\": \"DSS 63\", \"type\": \"70M\"}, {}, {}]\n}\n\"\"\"\n\nsitesDictionary = {}\nfor site in sites:\n site_name = site.attrib[\"name\"]\n sitesDictionary[site_name] = []\n \"\"\"\n until this step, {'mdscc': [], 'gdscc': [], 'cdscc': []}\n is the result\n \"\"\"\n for dish in site:\n dish_dict = {\n \"name\": dish.attrib[\"name\"],\n \"friendly_name\": dish.attrib[\"friendlyName\"],\n \"type\": dish.attrib[\"type\"],\n }\n sitesDictionary[site_name].append(dish_dict)\n\nprint(sitesDictionary)\n\n\n\"\"\"\n\nParse SpacecraftMap\n\nspacecraftmap looks like an array of spacecraft.\nspacecraft is a dictionary with keys:\n - name\n - explorerName\n - friendlyName\n - thumbnail\n\"\"\"\nspacecraft_map_arr = []\n# run a for loop in spacecraft_map variable.\n\nfor spacecraft in spacecraft_map:\n spacecraft_dict = {\n \"name\": spacecraft.attrib[\"name\"],\n \"friendly_name\": spacecraft.attrib[\"friendlyName\"],\n \"explorerName\": spacecraft.attrib[\"explorerName\"],\n # we can't do the following because some spacecrafts don't have thumbnails uncomment the following line and try for yourself\n # \"thumbnail\": spacecraft.attrib[\"thumbnail\"], \n }\n\n # first we check if thumbnail in spacecraft.\n\n if \"thumbnail\" in spacecraft.keys():\n\n spacecraft_dict[\"thumbnail\"] = spacecraft.attrib[\"thumbnail\"]\n else:\n spacecraft_dict[\"thumbnail\"] = \"false\"\n spacecraft_map_arr.append(spacecraft_dict)\n\nprint(spacecraft_map_arr)\n","repo_name":"sapling99/dsn-parser","sub_path":"dsn-py/dsn.py","file_name":"dsn.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72019954615","text":"#-------------------------------------------------------------------------------\r\n# Name: module1\r\n# Purpose:\r\n#\r\n# Author: bent83\r\n#\r\n# Created: 08/10/2013\r\n# Copyright: (c) bent83 2013\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\nimport sys\r\nimport time\r\nimport ctypes\r\nfl = 'C:\\\\Program FIles (x86)\\\\MicroSMC\\\\'\r\nsys.path.append(fl)\r\nfrom pol_rotator_p35.Mot_pol_params import *\r\n\r\nbyref = ctypes.byref\r\n\r\nmot = ctypes.cdll.LoadLibrary('USMCDLL.dll')\r\n#runfile(r'Z:\\Mot_pol_params.py', wdir=r'Z:')\r\n\r\nclass A8MRU(object):\r\n\r\n def __init__(self):\r\n self.device = 0\r\n self.devs = USMC_Devices()\r\n out = mot.USMC_Init(byref(self.devs))\r\n self.mode,self.params,self.stparams = self.initializeSettings()\r\n self.setMode()\r\n self.setParams()\r\n self.state = USMC_State()\r\n self.degperstep = 0.075\r\n self.speed = ctypes.c_float(100)\r\n\r\n def __del__(self):\r\n self.MoveAbsWait(0)\r\n self.MotorOff()\r\n out = self.close()\r\n print(out)\r\n\r\n def setSpeed(self,vel=100,step=4):\r\n ''' step = 1,2,4,8\r\n vel in degrees / sec '''\r\n if vel > 350:\r\n step = 1\r\n if step in (1,2,4,8):\r\n self.stparams.SDivisor = step\r\n speedsteps = vel/(step*self.degperstep)\r\n self.speed = ctypes.c_float(speedsteps)\r\n out = self.setParams()\r\n return out\r\n\r\n def MotorOn(self):\r\n self.mode.ResetD = 0\r\n out = self.setMode()\r\n return out\r\n\r\n def MotorOff(self):\r\n self.mode.ResetD = 1\r\n out = self.setMode()\r\n return out\r\n\r\n def MoveAbs(self,pos):\r\n ''' move to absolute position\r\n pos in degrees '''\r\n index = int(pos/self.degperstep)\r\n out = mot.USMC_Start(self.device,index,byref(self.speed),byref(self.stparams))\r\n return out\r\n\r\n def MoveAbsWait(self,pos):\r\n starttime = time.time()\r\n index = int(pos/self.degperstep)\r\n out = mot.USMC_Start(self.device,index,byref(self.speed),byref(self.stparams))\r\n while (abs(pos-self.getCurPos())>2):\r\n time.sleep(0.01)\r\n endtime = time.time()\r\n print (endtime-starttime)\r\n return self.getCurPos()\r\n\r\n def MoveRel(self,jog=1):\r\n self.getState()\r\n index = self.state.CurPos + int(jog/self.degperstep)\r\n out = mot.USMC_Start(self.device,index,byref(self.speed),byref(self.stparams))\r\n return out\r\n\r\n def init(self):\r\n devs = USMC_Devices()\r\n out1 = mot.USMC_Init(byref(devs))\r\n return (devs,out1)\r\n\r\n def close(self):\r\n out = mot.USMC_Close()\r\n return out\r\n\r\n def getState(self):\r\n self.state = USMC_State()\r\n out = mot.USMC_GetState(self.device,byref(self.state))\r\n return out\r\n\r\n def getCurPos(self):\r\n self.state = USMC_State()\r\n out = mot.USMC_GetState(self.device,byref(self.state))\r\n pos = self.state.CurPos*(360./4800.)\r\n return pos\r\n\r\n def getMode(self):\r\n out = mot.USMC_GetMode(self.device,byref(self.mode))\r\n return out\r\n\r\n def setMode(self):\r\n out = mot.USMC_SetMode(self.device,byref(self.mode))\r\n return out\r\n\r\n def getParams(self):\r\n out = mot.USMC_GetParameters(self.device,byref(self.params))\r\n return out\r\n\r\n def setParams(self):\r\n out = mot.USMC_SetParameters(self.device,byref(self.params))\r\n return out\r\n\r\n def getStartParams(self):\r\n out = mot.USMC_GetStartParameters(self.device,byref(self.stparams))\r\n return out\r\n\r\n def Start(pos,stparams):\r\n device = 0\r\n speed = ctypes.c_float(100)\r\n out = mot.USMC_Start(device,pos,byref(speed),byref(stparams))\r\n return (speed,stparams,out)\r\n\r\n def Stop(self):\r\n out = mot.USMC_Stop(self.device)\r\n return out\r\n\r\n def initializeSettings(self):\r\n ''' mode '''\r\n mode = USMC_Mode()\r\n mode.PMode = 1 # Turn off buttons (1 = buttons disabled)\r\n mode.PReg = 1 # current reduction regime\r\n mode.ResetD = 1 # turn power off and make a whole step (True = apply)\r\n mode.EMReset = 0 # Quick power off\r\n mode.Tr1T = 0 # limit switch 1 True state\r\n mode.Tr2T = 0 # limit switch 2 True state\r\n mode.RotTrT = 0 # Rotary Transducer True state\r\n mode.TrSwap = 0 # if True, limit switches are to be swapped\r\n mode.Tr1En = 0 # if True, limit switch 1 enabled\r\n mode.Tr2En = 0 # if True, limit switch 2 enabled\r\n mode.RotTeEn = 0 # if True, rotary Transducer Operation enabled\r\n mode.RotTrOp = 0 # Rotary Transducer Operation Select (stop on error if True)\r\n mode.Butt1T = 0 # Button 1 True state\r\n mode.Butt2T = 0 # Button 2 True state\r\n mode.ResetRT = 0 # Reset Rotary Transducer\r\n mode.SyncOutEn = 0 # if True output synchornization enabled\r\n mode.SyncOUTR = 0 # if True output synchronization counter will be reset\r\n mode.SyncINOp = 0 # Synchronization input mode\r\n mode.SyncCount = 0 # number of steps after which synchronization output signal occurs\r\n mode.SyncInvert = 0 # Set this bit to True to invert output synchornization polarity\r\n mode.EncoderEn = 0 # Enable Encoder on pins (syncin,rottr)\r\n mode.EncoderInv = 0 # Invert Encoder Counter Direction\r\n mode.ResBEnc = 0 # Reset Encoder\r\n mode.ResEnc = 0 # Reset Encoder\r\n mode.Reserved # not used\r\n ''' params '''\r\n params = USMC_Parameters()\r\n params.AccelT = 980 # acceleration time in ms\r\n params.DecelT = 980 # deceleration time in ms\r\n params.PTimeout = 100 # Time (in ms) after which current will be reduced to 60% of normal\r\n params.BTimeout1 = 1000 # Time (in ms) after which speed of motor rotation will be equal to the one specified in BT01P\r\n params.BTimeout2 = 1000\r\n params.BTimeout3 = 1000\r\n params.BTimeout4 = 1000\r\n params.BTimeoutR # Time (in ms) after which reset command will be performed\r\n params.BTimeoutD # This field reserved\r\n params.MinP # Speed (steps/sec) while performing reset\r\n params.BTO1P = 2 # Speed after btimeout1\r\n params.BTO2P = 8\r\n params.BTO3P = 32\r\n params.BTO4P = 128\r\n params.MaxLoft = 1024 # Value in full steps that will be used in backlash operation\r\n params.StartPos = 1 # Current position saved to FLASH\r\n params.RTDelta = 600 # Revolution distance -- number of full steps per one full revolution\r\n params.RTMinError = 2 # number of full steps missed to raise error flag\r\n params.MaxTemp = 50 # maximum allowed temp\r\n params.SynOutP = 5 # duration of output synchronization pulse\r\n params.LoftPeriod = 3 # Speed (steps/sec) of the lst phase of the backlash operation\r\n params.EncMult = 1.0 # Encoder step multiplier\r\n params.Reserved = 0 # NA\r\n ''' start parameters '''\r\n stparams = USMC_StartParameters()\r\n stparams.SDivisor = 4 # Step is divided by this factor (1,2,4,8)\r\n stparams.DefDir = 0 # Direction for backlash operation (relative)\r\n stparams.LoftEn = 0 # Enable automatic backlash operation (works if slow start/stop mode is off)\r\n stparams.SlStart = 1 # if True slow start/stop enabled\r\n stparams.WSyncIN = 0 # if True, controller will wait for input synchronization to start\r\n stparams.SyncOutR = 0 # If True, output synchronization counter will be reset\r\n stparams.ForceLoft = 0 # if True and destination position is equal to the current position, backlash will be performed\r\n return (mode,params,stparams)\r\n","repo_name":"Knerlab/SIM_Control_Software","sub_path":"motorized_pol_v2.py","file_name":"motorized_pol_v2.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"41615333359","text":"import streamlit as st \n\n\ndef calculator_body():\n \"\"\" Lays out calculator page\"\"\"\n st.write(\"---\")\n col1, col2, col3 = st.beta_columns(3)\n with col1:\n num1 = st.number_input(label='Enter the first integer', step=1, key=\"1\")\n with col2:\n num2 = st.number_input(label='Enter the second integer', step=1, key=\"2\")\n with col3:\n operator = st.selectbox(label='Select an operator',\n options=['Add', 'Subtract',\n 'Multiply', 'Divide'])\n if st.button('Click here to calculate!'):\n if num2 == 0 and operator == 'Divide':\n st.error('Division by zero error. Enter a non-zero integer.')\n else:\n calculator_function(num1, num2, operator)\n\n\ndef calculator_function(num1, num2, operator):\n \"\"\" Calculator logic and result display\"\"\"\n if operator == 'Add': result = num1 + num2\n elif operator == 'Subtract': result = num1 - num2\n elif operator == 'Multiply': result = num1 * num2\n elif operator == 'Divide': result = num1 / num2\n st.success(f'The result is: **{result}**')\n\n ","repo_name":"Code-Institute-Solutions/streamlit-calculator","sub_path":"app_pages/page_calculator.py","file_name":"page_calculator.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"5857943460","text":"from singer.catalog import Catalog, CatalogEntry, Schema\n\nfrom tap_mailchimp.schema import get_schemas, PKS\n\ndef discover():\n schemas, field_metadata = get_schemas()\n catalog = Catalog([])\n\n for stream_name, schema_dict in schemas.items():\n schema = Schema.from_dict(schema_dict)\n metadata = field_metadata[stream_name]\n pk = PKS[stream_name]\n\n catalog.streams.append(CatalogEntry(\n stream=stream_name,\n tap_stream_id=stream_name,\n key_properties=pk,\n schema=schema,\n metadata=metadata\n ))\n\n return catalog\n","repo_name":"singer-io/tap-mailchimp","sub_path":"tap_mailchimp/discover.py","file_name":"discover.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"7022113508","text":"import sys\nimport gi\ngi.require_version('Gtk', '4.0')\nfrom gi.repository import Gtk, Gio, GLib\n\nQUIT = False\n\ndef quit_(window):\n global QUIT\n QUIT = True\n\nclass Ventana(Gtk.Window):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.box1 = Gtk.Box.new( Gtk.Orientation.VERTICAL,10) #(orientation VERTICAL|HORIZONTAL , spacing in pixels)\n self.set_child(self.box1)\n\n self.about_button = Gtk.Button.new()\n self.about_button.set_label(\"About\") # Or Change \"label Propertie\" self.about_button.props.label = \"About\"\n self.about_button.connect(\"clicked\",self.on_about_button_clicked,\"My Example App\")\n self.box1.append(self.about_button)\n\n self.quit_button = Gtk.Button.new_with_label(\"Quit\")\n self.quit_button.connect(\"clicked\",self.on_quit_button_clicked)\n self.quit_button.props.vexpand = True # Whether to expand vertically\n # https://amolenaar.github.io/pgi-docgen/index.html#Gtk-4.0/classes/Widget.html#Gtk.Widget.props.vexpand\n self.box1.append(self.quit_button)\n\n self.resolution640x480_button = Gtk.Button.new()\n self.resolution640x480_button.set_label(\"640x480\")\n self.resolution640x480_button.connect(\"clicked\",self.change_resolution, 640, 480)\n self.box1.append(self.resolution640x480_button)\n\n self.resolution800x600_button = Gtk.Button.new()\n self.resolution800x600_button.set_label(\"800x600\")\n self.resolution800x600_button.connect(\"clicked\",self.change_resolution, 800, 600)\n self.box1.append(self.resolution800x600_button)\n\n self.resolution1024x768_button = Gtk.Button.new()\n self.resolution1024x768_button.set_label(\"1024x768\")\n self.resolution1024x768_button.connect(\"clicked\",self.change_resolution, 1024, 768)\n self.box1.append(self.resolution1024x768_button)\n\n self.connect(\"close-request\", quit_)\n self.show()\n self.fullscreen()\n\n def change_resolution(self, change_resolution, a, b):\n self.unfullscreen()\n self.set_default_size(a,b)\n self.set_default_size(a,b)\n \n\n def on_quit_button_clicked(self,quit_clicked_button):\n quit_(self)\n\n def on_about_button_clicked(self,about_clicked_button,msg):\n print(msg)\n\n\nif __name__ == \"__main__\":\n Ventana()\n loop = GLib.MainContext().default()\n while not QUIT:\n loop.iteration(True)\n","repo_name":"Kiki1108/Prog-Avanzada","sub_path":"Unidad2/Clases/pruebas/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34761210584","text":"import math\ndef formatted(my_list):\n new = []\n for i in range(0, len(my_list)):\n number = my_list[i]\n #print(f\"{number:.2f}\")\n #print(number)\n new.append(f\"{number:.2f}\")\n return new\nif __name__ == \"__main__\":\n my_list = [1.234, 0.3333, 0.11111, 3.446]\n new_list = formatted(my_list)\n print(new_list)\n","repo_name":"VienThanh12/Python","sub_path":"integer_to_string.py","file_name":"integer_to_string.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28660357834","text":"import warnings\n\nimport torch\nimport torch.cuda.comm as comm\nfrom torch.autograd import Function\nfrom torch.cuda._utils import _get_device_index\n\n\nclass Broadcast(Function):\n\n @staticmethod\n def forward(ctx, target_gpus, *inputs):\n if not all(input.is_cuda for input in inputs):\n raise TypeError('Broadcast function not implemented for CPU tensors')\n target_gpus = list(map(lambda x: _get_device_index(x, True), target_gpus))\n ctx.target_gpus = target_gpus\n if len(inputs) == 0:\n return tuple()\n ctx.num_inputs = len(inputs)\n ctx.input_device = inputs[0].get_device()\n outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)\n non_differentiables = []\n for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):\n if not input_requires_grad:\n for output in outputs:\n non_differentiables.append(output[idx])\n ctx.mark_non_differentiable(*non_differentiables)\n return tuple([t for tensors in outputs for t in tensors])\n\n @staticmethod\n def backward(ctx, *grad_outputs):\n return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)\n\n\nclass ReduceAddCoalesced(Function):\n\n @staticmethod\n def forward(ctx, destination, num_inputs, *grads):\n ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]\n\n grads = [grads[i:i + num_inputs]\n for i in range(0, len(grads), num_inputs)]\n return comm.reduce_add_coalesced(grads, destination)\n\n @staticmethod\n def backward(ctx, *grad_outputs):\n return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)\n\n\nclass Gather(Function):\n\n @staticmethod\n def forward(ctx, target_device, dim, *inputs):\n assert all(map(lambda i: i.is_cuda, inputs))\n target_device = _get_device_index(target_device, True)\n ctx.target_device = target_device\n ctx.dim = dim\n ctx.input_gpus = tuple(map(lambda i: i.get_device(), inputs))\n if all(t.dim() == 0 for t in inputs) and dim == 0:\n inputs = tuple(t.view(1) for t in inputs)\n warnings.warn('Was asked to gather along dimension 0, but all '\n 'input tensors were scalars; will instead unsqueeze '\n 'and return a vector.')\n ctx.unsqueezed_scalar = True\n else:\n ctx.unsqueezed_scalar = False\n ctx.input_sizes = tuple(map(lambda i: i.size(ctx.dim), inputs))\n return comm.gather(inputs, ctx.dim, ctx.target_device)\n\n @staticmethod\n def backward(ctx, grad_output):\n scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)\n if ctx.unsqueezed_scalar:\n scattered_grads = tuple(g[0] for g in scattered_grads)\n return (None, None) + scattered_grads\n\n\nclass Scatter(Function):\n\n @staticmethod\n def forward(ctx, target_gpus, chunk_sizes, dim, input):\n target_gpus = list(map(lambda x: _get_device_index(x, True), target_gpus))\n ctx.dim = dim\n ctx.input_device = input.get_device() if input.is_cuda else -1\n streams = None\n if ctx.input_device == -1:\n # Perform CPU to GPU copies in a background stream\n streams = [_get_stream(device) for device in target_gpus]\n outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)\n # Synchronize with the copy stream\n if streams is not None:\n for i, output in enumerate(outputs):\n with torch.cuda.device(target_gpus[i]):\n main_stream = torch.cuda.current_stream()\n main_stream.wait_stream(streams[i])\n output.record_stream(main_stream)\n return outputs\n\n @staticmethod\n def backward(ctx, *grad_output):\n return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)\n\n\n# background streams used for copying\n_streams = None\n\n\ndef _get_stream(device):\n \"\"\"Gets a background stream for copying between CPU and GPU\"\"\"\n global _streams\n if device == -1:\n return None\n if _streams is None:\n _streams = [None] * torch.cuda.device_count()\n if _streams[device] is None:\n _streams[device] = torch.cuda.Stream(device)\n return _streams[device]\n","repo_name":"ryfeus/lambda-packs","sub_path":"pytorch/source/torch/nn/parallel/_functions.py","file_name":"_functions.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"39183199366","text":"#!/usr/bin/env python3\n# Author: Simeon Reusch (simeon.reusch@desy.de)\n# License: BSD-3-Clause\n\nimport getpass\nimport logging\nimport os\n\nimport keyring\nimport pandas as pd\nimport requests\n\nimport backoff\nimport ztfquery\nfrom astropy import units as u\nfrom astroquery.ipac.irsa import Irsa\nfrom ztfnuclear import io\nfrom ztfnuclear.ampel_api import ampel_api_catalog, ampel_api_distnr, ampel_api_sgscore\nfrom ztfnuclear.database import WISE, SarahAGN\n\nlogger = logging.getLogger(__name__)\n\n\n@backoff.on_exception(\n backoff.expo,\n requests.exceptions.RequestException,\n max_time=600,\n)\ndef query_catwise(ra_deg: float, dec_deg: float, searchradius_arcsec=5):\n \"\"\"\n Query the CatWISE 2020 catalog\n \"\"\"\n\n logger.debug(\"Querying CatWISE2020\")\n\n table = Irsa.query_region(\n catalog=\"catwise_2020\",\n coordinates=f\"{ra_deg},{dec_deg}\",\n spatial=\"Cone\",\n radius=searchradius_arcsec * u.arcsec,\n # selcols=\"ra,dec,dist\",\n )\n df = table.to_pandas()\n if len(df) == 0:\n return {\"catWISE2020\": {}}\n\n first_row = df.iloc[0]\n ra = first_row[\"ra\"]\n dec = first_row[\"dec\"]\n w1 = first_row[\"w1mpro\"]\n w2 = first_row[\"w2mpro\"]\n dist = first_row[\"dist\"]\n\n resdict = {\"RA\": ra, \"Dec\": dec, \"Mag_W1\": w1, \"Mag_W2\": w2, \"dist_arcsec\": dist}\n\n return {\"catWISE2020\": resdict}\n\n\n@backoff.on_exception(\n backoff.expo,\n requests.exceptions.RequestException,\n max_time=600,\n)\ndef query_marshal(ztfid):\n \"\"\"\n I was thinking I would not need the Marshal. Sweet summerchild...\n Anyway, with this we parse the html for a transient page\n \"\"\"\n from bs4 import BeautifulSoup as bs\n\n marshal_baseurl = (\n \"http://skipper.caltech.edu:8080/cgi-bin/growth/view_source.cgi?name=\"\n )\n logger.debug(f\"Querying the Growth Marshal for {ztfid}\")\n username = keyring.get_password(\"marshal\", f\"marshal_user\")\n password = keyring.get_password(\"marshal\", f\"marshal_password\")\n\n if username is None:\n username = input(f\"Enter your Growth Marshal login: \")\n password = getpass.getpass(\n prompt=f\"Enter your Growth Marshal password: \", stream=None\n )\n keyring.set_password(\"marshal\", f\"marshal_user\", username)\n keyring.set_password(\"marshal\", f\"marshal_password\", password)\n auth = (username, password)\n\n url = marshal_baseurl + ztfid\n\n response = requests.get(url, auth=auth)\n df = pd.read_html(response.text)[0]\n rowid = df.apply(lambda row: row.astype(str).str.contains(ztfid).any(), axis=1)\n useful_rows = df.loc[rowid]\n if len(useful_rows) > 0:\n maybe_class = useful_rows[1].values[0].split(\" \")[1]\n if len(maybe_class.split(\":\")) > 1:\n return {\"Marshal\": {}}\n else:\n logger.debug(f\"{ztfid}: Classification found ({maybe_class})\")\n return {\"Marshal\": {\"class\": maybe_class}}\n else:\n return {\"Marshal\": {}}\n\n\ndef query_ned_for_z(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 10\n) -> dict | None:\n \"\"\"Function to obtain redshifts from NED (via the AMPEL API)\"\"\"\n logger.info(f\"Querying NEDz for redshift\")\n\n res = ampel_api_catalog(\n catalog=\"NEDz_extcats\",\n catalog_type=\"extcats\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n search_type=\"nearest\",\n )\n\n if res:\n return {\"NEDz\": float(res[\"body\"][\"z\"]), \"NEDz_dist\": float(res[\"dist_arcsec\"])}\n\n else:\n return {\"NEDz\": {}}\n\n\ndef query_crts(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 5\n) -> dict | None:\n logger.debug(f\"Querying CRTS DR1 if variable star\")\n\n res = ampel_api_catalog(\n catalog=\"CRTS_DR1\",\n catalog_type=\"extcats\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n )\n\n if res:\n logger.debug(\"CRTS: Match\")\n if \"name\" in res[0][\"body\"].keys():\n return {\n \"CRTS\": {\n \"name\": str(res[0][\"body\"][\"name\"]),\n \"dist\": float(res[0][\"dist_arcsec\"]),\n }\n }\n else:\n logger.debug(\"CRTS: No match\")\n return {\"CRTS\": {}}\n\n else:\n logger.debug(\"CRTS: No match\")\n return {\"CRTS\": {}}\n\n\ndef query_milliquas(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 1.5\n) -> dict | None:\n \"\"\"Query Milliquas\"\"\"\n\n logger.debug(\"Querying Milliquas for AGN/Quasar\")\n\n res = ampel_api_catalog(\n catalog=\"milliquas\",\n catalog_type=\"extcats\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n )\n\n if res:\n if len(res) == 1:\n if \"body\" in res[0].keys():\n if res[0][\"body\"][\"broad_type\"]:\n if \"q\" in res[0][\"body\"][\"broad_type\"]:\n logger.debug(\"Milliquas: QSO match found\")\n return {\n \"Milliquas\": {\n \"name\": str(res[0][\"body\"][\"name\"]),\n \"type\": \"QSO\",\n \"qso_prob\": float(res[0][\"body\"][\"qso_prob\"]),\n \"dist\": float(res[0][\"dist_arcsec\"]),\n }\n }\n\n else:\n logger.debug(\"Milliquas: Non-QSO match found\")\n return {\n \"Milliquas\": {\n \"name\": str(res[0][\"body\"][\"name\"]),\n \"type\": str(res[0][\"body\"][\"broad_type\"]),\n \"dist\": float(res[0][\"dist_arcsec\"]),\n }\n }\n else:\n logger.debug(\"Milliquas: No match\")\n return {\"Milliquas\": {}}\n else:\n logger.debug(\"Milliquas: No match\")\n return {\"Milliquas\": {}}\n else:\n logger.debug(\"Milliquas: Multiple matches found\")\n return {\"Milliquas\": \"multiple_matches\"}\n else:\n logger.debug(\"Milliquas: No match\")\n return {\"Milliquas\": {}}\n\n\ndef query_gaia(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 1.5\n) -> dict | None:\n \"\"\"Query Gaia\"\"\"\n logger.debug(\"Querying Gaia for parallax\")\n res = ampel_api_catalog(\n catalog=\"GAIADR2\",\n catalog_type=\"catsHTM\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=5.0,\n )\n if res:\n if res[0][\"body\"][\"Plx\"] is not None:\n plx_sig = float(res[0][\"body\"][\"Plx\"]) / float(res[0][\"body\"][\"ErrPlx\"])\n if plx_sig > 3.0:\n logger.debug(\"Gaia: Match.\")\n return {\n \"Gaia\": {\"parallax_sigma\": plx_sig, \"dist\": res[0][\"dist_arcsec\"]}\n }\n else:\n logger.debug(\"Gaia: Match, but no significant parallax found\")\n return {\"Gaia\": {}}\n else:\n logger.debug(\"Gaia: Match, but no parallax\")\n return {\"Gaia\": {}}\n else:\n logger.debug(\"Gaia: No match found\")\n return {\"Gaia\": {}}\n\n\ndef query_sdss(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 1.5\n) -> dict | None:\n \"\"\"Query SDSS\"\"\"\n logger.debug(\"Querying SDSS for probable star\")\n\n res = ampel_api_catalog(\n catalog=\"SDSSDR10\",\n catalog_type=\"catsHTM\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n )\n if res:\n if len(res) == 1:\n if float(res[0][\"body\"][\"type\"]) == 6.0:\n logger.debug(\"SDSS: Match found, it's a star\")\n return {\"SDSS\": {\"type\": \"star\", \"dist\": res[0][\"dist_arcsec\"]}}\n else:\n logger.debug(\"SDSS: Match found, but no star\")\n return {\"SDSS\": {}}\n else:\n logger.debug(\"SDSS: Multiple matches found\")\n return {\"SDSS\": \"multiple_matches\"}\n\n else:\n return {\"SDSS\": {}}\n\n\ndef query_tns(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 1.5\n) -> dict | None:\n \"\"\"\n Query the AMPEL-hosted copy of TNS\n \"\"\"\n logger.debug(\"Querying TNS\")\n\n res = ampel_api_catalog(\n catalog=\"TNS\",\n catalog_type=\"extcats\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n search_type=\"nearest\",\n )\n\n if res:\n logger.debug(\"TNS: Match found\")\n res_body = res[\"body\"]\n name = res_body[\"objname\"]\n prefix = res_body[\"name_prefix\"]\n full_name = prefix + name\n classification = res_body.get(\"object_type\", {}).get(\"name\")\n redshift = res_body.get(\"redshift\")\n dist_arcsec = res[\"dist_arcsec\"]\n\n return {\n \"TNS\": {\n \"name\": full_name,\n \"type\": classification,\n \"dist\": dist_arcsec,\n \"z\": redshift,\n },\n }\n\n return {\"TNS\": {}}\n\n\ndef query_wise_cat(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 3\n) -> dict | None:\n \"\"\"\n Query the AMPEL-hosted WISE catalogue\n \"\"\"\n logger.debug(\"Querying WISE catalogue\")\n\n res = ampel_api_catalog(\n catalog=\"WISE\",\n catalog_type=\"catsHTM\",\n ra_deg=ra_deg,\n dec_deg=dec_deg,\n search_radius_arcsec=searchradius_arcsec,\n search_type=\"nearest\",\n )\n\n if res:\n final_res = (\n {\"WISE_cat\": res[\"body\"]} if \"body\" in res.keys() else {\"WISE_cat\": {}}\n )\n import numpy as np\n\n if final_res[\"WISE_cat\"]:\n final_res[\"WISE_cat\"].update(\n {\n \"RA\": np.degrees(final_res[\"WISE_cat\"][\"RA\"]),\n \"Dec\": np.degrees(final_res[\"WISE_cat\"][\"Dec\"]),\n }\n )\n logger.debug(\"WISE cat (AMPEL): Match found\")\n return final_res\n\n return {\"WISE_cat\": {}}\n\n\ndef query_wise(ra_deg: float, dec_deg: float, searchradius_arcsec: float = 20) -> dict:\n \"\"\"\n Obtains WISE object RA and Dec from parquet file\n \"\"\"\n wise = WISE()\n\n res = wise.query(\n ra_deg=ra_deg, dec_deg=dec_deg, searchradius_arcsec=searchradius_arcsec\n )\n\n if res:\n logger.debug(\"WISE: Match found\")\n return {\"WISE\": res}\n\n return {\"WISE\": {}}\n\n\ndef query_sarah_agn(\n ra_deg: float, dec_deg: float, searchradius_arcsec: float = 5\n) -> dict:\n \"\"\"\n Query the local AGN catalog that Sarah curated\n \"\"\"\n agn = SarahAGN()\n res = agn.query(\n ra_deg=ra_deg, dec_deg=-dec_deg, searchradius_arcsec=searchradius_arcsec\n )\n if res:\n logger.debug(\"Sarah AGN: Match found\")\n return {\"Sarah_agn\": res}\n\n return {\"Sarah_agn\": {}}\n\n\ndef query_bts(ztfid) -> dict:\n \"\"\"\n Query the local BTS csv file\n \"\"\"\n bts_df = pd.read_csv(io.LOCALSOURCE_bts_info)\n bts_df.query(\"ZTFID == @ztfid\", inplace=True)\n\n res = {}\n\n if len(bts_df[\"type\"]) > 0:\n res.update({\"class\": bts_df[\"type\"].values[0]})\n\n return res\n\n\ndef query_ampel_sgscore(ztfid: str) -> dict:\n \"\"\"\n Gets the median core distance from Ampel alerts\n \"\"\"\n res = ampel_api_sgscore(ztfid=ztfid)\n\n if res is not None:\n logger.debug(\"Ampel sgscore: Match found\")\n else:\n res = {}\n\n return {\"sgscore\": res}\n\n\ndef query_ampel_dist(ztfid: str) -> dict:\n \"\"\"\n Gets the median core distance from Ampel alerts\n \"\"\"\n res = ampel_api_distnr(ztfid=ztfid)\n\n if res is not None:\n logger.debug(\"Ampel dist: Match found\")\n else:\n res = {}\n\n return {\"distnr\": res}\n","repo_name":"simeonreusch/ztfnuclear","sub_path":"ztfnuclear/crossmatch.py","file_name":"crossmatch.py","file_ext":"py","file_size_in_byte":11802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27598171159","text":"#!/usr/bin/env python3\nimport asyncio\nimport os\nimport sys\nimport time\nimport webbrowser\nfrom typing import AnyStr, Callable\n\nfrom PySide6 import QtWidgets, QtCore\nfrom PySide6.QtCore import *\nfrom PySide6.QtGui import *\nfrom PySide6.QtWidgets import *\nfrom qt_material import apply_stylesheet\n\nfrom MyCommon import list_jpg, join_path, list_dir\nfrom TextOut import TextOut\nfrom myparser import search_dup\nfrom myparser.CosplayMoveWidget import CosplayMoveWidget\nfrom myparser.CosplayParseWidget import CosplayParseWidget, XinmeituluListWidget\nfrom myparser.InfoImage import InfoImage\nfrom myparser.RenameHint import RenameHint\nfrom myqt.CommonDialog import RenameDialog, RenameImageDialog\nfrom myqt.MyDirModel import MyDirModel\nfrom myqt.MyQtCommon import QtHBox, QtVBox, MyButton, fa_icon\nfrom myqt.MyQtFlow import MyQtScrollableFlow\nfrom myqt.QtImage import MyImageBox, MyImageSource, MyImageDialog\nfrom myqt.MyQtSetting import MySetting, SettingDialog\nfrom myqt.MyQtWorker import MyThread, MyThreadPool\n\n\nclass MainWidget(QtWidgets.QWidget):\n info_out = Signal(str)\n image_signal = Signal(str, QSize, MyImageSource)\n new_image_signal = Signal(str, QSize, MyImageSource)\n progress_reset_signal = Signal(int)\n progress_signal = Signal(int)\n page_display = Signal(int)\n refresh_and_select = Signal(str)\n\n def __init__(self):\n super().__init__()\n\n # self.threadpool = QThreadPool()\n\n \"\"\"\n database = QFontDatabase()\n fontFamilies = database.families()\n print(fontFamilies)\n awFont = QFont(\"Font Awesome 5 Free\", 34)\n print(fa.icons['thumbs-up'])\n \"\"\"\n\n self.but_exit = MyButton(fa_icon('mdi.exit-run'), self.safe_exit)\n self.but_settings = MyButton(fa_icon('ri.settings-3-line'), self.action_settings)\n self.but_find_dup = MyButton('Dup', self.action_find_dup)\n\n self.but_show_folder_local = MyButton(\"Folders\", self.action_show_folder_local)\n self.but_show_images_local = MyButton(fa_icon('fa5.images'), self.action_show_images_local)\n\n self.but_move_folder = MyButton(\"Move\", self.action_move_folder)\n self.but_parse = MyButton(fa_icon('ri.image-add-line'), self.action_parse_page)\n\n h_box_top_bar = QtHBox().addAll(self.but_exit,\n self.but_settings,\n self.but_find_dup,\n self.but_move_folder,\n self.but_parse)\n\n self.progress_bar = QProgressBar(self)\n self.progress_bar.setMinimum(0)\n self.progress_bar.setMaximum(1)\n self.progress_bar.setValue(1)\n\n self.image_frame = MyImageBox(self, QSize(300, 240))\n\n self.txt_folder = QLineEdit(\"\")\n self.txt_url = QLineEdit(\"\")\n self.txt_count = QLineEdit(\"\")\n self.txt_count.setMaximumWidth(60)\n # self.txt_url.setReadOnly(True)\n self.but_open_url = MyButton(fa_icon('fa.chrome'), self.action_open_url, icon_size=24)\n self.but_update = MyButton(fa_icon('fa5s.cloud-download-alt'), self.action_update, icon_size=24)\n\n h_box_url = QtHBox().addAll(self.txt_url, self.txt_count, self.but_open_url, self.but_update)\n\n self.txt_date = QLineEdit(\"\")\n\n self.rename_dialog = None\n\n self.cos_root = settings.valueStr(\"cosplay/root\")\n\n self.but_rename_images = MyButton(\"Rename\", self.action_rename_images)\n\n shortcut = {\n \"Root\": self.cos_root,\n \"Download\": settings.valueStr(\"cosplay/download\"),\n \"Comic\": \"d:/comic\",\n \"Cosplay\": \"d:/Cosplay\",\n \"Photo\": \"d:/photo\",\n \"DouJin\": \"x:/[DOUJINSHI]\",\n }\n\n h_box_move_1 = QtHBox().addAll(\n self.but_rename_images,\n self.but_show_folder_local,\n self.but_show_images_local,\n )\n\n self.lang_convert_list = [\n [\"デート・ア・ライブ\", \"約會大作戰\", \"DATE A LIVE\"],\n [\"アルトリア\", \"阿爾托莉雅\", \"Alter\"],\n [\"ネロ\", \"尼祿\", None],\n [\"玉藻前\", \"玉藻前\", \"Tamamo\"],\n [\"ワンピース\", \"ONE PIECE\", \"ONE PIECE\"],\n [\"マシュ\", \"瑪修\", \"Matthew\"],\n [\"スカサハ\", \"斯卡哈\", None],\n [\"水着\", \"泳裝\", None],\n [\"花嫁\", \"婚紗\", None],\n [\"ネコぱら\", \"NEKOPARA\", \"NEKOPARA\"],\n [\"ホロライブ\", \"hololive\", \"hololive\"],\n [\"ショコラ\", \"巧克力\", None],\n [\"バニラ\", \"香草\", None],\n [\"霞ヶ丘詩羽\", \"霞之丘詩羽\", None],\n [\"ヱヴァンゲリヲン\", \"新世紀福音戰士\", \"EVA\"],\n [\"鬼滅の刃\", \"鬼滅之刃\", None],\n [\"初音ミク\", \"初音未來\", \"Miku\"],\n [\"LOL \", \"英雄聯盟 \", \"LOL \"],\n [\"Ahri\", \"阿狸\", \"Ahri\"],\n [\"黒獣\", \"黑獸\", None],\n [\"バイオハザード\", \"BIOHAZARD\", \"BIOHAZARD\"],\n [\"コヤンスカヤ\", \"高揚斯卡娅\", None],\n [\"冴えない彼女の育てかた\", \"路人女主的養成方式\", None],\n [\"賭ケグルイ\", \"狂賭之淵\", None],\n [\"酒吞童子\", \"酒呑童子\", None],\n [\"Tifa\", \"蒂法\", \"Tifa\"],\n [\"プリンセスコネクト\", \"公主連結\", None],\n [\"小林さんちのメイド\", \"小林家的龍女僕\", None],\n [\"リゼロ\", \"從零開始的異世界生活\", None],\n [\"レム\", \"蕾姆\", None],\n [\"ZONE-00\", \"零之地帶\", \"ZONE-00\"],\n [\"ネトゲの嫁は女の子じゃないと思った\", \"線上遊戲的老婆不可能是女生\", None],\n [\"NieR\", \"尼爾 機械紀元\", \"NieR\"],\n [\"NANA\", \"大崎娜娜\", \"NANA\"],\n [\"涼宮ハルヒの憂鬱\", \"涼宮春日的憂鬱\", None],\n [\"俺の妹がこんなに可愛いわけがない\", \"我的妹妹哪有這麼可愛\", None],\n [\"グランブルーファンタジー\", \"碧藍幻想\", \"GranblueFantasy\"],\n [\"オーバーウォッチ\", \"守望先鋒\", \"Overwatch\"],\n [\"ブルーアーカイブ\", \"蔚藍檔案\", \"BlueArchive\"],\n [\"アズールレーン\", \"碧藍航線\", \"AzurLane\"],\n [\"Gantz\", \"殺戮都市\", \"Gantz\"],\n\n # 黑獸 奧莉加\n ]\n\n hint = [\"碧藍航線\", \"賭ケグルイ\", \"日常\", \"兔女郎\",\n \"黒獣\", \"崩壞3rd\", \"從零開始的異世界生活\", \"蕾姆\",\n \"美少女萬華鏡 篝之霧枝\", \"魔鏡Mirror\", \"泳裝\", \"婚紗\",\n \"聖誕\", \"內衣\", \"賽車\", \"ヱヴァンゲリヲン\", \"艾蕾\",\n \"歪萌社\", \"靡烟旗袍\", \"南半球女僕\", \"黑暗王朝\",\n \"魅魔\", \"透明女僕\", \"萊莎的鍊金工房\", \"hololive\",\n \"デート・ア・ライブ\", \"時崎狂三\", \"エロマンガ先生\", \"FF7\",\n \"Granblue Fantasy\", \"魔太郎\", \"豔娘幻夢譚\", \"瓶兒\",\n \"アイドルマスター\", \"ONE PIECE\", \"Persona 5\", \"化物語 戦場ヶ原\",\n \"青春ブタ野郎\", \"桜島麻衣\", \"ソードアート・オンライン\", \"瑪修\",\n \"VOCALOID\", \"初音未來\", \"東方Project\", \"NieR\",\n \"緣之空\", \"春日野穹\", \"監獄学園\", \"バイオハザード\"]\n\n hint = {\n \"日常\": [\"兔女郎\", \"聖誕\", \"內衣\", \"賽車\", \"泳裝\", \"婚紗\", \"女僕\"],\n \"歪萌社\": [\"靡烟旗袍\", \"南半球女僕\", \"黑暗王朝\", \"魅魔\", \"透明女僕\"],\n \"碧藍航線\": [],\n \"明日方舟\": [],\n \"少女前線\": [],\n \"Persona 5\": [],\n \"我的妹妹哪有這麼可愛\": [\"五更琉璃\"],\n \"黒獣\": [],\n \"崩壞3rd\": [],\n \"美少女萬華鏡\": [\"篝之霧枝\"],\n \"從零開始的異世界生活\": [\"蕾姆\"],\n \"路人女主的養成方式\": [\"加藤惠\", \"英梨梨\", \"霞ヶ丘詩羽\"],\n \"Fate\": [\"酒吞童子\", \"アルトリア\", \"尼祿\",\n \"貞德\", \"黑貞德\", \"白貞德\", \"斯卡哈\", \"伊斯塔凜\", \"玉藻前\",\n \"葛饰北斋\", \"源頼光\", \"瑪修\"],\n \"萊莎的鍊金工房\": [],\n \"hololive\": [],\n \"約會大作戰\": [\"時崎狂三\"],\n \"エロマンガ先生\": [],\n \"FF7\": [\"Tifa\"],\n \"Granblue Fantasy\": [],\n \"豔娘幻夢譚\": [\"瓶兒\"],\n \"原神\": [\"優菈\", \"菲謝爾\", \"刻晴\", \"雷電將軍\"],\n \"新世紀福音戰士\": [],\n \"化物語 戦場ヶ原\": [],\n \"アイドルマスター\": [],\n \"ONE PIECE\": [],\n \"LOL\": [\"Ahri\"],\n \"涼宮春日的憂鬱\": [\"涼宮春日\"],\n \"NieR\": [\"2B\"],\n \"東方Project\": [],\n \"VOCALOID\": [\"初音未來\"],\n \"緣之空\": [\"春日野穹\"],\n \"監獄学園\": [],\n \"BIOHAZARD\": [],\n \"鬼滅之刃\": [],\n \"狂賭之淵\": [],\n \"NEKOPARA\": [\"巧克力\", \"香草\"],\n \"公主連結\": [],\n \"青春ブタ野郎\": [\"桜島麻衣\"],\n }\n\n self.hint = RenameHint(None)\n\n self.model = MyDirModel(hint=self.hint, lang_convert_list=self.lang_convert_list, shortcut=shortcut)\n # QFileSystemModel(self) # QStringListModel()\n # self.model.setFilter(QDir.AllDirs | QDir.NoDotAndDotDot)\n # self.model.setReadOnly(False)\n\n # self.view.setViewMode(QListView.IconMode)\n self.model.signal_clicked.connect(self.action_list_click)\n self.model.signal_double_clicked.connect(self.action_list_double_click)\n self.model.signal_root_changed.connect(self.model_root_changed)\n self.model.view.setEditTriggers(QAbstractItemView.NoEditTriggers)\n\n v_box_left = QtVBox().addAll(h_box_top_bar,\n h_box_move_1,\n self.progress_bar,\n self.image_frame,\n self.txt_folder,\n h_box_url,\n self.model.shortcut_bar,\n self.model.tool_bar,\n self.model.view)\n\n self.left_panel_widget = QWidget(self)\n self.left_panel_widget.setLayout(v_box_left)\n self.left_panel_widget.setFixedWidth(500)\n\n self.txt_info = [QLabel(self), QLabel(self), QLabel(self), QLabel(self), QLabel(self)]\n\n self.image_flow = MyQtScrollableFlow()\n self.image_new_flow = MyQtScrollableFlow()\n\n self.xin_mei_tutu = CosplayParseWidget(self, self.create_folder)\n self.xin_mei_tutu.info_out.connect(self.info_out)\n self.xin_mei_tutu.download_start.connect(self.model.lock_folder)\n self.xin_mei_tutu.download_finish.connect(self.model.unlock_folder)\n\n # self.vbox_right.addWidget(h_sep)\n\n self.splitter_right = QSplitter(self)\n self.splitter_right.setOrientation(Qt.Vertical)\n self.splitter_right.addWidget(self.image_flow)\n self.splitter_right.addWidget(self.image_new_flow)\n\n if settings.contains(\"cosplay/splitterSizes\"):\n self.splitter_right.restoreState(settings.value(\"cosplay/splitterSizes\"))\n\n self.right_panel = QtVBox().addAll(self.splitter_right, self.xin_mei_tutu, *self.txt_info)\n\n layout = QtHBox().addAll(self.left_panel_widget, self.right_panel)\n\n self.setLayout(layout)\n\n self.thumb_size = QSize(140, 200)\n self.selected_data = None\n self.apply_settings()\n\n self.progress_signal.connect(self.progress_bar.setValue)\n self.progress_reset_signal.connect(self.progress_bar.setMaximum)\n self.info_out.connect(self.action_info_out, Qt.QueuedConnection)\n self.image_signal.connect(self.action_show_img, Qt.QueuedConnection)\n self.new_image_signal.connect(self.action_show_new_img, Qt.QueuedConnection)\n self.page_display.connect(self.action_show_page, Qt.QueuedConnection)\n\n self.refresh_and_select.connect(self.refresh_and_select_path, Qt.QueuedConnection)\n\n TextOut.func = self.info_out.emit\n # self.model.directoryLoaded.connect(self.model_loaded)\n\n def create_folder(self, folder, use_path: AnyStr = None):\n if use_path is None:\n out_path, out = RenameDialog.create_rename(self.model.rootPath, \"\", folder)\n else:\n out_path = use_path\n self.refresh_and_select.emit(out_path)\n return out_path\n\n @Slot()\n def refresh_and_select_path(self, out_path):\n if out_path != \"\" and out_path != self.model.select:\n self.model.makeSelect(out_path, reload=True)\n\n @Slot()\n def action_rename_images(self):\n if self.model.select:\n rename = RenameImageDialog(self.model.select)\n rename.exec()\n\n def action_to_folder(self, path, goto=True):\n self.model.makeSelect(None)\n self.image_new_flow.clearAll()\n if goto:\n self.model.setRootPath(path)\n else:\n self.model.setRootPath(self.model.rootPath)\n\n def apply_settings(self):\n thumb_w = settings.valueInt(\"image/thumb/width\", 140)\n thumb_y = settings.valueInt(\"image/thumb/height\", 200)\n self.thumb_size = QSize(thumb_w, thumb_y)\n\n win_w = settings.valueInt(\"main/width\", screen.availableGeometry().width() - 50)\n win_h = settings.valueInt(\"main/height\", screen.availableGeometry().height() - 50)\n print(win_w, win_h)\n n_size = QSize(win_w, win_h).boundedTo(screen.availableGeometry().size())\n self.resize(n_size)\n\n self.xin_mei_tutu.retry = settings.valueFloat(\"cosplay/download_retry\")\n\n self.model.setRootPath(settings.valueStr(\"cosplay/current\"))\n selected_path = settings.valueStr(\"cosplay/last_selection\", None)\n print(selected_path)\n\n self.model.makeSelect(selected_path)\n # self.root_idx = self.model.setRootPath(settings.value(\"bitgirl/root\"))\n # self.view.setRootIndex(self.root_idx)\n\n @Slot()\n def action_list_click(self, path, root, _) -> None:\n QCoreApplication.processEvents()\n settings.setValue(\"cosplay/last_selection\", path)\n\n if path is not None and os.path.isdir(path):\n self.show_info()\n if root == settings.valueStr(\"cosplay/download\"):\n self.action_show_images_local()\n\n @Slot()\n def action_list_double_click(self, path, _1, _2) -> None:\n if MyImageDialog.is_image(path):\n dialog = MyImageDialog(self, path, screen.availableGeometry().size())\n dialog.exec()\n else:\n os.startfile(path)\n\n @Slot()\n def model_root_changed(self, new_root):\n settings.setValue(\"cosplay/current\", new_root)\n\n @Slot()\n def action_settings(self) -> None:\n dialog = SettingDialog(self, settings, \"cosplay\")\n if dialog.exec():\n self.apply_settings()\n dialog.deleteLater()\n\n @Slot()\n def action_info_out(self, mess) -> None:\n if len(mess) > 100:\n mess = mess[:100]\n for i in range(0, len(self.txt_info) - 1):\n self.txt_info[i].setText(self.txt_info[i + 1].text())\n self.txt_info[-1].setText(mess)\n\n @Slot()\n def action_click_folder_image(self, path: str, thumb: QLabel, auto_confirm: bool) -> None:\n self.model.makeSelect(path.rsplit(\"/\", 1)[0])\n\n @Slot()\n def action_show_large_img(self, path: str, thumb: QLabel, auto_confirm: bool) -> None:\n m_size = screen.availableGeometry().size()\n dialog = MyImageDialog(self, path, m_size, thumb, True, self.show_info, scale_size=m_size)\n dialog.exec()\n\n @Slot()\n def action_show_img(self, _, as_size, img: MyImageSource) -> None:\n if img.image_path.endswith(\"folder.jpg\"):\n self.image_flow.show_img(as_size, img, self.action_click_folder_image)\n else:\n self.image_flow.show_img(as_size, img, self.action_show_large_img)\n QCoreApplication.processEvents()\n\n @Slot()\n def action_show_new_img(self, _, as_size, img: MyImageSource) -> None:\n self.image_new_flow.show_img(as_size, img, self.action_show_large_img)\n QCoreApplication.processEvents()\n\n @Slot()\n def action_find_dup(self):\n if self.model.select is not None:\n MyThreadPool.start(\"image_flow\", self.action_find_dup_start, self.action_find_dup_end, None,\n search_dup, self.model.select, self.thumb_size,\n self.image_signal, self.new_image_signal,\n self.progress_reset_signal, self.progress_signal,\n can_cancel=True)\n\n def action_find_dup_start(self):\n self.image_new_flow.clearAll()\n self.image_flow.clearAll()\n self.image_flow.group_by_date = False\n\n def action_find_dup_end(self, find_dup_path):\n files = list_jpg(find_dup_path, no_folder_img=True)\n InfoImage.update_count(find_dup_path, len(files))\n if find_dup_path == self.model.select:\n self.show_info()\n\n @Slot()\n def action_show_desc(self):\n pass\n\n @Slot()\n def action_recheck(self):\n pass\n\n @Slot()\n def action_parse_page(self):\n w = XinmeituluListWidget(self)\n self.image_new_flow.clearAll()\n self.image_new_flow.addWidget(w, front=True)\n w.info_download.connect(self.paste_download_url)\n\n @Slot()\n def action_move_folder(self):\n w = CosplayMoveWidget(self, settings.valueStr(\"cosplay/root\"), self.model.select)\n self.image_new_flow.clearAll()\n self.image_new_flow.addWidget(w, front=True)\n w.on_move.connect(self.action_to_folder)\n\n @Slot()\n def paste_download_url(self, url):\n self.xin_mei_tutu.txt_url.setText(url)\n\n @Slot()\n def action_show_images_local(self, page=0):\n if self.model.select is not None:\n MyThreadPool.start(\"image_flow\", self.action_show_images_start, self.show_info, None,\n self.async_load_images_local,\n self.model.select, self.thumb_size, page, can_cancel=True)\n\n @Slot()\n def action_show_folder_local(self, page=0):\n MyThreadPool.start(\"image_flow\", self.action_show_images_start, None, None,\n self.async_load_folder_local,\n self.model.rootPath, self.thumb_size, page, can_cancel=True)\n\n def action_show_images_start(self):\n self.image_flow.clearAll()\n self.image_flow.group_by_date = False\n\n @Slot()\n def action_show_page(self, num: int):\n w = MyButton(\"1\", self.action_show_images_local, param=[0])\n self.image_flow.addWidget(w)\n for i in range(num):\n w = MyButton(str(i + 2), self.action_show_images_local, param=[i + 1])\n self.image_flow.addWidget(w)\n\n def async_load_images_local(self, folder, as_size, page, check_cancel: Callable[[], bool] = None):\n files = sorted(list_jpg(folder, no_folder_img=True))\n\n InfoImage.update_count(folder, len(files))\n\n p = int(len(files) / 500)\n self.page_display.emit(p)\n\n files = files[page * 500:(page + 1) * 500]\n\n self.progress_reset_signal.emit(len(files))\n\n progress = 0\n\n for f in files:\n progress += 1\n self.progress_signal.emit(progress)\n\n file = f.replace(\"\\\\\", \"/\")\n img = MyImageSource(file, self.thumb_size)\n self.image_signal.emit(file, as_size, img)\n if check_cancel and check_cancel():\n self.progress_reset_signal.emit(1)\n self.progress_signal.emit(1)\n break\n time.sleep(0.01)\n\n self.info_out.emit(f\"{folder}: {progress} / {len(files)}\")\n\n def async_load_folder_local(self, folder, as_size, page, check_cancel: Callable[[], bool] = None):\n files = list_dir(folder)\n files = [os.path.join(f, \"folder.jpg\") for f in files if os.path.isdir(f)]\n files = [f for f in files if os.path.exists(f)]\n\n self.progress_reset_signal.emit(len(files))\n\n progress = 0\n\n for f in files:\n progress += 1\n self.progress_signal.emit(progress)\n file = f.replace(\"\\\\\", \"/\")\n img = MyImageSource(file, self.thumb_size)\n self.image_signal.emit(file, as_size, img)\n if check_cancel and check_cancel():\n self.progress_reset_signal.emit(1)\n self.progress_signal.emit(1)\n break\n time.sleep(0.01)\n\n self.info_out.emit(f\"{folder}: {progress} / {len(files)}\")\n\n @Slot()\n def action_open_url(self):\n url = self.txt_url.text()\n print(url)\n if url:\n webbrowser.open(url)\n\n @Slot()\n def action_update(self):\n if self.model.select:\n url = self.txt_url.text()\n if url:\n self.xin_mei_tutu.txt_url.setText(url)\n self.xin_mei_tutu.download(self.model.select)\n\n def show_info(self):\n MyThreadPool.start(\"show_info\", None, self.show_info_imp, None,\n self.show_info_run, self.model.select, priority=QtCore.QThread.Priority.HighPriority)\n\n def show_info_run(self, path: str) -> list:\n url = \"\"\n name = \"\"\n count = \"\"\n img = None\n if path and os.path.isdir(path):\n # image = imutils.url_to_image(\"https://pbs.twimg.com/media/E_24DrNVUAIPlFe.jpg:small\")\n name = os.path.basename(path)\n image_path = join_path(path, \"folder.jpg\")\n img = MyImageSource(image_path, self.image_frame.out_size)\n self.selected_data = InfoImage.load_info(path)\n if self.selected_data is not None:\n url = self.selected_data.url\n count = self.selected_data.count\n if count > 0:\n count = str(count)\n else:\n count = \"\"\n\n return [name, url, img, count]\n\n def show_info_imp(self, data: list) -> None:\n self.txt_folder.setText(data[0])\n self.txt_url.setText(data[1])\n self.image_frame.set_image(data[2]) # .set_path_async(data[2])\n self.txt_count.setText(data[3])\n\n @Slot()\n def safe_exit(self) -> None:\n \"\"\"exit the application gently so Spyder IDE will not hang\"\"\"\n settings.setValue(\"cosplay/splitterSizes\", self.splitter_right.saveState())\n settings.setValue(\"main/width\", self.width())\n settings.setValue(\"main/height\", self.height())\n settings.sync()\n RenameHint.save()\n self.deleteLater()\n self.close()\n self.destroy()\n app.exit()\n\n\nif __name__ == '__main__':\n print(\"Program Start\")\n\n settings = MySetting(\"soft.jp\", \"Manager\")\n\n # settings.setValue(\"cosplay/current\", \"Y:/Cosplay/星之迟迟\")\n\n if not settings.contains(\"cosplay/url1\"):\n settings.setValue(\"cosplay/url1\", \"https://www.xinmeitulu.com//\")\n if not settings.contains(\"cosplay/root\"):\n settings.setValue(\"cosplay/root\", \"Y:/cosplay\")\n if not settings.contains(\"cosplay/download\"):\n settings.setValue(\"cosplay/download\", \"Y:/download/cosplay\")\n if not settings.contains(\"cosplay/download_retry\"):\n settings.setValue(\"cosplay/download_retry\", 999)\n\n print(\"Create App\")\n\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QtWidgets.QApplication(sys.argv)\n\n user_theme = settings.value(\"main/theme\", \"dark_pink.xml\")\n apply_stylesheet(app, theme=user_theme)\n\n # file = glob.glob(\"X:\\Image\\Twitter/*\")\n # model = QFileSystemModel()\n # model.setRootPath(\"X:\\Image\\Twitter\")\n\n screen = app.primaryScreen()\n print('Screen: %s' % screen.name())\n size = screen.size()\n print('Size: %d x %d' % (size.width(), size.height()))\n rect = screen.availableGeometry()\n print('Available: %d x %d' % (rect.width(), rect.height()))\n\n widget = MainWidget()\n widget.setWindowTitle(\" \")\n # widget.setWindowIcon(QApplication.style().standardIcon(QStyle.SP_MediaPlay))\n widget.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowTitleHint)\n widget.show()\n\n sys.exit(app.exec())\n","repo_name":"baha2046a/pysearch","sub_path":"main_cosplay.py","file_name":"main_cosplay.py","file_ext":"py","file_size_in_byte":24596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11859040170","text":"import random\n\nfrom combat import Combat\n\nCOLORS = ['yellow', 'red', 'blue', 'green', 'purple']\n\n\nclass Monster(Combat):\n\n#create defaults to add to when creating subclasses\n min_hit_points = 1\n max_hit_points = 1\n min_experience = 1\n max_experience = 1\n weapon = 'sword'\n sound = 'roar'\n\n\n#\"dunder\" init = __init__ sets rules for each spawn of instance\n def __init__(self, **kwargs):\n self.hit_points = random.randint(self.min_hit_points, self.max_hit_points)\n self.experience = random.randint(self.min_experience, self.max_experience)\n self.color = random.choice(COLORS)\n\n for key , value in kwargs.items():\n setattr(self, key, value)\n\n#\"dunder\" str = __str__ = controls how the object is printed\n\n def __str__(self):\n return '{} {}, HP: {}, XP: {}'.format(self.color.title(),\n self.__class__.__name__,\n self.hit_points,\n self.experience)\n\n#need to create and instance before calling methods\n#cannot work on Monster.battlecry() because cannot work on the class itself\n\n def battlecry(self):\n return self.sound.upper()\n\n\n#creating subclasses that inherit the Monster class and add individual attributes\nclass Goblin(Monster):\n max_hit_points = 3\n max_experience = 2\n sound = 'squeak'\n\n\nclass Troll(Monster):\n min_hit_points = 3\n max_hit_points = 5\n min_experience = 2\n max_experience = 6\n sound = 'growl'\n\n\nclass Dragon(Monster):\n min_hit_points = 5\n max_hit_points = 10\n min_experience = 6\n max_experience = 10\n sound = 'raaaaaawr'\n","repo_name":"danielkline23/treehouse","sub_path":"OOP_game/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19388955423","text":"\"\"\"\nЗадача 22: Даны два неупорядоченных набора целых чисел (может быть, с\nповторениями).\nВыдать без повторений в порядке возрастания все те числа, которые встречаются\nв обоих наборах.\nПользователь вводит 2 числа.\nn - кол-во элементов первого множества.\nm - кол-во элементов второго множества.\nЗатем пользователь вводит сами элементы множеств.\n\"\"\"\ndef get_user_input_number_list(size, message):\n numbers_list = []\n for value in input(message).split():\n numbers_list.append(int(value))\n\n if len(numbers_list) != size:\n raise Exception(\"Количество элементов не соответствует входному \"\n \"размеру!\")\n return numbers_list\n\n\nn = int(input(\"кол-во элементов первого множества: \"))\nm = int(input(\"кол-во элементов второго множества: \"))\nn_list = get_user_input_number_list(n, \"Введите значение для n списка через \"\n \"пробел: \")\nm_list = get_user_input_number_list(m, \"Введите значение для m списка через \"\n \"пробел: \")\nres_list = list(set.intersection(set(n_list), set(m_list)))\nres_list.sort()\nprint(res_list)","repo_name":"Ekaterinagy/pythonProject","sub_path":"lesson4/task22.py","file_name":"task22.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33380716121","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('podcasts', '0006_auto_20150730_2343'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Site',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('slug', models.SlugField(unique=True)),\n ('theme', models.CharField(max_length=16, choices=[(b'panther', 'Panther'), (b'podcasty', 'Podcasty'), (b'zen', 'Zen'), (b'wharf', 'Wharf'), (b'abn', 'ABN')])),\n ('custom_cname', models.CharField(max_length=64, null=True, blank=True)),\n ('logo_url', models.URLField(blank=True)),\n ('itunes_url', models.URLField(blank=True)),\n ('stitcher_url', models.URLField(blank=True)),\n ('podcast', models.OneToOneField(to='podcasts.Podcast')),\n ],\n ),\n migrations.CreateModel(\n name='SiteBlogPost',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=512)),\n ('slug', models.SlugField()),\n ('created', models.DateTimeField(auto_now=True)),\n ('publish', models.DateTimeField()),\n ('body', models.TextField()),\n ('site', models.ForeignKey(to='sites.Site')),\n ],\n ),\n migrations.CreateModel(\n name='SiteLink',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=256)),\n ('url', models.URLField(blank=True)),\n ('class_name', models.CharField(max_length=256, null=True, blank=True)),\n ('site', models.ForeignKey(to='sites.Site')),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='siteblogpost',\n unique_together=set([('site', 'slug')]),\n ),\n ]\n","repo_name":"abts-doug/pinecast","sub_path":"sites/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37111181220","text":"import turtle\ndef tortuga():\n\n turtle.shape(\"turtle\")\n turtle.color('red', 'yellow')\n turtle.begin_fill()\n\n for i in range(5):\n \n turtle.forward(200)\n turtle.right(144)\n turtle.end_fill()\n turtle.done()\n \n\ntortuga()","repo_name":"oxisakre/Programacion","sub_path":"book/tortuguita.py","file_name":"tortuguita.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34645430386","text":"with open('input/07') as f:\n raw_circuit = f.readlines()\n\n\ndef parse_instruction(raw_instruction):\n raw_instruction = raw_instruction.rstrip('\\n')\n way, signal = raw_instruction.split(' -> ')\n return (way, signal)\n\ncircuit = {}\nfor instruction in raw_circuit:\n way, signal = parse_instruction(instruction)\n circuit[signal] = way\n\ncounts = {}\ncache = {}\n\n\ndef solve(signal):\n if signal in cache:\n return cache[signal]\n if signal not in circuit:\n return int(signal)\n way = circuit[signal]\n if way in counts:\n counts[way] += 1\n else:\n counts[way] = 1\n way_split = way.split()\n if len(way_split) == 1:\n try:\n return int(way)\n except ValueError:\n return solve(way)\n if way_split[0] == 'NOT':\n result = ~ solve(way_split[1])\n else:\n operator = way_split[1]\n value1, value2 = way_split[0], way_split[2]\n if operator == 'AND':\n result = solve(value1) & solve(value2)\n elif operator == 'OR':\n result = solve(value1) | solve(value2)\n elif operator == 'LSHIFT':\n result = solve(value1) << int(value2)\n elif operator == 'RSHIFT':\n result = solve(value1) >> int(value2)\n if signal not in cache:\n cache[signal] = result\n return result\n\nif __name__ == '__main__':\n print(solve('a'))\n","repo_name":"Thor77/AdventOfCode2015","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2666360769","text":"class Queue:\n def __init__(self):\n # Stack to hold elements that get added\n self.inStack = []\n # Stack to hold elements that are getting removed\n self.outStack = []\n \n\n def enqueue(self, item):\n self.inStack.append(item) \n\n def dequeue(self):\n # if the outStack is empty\n # we need to populate it with inStack elements\n if len(self.outStack) == 0:\n # empty out the inStack into the outStack\n while len(self.inStack) > 0:\n self.outStack.append(self.inStack.pop())\n \n return self.outStack.pop()\n\n\n def peek(self):\n # same logic as `dequeue`\n if len(self.inStack) == 0:\n return None\n else:\n while len(self.inStack) > 0:\n self.outStack.append(self.inStack.pop())\n \n return self.outStack[0]\n \n\n# Some console.log tests \nq = Queue()\nprint(q.peek()) # should print None\n\nq.enqueue(10)\nprint(q.peek()) # should print 10\n\nq.enqueue(9)\nq.enqueue(8)\n\nprint(q.dequeue()) # should print 10\nprint(q.dequeue()) # should print 9\nprint(q.dequeue()) # should print 8\n","repo_name":"bloominstituteoftechnology/Whiteboard-Pairing","sub_path":"QueueWithTwoStacks/model_solution.py","file_name":"model_solution.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"22"} +{"seq_id":"29209177192","text":"# A file to create load and dump functions for all Root Types?\n\nfrom .RootTypes import *\nfrom .RootState import *\n\nimport sys\nimport inspect\nimport json\n\n\n\ndef RootObjectHook(d):\n if \"__VEnum__\" in d:\n return getattr(sys.modules[__name__],d[\"__VEnum__\"])(d[\"value\"])\n elif \"__class__\" in d:\n cls = getattr(sys.modules[__name__], d[\"__class__\"])\n if cls == Board:\n d['paths'] = dict([(tuple([int(i) for i in k.split(\"-\")]),v) for k,v in d['paths'].items()])\n if hasattr(cls, \"_deserialize\"):\n return cls._deserialize(**d)\n else:\n return cls(**d)\n return d\n\ndef RootDefault(obj):\n if isinstance(obj, VEnum):\n return {\"__VEnum__\":obj.__class__.__name__, \"value\":obj.name}\n if isinstance(obj, Deserializable):\n d = obj.__dict__.copy()\n if type(obj).__name__==\"Board\":\n # Deal with Paths!\n d['paths'] = dict([(\"-\".join([str(i) for i in k]),v) for k,v in obj.paths.items()])\n return dict( d, __class__= obj.__class__.__name__)\n else:\n print(obj)\n raise TypeError(\"Object of type {} is not JSON serializable\".format(obj.__class__.__name__))\n\n\ndumps = json.dumps\ndumps.__kwdefaults__['default'] = RootDefault\ndump = json.dump\ndump.__kwdefaults__['default'] = RootDefault\nloads = json.loads\nloads.__kwdefaults__[\"object_hook\"] = RootObjectHook\nload = json.load\nload.__kwdefaults__[\"object_hook\"] = RootObjectHook","repo_name":"bvs7/RootNote","sub_path":"RootNote/RootJSON.py","file_name":"RootJSON.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33257237050","text":"sensors = {\"living room\": 21, \"kitchen\": 23, \"bedroom\": 20, \"pantry\": 22}\nnum_cameras = {\"backyard\": 6 , \"garage\": 2 ,\"driveway\": 1}\n\n#print(num_cameras)\n\n#Create a dictionary from english to Sindarin\nenglish_to_sindarin = {\"mountain\":\"orod\",\"bread\":\"bass\",\"friend\":\"mellon\",\"horse\":\"roch\"}\n\n#print(english_to_sindarin)\n\nchildren = { \"von Trapp\":[\"Johannes\", \"Rosmarie\", \"Eleonore\"], \"Corleone\" : [\"Sonny\", \"Fredo\", \"Michael\"]}\n#print(children)\n\nmy_empty_dictionary = {}\n\nanimals_in_zoo = {}\nanimals_in_zoo[\"zebras\"] = 8\nanimals_in_zoo[\"monkeys\"] = 12\nanimals_in_zoo[\"dinosaurs\"] = 0\n\n#print(animals_in_zoo)\n\nuser_ids = {\"teraCoder\": 9018293, \"proProgrammer\": 119238}\n\nuser_ids.update({\"theLooper\":138475,\"stringQueen\":85739})\n\n#print(user_ids)\n\noscar_winners = {\"Best Picture\": \"La La Land\", \"Best Actor\": \"Casey Affleck\", \"Best Actress\": \"Emma Stone\", \"Animated Feature\": \"Zootopia\"}\n\noscar_winners[\"Supporting Actress\"] = \"Viola Davis\"\noscar_winners[\"Best Picture\"] = \"Moonlight\"\n\n#print(oscar_winners)\n\ndrinks = [\"espresso\", \"chai\", \"decaf\", \"drip\"]\ncaffeine = [64, 40, 0, 120]\n\nzipped_drinks = zip(drinks,caffeine)\n\ndrinks_to_caffeine = {}\n\nfor i in range(len(drinks)):\n drinks_to_caffeine[drinks[i]] = caffeine[i]\n\n#print(drinks_to_caffeine)\n\n\nsongs = [\"Like a Rolling Stone\", \"Satisfaction\", \"Imagine\", \"What's Going On\", \"Respect\", \"Good Vibrations\"]\nplaycounts = [78, 29, 44, 21, 89, 5]\n\nplays = {}\n\nzipSongs = zip(songs,playcounts)\n\nfor i in range(len(songs)):\n plays[songs[i]] = playcounts[i]\n \n#print(plays)\n\nplays[\"Purple Haze\"] = 1\nplays[\"Respect\"] = 94\n\n#print(plays)\nlibrary = {\"The Best Songs\": plays,\"Sunday Feelings\":{}}\n\n#print(library)\n\nzodiac_elements = {\"water\": [\"Cancer\", \"Scorpio\", \"Pisces\"], \"fire\": [\"Aries\", \"Leo\", \"Sagittarius\"], \"earth\": [\"Taurus\", \"Virgo\", \"Capricorn\"], \"air\":[\"Gemini\", \"Libra\", \"Aquarius\"]}\nzodiac_elements[\"energy\"] = \"Not a Zodiac element\"\n\n#print(zodiac_elements[\"energy\"])\n#print(zodiac_elements[\"earth\"])\n#print(zodiac_elements[\"fire\"])\n\ncaffeine_level = {\"espresso\": 64, \"chai\": 40, \"decaf\": 0, \"drip\": 120}\ncaffeine_level[\"matcha\"] = 30\n\n#try:\n #print(caffeine_level[\"matcha\"])\n#except:\n #print(\"Unknown Caffeine Level\")\n \n \nuser_ids = {\"teraCoder\": 100019, \"pythonGuy\": 182921, \"samTheJavaMaam\": 123112, \"lyleLoop\": 102931, \"keysmithKeith\": 129384}\n\ntc_id = user_ids.get(\"teraCoder\", 100000)\n#print(tc_id)\n\nstack_id = user_ids.get(\"superStackSmash\",100000)\n#print(stack_id)\n\n\navailable_items = {\"health potion\": 10, \"cake of the cure\": 5, \"green elixir\": 20, \"strength sandwich\": 25, \"stamina grains\": 15, \"power stew\": 30}\nhealth_points = 20\n\nhealth_points += available_items.pop(\"stamina grains\",0)\n#print(health_points)\nhealth_points += available_items.pop(\"power stew\",0)\n#print(health_points)\nhealth_points += available_items.pop(\"mystic bread\",0)\n#print(available_items)\n#print(health_points)\n\nuser_ids = {\"teraCoder\": 100019, \"pythonGuy\": 182921, \"samTheJavaMaam\": 123112, \"lyleLoop\": 102931, \"keysmithKeith\": 129384}\nnum_exercises = {\"functions\": 10, \"syntax\": 13, \"control flow\": 15, \"loops\": 22, \"lists\": 19, \"classes\": 18, \"dictionaries\": 18}\n\nusers = user_ids.keys()\nlessons = num_exercises.keys()\n\n#print(users)\n\nnum_exercises = {\"functions\": 10, \"syntax\": 13, \"control flow\": 15, \"loops\": 22, \"lists\": 19, \"classes\": 18, \"dictionaries\": 18}\n\ntotal_exercises = 0\n\nfor exercise in num_exercises:\n total_exercises += num_exercises[exercise]\n \n#print(total_exercises)\n\npct_women_in_occupation = {\"CEO\": 28, \"Engineering Manager\": 9, \"Pharmacist\": 58, \"Physician\": 40, \"Lawyer\": 37, \"Aerospace Engineer\": 9}\n\n#for occupation,pct in pct_women_in_occupation.items():\n #print(\"Women make up \" + str(pct) + \" percent of \" + occupation +\"s\")\n \ntarot = {1: \"The Magician\", 2: \"The High Priestess\", 3: \"The Empress\", 4: \"The Emperor\", 5: \"The Hierophant\", 6: \"The Lovers\", 7: \"The Chariot\", 8: \"Strength\", 9: \"The Hermit\", 10: \"Wheel of Fortune\", 11:\t\"Justice\", 12: \"The Hanged Man\", 13: \"Death\", 14:\t\"Temperance\", 15:\t\"The Devil\", 16: \"The Tower\", 17: \"The Star\", 18:\t\"The Moon\", 19:\t\"The Sun\", 20: \"Judgement\", 21: \"The World\", 22: \"The Fool\"}\n\nspread = {}\nspread[\"past\"] = tarot.pop(13)\nspread[\"present\"] = tarot.pop(22)\nspread[\"future\"] = tarot.pop(10)\n\n#for key,value in spread.items():\n #print(\"Your \" + key + \" is the \" + value + \" card.\")\n \nletters = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\"]\npoints = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 4, 1, 3, 10, 1, 1, 1, 1, 4, 4, 8, 4, 10]\n\nletter_to_points = {letters:points for letters,points in zip(letters,points)}\n\nletter_to_points[\"\"] = 0\n\ndef score_word(word):\n point_total = 0\n \n try:\n for letter in word:\n point_total += letter_to_points[letter.upper()]\n except:\n point_total += 0\n \n return point_total\n \nplayer_to_words = {\"player1\":[\"BLUE\",\"TENNIS\",\"EXIT\"],\"wordNerd\":[\"EARTH\",\"EYES\",\"MACHINE\"],\"Lexi Con\":[\"ERASER\",\"BELLY\",\"HUSKY\"],\"Prof Reader\":[\"ZAP\",\"COMA\",\"PERIOD\"]}\nplayer_to_points = {}\n\ndef update_points_totals():\n for player, words in player_to_words.items():\n player_points = 0\n for word in words:\n player_points += score_word(word)\n player_to_points[player] = player_points\n return print(\"Player Score Updated\")\n \ndef play_word(player,word):\n list1 = []\n list1 = player_to_words[player]\n list1.append(word)\n player_to_words[player] = list1\n update_points_totals()\n return print(word + \" added to \" + player + \" words.\")\n \n#play_word(\"player1\",\"Booger\")\n#play_word(\"wordNerd\",\"Earthquake\")\n#print(player_to_words)\n#print(player_to_points)\n\ndef sum_values(my_dictionary):\n sumValues = 0\n for value1 in my_dictionary.values():\n sumValues += value1\n return sumValues\n\n#print(sum_values({\"milk\":5, \"eggs\":2, \"flour\": 3}))\n\ndef sum_even_keys(my_dictionary):\n sumValues = 0\n for key,value1 in my_dictionary.items():\n if(key %2 == 0):\n sumValues += value1\n return sumValues\n \n#print(sum_even_keys({1:5, 2:2, 3:3}))\n#print(sum_even_keys({10:1, 100:2, 1000:3}))\n\ndef add_ten(my_dictionary):\n for key,value1 in my_dictionary.items():\n my_dictionary[key] = value1+10\n return my_dictionary\n \n#print(add_ten({1:5, 2:2, 3:3}))\n\ndef values_that_are_keys(my_dictionary):\n list1 = []\n for key in my_dictionary.keys():\n for value in my_dictionary.values():\n if(value == key):\n list1.append(key)\n return list1\n \n#print(values_that_are_keys({1:100, 2:1, 3:4, 4:10}))\n#print(values_that_are_keys({\"a\":\"apple\", \"b\":\"a\", \"c\":100}))\n\ndef max_key(my_dictionary):\n max_val = 0\n for key,value1 in my_dictionary.items():\n if(value1 > max_val):\n max_val = value1\n \n for key in my_dictionary.keys():\n if(my_dictionary[key] == max_val):\n return key\n\n#print(max_key({1:100, 2:1, 3:4, 4:10}))\n#print(max_key({\"a\":100, \"b\":10, \"c\":1000}))\n\ndef word_length_dictionary(words):\n key = \"\"\n value = 0\n my_dictionary = {}\n \n for word in words:\n key = word\n value = len(word)\n my_dictionary[key] = value\n \n return my_dictionary\n \n#print(word_length_dictionary([\"apple\", \"dog\", \"cat\"]))\n#print(word_length_dictionary([\"a\", \"\"]))\n\ndef frequency_dictionary(words):\n my_dictionary = {}\n for word in words:\n try:\n if(my_dictionary[word]):\n my_dictionary[word] += 1\n except:\n my_dictionary[word] = 1\n \n return my_dictionary\n \n#print(frequency_dictionary([\"apple\", \"apple\", \"cat\", 1]))\n#print(frequency_dictionary([0,0,0,0,0]))\n\ndef unique_values(my_dictionary):\n sums = 0\n string1 = \"\"\n for value in my_dictionary.values():\n if(string1.find(str(value)) == -1):\n sums+=1\n string1 += str(value)\n return sums\n\n#print(unique_values({0:3, 1:1, 4:1, 5:3}))\n#print(unique_values({0:3, 1:3, 4:3, 5:3}))\n\ndef count_first_letter(names):\n new_dict = {}\n for key in names.keys():\n if(new_dict.get(key[0], -1) != -1):\n new_dict[key[0]] += len(names[key])\n else:\n new_dict[key[0]] = len(names[key])\n return new_dict\n \n#print(count_first_letter({\"Stark\": [\"Ned\", \"Robb\", \"Sansa\"], \"Snow\" : [\"Jon\"], \"Lannister\": [\"Jaime\", \"Cersei\", \"Tywin\"]}))\n#print(count_first_letter({\"Stark\": [\"Ned\", \"Robb\", \"Sansa\"], \"Snow\" : [\"Jon\"], \"Sannister\": [\"Jaime\", \"Cersei\", \"Tywin\"]}))\n\n","repo_name":"ckronber/python_files","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8610787163","text":"from typing import Optional\n\nfrom libs.dominio import Dominio\nfrom libs.tipos_basicos.texto import Email, Senha\nfrom libs.unidade_de_trabalho import UnidadeDeTrabalhoAbstrata\n\nfrom contextos.usuarios.dominio.agregados import Usuario\nfrom contextos.usuarios.dominio.comandos import (\n CriarUsuario,\n EditarUsuario,\n AutenticarUsuario,\n AlterarEmailDoUsuario,\n)\nfrom contextos.usuarios.repositorio.repo_consulta import RepoConsultaUsuarios\n\nfrom contextos.usuarios.dominio.eventos import EmailAlterado\nfrom contextos.usuarios.adaptadores.jwt import GeradorDeToken, Token\nfrom contextos.usuarios.adaptadores.encriptador import EncriptadorDeSenha\nfrom contextos.usuarios.exceptions import UsuarioNaoEncontrado, ErroNaAutenticacao\n\n\ndef criar_usuario(\n comando: CriarUsuario,\n uow: UnidadeDeTrabalhoAbstrata,\n encriptar_senha: Optional[bool] = True,\n) -> Usuario:\n \"\"\"\"\"\"\n\n uow = uow(Dominio.usuarios)\n\n with uow:\n repo_consulta: RepoConsultaUsuarios = uow.repo_consulta\n\n ja_existe_usuario_com_o_email = repo_consulta.consultar_por_email(\n email=Email(comando.email)\n )\n\n if ja_existe_usuario_com_o_email:\n raise Usuario.UsuarioInvalido(\n \"Não é possível criar um novo usuário com este e-mail.\"\n )\n\n senha = comando.senha\n\n if encriptar_senha:\n senha = EncriptadorDeSenha().encriptar_senha(senha=comando.senha)\n\n novo_usuario = Usuario.criar(\n email=Email(comando.email),\n senha=Senha(senha),\n nome_completo=comando.nome_completo,\n data_de_nascimento=comando.data_de_nascimento,\n )\n\n uow.repo_dominio.adicionar(novo_usuario)\n uow.commit()\n\n return novo_usuario\n\n\ndef editar_usuario(comando: EditarUsuario, uow: UnidadeDeTrabalhoAbstrata) -> Usuario:\n \"\"\"\"\"\"\n\n uow = uow(Dominio.usuarios)\n\n with uow:\n repo_consulta: RepoConsultaUsuarios = uow.repo_consulta\n usuario: Usuario = repo_consulta.consultar_por_id(id_usuario=comando.usuario_id)\n\n usuario_editado = usuario.editar(valores_para_edicao=comando.novos_valores)\n\n uow.repo_dominio.atualizar(usuario_editado)\n uow.commit()\n\n return usuario_editado\n\n\ndef autenticar_usuario(\n comando: AutenticarUsuario, uow: UnidadeDeTrabalhoAbstrata\n) -> Token:\n\n uow = uow(Dominio.usuarios)\n\n with uow:\n repo_consulta: RepoConsultaUsuarios = uow.repo_consulta\n usuario: Usuario = repo_consulta.consultar_por_email(email=comando.email)\n\n if not usuario:\n raise UsuarioNaoEncontrado(\"Usuário não encontrado\")\n\n encriptador = EncriptadorDeSenha()\n senha_eh_valida = encriptador.verificar_senha(\n senha_para_verificar=comando.senha,\n senha_do_usuario=usuario.senha,\n )\n\n if not senha_eh_valida:\n raise ErroNaAutenticacao(\"Usuário ou senha incorretos\")\n\n return GeradorDeToken.gerar_token(usuario=usuario)\n\n\ndef alterar_email_do_usuario(\n comando: AlterarEmailDoUsuario, uow: UnidadeDeTrabalhoAbstrata\n) -> Usuario:\n with uow(Dominio.usuarios):\n usuario: Usuario = uow.repo_consulta.consultar_por_email(\n email=Email(comando.novo_email)\n )\n\n # email ja utilizado por usuario com id diferente\n if usuario and usuario.id != comando.usuario_id:\n raise Usuario.UsuarioInvalido(\n \"Não é possível criar um novo usuário com este e-mail.\"\n )\n\n if not usuario:\n usuario = uow.repo_consulta.consultar_por_id(id_usuario=comando.usuario_id)\n\n usuario_alterado = usuario.alterar_email(email=Email(comando.novo_email))\n\n uow.repo_dominio.atualizar(usuario_alterado)\n uow.commit()\n\n return usuario_alterado\n\n\ndef enviar_email_de_confirmacao(evento: EmailAlterado, uow: UnidadeDeTrabalhoAbstrata):\n print(f\"==== ENVIOU O E-MAIL PARA {evento.novo_email} ====\")\n","repo_name":"tchaguitos/glicobuddy","sub_path":"contextos/usuarios/servicos/executores.py","file_name":"executores.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"5349611386","text":"#https://projecteuler.net/problem=5\n\nmaxNumber = 20\n#for 20, result : 232792560\n\npossibilities = [i for i in range(1,maxNumber+1)]\n\ntest = True\ncursor = 0\nwhile test:\n\tcursor+=1\n\tcheck = True\n\tfor k in possibilities:\n\t\tif cursor%k!=0:\n\t\t\tcheck=False\n\tif check == True:\n\t\ttest=False\nprint(\"Smallest positive number is\",cursor)\n\n","repo_name":"jokfun/Project-Euler","sub_path":"page1/prb5.py","file_name":"prb5.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10037111409","text":"import sys\nprint(sys.argv)\nfname = sys.argv[1]\nnewfile = \"\"\nwith open(fname,'r') as annotatedjson:\n l = annotatedjson.readlines()\nfor line in l:\n string=line.split('#')[0]\n newfile+= string.strip('\\n')+'\\n'\nwith open(fname+'.json','w') as nonannotatedjson:\n nonannotatedjson.write(newfile)","repo_name":"NanoExplorer/Zeus2-cycle-box","sub_path":"de-annotate-json.py","file_name":"de-annotate-json.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9150933952","text":"import json\nimport datetime as dt\n\nfrom google.cloud import storage\n\nfrom prefect.blocks.system import Secret\n\nimport config as cfg\n\n\n# -------------------------- #\n# GCS UTILS #\n# -------------------------- #\n\ndef upload_blob_from_memory(bucket_name, contents, destination_blob_name, credentials):\n \"\"\"Uploads a file to the bucket.\"\"\"\n storage_client = storage.Client(credentials=credentials)\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n blob.upload_from_string(contents, content_type='application/json')\n\ndef download_blob_to_memory(bucket_name, file_name, credentials):\n '''Downloads a file from bucket to memory.'''\n client = storage.Client(credentials=credentials)\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(file_name)\n return json.loads(blob.download_as_text(encoding=\"utf-8\"))\n\ndef blob_exists(bucket_name, file_name, credentials):\n '''Checks if blob exists in the bucket'''\n client = storage.Client(credentials=credentials)\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(file_name)\n return blob.exists()\n\n\n# \n# GET SUMMONER IDS\n#\n\ndef get_all_ids_by_summoner_id(summoner_id):\n with cfg.SESSION.get(f'{cfg.REQUEST_URLS[\"SUMMONER-V4\"]}/lol/summoner/v4/summoners/{summoner_id}',\n headers= {\"X-Riot-Token\": Secret.load(\"riot-api-key\").get()}) as req:\n if req.status_code == 200:\n return req.json()\n return {}\n\ndef get_all_ids_by_summoner_name(summoner_name):\n with cfg.SESSION.get(f'{cfg.REQUEST_URLS[\"SUMMONER-V4\"]}/lol/summoner/v4/summoners/by-name/{summoner_name}',\n headers= {\"X-Riot-Token\": Secret.load(\"riot-api-key\").get()}) as req:\n if req.status_code == 200:\n return req.json()\n return {}\n\ndef get_summoner_ids(summoner_name: str, summoner_id: str) -> str:\n file_name = f'players/{summoner_name}_{summoner_id}.json'\n\n # check if it exists in data lake\n if blob_exists(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS):\n return download_blob_to_memory(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS)\n\n # if not call riot api\n data = get_all_ids_by_summoner_name(summoner_name)\n\n if not data:\n print(f'Summoner Name ({summoner_name}) not exists using Summoner ID ({summoner_id}).')\n data = get_all_ids_by_summoner_id(summoner_id)\n \n # save the result and return the puuid\n if not data:\n return ''\n\n upload_blob_from_memory(cfg.DATA_LAKE, \n json.dumps(data, indent=4), \n file_name, \n cfg.CREDENTIALS)\n\n return data\n\n\n#\n# GET MATCH INFO\n#\n\n\ndef match_history(summoner_puuid: str, \n start_time: dt.datetime, \n end_time: dt.datetime, \n count: int = 100, \n match_type: str = 'ranked') -> list:\n \n file_name = f'match-history/{summoner_puuid}-{match_type}-{start_time.strftime(\"%m-%d-%Y_%H:%M:%S\")}-{end_time.strftime(\"%m-%d-%Y_%H:%M:%S\")}.json'\n\n if blob_exists(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS):\n return download_blob_to_memory(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS)\n \n match_ids = []\n start = 0\n\n while True:\n with cfg.SESSION.get(f'{cfg.REQUEST_URLS[\"MATCH-V5\"]}/lol/match/v5/matches/by-puuid/{summoner_puuid}/ids',\n params={\n 'startTime': int(start_time.timestamp()),\n 'endTime': int(end_time.timestamp()),\n 'type': match_type,\n 'count': count,\n 'start': start\n },\n headers= {\"X-Riot-Token\": Secret.load(\"riot-api-key\").get()}) as req:\n \n data = []\n if req.status_code == 200:\n data = req.json()\n\n if not data:\n break\n\n match_ids.extend(data)\n start += count\n\n if len(data) < count:\n break \n\n print(f'In total matches played is {len(match_ids)}.')\n\n upload_blob_from_memory(cfg.DATA_LAKE, json.dumps(match_ids), file_name, cfg.CREDENTIALS)\n\n return match_ids\n\n\n\ndef get_match_info(match_id: str) -> dict:\n file_name = f'matches/{match_id}.json'\n\n if blob_exists(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS):\n return download_blob_to_memory(cfg.DATA_LAKE, file_name, cfg.CREDENTIALS)\n else:\n with cfg.SESSION.get(f'{cfg.REQUEST_URLS[\"MATCH-V5\"]}/lol/match/v5/matches/{match_id}',\n headers= {\"X-Riot-Token\": Secret.load(\"riot-api-key\").get()}) as req:\n if req.status_code == 200:\n data = req.json()\n upload_blob_from_memory(cfg.DATA_LAKE, \n json.dumps(data, indent=4), \n file_name, \n cfg.CREDENTIALS) \n \n return data\n else:\n print(f'Failed to do request ({req.status_code})')\n return {}\n \n\n\ndef match_transform(data: dict) -> list:\n\n retrieve_time = dt.datetime.utcnow()\n\n match_id = data['metadata']['matchId']\n\n # creation time\n creation_time = dt.datetime.fromtimestamp(data['info']['gameCreation'] / 1000.0)\n game_start_time = dt.datetime.fromtimestamp(data['info']['gameStartTimestamp'] / 1000.0)\n # game length\n duration_min = data['info']['gameDuration'] / 60\n\n participants = []\n\n for info in data['info']['participants']:\n summoner_puuid = info['puuid']\n summoner_id = info['summonerId']\n summoner_name = info['summonerName']\n\n # champion played\n champion = info['championName']\n\n # K.D.A\n kills = info['kills']\n deaths = info['deaths']\n assits = info['assists']\n kda = info['challenges']['kda']\n\n # Role (MID/ADC/TOP/JG/SUP)\n lane = info['teamPosition']\n\n # Damage to Champions\n damage_dealt = info['totalDamageDealtToChampions']\n damage_per_min = info['challenges']['damagePerMinute']\n damage_taken = info['totalDamageTaken']\n\n # Gold Earn\n gold = info['goldEarned']\n gold_per_min = info['challenges']['goldPerMinute']\n\n # Outcome\n win = info['win']\n\n # Creep Slain\n cs = info['totalMinionsKilled'] + info['neutralMinionsKilled']\n\n # vision score\n vision_score = info['visionScore']\n vision_score_per_min = info['challenges']['visionScorePerMinute']\n\n\n # game info\n surrender = info['gameEndedInSurrender']\n\n participants.append({\n 'summoner_puuid': summoner_puuid,\n 'summoner_id': summoner_id,\n 'summoner_name': summoner_name,\n 'match_id': match_id,\n 'creation_time': creation_time,\n 'game_start_time': game_start_time,\n 'duration_min': duration_min,\n 'champion': champion,\n 'kills': kills,\n 'deaths': deaths,\n 'assits': assits,\n 'kda': kda,\n 'lane': lane,\n 'damage_dealt': damage_dealt,\n 'damage_per_min': damage_per_min,\n 'damage_taken': damage_taken,\n 'gold': gold,\n 'gold_per_min': gold_per_min,\n 'win': win,\n 'cs': cs,\n 'vision_score': vision_score,\n 'vision_score_per_min': vision_score_per_min,\n 'surrender': surrender,\n 'retrieve_time': retrieve_time\n })\n\n return participants\n\n","repo_name":"HCA97/Leauge-of-Legends-Challenger-Stats","sub_path":"prefect/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"28108388668","text":"from django.urls import path\nfrom .views import CreatView, LoginView, IdentifyView, DetailView, AccountListView\n\n\n\nurlpatterns = [\n path('signup', CreatView.as_view(), name = 'signup'),\n path('login',LoginView.as_view(), name = 'login'),\n path('otp', IdentifyView.as_view(), name = 'otp'),\n path('detail/', DetailView.as_view(), name= 'detail'),\n path('list', AccountListView.as_view(), name ='list'),\n \n]\n","repo_name":"Hosein110011/API_user","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14697546999","text":"import re\nimport pandas as pd\n\nn_stream = [i+1 for i in range(32)]\nprefixes = ['26-32] Elapsed time: ', '54-78] Elapsed time: ', '81-90] Elapsed time: ']\npostfix = ' ms'\n\nfor i in range(len(prefixes)):\n result = []\n for n in n_stream:\n with open(f\"./result_{n}\") as f:\n s = f.read()\n elements = re.findall(f'{prefixes[i]}(.*?){postfix}', s)\n result.append(elements)\n pd.DataFrame(result, columns = [f\"testcase_{idx}\" for idx in range(10)], index = n_stream).to_csv(f'./exp{i+1}.csv')","repo_name":"minsusun/csed490c-01","sub_path":"lab3/Lab3_cuda/sources/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42096652292","text":"import pandas as pd\n\ntest_data = open('experimental_data', 'a', newline='')\ntest_data.write(\"step car vel accel location\")\n\n\ndef export_data(step, vehic):\n for i in range(len(vehic)):\n test_data.write(\"\\n\" + str(step) + ' ' + str(i) + ' ' + str(vehic[i].current_speed) + ' ' + str(vehic[i].acceleration) + ' ' + str(vehic[i].current_location))\n\n\n","repo_name":"whitesila/traffic_wave","sub_path":"data_export_method.py","file_name":"data_export_method.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42385717084","text":"from time import sleep\nr = 'S'\nmaior = menor = cont = total = 0\nwhile r == 'S':\n num = int(input('Digite um Valor: '))\n total += num\n cont += 1\n if cont == 1:\n maior = menor = num\n else:\n if num > maior:\n maior = num\n if num < menor:\n menor = num\n r = str(input('Quer digitar outro valor? [S/N] :')).upper()[0]\n if r not in 'NS':\n print('\\033[31m OPÇÂO INVALIDA!!\\033[m')\n sleep(1)\n r = str(input('Quer digitar outro valor? [S/N] :')).upper()[0]\nmedia = total / cont\nprint('''A média dos {} números digitados é {}.\nO Menor é {} e o Maior é {}. '''.format(cont, media, menor, maior))\n","repo_name":"Oliveiraster/Python","sub_path":"Python 3/Exercicios/ex065.py","file_name":"ex065.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7173891378","text":"from __future__ import unicode_literals\n\nimport argparse\nimport signal\nimport sys\nfrom python_banyan.banyan_base import BanyanBase\n\n\nclass MyBanyanComponent(BanyanBase):\n \"\"\"\n This class is a template that you can use to create your\n own Banyan components\n \"\"\"\n\n def __init__(self, process_name='None'):\n \"\"\"\n\n :param process_name: Name of the component to be\n displayed on the console.\n \"\"\"\n\n # Call to super allows this class to be used in multiple inheritance scenarios when needed\n super(MyBanyanComponent, self).__init__(process_name=process_name)\n\n # add any initialization code you need for your specific component\n\n # You may wish to subscribe to messages here.\n # Subscribe to as many topic as you wish.\n # Uncomment out the following line and enter your topic.\n\n # self.set_subscriber_topic('the_topic')\n\n # You may also publish a message in init or anywhere else.\n # You will need to create a payload, that is in the form\n # of a Python dictionary. When you publish, you will also\n # need to specify\n\n # The next three lines are a sample of this. Uncomment and\n # modify to your needs, or copy the pattern to any other\n # portion of your component.\n\n # payload = {'command': 'turn_led_on\"}\n # topic = 'gpio_control'\n # self.publish_payload(payload, topic)\n\n # Start the receive event loop to receive messages for\n # subscribed topics.\n\n # This will also keep the component alive, even if you have not\n # subscribed to any topics.\n\n # This should be the last statement in init, since the receive_loop\n # runs in an infinite loop.\n\n self.receive_loop()\n\n def incoming_message_processing(self, topic, payload):\n \"\"\"\n Override this method with a custom Banyan message processor\n for subscribed messages.\n\n :param topic: Message Topic string.\n\n :param payload: Message Data.\n \"\"\"\n\n\ndef my_banyan_component():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-n\", dest=\"process_name\", default=\"My Component\",\n help=\"Set process name in banner\")\n args = parser.parse_args()\n kw_options = {\n 'process_name': args.process_name,\n }\n\n try:\n app = MyBanyanComponent(**kw_options)\n except KeyboardInterrupt:\n sys.exit()\n\n # noinspection PyUnusedLocal\n def signal_handler(sig, frame):\n print(\"Control-C detected. See you soon.\")\n app.clean_up()\n sys.exit(0)\n\n # listen for SIGINT\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n\nif __name__ == '__main__':\n my_banyan_component()\n","repo_name":"MrYsLab/bots-in-pieces-examples","sub_path":"banyan-bot-blue/banyan_templates/banyan_component_template.py","file_name":"banyan_component_template.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"71609897336","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n# configure the setup to install from specific repos and users\n\nDEPENDENCY_LINKS = [\n 'https://github.com/deeso/task-blox/tarball/master#egg=task-blox',\n 'https://github.com/deeso/manipin-json/tarball/master#egg=manipin-json'\n]\n\nDESC ='Python JSON file consumer'\nsetup(name='json-file-consumer',\n version='1.0',\n description=DESC,\n author='adam pridgen',\n author_email='dso@thecoverofnight.com',\n install_requires=['toml', 'regex', 'task-blox', 'manipin-json'],\n packages=find_packages('src'),\n package_dir={'': 'src'},\n dependency_links=DEPENDENCY_LINKS,\n)\n","repo_name":"deeso/json-file-consumer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74518432695","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 6 15:03:09 2018\n\n@author: Ekansh Maheshwari\n\"\"\"\nfrom matplotlib import pyplot\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom mpl_toolkits.mplot3d import Axes3D\nimport re\n\ndf=pd.read_csv(\"dat.csv\")\n#df = df.drop_duplicates('Username')\n#print(df)\ndf=df.drop(['Timestamp','Username'],axis=1)\n#print(df.head())\n\nprint(\"LEngth of dataframe\"+str(len(df))) \n\nprint(type(df[\"Coding Languages you are proficient in:\"][2]))\nprint(df[\"Coding Languages you are proficient in:\"][2])\nk=[]\nm=[]\ndf = df[df[\"Number of Publications/Research Paper(If Any):\"] != 999999999999999999999999]\ndf[\"numberofcodinglanguage\"]=-10\nfor kz in range(0,len(df)):\n i=0\n s=re.split(';|,| ; | , |; |\\*|\\n',df[\"Coding Languages you are proficient in:\"][kz])\n for j in s:\n if j not in k :\n i=i+1\n k.append(j)\n m.append(1)\n else:\n i=i+1\n q=k.index(j)\n m[q]=m[q]+1\n df[\"numberofcodinglanguage\"][kz]=i\nprint(m)\nprint(k)\n\nq=[]\nz=[]\ndf[\"numberproject\"]=-23\nfor kz in range(0,len(df)):\n #print(df[\"Number of Projects:\"][kz])\n if type(df[\"Number of Projects:\"][kz])==str:\n s=re.findall(r'\\d+',df[\"Number of Projects:\"][kz])\n if len(s)!=0:\n s=int(s[0])\n df[\"numberproject\"][kz]=s\n elif df[\"Number of Projects:\"][kz] ==\" One\":\n s=1\n df[\"numberproject\"][kz]=s\n #print(s)\n else :\n s=1\n df[\"numberproject\"][kz]=s\n if s not in q:\n q.append(s)\n z.append(1) \n else :\n ze=q.index(s)\n z[ze]=z[ze]+1\n elif str.isdigit(str(df[\"Number of Projects:\"][kz]))==True:\n s=int(df[\"Number of Projects:\"][kz])\n df[\"numberproject\"][kz]=s\n else:\n df[\"numberproject\"][kz]=0\nprint(q)\nprint(z)\npapernumbers=[]\npapernum=[]\nkze=\"Number of Publications/Research Paper(If Any):\"\nfor kz in range(0,len(df)):\n #s=re.findall(r'\\d+',df[\"Number of Publications/Research Paper(If Any):\"][kz])\n s=df[\"Number of Publications/Research Paper(If Any):\"][kz]\n #if df[\"Number of Projects:\"][kz]==\"NA\" or df[\"Number of Projects:\"][kz]==\"NA\" or df[\"Number of Projects:\"][kz]==\"N/A\" or df[\"Number of Projects:\"][kz]==\"None\" or df[\"Number of Projects:\"][kz]==\"-\" or df[\"Number of Projects:\"][kz]==\".\" or df[\"Number of Projects:\"][kz]==\"N.A\" or df[\"Number of Projects:\"][kz]==\"NO\" or df[\"Number of Projects:\"][kz]==\"NA\" or df[\"Number of Projects:\"][kz]==\"NONE\" or df[\"Number of Projects:\"][kz]==\"Na\" or df[\"Number of Projects:\"][kz]==\"Nil\" or df[\"Number of Projects:\"][kz]==\"NA\" or df[\"Number of Projects:\"][kz]==\"No it\" or df[\"Number of Projects:\"][kz]==\"No publications/resarch paper\" or df[\"Number of Projects:\"][kz]==\"NA\" or df[\"Number of Projects:\"][kz]==\"No.\" or df[\"Number of Projects:\"][kz]==\"Nope\" or df[\"Number of Projects:\"][kz]==\"Not yet\" or df[\"Number of Projects:\"][kz]==\"Zero\" or df[\"Number of Projects:\"][kz]==\"`NA\" or df[\"Number of Projects:\"][kz]==\"no\":\n # s=0\n if type(s)!=float and s.isdigit() and int(s)>=0:\n s=int(s)\n elif s=='2':\n s=str(2)\n #elif s\n elif df[kze][kz]==\" nan\" or df[kze][kz]==\"No\" or df[kze][kz]==\"NA\" or df[kze][kz]==\"NA\" or df[kze][kz]==\"N/A\" or df[kze][kz]==\"None\" or df[kze][kz]==\"-\" or df[kze][kz]==\".\" or df[kze][kz]==\"N.A\" or df[kze][kz]==\"NO\" or df[\"Number of Projects:\"][kz]==\"NA\" or df[kze][kz]==\"NONE\" or df[kze][kz]==\"Na\" or df[kze][kz]==\"Nil\" or df[kze][kz]==\"NA\" or df[kze][kz]==\"No it\" or df[kze][kz]==\"No publications/resarch paper\" or df[kze][kz]==\"NA\" or df[kze][kz]==\"No.\" or df[kze][kz]==\"Nope\" or df[kze][kz]==\"Not yet\" or df[kze][kz]==\"Zero\" or df[kze][kz]==\"`NA\" or df[kze][kz]==\"no\":\n s=0\n elif df[kze][kz]=='\"can plastic solar cell replace traditional silicon solar cell\" in IEEE explore' or df[kze][kz]==\"Soil stabilization \": \n s=1\n \n else:\n mee= re.findall(r'\\d+',df[\"Number of Projects:\"][kz])\n if len(mee)!=0:\n s=int(mee[0])\n else:\n s=0\n print(df[kze][kz])\n #elif type(s)==str:\n #s=0\n print(df[\"Number of Projects:\"][kz])\n if s not in papernumbers: \n papernumbers.append(s)\n papernum.append(1)\n else:\n ze=papernumbers.index(s)\n papernum[ze]=papernum[ze]+1\nprint(\"oi\")\nprint(papernum)\nprint(papernumbers) \nextraskills=[]\nextraskill=[]\ndf[\"numberextraskill\"]=0\nfor kz in range(0,len(df)):\n j=0\n s=re.split(';|,| ; | , |; |\\*|\\n|and|AND|/|&',str(df[\"Any Extra/Other Skills:\"][kz]))\n for i in s:\n if len(i)>0 and i[0]==\" \":\n i=i[1:]\n if len(i)>0 and i[-1]==\" \":\n i=i[0:-1]\n if str.upper(i)==\"INTERNET OF THINGS\" or str.upper(i)==\"INTERNET OF THINGS (IOT)\" or str.upper(i)==\"INTERNET OF THINGS.\":\n i=\"IOT\"\n if str.upper(i)==\"PLC TRAINING\":\n i=\"PLC\"\n \n if str.upper(i)==\"CC\":\n i=\"CLOUD COMPUTING\"\n \n if str.upper(i)==\"HACKING\":\n i=\"ETHICAL HACKING\"\n \n if str.upper(i)==\"EMBEDDED\":\n i=\"EMBEDDED SYSTEMS\"\n \n if str.upper(i)==\"BASIC ROBOTICS.\":\n i= \"ROBOTICS\"\n \n if str.upper(i)==\" PHOTOSHOP\":\n i=\"PHOTOSHOP\"\n \n if str.upper(i) not in extraskills and i!=\" \" and str.upper(i)!=\"NAN\" and str.upper(i)!=\"NO\" and str.upper(i)!=\"N\" and str.upper(i)!=\"NOTHING\" and str.upper(i)!=\"\" and str.upper(i)!=\"NONE\": \n extraskills.append(str.upper(i))\n extraskill.append(1)\n \n j=j+1\n elif i!=\" \" and str.upper(i)!=\"NAN\" and str.upper(i)!=\"NO\" and str.upper(i)!=\"N\" and str.upper(i)!=\"NOTHING\" and str.upper(i)!=\"\" and str.upper(i)!=\"NONE\":\n ze=extraskills.index(str.upper(i))\n extraskill[ze]=extraskill[ze]+1\n j=j+1\n df[\"numberextraskill\"][kz]=j\nprint(extraskills)\nprint(extraskill)\n\ndf[\"numberinternship\"]=-12\ninternship=[]\ninternships=[]\nm=[\"Nil\",\"Na\",\"No internships only Industrial trainings.\",\"No\",\"None\",\"nil\",\"N/A\"]\nfor kz in range(0,len(df)):\n if type(df[\"Internship Experience (The number of internship and company name):\"][kz])==str:\n s=re.findall('\\d+',df[\"Internship Experience (The number of internship and company name):\"][kz])\n if len(s)!=0:\n #print(s[-1]) \n if int(s[-1]) not in internship:\n df[\"numberinternship\"][kz]=int(s[-1])\n internship.append(int(s[-1]))\n internships.append(1)\n else:\n df[\"numberinternship\"][kz]=int(s[-1])\n ze=internship.index(int(s[-1]))\n internships[ze]=internships[ze]+1\n elif len(s)==0: \n lq=df[\"Internship Experience (The number of internship and company name):\"][kz].count(\",\")\n if lq>0:\n if lq+1 not in internship:\n df[\"numberinternship\"][kz]=lq+1\n internship.append(lq+1)\n internships.append(1)\n else:\n df[\"numberinternship\"][kz]=lq+1\n ze=internship.index(lq+1)\n internships[ze]=internships[ze]+1\n elif df[\"Internship Experience (The number of internship and company name):\"][kz] in m:\n q=0\n if q not in internship:\n df[\"numberinternship\"][kz]=q \n internship.append(q)\n internships.append(1)\n else:\n df[\"numberinternship\"][kz]=q\n ze=internship.index(q)\n internships[ze]=internships[ze]+1\n else:\n q=1\n if q not in internship:\n df[\"numberinternship\"][kz]=q\n internship.append(q)\n internships.append(1)\n else:\n df[\"numberinternship\"][kz]=q\n ze=internship.index(q)\n internships[ze]=internships[ze]+1\n \n '''if df[\"Internship Experience (The number of internship and company name):\"][kz] not in internship:\n internship.append(df[\"Internship Experience (The number of internship and company name):\"][kz])\n internships.append(1)\n else:\n ze=internship.index(df[\"Internship Experience (The number of internship and company name):\"][kz])\n internship[ze]=internship[ze]+1\n '''\n else:\n q=0\n if q not in internship:\n df[\"numberinternship\"][kz]=q\n internship.append(q)\n internships.append(1)\n else:\n df[\"numberinternship\"][kz]=q\n ze=internship.index(q)\n internships[ze]=internships[ze]+1\n \n #print(df[\"Internship Experience (The number of internship and company name):\"][kz])\nprint(\"No of internship\")\nprint(internship)\nprint(internships)\n\nevent=[]\nevents=[]\ncount=0\n\nkze=\"Events Organised:\"\ndf[\"numberevents\"]=-23\nfor i in range(0,len(df)):\n if type(df[\"Events Organised:\"][i])==str and str.isdigit(df[\"Events Organised:\"][i])==False:\n s=re.findall('\\d+',df[\"Events Organised:\"][i])\n if len(s)!=0 :\n #print(s[-1]) \n if int(s[-1]) not in event and int(s[-1])<=12:\n event.append(int(s[-1]))\n df[\"numberevents\"][i]=int(s[-1])\n events.append(1)\n elif int(s[-1])<=12:\n df[\"numberevents\"][i]=int(s[-1])\n ze=event.index(int(s[-1]))\n events[ze]=events[ze]+1\n else:\n #if df[\"Events Organised:\"][i]==\"•Coordinator of Kreative eye (2018)- The official photography society of KIIT. •Member of the broadcasting team of KIIT MUN 2016, 2017. •Member of the organizing committee of KIITFEST, the annual cultural fest of KIIT. •Member of the organizing committee of TEDxKIIT University 2017.\":\n \n if df[\"Events Organised:\"][i].count(\",\")>0:\n #print(df[\"Events Organised:\"][i])\n q=df[\"Events Organised:\"][i].count(\",\")+1\n #print(q)\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n else :\n q=1\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n print(df[\"Events Organised:\"][i])\n elif df[\"Events Organised:\"][i].count(\",\")>0 and df[\"Events Organised:\"][i]!=\"None. 😂 \":\n q=df[\"Events Organised:\"][i].count(\",\")+1\n #print(q)\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n elif str.upper(df[\"Events Organised:\"][i])==\"NONE\" or str.upper(df[\"Events Organised:\"][i])==\"NO\" or str.upper(df[\"Events Organised:\"][i])==\"NA\" or str.upper(df[\"Events Organised:\"][i])==\"NONE.\" or str.upper(df[\"Events Organised:\"][i])==\"NIL\" or df[\"Events Organised:\"][i]==\"No events organised. \" or df[\"Events Organised:\"][i]==\"No event \":\n q=0\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n else:\n #print(df[\"Events Organised:\"][i])\n q=1\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n \n elif type(df[\"Events Organised:\"][i])==str and str.isdigit(df[\"Events Organised:\"][i]) :\n #print(df[\"Events Organised:\"][i])\n q=int(df[\"Events Organised:\"][i])\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1 \n else:\n q=0\n if q not in event:\n df[\"numberevents\"][i]=q\n event.append(q)\n events.append(1)\n else:\n df[\"numberevents\"][i]=q\n ze=event.index(q)\n events[ze]=events[ze]+1\n \n #print(df[\"Events Organised:\"][i])\n #elif len(s)==0:\n #print(df[\"Events Organised:\"][i])\n \nprint(event)\nprint(events)\nprint(count)\nprint(df[\"numberevents\"])\nlbmake=LabelEncoder()\ndf[\"numberbranch\"]=lbmake.fit_transform(df[\"Branch\"])+1\n#print(df[\"numberbranch\"].head())\n#print(df[\"Branch\"].head())\nprint(df[df[\"numberbranch\"]==3])#Mech=7 CSE=2 IT=6 CIVIL=1 EE=4 EEE=5 E&I=3\ndf[\"numberenglishwritten\"]=-2\n#lbmake=LabelEncoder()\nfor i in range(0,len(df)):\n if df[\"English Written:\"][i]==\"Bad\":\n df[\"numberenglishwritten\"][i]=1\n elif df[\"English Written:\"][i]==\"Average\":\n df[\"numberenglishwritten\"][i]=2\n elif df[\"English Written:\"][i]==\"Good\":\n df[\"numberenglishwritten\"][i]=3\n elif df[\"English Written:\"][i]==\"Very Good\":\n df[\"numberenglishwritten\"][i]=4\n elif df[\"English Written:\"][i]==\"Excellent\":\n df[\"numberenglishwritten\"][i]=5\ndf[\"numberenglishspoken\"]=-2 \nfor i in range(0,len(df)):\n if df[\"English Spoken:\"][i]==\"Bad\":\n df[\"numberenglishspoken\"][i]=1\n elif df[\"English Spoken:\"][i]==\"Average\":\n df[\"numberenglishspoken\"][i]=2\n elif df[\"English Spoken:\"][i]==\"Good\":\n df[\"numberenglishspoken\"][i]=3\n elif df[\"English Spoken:\"][i]==\"Very Good\":\n df[\"numberenglishspoken\"][i]=4\n elif df[\"English Spoken:\"][i]==\"Excellent\":\n df[\"numberenglishspoken\"][i]=5\n \n \n \n#df[\"numberenglishwritten\"]=lbmake.fit_transform(df[\"English Written:\"])+1\n#print(df[df[\"numberenglishwritten\"]==5].head())#Average=1 Bad=2 Excellent=3 GOOD=4 VERY GOOD=5\n'''lbmake=LabelEncoder()\ndf[\"numberenglishspoken\"]=lbmake.fit_transform(df[\"English Spoken:\"])+1\ns=df[df[\"English Spoken:\"]==\"Average\"].head()#Average=1 Bad=2 Excellent=3 GOOD=4 VERY GOOD=5\n\nprint(s[[\"English Spoken:\",\"numberenglishspoken\"]])\n'''\ndf[\"numberquantativeaptitude\"]=2 \nfor i in range(0,len(df)):\n if df[\"Quantitative Aptitude:\"][i]==\"Bad\":\n df[\"numberquantativeaptitude\"][i]=1\n elif df[\"Quantitative Aptitude:\"][i]==\"Average\":\n df[\"numberenglishspoken\"][i]=2\n elif df[\"Quantitative Aptitude:\"][i]==\"Good\":\n df[\"numberquantativeaptitude\"][i]=3\n elif df[\"Quantitative Aptitude:\"][i]==\"Very Good\":\n df[\"numberquantativeaptitude\"][i]=4\n elif df[\"Quantitative Aptitude:\"][i]==\"Excellent\":\n df[\"numberquantativeaptitude\"][i]=5\n\n'''lbmake=LabelEncoder()\ndf[\"numberquantativeaptitude\"]=lbmake.fit_transform(df[\"Quantitative Aptitude:\"])+1\ns=df[df[\"Quantitative Aptitude:\"]==\"Very Good\"].head()#Average=1 Bad=2 Excellent=3 GOOD=4 VERY GOOD=5\nprint(s[[\"Quantitative Aptitude:\",\"numberquantativeaptitude\"]])\n'''\nlbmake=LabelEncoder()\ndf[\"numbercompany\"]=lbmake.fit_transform(df[\"Company Name :\"])+1\ns=df[df[\"Company Name :\"]==\"unplaced\"]\nprint(s[[\"numbercompany\",\"Company Name :\"]])#1 =placed 2=placed\n\nprint(\"Number of internship\")\nprint(df[[\"numberinternship\"]])\nprint(\"Number of events\")\nprint(df[\"numberevents\"])\nprint(\"Number of projects\")\nprint(df[\"numberproject\"])\ndf.to_csv(\"newc.csv\", encoding='utf-8', index=False)","repo_name":"EkanshMaheshwari/Student-Placement-Analysis","sub_path":"file to read data.py","file_name":"file to read data.py","file_ext":"py","file_size_in_byte":16559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5752621893","text":"import hashlib,json\nfrom datetime import datetime\n\nclass Block():\n def __init__(self,tstamp,voterInfo,previoushash=''):\n self.nonce = 0\n self.tstamp = tstamp\n self.voterInfo = voterInfo\n self.previoushash = previoushash\n self.hash = self.calcHash()\n \n def __str__(self):\n string =\" Chain Nounce : \" + str(self.nonce)+\"\\n\"\n string += \"voterInfo: \" +str(self.voterInfo)+\"\\n\"\n string += \"Old hash: \" +str(self.previoushash)+\"\\n\"\n string += \"New hash :\" + str(self.hash)+\"\\n\"\n\n return string \n \n \n def calcHash(self):\n block_string = json.dumps({\"Chain Nonce\":self.nonce,\"VotinggTimestamp\":str(self.tstamp),\"voterInfo\":self.voterInfo,\"previoushash\":self.previoushash},sort_keys=True).encode()\n return hashlib.sha512(block_string).hexdigest()\n #sha512 used to make Voter blocks encrypted and Secure than 128\n #OLD AND newer hash will generate 512 encry keys \n def mineBlock(self,difficulty):\n while(self.hash[:difficulty] != str('').zfill(difficulty)):\n self.nonce += 1\n self.hash = self.calcHash()\n \n\n \n \n\nclass BlockChain():\n def __init__(self):\n self.chain = [self.generateGenesisBlock(),]\n self.difficulty = 3\n\n def generateGenesisBlock(self):\n return Block(0,'01/01/2020','First Block')\n\n def getLastBlock(self):\n return self.chain[-1]\n\n def addBlock(self,newBlock):\n newBlock.previoushash = self.getLastBlock().hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n\n def isChainValid(self):\n for i in range(1,len(self.chain)):\n prevb = self.chain[i-1]\n currb = self.chain[i]\n if(currb.hash != currb.calcHash()):\n print(\"Invalid Block\")\n return False\n if(currb.previoushash != prevb.hash):\n print(\"Invalid Chain\")\n return False\n return True\n \n\nbchain = BlockChain()\ni=1\nwhile i!=\"quit\":\n\n age = int(input(\"Enter Your age: \"))\n if age >= 18:\n print(\"You Can Vote You are Elegible to Vote \")\n else:\n print(\"Sorry You are Below 18 System Abort !\")\n exit(1)\n \n name = str(input(\"Enter voter name: \"))\n \n \n vote = int(input(\"Press the no. to vote:\\n1 - BJP (Narendra Modi)\\n2 - INC (Rahul Gandhi)\\n3 - AAP (Arvind Kejriwal)\\n4 - BSP\\n5 - ShivSena\\n6 - NOTA\\n\"))\n if vote == 1:\n elected = \"BJP (Narendra Modi)\"\n elif vote ==2 :\n elected = \"Congress (Rahul Gandhi)\"\n elif vote == 3:\n elected = \"AAP (Arvind Kejriwal)\"\n elif vote == 4:\n elected = \"BSP (Mayawati)\"\n elif vote == 5:\n elected = \"ShivSena (Aditya Thackrey)\"\n else:\n elected = \"NOTA\"\n print(\"No Part Selected Pls Select Party in next Stream\")\n exit(1)\n patInfo = \"Name: \"+name +\"\\nAge: \"+str(age)+\"\\nElected Party: \"+elected\n bchain.addBlock(Block(datetime.now(),patInfo))\n i = input(\"type quit to End Voting and view Total E-Voting Details \\n type cont to Continue Adding Voters :\")\n\n\nfor b in bchain.chain:\n print(b)\n","repo_name":"rdunlocked18/Python-Blockchain-SimpleEvoting","sub_path":"mainSuper.py","file_name":"mainSuper.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"28255058233","text":"import pygame\n\nfrom mlgame.gamedev.generic import quit_or_esc, KeyCommandMap\nfrom mlgame.gamedev.recorder import get_record_handler\n\nfrom . import gamecore\nfrom .gamecore import GameStatus, PlatformAction\nfrom ..communication import SceneInfo\nfrom ..main import get_log_dir\n\nclass Arkanoid:\n def __init__(self, fps: int, level: int, record_progress, one_shot_mode):\n self._init_pygame()\n\n self._fps = fps\n self._scene = gamecore.Scene(level, True)\n self._keyboard = KeyCommandMap({\n pygame.K_LEFT: PlatformAction.MOVE_LEFT,\n pygame.K_RIGHT: PlatformAction.MOVE_RIGHT,\n }, PlatformAction.NONE)\n\n self._record_handler = get_record_handler(record_progress, {\n \"status\": (GameStatus.GAME_OVER, GameStatus.GAME_PASS)\n }, get_log_dir())\n self._one_shot_mode = one_shot_mode\n\n def _init_pygame(self):\n pygame.display.init()\n pygame.display.set_caption(\"Arkanoid\")\n self._screen = pygame.display.set_mode(gamecore.scene_area_size)\n self._clock = pygame.time.Clock()\n\n def game_loop(self):\n while not quit_or_esc():\n self._record_handler(self._scene.fill_scene_info_obj(SceneInfo()))\n control_action = self._keyboard.get_command()\n game_status = self._scene.update(control_action)\n\n if game_status == GameStatus.GAME_OVER or \\\n game_status == GameStatus.GAME_PASS:\n print(game_status.value)\n self._record_handler(self._scene.fill_scene_info_obj(SceneInfo()))\n\n if self._one_shot_mode:\n return\n\n self._scene.reset()\n\n self._screen.fill((0, 0, 0))\n self._scene.draw_gameobjects(self._screen)\n pygame.display.flip()\n\n self._clock.tick(self._fps)\n\n pygame.quit()\n","repo_name":"loe1616/-Machine-learning","sub_path":"MLGame-master/games/arkanoid/game/arkanoid.py","file_name":"arkanoid.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25433799764","text":"'''\r\nCreated on 01.01.2023\r\n\r\n@author: uschoen\r\n'''\r\n__version__ = '0.1.0'\r\nfrom _ast import Try\r\n__author__ = 'ullrich schoen'\r\n\r\n\r\n# Standard library imports\r\nimport json\r\nimport os\r\nimport importlib\r\nimport logging\r\nimport sys\r\nimport re\r\nimport socket\r\nfrom threading import Thread\r\n\r\nLOG=logging.getLogger(__name__)\r\n\r\ntry:\r\n import psutil #@UnresolvedImport \r\n GETSTATISTICPROCESS=True\r\nexcept:\r\n LOG.error(\"psutil not installed. Use pip3 install psutil to install. Function getStatisticProcess not working\")\r\n GETSTATISTICPROCESS=False\r\n# Local application imports\r\nfrom core.coreException import coreException\r\nfrom module.modulException import modulException\r\n\r\n\r\n\r\nclass coreBase():\r\n \r\n def __init__(self):\r\n '''\r\n coreBase \r\n \r\n Global variable:\r\n self.path: absolute path of the script\r\n self.rootPath: root path of the script\r\n self.host: host name\r\n self.threads: all running thread objects\r\n \r\n \r\n '''\r\n \r\n '''\r\n self.path: the absolute path of the script\r\n '''\r\n self.runPath='' if not os.path.dirname(sys.argv[0]) else '%s/'%(os.path.dirname(sys.argv[0]))\r\n \r\n '''\r\n script absolute root path\r\n '''\r\n self.rootPath=(\"%s/%s\"%(os.getcwd(),os.path.dirname(sys.argv[0])))\r\n \r\n '''\r\n self.host: the self host name\r\n '''\r\n self.host=socket.gethostbyaddr(socket.gethostname())[0].split(\".\")[0]\r\n \r\n \r\n '''\r\n thread queue\r\n '''\r\n self.threads=[]\r\n \r\n LOG.info(\"__init core base finish, version %s\"%(__version__))\r\n \r\n def getLocalIP(self):\r\n ''' \r\n return the local IP Adresss\r\n '''\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect(('8.8.8.8', 9))\r\n ip = s.getsockname()[0]\r\n return ip\r\n except (socket.error) as e:\r\n raise coreException(\"socket error in getLocalIP. error: %e\"%(e))\r\n except (Exception) as e:\r\n raise coreException(\"unknown error in getLocalIP. error: %e\"%(e))\r\n \r\n def clearUpThreads(self):\r\n '''\r\n clear up death or old thread\r\n '''\r\n try:\r\n for thread in self.threads:\r\n if not thread.is_alive():\r\n self.threads.remove(thread)\r\n except (Exception) as e:\r\n raise coreException(\"unknown error in clearUpThreads. error: %e\"%(e))\r\n \r\n def getStatisticProcess(self):\r\n '''\r\n return a dic with statis data\r\n \r\n return: { 'cpu': cpu usage in percent\r\n 'mem' : memory in percent\r\n 'threads' running threads\r\n }\r\n '''\r\n try: \r\n statisticValues={\r\n 'mem':0,\r\n 'cpu':0,\r\n 'threads':0\r\n }\r\n if GETSTATISTICPROCESS:\r\n python_process = psutil.Process(os.getpid())\r\n statisticValues={\r\n 'mem':round(python_process.memory_percent(),2),\r\n 'cpu':python_process.cpu_percent(interval=1),\r\n 'threads':len(self.threads)\r\n }\r\n return statisticValues\r\n except (Exception) as e:\r\n raise coreException(\"some error in getStatisticProcess. error:%s\"%(e),True)\r\n \r\n def startThread(self,target,args):\r\n '''\r\n start an the funktion in the new thread and add it to\r\n the thread queue\r\n \r\n target=function or object to start\r\n args= argument\r\n '''\r\n try:\r\n thread=Thread(target=target, args=args,daemon=True)\r\n self.threads.append(thread)\r\n thread.start()\r\n except (Exception) as e:\r\n raise coreException(\"can't start new thread. error:%s\"%(e))\r\n \r\n def thisMethode(self):\r\n '''\r\n return the actule methode\r\n '''\r\n try:\r\n return sys._getframe(1).f_code.co_name \r\n except:\r\n LOG.critical(\"some error in thisMethode\",True)\r\n \r\n def checkModulVersion(self,package,classModul,modulVersion=__version__):\r\n '''\r\n check if a load package have the right module version\r\n \r\n package: the load package\r\n classModul: the load class\r\n modulVersion: min version , default=core Version\r\n \r\n return: object from the Class\r\n \r\n exception: defaultEXC\r\n \r\n '''\r\n try:\r\n if hasattr(classModul, '__version__'):\r\n if classModul.__version__ str:\n t = [releaseTimes[0]]\n for i in range(len(releaseTimes) - 1):\n t.append(releaseTimes[i + 1] - releaseTimes[i])\n d = defaultdict(int)\n maxn = 0\n maxch = ''\n for ch in range(len(keysPressed)):\n if keysPressed[ch] in d:\n d[ch] += t[ch]\n else:\n d[ch] = t[ch]\n\n if d[ch] > maxn:\n maxn = d[ch]\n maxch = keysPressed[ch]\n elif d[ch] == maxn:\n maxch = keysPressed[ch] if ord(keysPressed[ch]) > ord(maxch) else maxch\n return maxch","repo_name":"wangyu33/LeetCode","sub_path":"LeetCode1629.py","file_name":"LeetCode1629.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17186125182","text":"import netCDF4 as nc\nimport numpy as np\nimport matplotlib.pyplot as plt\n\norig = nc.Dataset(\"E:\\GT_chpl\\Tests\\Data\\\\true.nc\",\n \"r\").variables[\"data\"][:]\ncalc = nc.Dataset(\"E:\\GT_chpl\\Tests\\Data\\\\result.nc\",\n \"r\").variables[\"data\"][:]\n\nsavedFile = open(\"Tests\\Data\\sincos2DAvgError.txt\", \"r\")\nli = []\nfor line in savedFile:\n li.append(list(map(float, line.split(\" \"))))\n\n\nh_values, error_values = tuple(li)\n\nfig = plt.figure(figsize=(10, 7))\nfig.add_subplot(2, 2, 1)\nc = plt.imshow(calc)\nplt.colorbar(c)\nplt.title(\"Calculated Value\")\n\nfig.add_subplot(2, 2, 2)\nk = plt.imshow(orig)\nplt.colorbar(k)\nplt.title(\"True Value\")\n\nfig.add_subplot(2, 2, 3)\nplt.yscale('log')\nplt.xscale('log')\nplt.plot(h_values, error_values, '-o', label=\"error vs H\")\nplt.plot(h_values, np.power(h_values, 2), '-x', label=\"h*h vs h\")\nplt.xlabel('h Values')\nplt.ylabel('Avg Error')\nplt.title('Sin(X)Cos(Y) Derivative test')\nplt.legend()\n#plt.savefig(fname=\"PythonScripts\\sincos2DAvgErr_LogScale\", dpi=1000)\nplt.show()\n","repo_name":"ds24449/Stencil-in-Chapel","sub_path":"PythonScripts/sincos2DAvgError.py","file_name":"sincos2DAvgError.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42622558048","text":"# 133. Clone Graph\n\n# Given a reference of a node in a connected undirected graph.\n# Return a deep copy (clone) of the graph.\n# Each node in the graph contains a value (int) and a list (List[Node]) of its\n# neighbors.\n\n# class Node {\n# public int val;\n# public List neighbors;\n# }\n\n# Test case format:\n# For simplicity, each node's value is the same as the node's index (1-indexed)\n# Ffor example, the first node with val == 1, the second node with val == 2,\n# and so on. The graph is represented in the test case using an adjacency list.\n# An adjacency list is a collection of unordered lists used to represent a\n# finite graph. Each list describes the set of neighbors of a node in the\n# graph. The given node will always be the first node with val = 1. You must\n# return the copy of the given node as a reference to the cloned graph.\n\n# Example 1:\n# Input: adjList = [[2,4],[1,3],[2,4],[1,3]]\n# Output: [[2,4],[1,3],[2,4],[1,3]]\n# Explanation: There are 4 nodes in the graph.\n# 1st node (val = 1)'s neighbors are 2nd node (val = 2) and 4th node (val = 4).\n# 2nd node (val = 2)'s neighbors are 1st node (val = 1) and 3rd node (val = 3).\n# 3rd node (val = 3)'s neighbors are 2nd node (val = 2) and 4th node (val = 4).\n# 4th node (val = 4)'s neighbors are 1st node (val = 1) and 3rd node (val = 3).\n\n\nclass Node:\n def __init__(self, val=0, neighbors=None):\n self.val = val\n self.neighbors = neighbors if neighbors is not None else []\n\n\nclass Solution:\n def cloneGraph(self, node: 'Node') -> 'Node':\n # when no Node input\n if node is None:\n return None\n\n # initialize head node\n head = None\n\n # dictionary storing copies of Nodes\n # key: numberic self.val, value: copy of Node object\n # e.g. {1: }\n visited = {}\n\n # queue of original nodes and copy nodes\n # for parallel breath first traversal\n originalQueue = [node]\n newQueue = [Node()] # start with empty new Node\n\n while (originalQueue):\n currentOriginal = originalQueue.pop(0)\n currentCopy = newQueue.pop(0)\n currentCopy.val = currentOriginal.val\n # first node copy store it in head to be return in the end\n if currentCopy.val == 1:\n visited[currentCopy.val] = currentCopy\n head = currentCopy\n\n # for each neighbor of current original Node\n for neigh in currentOriginal.neighbors:\n # if copy exists in dictionary\n if neigh.val in visited:\n # copy was made but no neighbors were filled\n if visited[neigh.val].neighbors == []:\n # store that Node from dictionary as newNeigh var\n newNeigh = visited[neigh.val]\n else:\n # add that Node from dict to currentCopy's neighbors\n currentCopy.neighbors.append(visited[neigh.val])\n # no further action needed\n continue\n\n # if copy does not exist in dictionary\n else:\n # create a new copy of Node\n newNeigh = Node(neigh.val)\n # store this creation in dictionary\n visited[neigh.val] = newNeigh\n\n # neighborless neigh copy added to currentCopy's neighbors\n currentCopy.neighbors.append(newNeigh)\n # append the new neigh copy and original neigh to\n # corresponding queues if there isn't one in queue already\n # e.g. when two \"parent\" Nodes have the same neighbor,\n # only append that neighbor once\n if neigh.val not in (x.val for x in originalQueue):\n newQueue.append(newNeigh)\n originalQueue.append(neigh)\n\n # return first copy of Node\n return head\n","repo_name":"hongchris96/DSA","sub_path":"leetcode/133_clone_graph.py","file_name":"133_clone_graph.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"36373421914","text":"from __future__ import division\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torchvision.ops import nms\n\n\nclass DecodeBox(nn.Module):\n def __init__(self, anchors, num_classes, img_size):\n super(DecodeBox, self).__init__()\n \n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.bbox_attrs = 5 + num_classes\n self.img_size = img_size\n\n def forward(self, input):\n \n batch_size = input.size(0)\n input_height = input.size(2)\n input_width = input.size(3)\n\n \n stride_h = self.img_size[1] / input_height\n stride_w = self.img_size[0] / input_width\n \n scaled_anchors = [(anchor_width / stride_w, anchor_height / stride_h) for anchor_width, anchor_height in self.anchors]\n\n \n prediction = input.view(batch_size, self.num_anchors,\n self.bbox_attrs, input_height, input_width).permute(0, 1, 3, 4, 2).contiguous()\n\n \n x = torch.sigmoid(prediction[..., 0])\n y = torch.sigmoid(prediction[..., 1])\n \n w = prediction[..., 2]\n h = prediction[..., 3]\n \n conf = torch.sigmoid(prediction[..., 4])\n \n pred_cls = torch.sigmoid(prediction[..., 5:])\n\n FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n\n \n grid_x = torch.linspace(0, input_width - 1, input_width).repeat(input_height, 1).repeat(\n batch_size * self.num_anchors, 1, 1).view(x.shape).type(FloatTensor)\n grid_y = torch.linspace(0, input_height - 1, input_height).repeat(input_width, 1).t().repeat(\n batch_size * self.num_anchors, 1, 1).view(y.shape).type(FloatTensor)\n\n \n anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))\n anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))\n anchor_w = anchor_w.repeat(batch_size, 1).repeat(1, 1, input_height * input_width).view(w.shape)\n anchor_h = anchor_h.repeat(batch_size, 1).repeat(1, 1, input_height * input_width).view(h.shape)\n\n \n pred_boxes = FloatTensor(prediction[..., :4].shape)\n pred_boxes[..., 0] = x.data + grid_x\n pred_boxes[..., 1] = y.data + grid_y\n pred_boxes[..., 2] = torch.exp(w.data) * anchor_w\n pred_boxes[..., 3] = torch.exp(h.data) * anchor_h\n\n \n _scale = torch.Tensor([stride_w, stride_h] * 2).type(FloatTensor)\n output = torch.cat((pred_boxes.view(batch_size, -1, 4) * _scale,\n conf.view(batch_size, -1, 1), pred_cls.view(batch_size, -1, self.num_classes)), -1)\n return output.data\n \ndef letterbox_image(image, size):\n iw, ih = image.size\n w, h = size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n return new_image\n\ndef yolo_correct_boxes(top, left, bottom, right, input_shape, image_shape):\n new_shape = image_shape*np.min(input_shape/image_shape)\n\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n\n box_yx = np.concatenate(((top+bottom)/2,(left+right)/2),axis=-1)/input_shape\n box_hw = np.concatenate((bottom-top,right-left),axis=-1)/input_shape\n\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = np.concatenate([\n box_mins[:, 0:1],\n box_mins[:, 1:2],\n box_maxes[:, 0:1],\n box_maxes[:, 1:2]\n ],axis=-1)\n boxes *= np.concatenate([image_shape, image_shape],axis=-1)\n return boxes\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n \"\"\"\n 计算IOU\n \"\"\"\n if not x1y1x2y2:\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\n else:\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\n\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * \\\n torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)\n \n b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)\n\n iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)\n\n return iou\n\n\ndef non_max_suppression(prediction, num_classes, conf_thres=0.5, nms_thres=0.4):\n \n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n \n class_conf, class_pred = torch.max(image_pred[:, 5:5 + num_classes], 1, keepdim=True)\n\n \n conf_mask = (image_pred[:, 4] * class_conf[:, 0] >= conf_thres).squeeze()\n\n \n image_pred = image_pred[conf_mask]\n class_conf = class_conf[conf_mask]\n class_pred = class_pred[conf_mask]\n if not image_pred.size(0):\n continue\n \n detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)\n\n \n unique_labels = detections[:, -1].cpu().unique()\n\n if prediction.is_cuda:\n unique_labels = unique_labels.cuda()\n detections = detections.cuda()\n\n for c in unique_labels:\n \n detections_class = detections[detections[:, -1] == c]\n\n \n keep = nms(\n detections_class[:, :4],\n detections_class[:, 4] * detections_class[:, 5],\n nms_thres\n )\n max_detections = detections_class[keep]\n \n # Add max detections to outputs\n output[image_i] = max_detections if output[image_i] is None else torch.cat(\n (output[image_i], max_detections))\n\n return output\n","repo_name":"LinghuiXia/SAR-Eddy-Detection","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"15384912028","text":"from __future__ import print_function\nimport math\nfrom random import (\n randint,\n shuffle,\n choice,\n)\n\nimport random\nimport os\nimport sys\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, dir_path + '/simanneal-master/simanneal')\nfrom anneal import Annealer\n\nclass NonBetweenness(Annealer):\n def __init__(self, identifier, num_wizards, num_constraints, wizards, constraints, outfile):\n # NOTE: state == wizards\n # shuffle(wizards) # do not shuffle because we may start with an ordering\n super(NonBetweenness, self).__init__(wizards)\n # set hyperparameters\n self.Tmax = 2.0\n self.Tmin = 0.08\n self.steps = 100000\n self.updates = 1500\n # self.randomize_hyperparams() # use this for exploring\n\n # mapping for efficient position lookup by wizard name\n self.identifier = identifier\n self.wiz_to_pos = {wizards[i] : i for i in range(len(wizards))}\n self.num_wizards = num_wizards\n self.num_constraints = num_constraints\n self.constraints = constraints\n self.wizards = wizards\n self.outfile = outfile\n\n def randomize_hyperparams(self):\n self.Tmax = random.uniform(2.5, 5)\n self.Tmin = random.uniform(0.01, 1)\n self.steps = randint(20000, 200000)\n self.updates = 100\n\n def energy(self):\n \"\"\"Calculates the number of constraints unsatisfied.\"\"\"\n E = sum([1 for c in self.constraints if self._is_constraint_violated(c)])\n if E == 0:\n self._save_solution()\n print(\"exiting...\")\n exit()\n return E\n\n def move(self):\n \"\"\"Performs a move during the simmulated annealing algorithm.\"\"\"\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)\n\n def print_violated_constraints(self):\n for c in self.constraints:\n if self._is_constraint_violated(c):\n print(c)\n\n def _save_solution(self):\n print(\"FOUND OPTIMAL:\", self.state)\n print(\"saving to file...\", self.outfile)\n with open(self.outfile, 'w') as file:\n for w in self.state:\n file.write(\"{0} \".format(w))\n\n def _move_adjacently(self):\n a = randint(0, len(self.state) - 1)\n if a == 0:\n b = a + 1\n elif a == len(self.state) - 1:\n b = a - 1\n else:\n offset = choice([1, -1])\n b = a + offset\n self._swap_wizards(self.state[a], self.state[b])\n\n def dict_check(self):\n enum_dict = {v:i for i, v in enumerate(self.state)}\n error = 0\n for wizard in self.state:\n if (enum_dict[wizard] != self.wiz_to_pos[wizard]):\n error+=1\n print(wizard)\n return error\n\n def _move_range_shuffle(self, range_len):\n \"\"\"Shuffles a random, continuous subset of the current state, provided the length of the range desired to be shuffled\"\"\"\n start = randint(0, len(self.state) - range_len)\n end = start + range_len\n\n # print(\"start: \" + str(start))\n # print(\"end: \" + str(end))\n # print(\"range_len: \" + str(range_len))\n # print(\"prior state: \", self.state)\n # print(\"prior dict: \", self.wiz_to_pos)\n\n copy_state = self.state[start:end]\n\n #for wizard in copy_state:\n # print(wizard)\n\n random.shuffle(copy_state)\n\n for i, wizard in enumerate(copy_state):\n #print(\"wiz1_loop: \" + wizard)\n self.state[start + i] = wizard\n self.wiz_to_pos[wizard] = start + i\n\n # print(\"post state: \", self.state)\n # print(\"post dict: \", self.wiz_to_pos)\n # print('\\n Error:', self.dict_check())\n # print(\"end\\n \\n\")\n\n\n\n def _move_range_mirror(self, range_len):\n \"\"\"Shuffles a random, continuous subset of the current state, provided the length of the range desired to be shuffled\"\"\"\n #start1 = randint(range_len, len(self.state) - range_len)\n start = randint(0, len(self.state) - range_len)\n #range_list = choice([[start1, start1 - range_len], [start2, start2 + range_len]])\n end = start + range_len\n\n copy_state = self.state[start:end]\n copy_state.reverse()\n self.state[start:end] = copy_state\n\n for wizard in self.state[start:end]:\n self.wiz_to_pos[wizard] = self.state.index(wizard)\n\n def _move_satisfy_random_constraint(self):\n \"\"\"Satisfies a random unsatisfied constraint.\"\"\"\n secure_random = random.SystemRandom()\n done = False\n while not done:\n c = secure_random.choice(self.constraints)\n if self._is_constraint_violated(c):\n done = True\n # swap 2 wizards to move closer\n self._swap_wizards(c[random.randint(0, 1)], c[2])\n # with probability 0.5, swap the two border wizards\n if random.randint(0, 1) == 1:\n self._swap_wizards(c[0], c[1])\n if not done: print(\"Nothing to do...\")\n\n def _move_randomly(self):\n \"\"\"Swaps two wizard assignments.\"\"\"\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)\n\n def _swap_wizards(self, wiz1, wiz2):\n pos1, pos2 = self.wiz_to_pos[wiz1], self.wiz_to_pos[wiz2]\n self.state[pos1], self.state[pos2] = self.state[pos2], self.state[pos1]\n self.wiz_to_pos[wiz1], self.wiz_to_pos[wiz2] = self.wiz_to_pos[wiz2], self.wiz_to_pos[wiz1]\n\n def _is_constraint_violated(self, c):\n return (\n (self.wiz_to_pos[c[0]] < self.wiz_to_pos[c[2]] < self.wiz_to_pos[c[1]]) or\n (self.wiz_to_pos[c[1]] < self.wiz_to_pos[c[2]] < self.wiz_to_pos[c[0]])\n )\n","repo_name":"FibonacciHeap/cs170-project","sub_path":"wizards.py","file_name":"wizards.py","file_ext":"py","file_size_in_byte":6032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11816357943","text":"# Author Karol Zwolak\nimport pygame as pg\nimport time\nW_WIDTH, W_HEIGHT = 1200, 900\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\ngreen = (0, 255, 0)\npathC = (255, 255, 0)\ncl = (0, 255, 255)\nop = (30, 144, 255)\nred = (255, 0, 0)\n\n\nclass Node:\n def __init__(self, pos=None, parent_node=None, start_pos=None, target_pos=None):\n if start_pos != None and target_pos != None:\n self.pos = start_pos\n self.g_cost = 0\n self.h_cost = 0\n self.cost = 0\n\n elif pos != None and parent_node != None:\n self.parent = parent_node\n self.pos = pos\n self.g_cost = parent_node.g_cost + self.distance(self.parent.pos)\n self.h_cost = self.get_h_val()\n self.cost = self.g_cost+self.h_cost\n\n def get_h_val(self):\n a = abs(self.pos[0] - target_node[0])\n b = abs(self.pos[1] - target_node[1])\n return a+b\n\n def distance(self, from_pos):\n dis = ((self.pos[0]-from_pos[0])**2+(self.pos[1]-from_pos[1])**2)**0.5\n return dis\n\n def get_neighbors(self, board):\n n = (0, 1, 2, 3, 4, 5)\n y, x = self.pos\n\n down = False\n up = False\n left_up = False\n left_down = False\n right_up = False\n right_down = False\n\n h_len = len(board[0]) # x, pos[1] #\n v_len = len(board) # y, pos[0] #\n neighbors = []\n\n if y < v_len-1:\n y1 = y+1\n x1 = x\n down = True\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n left_down = True\n right_down = True\n\n if y > 0:\n y1 = y-1\n x1 = x\n up = True\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n left_up = True\n right_up = True\n\n if x < h_len-1:\n y1 = y\n x1 = x+1\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n right_up = True\n right_down = True\n # else:\n # right_up = False\n # right_down = False\n\n if right_up and up:\n y1 = y-1\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n if right_down and down:\n y1 = y+1\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n if x > 0:\n y1 = y\n x1 = x-1\n if board[y1][x1] in n:\n\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n left_up = True\n left_down = True\n\n if left_up and up:\n y1 = y-1\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n if left_down and down:\n y1 = y+1\n if board[y1][x1] in n:\n neighbors.append(Node(pos=(y1, x1), parent_node=self))\n\n return neighbors\n\n\nclass NodeList:\n def __init__(self):\n self.items = []\n\n def contains(self, obj):\n if len(self.items) > 0:\n\n for i, node in enumerate(self.items):\n if obj.pos == node.pos:\n return i\n\n return -1\n\n def get_min(self):\n if len(self.items) > 0:\n curr = 0\n\n for i in range(len(self.items)):\n node = self.items[i]\n if node.cost < self.items[curr].cost:\n curr = i\n return curr\n\n return -1\n\n def _print(self):\n for node in self.items:\n print(node.pos, end=\",\")\n\n print()\n\n\nclass Grid:\n\n def __init__(self, length):\n self.length = length\n self.board = [[0 for _ in range(W_WIDTH//length)]\n for _ in range(W_HEIGHT//length)]\n\n self.board[0][0] = 1\n self.board[-1][-1] = 2\n\n self.start_node = self.get_node(1)\n self.target_node = self.get_node(2)\n\n def switch(self):\n self.board[self.start_node[0]][self.start_node[1]] = 2\n self.board[self.target_node[0]][self.target_node[1]] = 1\n\n self.start_node = self.get_node(1)\n self.target_node = self.get_node(2)\n\n return (self.start_node, self.target_node)\n\n def get_node(self, node):\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n\n if self.board[i][j] == node:\n return (i, j)\n\n return None\n\n def display(self, win):\n win.fill(white)\n\n for i in range(len(self.board)):\n for j in range(len(self.board[i])):\n\n color = None\n if self.board[i][j] == 1:\n color = green\n elif self.board[i][j] == 2:\n color = red\n elif self.board[i][j] == 3:\n color = pathC\n elif self.board[i][j] == -1:\n color = black\n elif self.board[i][j] == 4:\n color = op\n elif self.board[i][j] == 5:\n color = cl\n if color:\n\n pg.draw.rect(win, color, (self.length*j,\n self.length*i, self.length, self.length))\n\n pg.draw.line(win, black, (0, self.length*i),\n (W_WIDTH, self.length*i))\n\n if i == len(self.board)-1: # draw vertical lines #\n pg.draw.line(win, black, (self.length*j, 0),\n (self.length*j, W_HEIGHT))\n\n def set(self, n_board):\n self.board = n_board\n\n def clicked(self, x, y):\n result = (y//self.length, x//self.length)\n if 0 <= result[0] < len(self.board) and 0 <= result[1] < len(self.board[0]):\n return result\n return None\n\n def clear(self, walls=True):\n nums = [-1, 3, 4, 5] if walls else [3, 4, 5]\n for i in range(len(self.board)):\n for j in range(len(self.board[0])):\n if self.board[i][j] in nums:\n self.board[i][j] = 0\n\n def move_node(self, node, to_pos):\n if 0 <= to_pos[0] < len(self.board) and 0 <= to_pos[1] < len(self.board[0]):\n if self.board[to_pos[0]][to_pos[1]] not in (1, 2):\n y, x = self.get_node(node)\n self.board[y][x] = 0\n self.board[to_pos[0]][to_pos[1]] = node\n self.start_node = self.get_node(1)\n self.target_node = self.get_node(2)\n\n\ndef solve(win, grid, show_steps=True):\n paused = False\n\n av = (0, 3, 4, 5)\n clock = pg.time.Clock()\n open_list = NodeList()\n closed_list = NodeList()\n\n board = grid.board\n start_node = Node(start_pos=grid.start_node, target_pos=grid.target_node)\n open_list.items.append(start_node)\n\n a_break = False\n start = grid.start_node\n end = grid.target_node\n\n no_count = 0.\n s = time.time()\n\n while True:\n\n # Actual algorithm ----------- #\n if not open_list.items:\n print(\"NoPathException\")\n return board\n\n neighbors = NodeList()\n n = open_list.get_min()\n\n curr = open_list.items[n]\n open_list.items.pop(n)\n closed_list.items.append(curr)\n neighbors = curr.get_neighbors(board)\n\n if curr.pos == end:\n break\n for neighbor in neighbors:\n if neighbor.pos == end:\n a_break = True\n break\n\n index1 = open_list.contains(neighbor)\n index2 = closed_list.contains(neighbor)\n\n if index1 >= 0 and open_list.items[index1].cost > neighbor.cost:\n open_list.items.pop(index1)\n open_list.items.append(neighbor)\n\n if index2 >= 0 and closed_list.items[index2].cost > neighbor.cost:\n closed_list.items.pop(index2)\n # adding this neighbor to openlist isnt necessary, but it might turn out to be shortest path #\n open_list.items.append(neighbor)\n\n if index1 == -1 and index2 == -1:\n open_list.items.append(neighbor)\n\n # End of actual algorithm ----------- #\n if show_steps:\n\n start_no_count = time.time()\n\n for node in closed_list.items:\n y, x = node.pos\n if (y, x) != start:\n grid.board[y][x] = 4\n\n for node in open_list.items:\n y, x = node.pos\n if (y, x) != start:\n grid.board[y][x] = 5\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n return board\n\n elif event.type == pg.KEYDOWN:\n\n if event.key == pg.K_ESCAPE:\n return board\n if event.key == pg.K_SPACE:\n paused = True\n grid.clear(walls=False)\n pg.event.clear()\n break\n\n while paused:\n for event in pg.event.get():\n\n if event.type == pg.QUIT:\n return board\n elif event.type == pg.KEYDOWN:\n if event.key == pg.K_SPACE:\n paused = False\n break\n elif event.key == pg.K_ESCAPE:\n return board\n\n grid.display(win)\n pg.display.flip()\n # clock.tick(200)\n\n no_count += time.time() - start_no_count\n\n if a_break:\n break\n\n actual_time = time.time() - s - no_count\n\n # Extracting path ------------ #\n # path is reversed #\n path = NodeList()\n length = 0\n curr = closed_list.items[-1]\n\n while curr.pos != start:\n y, x = curr.pos\n\n if board[y][x] in av and (y, x) != start:\n path.items.append(curr)\n length += curr.g_cost\n\n curr = curr.parent\n\n length = round(length, 3)\n\n # elapsedTime = time.time()-s\n\n if show_steps:\n print(\"found path of length\", length, \"time: \", actual_time, \"s\")\n\n path.items = reversed(path.items)\n\n for elem in path.items:\n clock.tick(60)\n y, x = elem.pos\n\n board[y][x] = 3\n grid.display(win)\n pg.display.flip()\n\n return board\n\n\ndef get_size_from_user():\n size = 0\n while not 10 <= size <= 50:\n print(\"Enter size of cells in grid (between 10 and 50)\\n press enter to choose default value of 20\")\n try:\n inp = input()\n size = int(inp)\n except:\n if inp == \"\":\n size = 20\n break\n print(\"Not a valid number!\")\n\n return size\n\n\ndef init():\n\n win = pg.display.set_mode((W_WIDTH, W_HEIGHT))\n\n pg.display.set_caption(\n \"A* visualization. mouse-click to draw, c to clear board, space to start/pause, p to clear path, s to switch start and end nodes \")\n\n size = get_size_from_user()\n grid = Grid(size)\n\n start_node = grid.get_node(1)\n target_node = grid.get_node(2)\n\n return win, grid, start_node, target_node\n\n\ndef main():\n global start_node, target_node\n\n win, grid, start_node, target_node = init()\n\n solving = True\n editing = False\n moving = False\n\n while solving:\n\n for event in pg.event.get():\n if event.type == pg.MOUSEBUTTONDOWN:\n\n if event.button == 1:\n pos1 = pg.mouse.get_pos()\n pos1 = grid.clicked(*pos1)\n\n if pos1:\n y, x = pos1\n k = grid.board[y][x]\n if k == 0 or k == -1 or k == 3 or k == 4 or k == 5:\n\n editing = True\n if k == -1:\n grid.board[y][x] = 0\n else:\n grid.board[y][x] = -1\n\n elif k == 1 or k == 2:\n moving = True\n\n elif event.type == pg.MOUSEBUTTONUP:\n if event.button == 1:\n editing = False\n moving = False\n\n if moving or editing:\n pos = pg.mouse.get_pos()\n pos = grid.clicked(*pos)\n\n if pos:\n if moving:\n grid.move_node(k, pos)\n start_node = grid.get_node(1)\n target_node = grid.get_node(2)\n\n if pos != pos1:\n y, x = pos\n k = grid.board[y][x]\n\n if editing:\n if k == 0 or k == -1 or k == 3 or k == 4 or k == 5:\n if k == -1:\n grid.board[y][x] = 0\n else:\n grid.board[y][x] = -1\n\n pos1 = pg.mouse.get_pos()\n pos1 = grid.clicked(*pos1)\n\n if event.type == pg.KEYDOWN:\n\n if event.key == pg.K_p:\n grid.clear(walls=False)\n\n elif event.key == pg.K_c:\n grid.clear()\n elif event.key == pg.K_SPACE:\n\n grid.clear(walls=False)\n grid.set(solve(win, grid))\n grid.display(win)\n pg.event.clear()\n\n elif event.key == pg.K_s:\n start_node, target_node = grid.switch()\n\n elif event.key == pg.K_ESCAPE:\n pg.quit()\n return\n\n elif event.type == pg.QUIT:\n pg.quit()\n return\n\n grid.display(win)\n pg.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DeathEyeXD/a-star-visualizer","sub_path":"a-star-visualizer.py","file_name":"a-star-visualizer.py","file_ext":"py","file_size_in_byte":14142,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"22281160028","text":"class Solution(object):\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None: return head\n front = head\n mid = None\n last = None\n while front.next!= None:\n mid = front\n front = front.next\n mid.next = last\n last = mid\n front.next = mid\n return front\n def reverseList(self, head):\n curt = None\n while head !=None:\n temp = head.next\n head.next = curt\n curt = head\n head = temp\n return curt\nclass Solution(object):\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None: return head\n last = head.next\n result = self.reverseList(last)\n head.next = None\n last.next = head\n return result\nclass Solution:\n # @param {ListNode} head\n # @return {ListNode}\n def reverseList(self, head):\n return self._reverse(head)\n \n def _reverse(self, node, prev = None):\n if not node:\n return prev\n \n n = node.next\n node.next = prev\n prev = node\n return self._reverse(n, node)\nclass Solution(object):\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None or head.next == None: return head\n last = head.next\n result = self.reverseList(last)\n head.next = None\n last.next = head\n return result","repo_name":"YanpuLi/LeetCode-Practice-and-Algo","sub_path":"Easy/206. Reverse Linked List.py","file_name":"206. Reverse Linked List.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20019114823","text":"\"\"\"\nEntry point for all operations\n\"\"\"\n\nimport asyncio\n\nfrom .analyzer import filter_daily_bars, filter_data, find_levels\nfrom .downloader import download_daily_data, download_intraday_data\nfrom .parse_arguments import ArgumentParser\nfrom .parser import parse_csv\n\n\nasync def return_empty():\n return list()\n\n\ndef load_data(arguments: ArgumentParser):\n \"\"\"\n\n :param arguments: program arguments\n :return: coroutines tuple that will return two lists of OHLC data, first element used in filtering\n \"\"\"\n if arguments.csv_mode:\n if arguments.daily is not None:\n daily = parse_csv(arguments.arguments.daily, positions=arguments.ohlc_positions)\n else:\n daily = return_empty()\n return daily, parse_csv(arguments.arguments.intraday, positions=arguments.ohlc_positions)\n\n if arguments.fetch_mode:\n return download_daily_data(arguments.ticker), download_intraday_data(arguments.ticker)\n\n return return_empty(), return_empty()\n\n\nasync def run_project():\n arguments = ArgumentParser()\n\n daily, intraday = load_data(arguments)\n\n daily, intraday = await asyncio.gather(daily, intraday)\n\n daily = filter_daily_bars(daily)\n data = filter_data(daily, intraday, arguments.price_fuzz)\n\n levels = find_levels(data, arguments.threshold, arguments.price_sorted)\n\n print('We found following levels:')\n\n if not levels:\n print('No any....')\n return\n\n for l in levels:\n print('Level price: {0}\\tcount {1}'.format(l[0], l[1]))\n","repo_name":"Ansud/supres_founder","sub_path":"source/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"70691425977","text":"schedule = list()\ncount = 0\nN = int(input())\nfor i in range(N):\n t = list(map(int,input().split(\" \")))#input()\n schedule.append([t[0]*60+t[1],t[2]*60+t[3]])\nschedule.sort(key=lambda x:x[0])\n# print(schedule)\nfor i in range(N):\n for j in schedule[:i]:\n if schedule[i][0] in range(j[0],j[1]):\n count += 1\n break\nprint(count)\n# 6\n# 09 00 09 45\n# 09 30 10 30\n# 10 40 12 00\n# 11 00 13 00\n# 11 45 14 00\n# 16 00 17 00","repo_name":"sinhaapurva25/python","sub_path":"HackerEarth-AmazonAssessments/meeting_rooms.py","file_name":"meeting_rooms.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"552507575","text":"from rest_framework.decorators import api_view, permission_classes\nfrom django.http import HttpResponse, JsonResponse, Http404\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import *\nfrom .serializers import *\nfrom rest_framework_simplejwt.tokens import RefreshToken, AccessToken\nfrom django.contrib.auth import authenticate,login\nfrom django.contrib.auth.models import User\nimport json\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.db.models import Q\n\n\n\ndef all_products_view(request):\n category_param = request.GET.get('category')\n search_param = request.GET.get('search')\n\n queryset = Product.objects.all()\n\n if category_param:\n queryset = queryset.filter(category__name__iexact=category_param)\n\n if search_param:\n queryset = queryset.filter(Q(name__icontains=search_param))\n\n serializer = ProductSerializer(queryset, many=True)\n data = serializer.data\n return JsonResponse(data, safe=False)\n \n\ndef all_categories_view(request):\n categories = Category.objects.all()\n serializer = CategorySerializer(categories, many=True)\n data = serializer.data\n return JsonResponse(data, safe=False)\n \n\n\n@api_view(['POST'])\ndef signup(request):\n try:\n data = json.loads(request.body.decode('utf-8'))\n username = data.get('username')\n password = data.get('password')\n except json.JSONDecodeError:\n return JsonResponse({'detail': 'Invalid JSON data'}, status=status.HTTP_400_BAD_REQUEST)\n\n if not username or not password:\n return JsonResponse({'detail': 'Both username and password are required'}, status=status.HTTP_400_BAD_REQUEST)\n\n if User.objects.filter(username=username).exists():\n return JsonResponse({'detail': 'Username already exists'}, status=status.HTTP_400_BAD_REQUEST)\n\n user = User(username=username)\n user.set_password(password)\n user.save()\n\n # Create a cart for the user\n cart = Cart(user=user)\n cart.save()\n\n # Log the user in\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n\n # Generate both access and refresh tokens\n refresh = RefreshToken.for_user(user)\n access_token = AccessToken.for_user(user)\n\n # Create a response with user, access token, refresh token, and cart details\n response_data = {\n 'access_token': str(access_token),\n 'refresh_token': str(refresh),\n 'user_id': user.id,\n 'username': user.username,\n 'cart_id': cart.id,\n 'cart_user_id': cart.user.id,\n }\n\n return JsonResponse(response_data, status=status.HTTP_201_CREATED)\n\n return JsonResponse({'detail': 'User creation failed'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(['POST'])\ndef signin(request):\n try:\n data = json.loads(request.body.decode('utf-8'))\n username = data.get('username')\n password = data.get('password')\n except json.JSONDecodeError:\n return JsonResponse({'detail': 'Invalid JSON data'}, status=status.HTTP_400_BAD_REQUEST)\n\n user = authenticate(username=username, password=password)\n if user is not None:\n # Log the user in\n login(request, user)\n\n # Generate both access and refresh tokens\n refresh = RefreshToken.for_user(user)\n access_token = AccessToken.for_user(user)\n\n # Retrieve the user's cart\n try:\n cart = Cart.objects.get(user=user)\n cart_id = cart.id\n except Cart.DoesNotExist:\n cart_id = None\n\n # Create a response with user, access token, refresh token, and cart details\n response_data = {\n 'access_token': str(access_token),\n 'refresh_token': str(refresh),\n 'user_id': user.id,\n 'username': user.username,\n 'cart_id': cart_id,\n }\n\n return JsonResponse(response_data)\n\n return JsonResponse({'detail': 'Invalid credentials'}, status=status.HTTP_401_UNAUTHORIZED)\n\n\n\n@api_view(['POST'])\ndef logout(request):\n try:\n refresh_token = request.data.get('refresh_token')\n user = request.user\n except Exception as e:\n return JsonResponse({'detail': 'Invalid request data'}, status=status.HTTP_400_BAD_REQUEST)\n\n if refresh_token:\n print('******')\n try:\n token = RefreshToken(refresh_token)\n token.blacklist()\n except Exception as e:\n return JsonResponse({'detail': 'Invalid refresh token'}, status=status.HTTP_400_BAD_REQUEST)\n\n return JsonResponse({'detail': 'Successfully logged out'}, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef cart_items_view(request, cart_id):\n try:\n cart = Cart.objects.get(id=cart_id)\n except Cart.DoesNotExist:\n return Response({'detail': 'Cart not found'}, status=status.HTTP_404_NOT_FOUND)\n\n cart_items = CartItem.objects.filter(cart=cart)\n serialized_cart_items = []\n\n for cart_item in cart_items:\n # Calculate the total price for each item\n total_price = cart_item.product.price * cart_item.quantity\n\n # Create a dictionary with the item's data\n item_data = {\n 'product_id': cart_item.product.id,\n 'product_name': cart_item.product.name,\n 'quantity': cart_item.quantity,\n 'total_price': total_price,\n }\n\n serialized_cart_items.append(item_data)\n\n return Response(serialized_cart_items, status=status.HTTP_200_OK)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef update_cart_item(request, cart_id, product_id):\n try:\n quantity = int(request.data.get('quantity'))\n except (ValueError, TypeError):\n return Response({'detail': 'Invalid quantity'}, status=status.HTTP_400_BAD_REQUEST)\n\n cart = Cart.objects.get(id=cart_id)\n product = Product.objects.get(id=product_id)\n\n try:\n cart_item = CartItem.objects.get(cart=cart, product=product)\n if quantity > 0:\n cart_item.quantity += quantity\n cart_item.save()\n else:\n cart_item.delete()\n except CartItem.DoesNotExist:\n if quantity > 0:\n cart_item = CartItem(cart=cart, product=product, quantity=quantity)\n cart_item.save()\n\n return Response({'detail': 'Cart item updated'}, status=status.HTTP_200_OK)\n\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef clear_cart(request, cart_id):\n try:\n cart = Cart.objects.get(id=cart_id)\n except Cart.DoesNotExist:\n return Response({'detail': 'Cart not found'}, status=status.HTTP_404_NOT_FOUND)\n\n cart_items = CartItem.objects.filter(cart=cart)\n\n for cart_item in cart_items:\n product = cart_item.product\n product.stock -= cart_item.quantity\n product.save()\n cart_item.delete()\n\n return Response({'detail': 'Purchase completed'}, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef refresh_token_view(request):\n try:\n refresh_token = request.data.get('refresh_token')\n\n except Exception as e:\n return JsonResponse({'detail': 'Invalid request data'}, status=status.HTTP_400_BAD_REQUEST)\n\n if refresh_token:\n try:\n token = RefreshToken(refresh_token)\n access_token = str(token.access_token)\n except Exception as e:\n return JsonResponse({'detail': 'Invalid refresh token'}, status=status.HTTP_400_BAD_REQUEST)\n return JsonResponse({'access_token': access_token}, status=status.HTTP_200_OK)\n return JsonResponse({'detail': 'Refresh token is required'}, status=status.HTTP_400_BAD_REQUEST)","repo_name":"tzuk999/project_3_backend","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16711751753","text":"#done with service-request-history\r\n\r\nimport mysql.connector\r\nfrom sqlalchemy import create_engine, MetaData, Table, Column, Date, BigInteger, SMALLINT, String, Float, Integer\r\nfrom sqlalchemy.sql import exists\r\nimport json\r\n\r\nimport numpy as np\r\nimport pandas as panda\r\nfrom datetime import date\r\nfrom datetime import datetime, timedelta\r\nfrom sklearn.externals import joblib\r\n#from joblib import Parallel, delayed\r\nimport argparse\r\nimport urllib2\r\nimport os\r\nimport shutil\r\n\r\n\r\n#changed mldb_creds.json with \r\ndef getSQlEngine():\r\n with open(\"mldb_creds.json\") as f:\r\n dbCreds = json.load(f)\r\n\r\n PASSWORD = dbCreds['password']\r\n DB_NAME = dbCreds['db_name_ml']\r\n HOST = dbCreds['host']\r\n USER = dbCreds['user']\r\n engine = create_engine(\"mysql+mysqldb://\" + USER + \":\"+PASSWORD+\"@\" + HOST + \"/\"+DB_NAME)\r\n meta = MetaData(bind=engine)\r\n return engine, meta\r\n \r\n#directory where ekryp-data-db-prod tables are stored\r\nDATADIR = 'C:/Users/pooja/OneDrive/Desktop/eKryp' #change to the necessary directory \r\n\r\n#alter this to match the table structure\r\ndef getTableSchema(tableName, meta):\r\n if tableName == 'Asset_information': #need to change it to ml-refernence tables\r\n return Table(tableName, meta,\r\n Column('customer_asset_identifier',Integer, nullable=False),\r\n Column('install_date',String(10), nullable=False),\r\n Column('model_name', String(45), nullable=False),\r\n Column('model_group_id', Integer, nullable=False),\r\n Column('category_id', Integer, nullable=False),\r\n Column('type_id', Integer, nullable=False),\r\n Column('capacity', Integer, nullable=False),\r\n Column('attribute reference', String(45), nullable=False),\r\n Column('ekryp_customer_id',Integer, nullable=False),\r\n Column('Asset_serial_number',String(45), nullable=False),\r\n Column('location_name', String(45), nullable=False),\r\n Column('end_customer',String(45), nullable=False),\r\n Column('service_provider', String(45), nullable=False),\r\n )\r\ndef pushToSQL(tableSchema, df, tableName, engine, meta):\r\n print('Pushing population parameters to MY SQL')\r\n ## TABLE TO PUSH###\r\n table_pop_parameters = tableSchema\r\n meta.create_all(engine)\r\n df.to_sql(tableName, engine, if_exists= 'append', index=False)\r\n#load table info from ekryp_data_db_prod!!! i've already done that, and i have it on my GCP directory\r\ndummy=panda.read_csv(os.path.join(DATADIR,'Asset_information.csv'))\r\n\r\n#alter this to fit the info in df to match the columns in table\r\ndf = panda.DataFrame({\r\n 'customer_asset_identifier': dummy['customer_asset_identifier'],\r\n 'install_date': dummy['install_date'],\r\n 'model_name':dummy['model_name'],\r\n 'model_group_id': dummy['model_group_id'],\r\n 'category_id':dummy['category_id'],\r\n 'type_id':dummy['type_id'],\r\n 'capacity':dummy['capacity'],\r\n 'attribute_reference':dummy['attribute_reference'],\r\n 'ekryp_customer_id':dummy['ekryp_customer_id'],\r\n 'Asset_serial_number':dummy['Asset_serial_number'],\r\n 'location_name':dummy['location_name'],\r\n 'end_customer' :dummy['end_customer'],\r\n 'service_provider': dummy['service_provider']\r\n})\r\n\r\n#table you want to populate\r\ntablename='Asset_information'\r\n\r\nsqlEngine, sqlMeta = getSQlEngine()\r\ntableSchema= getTableSchema(tablename,sqlMeta)\r\npushToSQL(tableSchema,df,'Asset_information',sqlEngine,sqlMeta)\r\n\r\nprint('All done')","repo_name":"HemaMurthy/testModel","sub_path":"importcsvtosql.py","file_name":"importcsvtosql.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31772295095","text":"import csv\n\n\nclass Check:\n\n def __init__(self, mail, url, price):\n self.url = url\n self.price = price\n self.mail = mail\n # persit into database but for now in csv file\n with open('check.csv', 'a') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n filewriter.writerow([self.mail, self.url, self.price])\n","repo_name":"P0NE/amazon_scrapper","sub_path":"script/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31006019091","text":"import os\n\nfrom fastapi import FastAPI, Form\nfrom starlette.responses import PlainTextResponse\n\nimport python.live_audio as basik\n\nimport json\n\napp = FastAPI()\n\n# These variables MUST be part of the app otherwise other workers will\n# not be able to access them. Most variables will initially be set by a source outside basik,\n# then set again by basik, once basik is running.\n\n# I/O\napp.selected_input = -1\napp.selected_output = -1\napp.remove_pos = -1\n\n# Effects\napp.current_board = []\n# Start Variables For Adding an Effect (VFANE)\napp.desired_position_in_board = None\napp.parameters = None\n# End VFANE\n\n# Fake CLI, it will be None after accessed once (via API)\napp.cli_input = None\n\n\n# START !only intended to be used by basik backend! (denoted as comment: !*!)\n@app.get(\"/effect\")\nasync def get_effect():\n global app\n if app.desired_position_in_board is None:\n return None\n temp_params_with_pos = app.parameters.split(\",\")\n temp_params_with_pos.append(\"POSITION:\" + str(app.desired_position_in_board))\n app.desired_position_in_board = None\n app.parameters = None\n return temp_params_with_pos\n\n\n@app.patch(\"/effect\")\nasync def adjust_effect(desired_position_in_board: int = Form(...),\n current_position_in_board: int = Form(...), parameters: str = Form(...)):\n global app\n app.desired_position_in_board = str(current_position_in_board) + \"/\" + str(desired_position_in_board)\n app.parameters = parameters\n # essentially tell the interface we want to adjust an effect\n app.cli_input = \"a\"\n return \"Effect adjustment queued...\"\n\n\n@app.put(\"/effect\")\nasync def add_effect(desired_position_in_board: int = Form(...),\n effect_number: int = Form(...), parameters: str = Form(...)):\n global app\n app.desired_position_in_board = desired_position_in_board\n app.parameters = parameters\n # essentially tell the interface we want to add an effect\n app.cli_input = effect_number\n return \"Effect addition queued...\"\n\n\n@app.delete(\"/effect\")\nasync def remove_effect(effect_position_in_board: int = Form(...)):\n global app\n app.cli_input = \"r\"\n app.remove_pos = effect_position_in_board\n return \"Effect removal queued...\"\n\n\n@app.delete(\"/effect/{position}\")\nasync def remove_effect(position: int):\n global app\n app.cli_input = \"r\"\n app.remove_pos = position\n return \"Effect removal queued...\"\n\n\n@app.get(\"/remove-pos\", response_class=PlainTextResponse)\nasync def get_cli():\n global app\n # since we are faking cli input, this should only be accessible once\n temp_pos = app.remove_pos\n app.remove_pos = -1\n return json.dumps(temp_pos)\n\n\n@app.get(\"/cli\", response_class=PlainTextResponse)\nasync def get_cli():\n global app\n # since we are faking cli input, this should only be accessible once\n temp_cli = app.cli_input\n app.cli_input = None\n return json.dumps(temp_cli)\n\n\n# LAST !*!\n# LAST !*!\n# LAST !*!\n# LAST !*!\n@app.put(\"/cli\")\nasync def set_cli(command: str = Form(...)):\n global app\n print(command)\n app.cli_input = command\n return \"Set input to \\\"\" + app.cli_input + \"\\\". This will be nullified after a single get request.\"\n\n\n# get the current board\n@app.get(\"/pedalboard\")\nasync def get_current_board():\n global app\n return json.dumps(app.current_board)\n\n\n# Rather than adding or removing from this local board variable,\n# I think it is better to just replace with the current running board from basik.\n# Otherwise, there could be some sort of mismatch.\n@app.put(\"/pedalboard\")\nasync def replace_current_board(new_board: str):\n global app\n app.current_board = new_board\n\n\n@app.get(\"/outputs\")\nasync def get_outputs():\n return json.dumps(basik.get_output_devices())\n\n\n@app.get(\"/inputs\")\nasync def get_inputs():\n return json.dumps(basik.get_input_devices())\n\n\n@app.get(\"/input\")\nasync def get_input():\n global app\n return json.dumps(app.selected_input)\n\n\n@app.put(\"/input\")\nasync def set_input(input_number: int = Form(...)):\n global app\n app.selected_input = input_number\n\n\n@app.delete(\"/input\")\nasync def set_input():\n global app\n app.selected_input = -1\n\n\n@app.get(\"/output\")\nasync def get_input():\n global app\n return json.dumps(app.selected_output)\n\n\n@app.put(\"/output\")\nasync def set_input(output_number: int = Form(...)):\n global app\n app.selected_output = output_number\n\n\n@app.delete(\"/output\")\nasync def set_input():\n global app\n app.selected_output = -1\n\n\n@app.delete(\"/stop-api\")\nasync def start_basik():\n os.system('kill %d' % os.getpid())\n","repo_name":"kyleyannelli/BasikGUI","sub_path":"python/python/local_api.py","file_name":"local_api.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"28881758287","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom .services import ScraperCrawlerService\nfrom fastapi.responses import FileResponse\n\ndescription = \"\"\"\n\n### Stock Crawler and Scraper\n\nWill generate an CSV file with stocks data from given country.\n\"\"\"\n\napp = FastAPI(title='Stock Crawler and Scraper', description=description, version='1.0.0', docs_url='/docs')\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n@app.on_event(\"startup\")\nasync def startup():\n global robot_svc\n robot_svc = ScraperCrawlerService()\n\n@app.get(\"/generate_stock_csv/region\",\n tags=['scrapping'],\n description=\"\"\"Collect data about stocks of given country (region) and return in a CSV file.\n The parameter region must be written in english and capital letters.\n Depending on the country the list can be long and it may take a while.\"\"\",\n responses={\n 200: {\n \"description\": \"Returns brand new CSV file with collected stocks data.\",\n }\n })\nasync def get_stock_csv(region: str):\n global robot_svc\n csv_path = robot_svc.generate_stocks_data(region)\n return FileResponse(csv_path)\n","repo_name":"ottoalves22/finance-yahoo-crawler","sub_path":"fast_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1082268021","text":"from datetime import date\natual = date.today().year\nnasc = int(input('Em qual você nasceu: '))\nprint('Qual é o seu sexo:\\n[1] Masculino\\n[2] Feminino ')\nsex = int(input('Escolha entra a opção 1 ou 2: '))\nidade = atual - nasc\nif sex == 1:\n print('Você podera se alistar')\nelse:\n print('Mulheres não podem se alistar')\n exit()\nprint('Quem nasceu em {} tem {} anos em {}'.format(nasc, idade, atual))\nif idade == 18:\n print('Você tem que se alistar imediatamente!')\nelif idade < 18:\n saldo = 18 - idade\n print('ainda faltam {} anos para o alistamento'.format(saldo))\n ano = atual + saldo\n print('Seu alistamento será em {}'.format(ano))\nelif idade > 18:#poderia encerrar com else por não tem mais opçoes\n saldo = idade - 18\n print('Você já deveria ter se alistado há {} anos'.format(saldo))\n ano = atual - saldo\n print('Seu alistamento foi em {}'.format(ano))\n\n","repo_name":"LucasdeSDuarte/Projetos-em-python","sub_path":"PROJETOS EM PYTHON/ALISTAMENTO MILITAR.py","file_name":"ALISTAMENTO MILITAR.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43211493201","text":"#!/usr/bin/python\n\nimport os, sys, time, signal, binascii, termcolor, json, subprocess\nfrom time import strftime as date\n\n#sys.path.append( './lib/' )\n\nfrom config import config\n\n# Here we import the two types of drivers. The event driven driver, and the ticking driver.\nimport event_driver as eventDriver # For responding to signals\nimport tick_driver as tickDriver # For constantly sending a type of signal at a certain interval\n\nfrom itunes_sync import itunesSync \nfrom module_audio import mpdClient\nfrom web_server import webServer\n\nfrom interface import *\nfrom logger import PLog\n\n\nlog = PLog(__name__)\n\n#####################################\n# GLOBALS\n#####################################\nDEVPATH = config.get(\"ibus\",\"interface_path\")\nLOGFILE = config.get(\"general\",\"log_file\")\nIBUS = None\nREGISTERED = False # This is a temporary measure until state driven behaviour is implemented\n\n#####################################\n# FUNCTIONS\n\n# Initializes modules as required and opens files for writing\ndef initialize():\n global IBUS, REGISTERED, DEVPATH\n REGISTERED=False\n \n #mpd = Audio.MpdClient()\n webServer.start()\n \n #mpd.client.listallinfo()\n #print mpdClient.commands()\n #Audio.init()\n #print Audio.client().lsinfo()\n #print Audio.client().commands()\n #print Audio.client().listplaylists()\n mpdClient.init()\n \n \n #print \"wtf\"\n #sync = ItunesSync()\n #sync.start()\n \n #sys.exit(0)\n \n # Initialize the iBus interface or wait for it to become available.\n while IBUS == None:\n \n #print mpdClient.currentsong()\n \n if os.path.exists(DEVPATH):\n IBUS = ibusFace(DEVPATH)\n else:\n log.warning(\"USB interface not found at (%s). Waiting 1 seconds.\", DEVPATH)\n time.sleep(2)\n \n IBUS.waitClearBus() # Wait for the iBus to clear, then send some initialization signals\n \n eventDriver.init(IBUS)\n tickDriver.init(IBUS)\n \n# close the USB device and whatever else is required\ndef shutdown():\n global IBUS\n \n log.info(\"Shutting down event driver\")\n eventDriver.shutDown()\n \n log.info(\"Shutting down tick driver\")\n tickDriver.shutDown()\n \n log.info(\"Shutting down mpd client\")\n mpdClient.shutDown()\n \n log.info(\"Shutting down web server\")\n webServer.shutDown()\n \n if IBUS:\n log.info(\"Killing iBUS instance\")\n IBUS.close()\n IBUS = None\n\ndef run():\n eventDriver.listen()\n","repo_name":"arsac/pimmer","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"40578932851","text":"import Common, helper_events, keyboard, datetime\nfrom Common import ImagePath\nfrom threading import Thread\nfrom time import sleep\nfrom os import remove, getenv, listdir, mkdir\nfrom mouse import MoveEvent, WheelEvent\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import ObjectProperty, StringProperty, BooleanProperty\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.popup import Popup\nfrom speech_recognition import Microphone, Recognizer\nfrom kivy.factory import Factory\nfrom auto_tasks import runMouseMouseKeyboardEvents\nfrom kivy.config import Config\nfrom record_task import open_new_desktop_and_come_back_to_original_and_record, just_record, close_desktop_record\nfrom helper_events import back_to_events\nfrom select_task import search_string_in_file\nfrom kivymd.app import MDApp\nfrom kivymd.uix.picker import MDTimePicker\nfrom kivymd.uix.picker import MDDatePicker\nfrom kivymd.uix.list import OneLineIconListItem, IconLeftWidget\nfrom kivy.uix.settings import SettingsPanel\nfrom webbrowser import open as openweb\nfrom Common import ReadOpenWindow, ReadCloseWindow, SetOpenWindow, SetCloseWindow, ctrlWinD, ctrlWinArrow, getEmail\nfrom ctypes import *\nfrom encrypt_files import decfile, encfile\n\n\nfrom kivy.core.window import Window\nfrom kivy.uix.button import Button\n\n\nWindow.size = (1080, 720)\napp_data_path = getenv('APPDATA')\npath = f\"{app_data_path}/Auto Tasks\"\nConfig.set('graphics', 'resizable', True)\nConfig.set('input', 'mouse', 'mouse, multitouch_on_demand')\nConfig.set('kivy', 'exit_on_escape', '0')\nConfig.write()\nCommon.writeInFile(rf\"{path}\\checkbox_for_new_desktop.txt\", \"True\")\n\nMain = \"\"\"\n\n MDLabel:\n id: mainText\n text: \"Welcome to Auto Tasks!\\\\nWe highly recommending you to\\\\nwatch an overview tutorial to\\\\nunderstand better our software\"\n font_size: 26\n halign: 'left'\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.65}\n\n MDRaisedButton:\n text: app.WatchText\n font_size: 16\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.2}\n md_bg_color: 255/255, 165/255, 0, 1\n\n MDRaisedButton:\n text: \"Watch now\"\n font_size: 16\n pos_hint: {\"center_x\": 0.65, \"center_y\": 0.2}\n on_press: app.open_tutorial()\n md_bg_color: 255/255, 165/255, 0, 1\n\n Image:\n source: app.logoPng\n size_hint_y: None # Tells the layout to ignore the size_hint in y dir\n height: dp(90) # The fixed height you want\n pos_hint: {\"center_x\": 0.85, \"center_y\": 0.7}\n\n\n BoxLayout:\n orientation: 'vertical'\n pos_hint: {\"center_x\": 0.53, \"center_y\": 0.3}\n size_hint_x: None\n size_hint_y: 1.18\n width: 350\n disabled: app.scheduleDisable\n ScrollView:\n size_hint_y: None\n BoxLayout:\n id: YourTaskName\n orientation: 'horizontal'\n size_hint_x: None\n width: self.minimum_width\n AutoCompleter:\n size_hint_y: None\n container: YourTaskName\n size_hint_y: 0.2\n size_hint_x: 0.9\n color_mode: 'accent'\n mode: \"rectangle\"\n font_size: 24\n disabled: app.scheduleDisable\n Widget:\n\n Label:\n id: a\n text: app.ScheduledLabel\n size_hint_x: None\n font_size: 20\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.86}\n opacity: 1\n\n MDRectangleFlatButton: #select task button\n text: \"Scheduled your task\"\n font_size: 16\n pos_hint: {\"center_x\": 0.65, \"center_y\": 0.15}\n opacity: app.opacityDisable\n disabled: app.scheduleDisable\n on_press:\n app.show_date_picker_via_main_screen()\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.3, \"center_y\": 0.15}\n\n\n\n Label:\n text: \"Deleted successfully\"\n font_size: 22\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.82}\n\n Label:\n text: \"the changes will appear once\\\\nyou restart the software\"\n font_size: 14\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.52}\n\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 12\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.2}\n\n\n Label:\n id: scLabel\n text: app.scLabel\n font_size: 22\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.65}\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 12\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.2}\n\n\n MDTextField:\n mode: \"rectangle\"\n id: www\n hint_text: \"Enter a word\"\n size_hint_x: None\n width: 330\n font_size: 22\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.85}\n disabled: app.disabledShortcut\n\n MDRectangleFlatButton:\n text: \"Press here and record key\"\n size_hint_x: None\n width: 50\n font_size: 20\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.57}\n on_press: app.recordShortCut_thred()\n disabled: app.disabledShortcut\n\n Label:\n id: scLabel\n text: app.keysLive\n size_hint_x: None\n font_size: 21\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.43}\n disabled: app.disabledShortcut\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.15}\n\n MDRectangleFlatButton: #save button\n text: \"Save\"\n font_size: 16\n pos_hint: {\"center_x\": 0.65, \"center_y\": 0.15}\n on_press: app.word_short_cuts(www.text)\n disabled: app.disabledShortcut\n\n\n\n BoxLayout:\n orientation: 'vertical'\n pos_hint: {\"center_x\": 0.52, \"center_y\": 0.53}\n size_hint_x: None\n size_hint_y: 1.18\n width: 350\n disabled: app.disabledShortcut\n ScrollView:\n size_hint_y: None\n BoxLayout:\n id: YourTaskName\n orientation: 'horizontal'\n size_hint_x: None\n width: self.minimum_width\n\n AutoCompleter:\n size_hint_y: None\n container: YourTaskName\n size_hint_y: 0.2\n size_hint_x: 0.9\n color_mode: 'accent'\n mode: \"rectangle\"\n font_size: 24\n Widget:\n\n MDRectangleFlatButton:\n text: \"Press here and record key\"\n size_hint_x: None\n width: 50\n font_size: 20\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.54}\n on_press: app.recordShortCut_thred()\n disabled: app.disabledShortcut\n\n Label:\n id: scLabel\n text: app.keysLive\n size_hint_x: None\n font_size: 21\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.38}\n disabled: app.disabledShortcut\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.15}\n\n MDRectangleFlatButton: #save button\n text: \"Save\"\n font_size: 16\n pos_hint: {\"center_x\": 0.65, \"center_y\": 0.15}\n on_press: app.SaveShortcut()\n disabled: app.disabledShortcut\n\n\n Label:\n id: CloseWindowLabel\n text: app.CloseWindowLabel1\n font_size: 21\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.75}\n\n MDRectangleFlatButton: #youtube button\n text: \"Watch a tutorial\"\n font_size: 16\n on_press: app.open_url()\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.23}\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.23}\n\n MDRectangleFlatButton: #start recording\n text: \"Start recording\"\n font_size: 16\n pos_hint: {\"center_x\": 0.48, \"center_y\": 0.23}\n on_press: app.ChangeLabel123()\n on_release: app.record_event_of_closing_new_desktop()\n\n\n MDRectangleFlatButton:\n text: \"Press here and record key\"\n size_hint_x: None\n width: 50\n font_size: 20\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.8}\n on_press: app.recordShortCut_thred()\n disabled: app.disabledShortcut\n\n Label:\n id: scLabel\n text: app.keysLive\n size_hint_x: None\n font_size: 21\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.58}\n disabled: app.disabledShortcut\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.2}\n\n MDRectangleFlatButton: #save button\n text: \"Save\"\n font_size: 16\n pos_hint: {\"center_x\": 0.65, \"center_y\": 0.2}\n on_press: app.WriteShortcut()\n disabled: app.disabledShortcut\n\n\n MDTextField:\n id: loop\n text: \"1\"\n size_hint_x: None\n width: 48\n font_size: 18\n pos_hint: {\"center_x\": 0.15, \"center_y\": 0.63}\n MDLabel:\n text: \"Number of\\\\n iterations\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.17, \"center_y\": 0.53}\n \n MDTextField:\n id: SpeedFactor\n text: \"1\"\n size_hint_x: None\n width: 48\n font_size: 18\n pos_hint: {\"center_x\": 0.15, \"center_y\": 0.4}\n MDLabel:\n text: \"Speed Factor\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.17, \"center_y\": 0.32}\n\n\n\n CheckBox:\n pos_hint: {\"center_x\": 0.85, \"center_y\": 0.55}\n active: True\n on_active: app.choice_open_new_desktop(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n\n MDLabel:\n text: \"Open new window\"\n size_hint_x: None\n font_size: 14\n pos_hint: {\"center_x\": 0.89, \"center_y\": 0.45}\n\n CheckBox:\n pos_hint: {\"center_x\": 0.85, \"center_y\": 0.25}\n active: False\n on_active: app.check_box(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n opacity: app.opacityCheckbox\n disabled: app.disabledCheckbox\n\n MDLabel:\n text: \"Close window\\\\nafter running\"\n size_hint_x: None\n font_size: 14\n pos_hint: {\"center_x\": 0.86, \"center_y\": 0.14}\n opacity: app.opacityCheckbox\n\n BoxLayout:\n orientation: 'vertical'\n pos_hint: {\"center_x\": 0.52, \"center_y\": 0.53}\n size_hint_x: None\n size_hint_y: 1.3\n width: 350\n ScrollView:\n size_hint_y: None\n BoxLayout:\n id: YourTaskName\n orientation: 'horizontal'\n size_hint_x: None\n width: self.minimum_width\n AutoCompleter:\n size_hint_y: None\n container: YourTaskName\n size_hint_y: 0.2\n size_hint_x: 0.9\n color_mode: 'accent'\n mode: \"rectangle\"\n font_size: 24\n Widget:\n\n Label:\n id: a\n text: app.labelSelect\n size_hint_x: None\n font_size: 20\n pos_hint: {\"center_x\": 0.48, \"center_y\": 0.59}\n opacity: 1\n MDRectangleFlatButton: #select task button\n text: \"Select task\"\n font_size: 16\n pos_hint: {\"center_x\": 0.25, \"center_y\": 0.15}\n\n on_release: \n app.selectTasksKivy(loop.text, SpeedFactor.text)\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.15}\n\n\n CheckBox:\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.3}\n active: False\n on_active: app.check_box(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n opacity: app.opacityCheckbox\n disabled: app.disabledCheckbox\n\n CheckBox:\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.6}\n active: True\n on_active: app.choice_open_new_desktop(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n\n MDLabel:\n text: \"Close window\\\\nafter recording\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.185}\n opacity: app.opacityCheckbox\n\n MDLabel:\n text: \"Open new window\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.83, \"center_y\": 0.495}\n\n MDTextField:\n mode: \"rectangle\"\n id: TaskName\n hint_text: \"Your task name\"\n size_hint_x: None\n width: 235\n font_size: 22\n pos_hint: {\"center_x\": 0.4, \"center_y\": 0.85}\n\n Label: #second label\n id: p\n text: app.output\n size_hint_x: None\n font_size: 20\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.55}\n\n MDRectangleFlatButton: #start recording button\n text: app.b3\n font_size: 16\n pos_hint: {\"center_x\": 0.25, \"center_y\": 0.2}\n on_press:\n app.changeLabel2(TaskName.text)\n on_release:\n app.start_and_save(TaskName.text)\n disabled: app.disable\n opacity: app.opacity\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.2}\n\n\n\n CheckBox:\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.3}\n active: False\n on_active: app.check_box(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n opacity: app.opacityCheckbox\n disabled: app.disabledCheckbox\n\n CheckBox:\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.6}\n active: True\n on_active: app.choice_open_new_desktop(self.active)\n background_checkbox_normal: app.checkBoxImage\n background_checkbox_down: app.checkBoxImage2\n size_hint_x: None\n size_hint_y: None\n size: sp(60), sp(60)\n\n MDLabel:\n text: \"Close window\\\\nafter recording\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.185}\n opacity: app.opacityCheckbox\n MDLabel:\n text: \"Open new window\"\n size_hint_x: None\n font_size: 15\n pos_hint: {\"center_x\": 0.83, \"center_y\": 0.495}\n MDTextField:\n mode: \"rectangle\"\n id: TaskName\n hint_text: \"Your task name\"\n size_hint_x: None\n width: 235\n font_size: 22\n pos_hint: {\"center_x\": 0.4, \"center_y\": 0.85}\n Label: #main label\n id: recorder\n text: app.text\n size_hint_x: None\n font_size: 20\n pos_hint: {\"center_x\": 0.4, \"center_y\": 0.6}\n opacity: app.opac\n Label: #second label\n id: p\n text: app.output\n size_hint_x: None\n font_size: 20\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.55}\n opacity: app.opac2\n Image:\n source: app.micImage\n size_hint_y: None # Tells the layout to ignore the size_hint in y dir\n height: dp(50) # The fixed height you want\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.85}\n\n MDRectangleFlatButton: #audio button\n text: \"Start recording\"\n font_size: 16\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.85}\n disabled: app.disabLable\n opacity: 0\n on_press:\n app.change()\n on_release:\n app.startRecord(TaskName.text)\n\n MDRectangleFlatButton: #i cant record button\n text: app.b2\n font_size: 16\n pos_hint: {\"center_x\": 0.25, \"center_y\": 0.2}\n on_press:\n app.okay(TaskName.text, recorder.text)\n disabled: app.disabLable\n opacity: app.opac4\n\n MDRectangleFlatButton: #start recording button\n text: app.b3\n font_size: 16\n pos_hint: {\"center_x\": 0.25, \"center_y\": 0.2}\n on_press:\n app.changeLabel()\n on_release:\n app.start_and_save(TaskName.text)\n disabled: app.disable\n opacity: app.opac3\n\n MDRectangleFlatButton: #quit button\n text: \"Close\"\n font_size: 16\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.2}\n\n\n\n:\n ScrollView:\n MDList:\n TwoLineIconListItem:\n text: \"Home\"\n secondary_text: \"Your main screen\"\n on_press:\n root.nav_drawer.set_state(\"close\")\n root.screen_manager.current = \"Home\"\n IconLeftWidget:\n icon: \"home\"\n\n TwoLineIconListItem:\n text: \"Settings\"\n secondary_text: \"Your settings screen\"\n on_press:\n root.nav_drawer.set_state(\"close\")\n root.screen_manager.current = \"Settings\"\n IconLeftWidget:\n icon: \"wrench\"\n\n TwoLineIconListItem:\n text: \"Task manager\"\n secondary_text: \"Task editor and modifier\"\n on_press:\n root.nav_drawer.set_state(\"close\")\n root.screen_manager.current = \"Task manager\"\n IconLeftWidget:\n icon: \"border-color\"\n\n TwoLineIconListItem:\n text: \"Keyboard manager\"\n secondary_text: \"Shortcuts manager screen\"\n on_press:\n root.nav_drawer.set_state(\"close\")\n root.screen_manager.current = \"Keyboard manager\"\n IconLeftWidget:\n icon: \"keyboard\"\n\n TwoLineIconListItem:\n text: \"Schedule manager\"\n secondary_text: \"Schedule manager screen\"\n on_press:\n root.nav_drawer.set_state(\"close\")\n root.screen_manager.current = \"Schedule manager\"\n IconLeftWidget:\n icon: \"alarm-check\"\n\n\n\n\nScreen:\n MDToolbar:\n id: toolbar\n pos_hint: {\"top\": 1}\n elevation: 10\n title: \"Auto Tasks\"\n left_action_items: [[\"menu\", lambda x: nav_drawer.set_state(\"open\")]]\n MDNavigationLayout:\n x: toolbar.height\n ScreenManager:\n id: screen_manager\n\n Screen:\n name: \"Home\"\n\n MDLabel:\n text: f\"Hello {app.getName()}!\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.958}\n bold: True\n Image:\n source: app.micImage \n allow_stretch: True \n size_hint_y: 0.185 # Tells the layout to ignore the size_hint in y dir\n #height: dp(125) # The fixed height you want\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.7}\n\n MDRectangleFlatButton: #audio run button\n text: \"aa\"\n font_size: 100\n # disabled: app.isMic\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.7}\n on_release: app.audioRun()\n on_press: app.change2()\n size_hint_y: 0.2\n size_hint_x: 0.13\n opacity: 0\n\n MDLabel:\n text: app.micLabel\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.55}\n bold: True\n\n MDLabel:\n text: \"Create new task\"\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.55}\n bold: True\n Image:\n source: app.plusImage\n allow_stretch: True\n size_hint_y: 0.185\n #height: dp(125)\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.7}\n MDRectangleFlatButton: #show new task dialog\n text: \"aa\"\n font_size: 100\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.7}\n on_press: app.Show()\n size_hint_y: 0.2\n size_hint_x: 0.13\n opacity: 0\n\n\n MDLabel:\n text: \"Run your task\"\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.55}\n bold: True\n MDRectangleFlatButton:\n text: \"aa\"\n font_size: 100\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.7}\n on_press: app.Show2()\n size_hint_y: 0.2\n size_hint_x: 0.13\n opacity: 0\n\n Image:\n source: app.runImage\n allow_stretch: True\n size_hint_y: 0.2 # Tells the layout to ignore the size_hint in y dir\n #height: dp(130) # The fixed height you want\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.7}\n\n Image:\n source: app.KeyboardImage\n allow_stretch: True\n size_hint_y: 0.2 # Tells the layout to ignore the size_hint in y dir\n #height: dp(131) # The fixed height you want\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.3}\n MDLabel:\n text: \"Shortcut for task\"\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.15}\n bold: True\n MDRectangleFlatButton:\n text: \"aa\"\n font_size: 100\n pos_hint: {\"center_x\": 0.8, \"center_y\": 0.3}\n size_hint_y: 0.2\n size_hint_x: 0.13\n on_press: app.ScSelecter()\n opacity: 0\n\n\n Image:\n source: app.GreenKeyboardImage\n allow_stretch: True\n size_hint_y: 0.193 # Tells the layout to ignore the size_hint in y dir\n #height: dp(128) # The fixed height you want\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.3}\n MDLabel:\n text: \"Shortcut for word\"\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.15}\n bold: True\n MDRectangleFlatButton:\n text: \"aa\"\n font_size: 100\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.3}\n on_press: app.SctWord()\n size_hint_y: 0.2\n size_hint_x: 0.13\n opacity: 0\n\n Image:\n source: app.ClockImage\n allow_stretch: True\n size_hint_y: 0.24 # Tells the layout to ignore the size_hint in y dir\n #height: dp(160) # The fixed height you want\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.3}\n MDLabel:\n text: \"Schedule tasks\"\n halign: \"center\"\n font_size: 28\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.15}\n bold: True\n\n MDRectangleFlatButton:\n text: \"aa\"\n font_size: 100\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.3}\n on_press: app.Schedule()\n size_hint_y: 0.2\n size_hint_x: 0.13\n opacity: 0\n\n\n Screen:\n name: \"Task manager\"\n\n MDCard:\n size_hint: 1, 0.905\n #size: 1080,665\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.46}\n padding: 25\n spacing: 25\n orientation: 'vertical'\n ScrollView:\n MDList:\n id: container\n\n Screen:\n name: \"Settings\"\n\n MDRectangleFlatButton:\n text: \"Watch a tutorial\"\n font_size: 20\n pos_hint: {\"center_x\": 0.67, \"center_y\": 0.15}\n on_press: app.open_tutorial()\n #md_bg_color: 255/255, 255/255, 1, 0.8\n\n\n MDLabel:\n text: f\"Username: {app.getName()}\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.88, \"center_y\": 0.24}\n\n MDRectangleFlatButton:\n text: \"Log out\"\n font_size: 24\n pos_hint: {\"center_x\": 0.88, \"center_y\": 0.15}\n on_press: app.Logout()\n\n MDLabel:\n text: \"Open new desktop\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.62, \"center_y\": 0.78}\n MDLabel:\n text: \"Yes \\ No\"\n halign: \"center\"\n font_size: 15\n pos_hint: {\"center_x\": 0.62, \"center_y\": 0.71}\n MDTextField:\n id: open\n size_hint_x: None\n hint_text: app.getOpenDesktop()\n color_mode: 'accent'\n max_text_length: 3\n width: 80\n font_size: 18\n pos_hint: {\"center_x\": 0.79, \"center_y\": 0.78}\n MDRectangleFlatButton:\n text: \"Save\"\n font_size: 18\n pos_hint: {\"center_x\": 0.9, \"center_y\": 0.78}\n on_press: app.saveOpenDesktop(open.text)\n\n MDLabel:\n text: \"Close new desktop\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.62, \"center_y\": 0.58}\n MDLabel:\n text: \"Yes \\ No\"\n halign: \"center\"\n font_size: 15\n pos_hint: {\"center_x\": 0.62, \"center_y\": 0.51}\n MDTextField:\n id: close\n size_hint_x: None\n hint_text: app.getCloseDesktop()\n color_mode: 'accent'\n max_text_length: 3\n width: 80\n font_size: 18\n pos_hint: {\"center_x\": 0.79, \"center_y\": 0.58}\n MDRectangleFlatButton:\n text: \"Save\"\n font_size: 18\n pos_hint: {\"center_x\": 0.9, \"center_y\": 0.58}\n on_press: app.saveCloseDesktop(close.text)\n\n MDLabel:\n text: \"Maximize windows (beta)\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.6, \"center_y\": 0.4}\n MDLabel:\n text: \"Yes \\ No\"\n halign: \"center\"\n font_size: 15\n pos_hint: {\"center_x\": 0.62, \"center_y\": 0.31}\n MDTextField:\n id: close1\n size_hint_x: None\n hint_text: app.get_Maximized_window_choice_state()\n color_mode: 'accent'\n max_text_length: 3\n width: 80\n font_size: 18\n pos_hint: {\"center_x\": 0.79, \"center_y\": 0.4}\n MDRectangleFlatButton:\n text: \"Save\"\n font_size: 18\n pos_hint: {\"center_x\": 0.9, \"center_y\": 0.4}\n on_press: app.save_Maximized_window_choice_state(close1.text)\n\n MDLabel:\n text: \"Operating system direction\"\n halign: \"center\"\n font_size: 24\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.8}\n MDLabel:\n text: \"Right \\ Left\"\n halign: \"center\"\n font_size: 17\n pos_hint: {\"center_x\": 0.12, \"center_y\": 0.68}\n MDTextField:\n id: Direction123\n size_hint_x: None\n hint_text: app.getDirection()\n color_mode: 'accent'\n max_text_length: 5\n width: 80\n font_size: 18\n pos_hint: {\"center_x\": 0.25, \"center_y\": 0.68}\n MDRectangleFlatButton:\n text: \"Save\"\n font_size: 18\n pos_hint: {\"center_x\": 0.35, \"center_y\": 0.68}\n on_press: app.saveDirection(Direction123.text)\n\n MDSwitch:\n active: False\n on_active: app.darkMode()\n pos_hint: {\"center_x\": 0.2, \"center_y\": 0.55}\n MDLabel:\n text: \"Dark mode\"\n halign: \"left\"\n font_size: 24\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.55}\n\n MDLabel:\n text: \"Color\"\n halign: \"left\"\n font_size: 24\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.4}\n\n MDRectangleFlatButton:\n text: \"Red\"\n font_size: 17\n pos_hint: {\"center_x\": 0.19, \"center_y\": 0.4}\n on_press: app.color(\"DeepOrange\")\n MDRectangleFlatButton:\n text: \"Orange\"\n font_size: 17\n pos_hint: {\"center_x\": 0.29, \"center_y\": 0.4}\n on_press: app.color(\"Orange\")\n MDRectangleFlatButton:\n text: \"Blue\"\n font_size: 17\n pos_hint: {\"center_x\": 0.39, \"center_y\": 0.4}\n on_press: app.color(\"Blue\")\n\n MDLabel:\n text: app.keyLabel\n halign: \"left\"\n font_size: 24\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.25}\n MDLabel:\n text: app.recordKey\n halign: \"left\"\n font_size: 21\n pos_hint: {\"center_x\": 0.835, \"center_y\": 0.25}\n MDRectangleFlatButton:\n text: \"Record / Save\"\n font_size: 18\n pos_hint: {\"center_x\": 0.49, \"center_y\": 0.25}\n on_press: app.saveKey()\n\n MDLabel:\n text: \"Record yourself closing window\"\n halign: \"left\"\n font_size: 24\n pos_hint: {\"center_x\": 0.55, \"center_y\": 0.1}\n MDRectangleFlatButton:\n text: \"Press here\"\n font_size: 18\n pos_hint: {\"center_x\": 0.48, \"center_y\": 0.1}\n on_press: app.CloseWindowHelper()\n\n Screen:\n name: \"Keyboard manager\"\n\n MDCard:\n size_hint: 1, 0.905\n #size: 1080,665\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.46}\n padding: 25\n spacing: 25\n orientation: 'vertical'\n ScrollView:\n MDList:\n id: container2\n\n Screen:\n name: \"Schedule manager\"\n\n MDCard:\n size_hint: 1, 0.905\n #size: 1080,665\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.46}\n padding: 25\n spacing: 25\n orientation: 'vertical'\n ScrollView:\n MDList:\n id: container3\n\n\n MDNavigationDrawer:\n id: nav_drawer\n ContentNavigationDrawer:\n screen_manager: screen_manager\n nav_drawer: nav_drawer\n\n\n\"\"\"\n\nclass Tutorial(FloatLayout):\n pass\n\nclass delete(FloatLayout): pass\n\nclass view(FloatLayout): pass\n\nclass ShortcutWord(FloatLayout): pass\n\nclass ShortcutsSelecter(FloatLayout):\n pass\n\nclass NewTask(FloatLayout):\n def Build(self):\n self.theme_cls.theme_style = \"Light\"\n\nclass NewTaskNoMic(FloatLayout):\n pass\n\nclass CloseWindow(FloatLayout): pass\n\nclass ScheduleTasks(FloatLayout): pass\n\nclass RunTask(FloatLayout): pass\n\nclass ShortCuts(FloatLayout): pass\n\nclass ContentNavigationDrawer(BoxLayout):\n screen_manager = ObjectProperty()\n nav_drawer = ObjectProperty()\n\nclass AutoCompleter(Factory.MDTextField):\n suggestions = Factory.ListProperty()\n container = Factory.ObjectProperty()\n\n def listOfNames(self):\n with open(rf\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"r\") as name_of_user_tasks:\n data_from_name_of_user_tasks = name_of_user_tasks.readlines()\n name_of_user_tasks_list = []\n for task in data_from_name_of_user_tasks:\n name_of_user_tasks_list.append(task)\n return name_of_user_tasks_list\n\n def on_text(self, _, text):\n out = []\n DATA_SOURCE = self.listOfNames()\n for word in DATA_SOURCE:\n if text in word:\n global work_name\n work_name = text\n out.append(word)\n self.suggestions = out\n global new_work_name\n\n try:\n new_work_name = min(out, key=len)\n new_work_name = new_work_name[:-1]\n except:\n new_work_name = \"\"\n\n def on_suggestions(self, _, suggestions):\n container = self.container\n if not container:\n return\n container.clear_widgets()\n for word in suggestions:\n btn = Factory.MDRectangleFlatButton(text=word[:-1],\n on_press=self.select_word,\n size_hint_x=None,\n width=150, )\n container.add_widget(btn)\n\n def select_word(self, btn):\n self.text = btn.text\n self.suggestions = []\n global work_name\n work_name = self.text\n\nclass AutoTask(MDApp, object):\n def __init__(self, Password, **kwargs):\n super().__init__(**kwargs)\n\n try:\n with open(f\"{path}/{getEmail()}_ultra_secure.txt\", \"r\") as f:\n secure_choice = f.read()\n f.close()\n\n if secure_choice == \"on\":\n self.Password_for_enc = Password\n else:\n self.Password_for_enc = getEmail()\n except:\n self.Password_for_enc = getEmail()\n\n\n\n with open(rf\"{path}/checkbox.txt\", 'w') as f:\n f.write(\"on\")\n f.close()\n taskToScheduled = \"\"\n logoPng = StringProperty(rf\"{ImagePath}\\logo.png\")\n ScheduledLabel = StringProperty(\"Type your task name\")\n disabledCheckbox = BooleanProperty(False)\n checkBoxImage2 = StringProperty(rf\"{ImagePath}\\checked.png\")\n opacityCheckbox = StringProperty(\"1\")\n plusImage = StringProperty(rf\"{ImagePath}\\plus.png\")\n checkBoxImage = StringProperty(rf\"{ImagePath}\\unchecked.png\")\n runImage = StringProperty(rf\"{ImagePath}\\run.png\")\n micImage = StringProperty(rf\"{ImagePath}\\microphone.png\")\n KeyboardImage = StringProperty(rf\"{ImagePath}\\keyboard_blue.png\")\n ClockImage = StringProperty(rf\"{ImagePath}\\clock.png\")\n GreenKeyboardImage = StringProperty(rf\"{ImagePath}\\keyboard_green.png\")\n text = StringProperty(\"\")\n bt = StringProperty(\"Start recording\")\n b2 = StringProperty(\"I can't record\")\n b3 = StringProperty(\"Start recording\")\n labelSelect = StringProperty(\"Type your task name\")\n micLabel = StringProperty(\"Quick audio run\")\n opacity = StringProperty(\"1\")\n opac = StringProperty(\"1\")\n opac2 = StringProperty(\"0\")\n opac3 = StringProperty(\"0\")\n opac4 = StringProperty(\"1\")\n output = StringProperty(\"\")\n user_input = None\n name = \"\"\n audio = \"\"\n kb = \"\"\n mb = \"\"\n mm = \"\"\n mw = \"\"\n count = 0\n disable = BooleanProperty(True)\n disabLable = BooleanProperty(False)\n boo = True\n all_speech_tries = \"\"\n keyLabel = StringProperty(\"Your stop recording key\")\n UserTimeChoice = \"\"\n UserDateChoice = \"\"\n keysLive = StringProperty(\"\")\n recordshortcut = BooleanProperty(True)\n scLabel = StringProperty(\"\")\n CloseWindowLabel1 = StringProperty(\"Here you can record yourself closing\\na new window, if you need help press on the\\ntutorial below\")\n opacityDisable = StringProperty(\"1\")\n close_tabs_helper = BooleanProperty(False)\n disabledShortcut = BooleanProperty(True)\n recordKey = StringProperty(Common.key())\n scheduleDisable = BooleanProperty(False)\n isMic = BooleanProperty(True)\n WatchText = StringProperty(\"No Thanks\")\n\n def build(self):\n self.use_kivy_settings = False\n self.settings_cls = SettingsPanel\n self.settings_cls.title = \"The best tool for automating your daily tasks\"\n self.icon = rf\"{ImagePath}\\icon.png\"\n self.theme_cls.theme_style = Common.GetColort()\n self.theme_cls.primary_palette = Common.Colorr()\n self.title = \"Auto Tasks\"\n global password_for_encrypt\n password_for_encrypt = self.Password_for_enc\n self.runShortCuts()\n self.runShortCutsWord()\n self.isMic = self.check_if_user_have_microphone()\n return Builder.load_string(Main)\n\n def runShortCuts(self):\n try:\n for file in listdir(f\"{path}/{getEmail()}/short_cuts\"):\n with open(f\"{path}/{getEmail()}/short_cuts/{file}\", 'r') as f:\n x = f.read()\n keyboard.add_hotkey(file, lambda y=x: self.selectTasksKivyManager(f\"{y}a\"))\n except:\n pass\n\n def on_start(self):\n try:\n with open(rf\"{path}\\{getEmail()}\\first_log_in.txt\", 'r') as f:\n read = f.read()\n if read == \"yes\":\n self.watch()\n with open(rf\"{path}\\{getEmail()}\\first_log_in.txt\", 'w') as f1:\n f1.write(\"no\")\n\n except:\n with open(rf\"{path}\\{getEmail()}\\first_log_in.txt\", 'w') as f1:\n f1.write(\"no\")\n self.watch()\n\n try:\n mkdir(f'{path}/{getEmail()}/schedule')\n except:\n pass\n\n with open(f\"{path}/open.txt\", 'w') as f:\n f.write(\"True\")\n\n list_tasks = Common.NameOfUserTasks()\n title = OneLineIconListItem(\n text=\"Here you can edit your task, delete them, and set a time when there gonna play\")\n self.root.ids.container.add_widget(title)\n\n for x in range(len(list_tasks)):\n icon10 = IconLeftWidget(icon=\"clock\")\n icon10.bind(on_press=lambda y, x=list_tasks[x]: self.show_date_picker(x))\n icon20 = IconLeftWidget(icon=\"delete\")\n icon20.bind(on_press=lambda y, x=list_tasks[x]: self.deleteTask(x))\n icon30 = IconLeftWidget(icon=\"play-circle\")\n icon30.bind(on_press=lambda y, x=list_tasks[x]: self.selectTasksKivyManager(x))\n icon40 = IconLeftWidget(icon=\"keyboard\")\n icon40.bind(on_press=lambda y, x=list_tasks[x]: self.shortCuts(x))\n items = OneLineIconListItem(text=\" \" + list_tasks[x] + \" task\")\n items.add_widget(icon10)\n items.add_widget(icon20)\n items.add_widget(icon30)\n items.add_widget(icon40)\n self.root.ids.container.add_widget(items)\n\n\n list_sc_task = []\n for sc in listdir(f\"{path}/{getEmail()}/short_cuts\"):\n list_sc_task.append(sc)\n list_sc_word = []\n for sc in listdir(f\"{path}/{getEmail()}/short_cuts_words\"):\n list_sc_word.append(sc)\n\n title = OneLineIconListItem(\n text=\"Here you can edit your short cuts and delete them\")\n self.root.ids.container2.add_widget(title)\n\n list_schedule = []\n for schedule in listdir(f\"{path}/{getEmail()}/schedule\"):\n if schedule != \"['2020', '1', '1'] & ['00', '00', '00'] & system_file\":\n list_schedule.append(schedule)\n title1 = OneLineIconListItem(\n text=\"Here you see your schedule tasks and delete them\")\n self.root.ids.container3.add_widget(title1)\n\n for x in range(len(list_sc_task)):\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=list_sc_task[x]: self.deleteSc(x))\n icon2 = IconLeftWidget(icon=\"eye\")\n icon2.bind(on_press=lambda y, x=list_sc_task[x]: self.View(x))\n items = OneLineIconListItem(text=\" \" + list_sc_task[x] + \" shortcut for task\")\n items.add_widget(icon)\n items.add_widget(icon2)\n self.root.ids.container2.add_widget(items)\n\n for x in range(len(list_sc_word)):\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=list_sc_word[x]: self.deleteSc(x))\n icon2 = IconLeftWidget(icon=\"eye\")\n icon2.bind(on_press=lambda y, x=list_sc_word[x]: self.View(x))\n items = OneLineIconListItem(text=\" \" + list_sc_word[x] + \" shortcut for word\")\n items.add_widget(icon)\n items.add_widget(icon2)\n self.root.ids.container2.add_widget(items)\n\n list_year_schedule = []\n list_month_schedule = []\n list_day_schedule = []\n list_hour_schedule = []\n list_min_schedule = []\n list_name_schedule = []\n\n for filename in listdir(f\"{path}/{getEmail()}/schedule\"):\n\n if filename != \"['2020', '1', '1'] & ['00', '00', '00'] & system_file\":\n list_schedule_name = filename.split(\"'\")\n\n name_schedule_task = list_schedule_name[12][2:]\n\n day_to_run = list_schedule_name[5]\n year_to_run = list_schedule_name[1]\n month_to_run = list_schedule_name[3]\n hour_to_run = list_schedule_name[7]\n min_to_run = list_schedule_name[9]\n\n list_year_schedule.append(year_to_run)\n list_month_schedule.append(month_to_run)\n list_day_schedule.append(day_to_run)\n list_hour_schedule.append(hour_to_run)\n\n list_min_schedule.append(min_to_run)\n list_name_schedule.append(name_schedule_task)\n\n for x in range(len(list_schedule)):\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=list_schedule[x]: self.delete_schedule(x))\n items1 = OneLineIconListItem(\n text=\" \" + list_day_schedule[x] + \"/\" + list_month_schedule[x] + \"/\" + list_year_schedule[\n x] + \" | \" + list_hour_schedule[x] + \":\" + list_min_schedule[x] + \" \" +\n list_name_schedule[x] + \" schedule task\")\n items1.add_widget(icon)\n self.root.ids.container3.add_widget(items1)\n\n if not listdir(f\"{path}/{getEmail()}/schedule\"):\n pass\n else:\n schedule_thread = Thread(target=lambda: self.ScheduledTaskHelper_for_start(), daemon=True)\n schedule_thread.start()\n\n def deleteSc(self, name):\n try:\n remove(f\"{path}/{getEmail()}/short_cuts_words/{name}\")\n self.Delete()\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/short_cuts/{name}\")\n self.Delete()\n except:\n pass\n\n try:\n keyboard.remove_all_hotkeys()\n self.runShortCuts()\n self.runShortCutsWord()\n\n except:\n pass\n\n def open_url(self):\n openweb('https://youtu.be/drcsnJp5GZY')\n\n def Delete(self):\n show = delete()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.5, \"center_y\": 0.19}, opacity=0))\n show.add_widget(but)\n popupWindow2 = Popup(title=\"\", content=show, size_hint=(None, None), size=(360, 220),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow2.dismiss)\n popupWindow2.open()\n\n def View(self, name):\n count = 0\n try:\n open(f\"{path}/{getEmail()}/short_cuts_words/{name}\", \"r\")\n count = count + 1\n except:\n pass\n try:\n open(f\"{path}/{getEmail()}/short_cuts/{name}\", \"r\")\n count = count + 1\n except:\n pass\n\n if count == 0:\n self.Delete()\n else:\n try:\n try:\n decfile(f\"{path}/{getEmail()}/short_cuts_words/{name}\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/short_cuts_words/{name}\", 'r') as f:\n self.scLabel = f.read()\n f.close()\n encfile(f\"{path}/{getEmail()}/short_cuts_words/{name}\", password_for_encrypt)\n\n show = view()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.5, \"center_y\": 0.19}, opacity=0))\n show.add_widget(but)\n popupWindow2 = Popup(title=name + \" (shortcut for word)\", content=show, size_hint=(None, None),\n size=(360, 220), background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow2.dismiss)\n popupWindow2.open()\n except:\n with open(f\"{path}/{getEmail()}/short_cuts/{name}\", 'r') as f:\n self.scLabel = f.read()\n f.close()\n\n show = view()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.5, \"center_y\": 0.19}, opacity=0))\n show.add_widget(but)\n popupWindow2 = Popup(title=name + \" (shortcut for task)\", content=show, size_hint=(None, None),\n size=(340, 220), background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow2.dismiss)\n popupWindow2.open()\n except:\n show = view()\n self.scLabel = \"Your task was deleted\"\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.5, \"center_y\": 0.19}, opacity=0))\n show.add_widget(but)\n popupWindow2 = Popup(title=name, content=show, size_hint=(None, None), size=(340, 220),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow2.dismiss)\n popupWindow2.open()\n\n def darkMode(self):\n color = Common.readFile(rf\"{path}/file.txt\")\n if color == \"Dark\":\n a = \"Light\"\n Common.writeInFile(rf\"{path}/file.txt\", a)\n else:\n a = \"Dark\"\n Common.writeInFile(rf\"{path}/file.txt\", a)\n self.theme_cls.theme_style = a\n self.theme_cls.primary_palette = \"Green\"\n self.theme_cls.primary_palette = \"Blue\"\n self.theme_cls.primary_palette = Common.Colorr()\n\n def Show(self):\n statusMic = self.check_if_user_have_microphone()\n if statusMic:\n self.NoMic()\n else:\n self.Mic()\n\n def Mic(self):\n show = NewTask()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.55, \"center_y\": 0.2}, opacity=0))\n show.add_widget(but)\n popupWindow = Popup(title=\"New task\", content=show, size_hint=(None, None), size=(530, 380),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow.dismiss)\n self.bt = \"Start recording\"\n self.b2 = \"I can't record\"\n self.b3 = \"Start recording\"\n self.opac = \"1\"\n self.opac2 = \"0\"\n self.opac3 = \"0\"\n self.opac4 = \"1\"\n self.output = \"\"\n self.close_tabs_helper = False\n self.disable = True\n self.disabLable = False\n self.disabledCheckbox = False\n self.opacityCheckbox = \"1\"\n popupWindow.open()\n\n def NoMic(self):\n show = NewTaskNoMic()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.55, \"center_y\": 0.2}, opacity=0))\n show.add_widget(but)\n popupWindow = Popup(title=\"New task\", content=show, size_hint=(None, None), size=(530, 380),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow.dismiss)\n self.bt = \"Start recording\"\n self.b3 = \"Start recording\"\n self.opacity = \"1\"\n self.opac = \"1\"\n self.opac2 = \"0\"\n self.opac3 = \"0\"\n self.opac4 = \"1\"\n self.output = f\"\"\"We will record your motions\\n once you press the button below,\\n you can end the record by\\n pressing your '{Common.readFile(f\"{path}/key.txt\")}' key.\"\"\"\n self.close_tabs_helper = False\n self.disable = False\n self.disabLable = False\n self.disabledCheckbox = False\n self.opacityCheckbox = \"1\"\n popupWindow.open()\n\n def Show2(self):\n show = RunTask()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.55, \"center_y\": 0.15}, opacity=0))\n show.add_widget(but)\n popupWindow2 = Popup(title=\"Run Task\", content=show, size_hint=(None, None), size=(540, 420),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow2.dismiss)\n self.close_tabs_helper = False\n self.disabledCheckbox = False\n self.opacityCheckbox = \"1\"\n popupWindow2.open()\n\n def change(self):\n self.text = \"Please say your task name\"\n self.opac2 = \"0\"\n\n def Logout(self):\n Common.writeInFile(rf\"{path}/remember.txt\", \"no\")\n AutoTask(\"\").stop()\n\n def right_or_left(self):\n pass\n\n def startRecord_thred(self, name):\n self.opac = \"1\"\n self.opac2 = \"0\"\n try:\n r = Recognizer()\n with Microphone() as source:\n audio = r.listen(source, 4, 4)\n try:\n self.user_input = r.recognize_google(audio)\n self.bt = \"Record again\"\n self.b2 = \"Submit\"\n if self.user_input.lower() == name:\n self.text = str(self.user_input).lower()\n else:\n self.text = str(f\"{self.user_input}, record again\").lower()\n except:\n self.text = \"We could not\\n recognize your voice\"\n self.bt = \"Record again\"\n\n self.save_speech_error(self.user_input.lower())\n self.bt = \"Record again\"\n self.b2 = \"Submit\"\n except:\n self.text = \"Something went wrong\"\n\n def startRecord(self, name):\n s_thread = Thread(target=lambda: self.startRecord_thred(name), daemon=True)\n s_thread.start()\n\n def delete_schedule(self, name_s):\n try:\n remove(f\"{path}/{getEmail()}/schedule/{name_s}\")\n self.Delete()\n except:\n pass\n\n def name_of_tasks(self):\n with open(f\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"a\") as f_name_tasks:\n f_name_tasks.write(self.name + '\\n')\n f_name_tasks.close()\n\n def save_speech_error(self, word):\n self.all_speech_tries += f\"{word.lower()}& \"\n\n def okay(self, name, audio):\n try:\n open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"w\")\n name_is_valid = True\n except:\n name_is_valid = False\n self.name = name\n self.audio = audio\n self.text = ''\n iss = False\n for x in self.name:\n if x != \" \" and '\"' not in x and \"<\" not in x and \">\" not in x and \"/\" not in x and r\"\\\"\" not in x and \"?\" not in x and \"*\" not in x and \"|\" not in x and \":\" not in x:\n iss = True\n\n if iss and name_is_valid:\n self.name_of_tasks()\n self.opac = \"0\"\n self.opac2 = \"1\"\n self.output = f\"\"\"We will record your motions\\n once you press the button below,\\n you can end the record by\\n pressing your '{Common.readFile(f\"{path}/key.txt\")}' key.\"\"\"\n self.opac3 = \"1\"\n self.opac4 = \"0\"\n self.disable = False\n # שומר את כל הניסיונות של המשתמש לחזור על שם המשימה בקובץ טקסט\n\n if not name in self.all_speech_tries:\n self.all_speech_tries += f\"{name.lower()}& \"\n with open(f\"{path}/{getEmail()}/file_saver/{name}all_speech_tries.txt\", 'w') as f:\n f.write(self.all_speech_tries)\n\n\n else:\n self.opac = \"1\"\n self.opac2 = \"0\"\n self.text = \"Your task name is invalid\"\n\n def changeLabel(self):\n if not self.boo:\n self.opac3 = \"0\"\n self.disable = True\n self.opac4 = \"0\"\n self.output = \"We are recording right now.\"\n self.disable = (False)\n self.disabLable = (True)\n\n def changeLabel2(self, task_name):\n try:\n open(f\"{path}/{getEmail()}/file_saver/{task_name}_max_choice.txt\", \"w\")\n name_is_valid = True\n except:\n name_is_valid = False\n iss = False\n for x in task_name:\n if x != \" \" and '\"' not in x and \"<\" not in x and \">\" not in x and \"/\" not in x and r\"\\\"\" not in x and \"?\" not in x and \"*\" not in x and \"|\" not in x and \":\" not in x:\n iss = True\n if iss and name_is_valid:\n self.opac3 = \"0\"\n self.disable = True\n self.opacity = \"0\"\n self.opac4 = \"0\"\n self.output = \"We are recording right now.\"\n self.disable = (False)\n self.disabLable = (True)\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}all_speech_tries.txt\", 'w') as f:\n f.write(f\"{task_name.lower()}& \")\n with open(f\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"a\") as f_name_tasks:\n f_name_tasks.write(task_name.lower() + '\\n')\n f_name_tasks.close()\n\n def ChangeLabel123(self):\n self.CloseWindowLabel1 = \"We are recording now\"\n\n def record_event_of_closing_new_desktop_for_threading(self):\n\n keyboard.press_and_release(\"left windows + tab\")\n\n mouse_events, keyboard_events = close_desktop_record()\n mouse_button = []\n mouse_wheel = []\n mouse_move = []\n\n # ממין את הEVENT לפי הסוג שלהם\n for event in mouse_events:\n if type(event) == MoveEvent:\n mouse_move.append(event)\n elif type(event) == WheelEvent:\n mouse_wheel.append(event)\n else:\n mouse_button.append(event)\n\n # reformat the lists into strings, in order to insert to database\n all_lists = helper_events.reformat_all(keyboard_events[:-1], mouse_button, mouse_wheel, mouse_move)\n # keyboard\n kb = '#'.join(all_lists[0])\n # mouse\n mb = '#'.join(all_lists[1]) # button\n mm = '#'.join(all_lists[2]) # move\n mw = '#'.join(all_lists[3]) # wheel\n\n # שומר את כל הפעולות בקבצים שונים לפי סוג הפעולה\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_kb.txt\", 'w') as f:\n f.write(kb)\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mb.txt\", 'w') as f:\n f.write(mb)\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mm.txt\", 'w') as f:\n f.write(mm)\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mw.txt\", 'w') as f:\n f.write(mw)\n\n self.CloseWindowLabel1 = \"Your record succecfully saved\"\n\n def record_event_of_closing_new_desktop(self):\n\n record_closing_desktop_thread = Thread(\n target=lambda: self.record_event_of_closing_new_desktop_for_threading(), daemon=True)\n record_closing_desktop_thread.start()\n\n def run_close_new_deskstop(self):\n try:\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_kb.txt\",\n 'r') as f1:\n data_kb = f1.read().replace('\\n', '')\n kb = data_kb\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mb.txt\",\n 'r') as f2:\n data_mb = f2.read().replace('\\n', '')\n mb = data_mb\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mw.txt\",\n 'r') as f3:\n data_mw = f3.read().replace('\\n', '')\n mw = data_mw\n\n with open(f\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mm.txt\",\n 'r') as f4:\n data_mm = f4.read().replace('\\n', '')\n mm = data_mm\n\n all_events = back_to_events(kb.split('#'), mb.split('#'), mm.split('#'), mw.split('#'))\n kb_events, mb_events, mm_events, mw_events = all_events\n\n all_mouse_events = mb_events + mm_events + mw_events\n all_mouse_events = sorted(all_mouse_events, key=lambda event: event.time)\n\n keyboard.start_recording()\n keyboard.stop_recording()\n\n keyboard.press_and_release(\"left windows + tab\")\n\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n\n ctrlWinArrow()\n except:\n pass\n\n\n def start_and_save_for_threading(self, task_name):\n iss = False\n for x in task_name:\n if x != \" \" and '\"' not in x and \"<\" not in x and \">\" not in x and \"/\" not in x and r\"\\\"\" not in x and \"?\" not in x and \"*\" not in x and \"|\" not in x and \":\" not in x:\n iss = True\n try:\n open(f\"{path}/{getEmail()}/file_saver/{task_name}_max_choice.txt\", \"w\")\n name_is_valid = True\n except:\n name_is_valid = False\n\n if iss and name_is_valid:\n if self.disabledCheckbox == False:\n mouse_events, keyboard_events = open_new_desktop_and_come_back_to_original_and_record()\n else:\n mouse_events, keyboard_events = just_record()\n\n with open(f\"{path}/{getEmail()}/maximized_choice.txt\", \"r\") as f:\n max_choice = f.read()\n f.close()\n\n if max_choice == \"Yes\":\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_max_choice.txt\", \"w\") as f:\n f.write(\"Yes\")\n f.close()\n else:\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_max_choice.txt\", \"w\") as f:\n f.write(\"No\")\n f.close()\n\n mouse_button = []\n mouse_wheel = []\n mouse_move = []\n\n # ממין את הEVENT לפי הסוג שלהם\n for event in mouse_events:\n if type(event) == MoveEvent:\n mouse_move.append(event)\n elif type(event) == WheelEvent:\n mouse_wheel.append(event)\n else:\n mouse_button.append(event)\n\n # reformat the lists into strings, in order to insert to database\n all_lists = helper_events.reformat_all(keyboard_events[:-1], mouse_button, mouse_wheel, mouse_move)\n # keyboard\n kb = '#'.join(all_lists[0])\n # mouse\n mb = '#'.join(all_lists[1]) # button\n mm = '#'.join(all_lists[2]) # move\n mw = '#'.join(all_lists[3]) # wheel\n\n # שומר את כל הפעולות בקבצים שונים לפי סוג הפעולה\n\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_kb.txt\", 'w') as f:\n f.write(kb)\n encfile(f\"{path}/{getEmail()}/file_saver/{task_name}_kb.txt\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_mb.txt\", 'w') as f:\n f.write(mb)\n encfile(f\"{path}/{getEmail()}/file_saver/{task_name}_mb.txt\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_mm.txt\", 'w') as f:\n f.write(mm)\n encfile(f\"{path}/{getEmail()}/file_saver/{task_name}_mm.txt\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/file_saver/{task_name}_mw.txt\", 'w') as f:\n f.write(mw)\n encfile(f\"{path}/{getEmail()}/file_saver/{task_name}_mw.txt\", password_for_encrypt)\n\n self.output = \"Your record successfully saved.\"\n self.opac3 = \"0\"\n self.disable = (True)\n self.boo = False\n\n if self.close_tabs_helper:\n self.run_close_new_deskstop()\n\n icon10 = IconLeftWidget(icon=\"clock\")\n icon10.bind(on_press=lambda y, x=task_name: self.show_date_picker(f\"{x}a\"))\n icon20 = IconLeftWidget(icon=\"delete\")\n icon20.bind(on_press=lambda y, x=task_name: self.deleteTask_before_restart(x))\n icon30 = IconLeftWidget(icon=\"play-circle\")\n icon30.bind(on_press=lambda y, x=task_name + \"a\": self.selectTasksKivyManager(x))\n icon40 = IconLeftWidget(icon=\"keyboard\")\n icon40.bind(on_press=lambda y, x=task_name + \"a\": self.shortCuts(x))\n items = OneLineIconListItem(text=\" \" + task_name + \" task\")\n items.add_widget(icon10)\n items.add_widget(icon20)\n items.add_widget(icon30)\n items.add_widget(icon40)\n self.root.ids.container.add_widget(items)\n\n else:\n self.output = \"Your task name is invalid\"\n\n def start_and_save(self, task_name):\n start_save_thread = Thread(target=lambda: self.start_and_save_for_threading(task_name))\n start_save_thread.start()\n\n def saveKey_for_thread(self):\n self.recordKey = keyboard.read_hotkey(suppress=False)\n Common.writeInFile(rf\"{path}/key.txt\", self.recordKey)\n\n def saveKey(self):\n recordkeyboard_thread = Thread(target=lambda: self.saveKey_for_thread(), daemon=True)\n if self.count == 0:\n recordkeyboard_thread.start()\n self.count = 1\n else:\n recordkeyboard_thread.start()\n\n def check_final_file_for_kivy(self, input):\n input = input[:-1]\n task_name_list = Common.NameOfUserTasks()\n\n # מפעיל את user_select_task ושומר את שם המשימה\n # עובר על הרשימה ומוצא את השם המתאים\n\n for name in task_name_list:\n if name != \"\":\n if search_string_in_file(name, input):\n user_input_error = name\n return user_input_error\n\n def maximize_window_thread_run(self):\n while True:\n if keep_maximized_run == True:\n sleep(3)\n keyboard.press(\"left windows\")\n keyboard.press_and_release(\"up\")\n keyboard.release(\"left windows\")\n else:\n break\n\n def maximize_window_thread_run1(self, time_factor):\n while True:\n if keep_maximized_run1 == True:\n sleep(3/time_factor)\n keyboard.press(\"left windows\")\n keyboard.press_and_release(\"up\")\n keyboard.release(\"left windows\")\n else:\n break\n\n def maximize_window_run1(self, speed_factor):\n max_run_thread1 = Thread(target=lambda: self.maximize_window_thread_run1(speed_factor), daemon=True)\n max_run_thread1.start()\n\n def maximize_window_run(self):\n max_run_thread = Thread(target=lambda: self.maximize_window_thread_run(), daemon=True)\n max_run_thread.start()\n\n def maximize_window_thread_run2(self):\n while True:\n if keep_maximized_run2 == True:\n sleep(3)\n keyboard.press(\"left windows\")\n keyboard.press_and_release(\"up\")\n keyboard.release(\"left windows\")\n else:\n break\n\n def maximize_window_run2(self):\n max_run_thread2 = Thread(target=lambda: self.maximize_window_thread_run2(), daemon=True)\n max_run_thread2.start()\n\n def maximize_window_thread_run3(self):\n while True:\n if keep_maximized_run3 == True:\n sleep(3)\n keyboard.press(\"left windows\")\n keyboard.press_and_release(\"up\")\n keyboard.release(\"left windows\")\n else:\n break\n\n def maximize_window_run3(self):\n max_run_thread3 = Thread(target=lambda: self.maximize_window_thread_run3(), daemon=True)\n max_run_thread3.start()\n\n def selectTasksKivyManager_for_threading(self, name): # select the task\n global keep_maximized_run\n\n try:\n name = name[:-1]\n\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", 'r') as f1:\n data_kb = f1.read()\n kb = data_kb\n\n with open(f\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", 'r') as f2:\n data_mb = f2.read()\n mb = data_mb\n\n with open(f\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", 'r') as f3:\n data_mw = f3.read()\n mw = data_mw\n\n with open(f\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", 'r') as f4:\n data_mm = f4.read()\n mm = data_mm\n\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n all_events = back_to_events(kb.split('#'), mb.split('#'), mm.split('#'), mw.split('#'))\n kb_events, mb_events, mm_events, mw_events = all_events\n\n all_mouse_events = mb_events + mm_events + mw_events\n all_mouse_events = sorted(all_mouse_events, key=lambda event: event.time)\n\n keyboard.start_recording()\n keyboard.stop_recording()\n\n if ReadOpenWindow() == \"True\":\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run = True\n self.maximize_window_run()\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n ctrlWinArrow()\n keep_maximized_run = False\n else:\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n ctrlWinArrow()\n\n\n else:\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run = True\n self.maximize_window_run()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n keep_maximized_run = False\n else:\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n\n if ReadCloseWindow() == \"True\":\n self.run_close_new_deskstop()\n\n except:\n pass\n\n def selectTasksKivyManager(self, name):\n try:\n open(f\"{path}/{getEmail()}/file_saver/{name[:-1]}_mm.txt\", \"r\")\n selectTasksKivyManager_thread = Thread(\n target=lambda: self.selectTasksKivyManager_for_threading(name))\n selectTasksKivyManager_thread.start()\n except:\n self.Delete()\n\n def deleteTask(self, name):\n\n with open(f\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"r\") as f:\n read_data = f.readlines()\n with open(f\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"w\") as f1:\n for task in read_data:\n if not task == name:\n f1.writelines(task)\n name = name[:-1]\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_kb.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mb.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mw.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mm.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}all_speech_tries.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\")\n except:\n pass\n try:\n for file in listdir(rf\"{path}/{getEmail()}/short_cuts\"):\n with open(rf\"{path}/{getEmail()}/short_cuts/{file}\", 'r') as f:\n data = f.read()\n f.close()\n if data == name:\n remove(rf\"{path}/{getEmail()}/short_cuts/{file}\")\n except:\n pass\n\n self.Delete()\n\n def deleteTask_before_restart(self, name):\n with open(rf\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"r\") as f:\n read_data = f.readlines()\n with open(rf\"{path}/{getEmail()}/names_of_user_tasks.txt\", \"w\") as f1:\n for task in read_data:\n if not task[:-1] == name:\n f1.writelines(task)\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_kb.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mb.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mw.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_mm.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}all_speech_tries.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}all_speech_tries.txt\")\n except:\n pass\n try:\n remove(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\")\n except:\n pass\n try:\n for file in listdir(rf\"{path}/{getEmail()}/short_cuts\"):\n with open(rf\"{path}/{getEmail()}/short_cuts/{file}\", 'r') as f:\n data = f.read()\n f.close()\n if data == name:\n remove(rf\"{path}/{getEmail()}/short_cuts/{file}\")\n except:\n pass\n\n self.Delete()\n\n def selectTasksKivy_for_threading(self, name_for_select_kivy_task, speed_factor): # select the task\n global keep_maximized_run1\n try:\n speed_factor = float(speed_factor)\n name_for_select_kivy_task = int(name_for_select_kivy_task)\n name = new_work_name\n\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", 'r') as f1:\n data_kb = f1.read().replace('\\n', '')\n kb = data_kb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", 'r') as f2:\n data_mb = f2.read().replace('\\n', '')\n mb = data_mb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", 'r') as f3:\n data_mw = f3.read().replace('\\n', '')\n mw = data_mw\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", 'r') as f4:\n data_mm = f4.read().replace('\\n', '')\n mm = data_mm\n\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n all_events = back_to_events(kb.split('#'), mb.split('#'), mm.split('#'), mw.split('#'))\n kb_events, mb_events, mm_events, mw_events = all_events\n\n all_mouse_events = mb_events + mm_events + mw_events\n all_mouse_events = sorted(all_mouse_events, key=lambda event: event.time)\n\n keyboard.start_recording()\n keyboard.stop_recording()\n\n if self.disabledCheckbox == False:\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run1 = True\n self.maximize_window_run1(speed_factor)\n ctrlWinD()\n\n if speed_factor <= 0.1:\n speed_factor = 0.1\n if speed_factor >= 10:\n speed_factor = 10\n\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, name_for_select_kivy_task, speed_factor)\n ctrlWinArrow()\n keep_maximized_run1 = False\n else:\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, name_for_select_kivy_task, speed_factor)\n ctrlWinArrow()\n\n else:\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run1 = True\n self.maximize_window_run1(speed_factor)\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, name_for_select_kivy_task, speed_factor)\n keep_maximized_run1 = False\n else:\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, name_for_select_kivy_task, speed_factor)\n\n if self.close_tabs_helper:\n self.run_close_new_deskstop()\n\n except:\n self.labelSelect = \"Something went wrong,\\ntry to enter a valid name\"\n\n def selectTasksKivy(self, name_for_select_kivy_task, speed_factor):\n selectTasksKivy_thread = Thread(\n target=lambda: self.selectTasksKivy_for_threading(name_for_select_kivy_task, speed_factor))\n selectTasksKivy_thread.start()\n\n def selectTasksKivyAudio_for_threading(self, name): # select the task\n global keep_maximized_run2\n\n try:\n name = name[:-1]\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", 'r') as f1:\n data_kb = f1.read()\n kb = data_kb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", 'r') as f2:\n data_mb = f2.read()\n mb = data_mb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", 'r') as f3:\n data_mw = f3.read()\n mw = data_mw\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", 'r') as f4:\n data_mm = f4.read()\n mm = data_mm\n\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n all_events = back_to_events(kb.split('#'), mb.split('#'), mm.split('#'), mw.split('#'))\n kb_events, mb_events, mm_events, mw_events = all_events\n\n all_mouse_events = mb_events + mm_events + mw_events\n all_mouse_events = sorted(all_mouse_events, key=lambda event: event.time)\n\n keyboard.start_recording()\n keyboard.stop_recording()\n\n if ReadOpenWindow() == \"True\":\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run2 = True\n self.maximize_window_run2()\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n keep_maximized_run2 = False\n ctrlWinArrow()\n else:\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n ctrlWinArrow()\n else:\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run2 = True\n self.maximize_window_run()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n keep_maximized_run2 = False\n else:\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n\n if ReadCloseWindow() == \"True\":\n self.run_close_new_deskstop()\n\n except:\n self.labelSelect = \"Something went wrong,\\ntry to enter a valid name\"\n\n def selectTasksKivyAudio(self, name_for_select_kivy_task):\n selectTasksKivyAudio_thread = Thread(\n target=lambda: self.selectTasksKivyAudio_for_threading(name_for_select_kivy_task))\n selectTasksKivyAudio_thread.start()\n\n def color(self, color):\n self.theme_cls.primary_palette = Common.SetColor(color)\n\n def audioRun_thread(self):\n if self.check_if_user_have_microphone():\n self.micLabel = \"We cannot detect\\nan input device\"\n else:\n try:\n r = Recognizer()\n with Microphone() as source:\n audio = r.listen(source, 4, 4)\n try:\n user_input = r.recognize_google(audio).lower()\n self.selectTasksKivyAudio(self.check_final_file_for_kivy(user_input))\n self.micLabel = \"Quick audio run\"\n\n except:\n self.micLabel = \"We could not\\n recognize your voice\"\n except:\n self.micLabel = \"Something went wrong\"\n\n def audioRun(self):\n audio = Thread(target=lambda: self.audioRun_thread(), daemon=True)\n audio.start()\n\n def change2(self):\n self.micLabel = \"We are recording now\"\n\n def check_box(self, value):\n if value:\n self.close_tabs_helper = True\n try:\n with open(rf\"{path}/{getEmail()}/file_saver/close_new_desktop_after_recording_system_file_mm.txt\",\n \"r\") as f:\n f.read()\n except:\n self.CloseWindowHelper()\n else:\n self.close_tabs_helper = False\n\n def choice_open_new_desktop(self, value):\n if value:\n self.disabledCheckbox = False\n self.opacityCheckbox = \"1\"\n else:\n self.disabledCheckbox = True\n self.opacityCheckbox = \"0\"\n self.close_tabs_helper = False\n\n def on_cancel(self, instance, time):\n pass\n\n def shortCuts(self, name):\n try:\n self.disabledShortcut = False\n self.keysLive = \"\"\n open(rf\"{path}/{getEmail()}/file_saver/{name[:-1]}_mm.txt\", \"r\")\n global nameForShortCuts\n nameForShortCuts = name\n show = ShortCuts()\n closeButton = (Button(text=\"close\", size_hint=(None, None),\n width=105, height=53, pos_hint={\"center_x\": 0.35, \"center_y\": 0.195}, opacity=0))\n show.add_widget(closeButton)\n popupWindow3 = Popup(title=\"Create a short cut\", content=show, size_hint=(None, None), size=(500, 360),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n closeButton.bind(on_press=popupWindow3.dismiss)\n popupWindow3.open()\n except:\n self.Delete()\n\n def recordShortCut(self):\n self.keysLive = keyboard.read_hotkey(suppress=False)\n\n def recordShortCut_thred(self):\n recordkeyboard_thread = Thread(target=lambda: self.recordShortCut(), daemon=True)\n recordkeyboard_thread.start()\n\n def WriteShortcut(self):\n try:\n name_of_task_for_short_cut = nameForShortCuts[:-1]\n with open(rf\"{path}/{getEmail()}/short_cuts/{self.keysLive}\", 'w') as f:\n f.write(name_of_task_for_short_cut)\n f.close()\n self.runShortCuts()\n self.disabledShortcut = True\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=self.keysLive: self.deleteSc(x))\n icon2 = IconLeftWidget(icon=\"eye\")\n icon2.bind(on_press=lambda y, x=self.keysLive: self.View(x))\n items = OneLineIconListItem(text=\" \" + self.keysLive + \" shortcut for task\")\n items.add_widget(icon)\n items.add_widget(icon2)\n self.root.ids.container2.add_widget(items)\n except:\n pass\n\n def nameOfShortCuts(self, name):\n with open(rf\"{path}/{getEmail()}/names_of_short_cuts.txt\", \"a\") as f_name_tasks:\n f_name_tasks.write(name + '\\n')\n f_name_tasks.close()\n\n def CloseWindowHelper(self):\n show = CloseWindow()\n closeButton = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.2, \"center_y\": 0.23}, opacity=0))\n show.add_widget(closeButton)\n popupWindow3 = Popup(title=\"Close new window\", content=show, size_hint=(None, None), size=(520, 320),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n closeButton.bind(on_press=popupWindow3.dismiss)\n popupWindow3.open()\n\n def ScSelecter(self):\n self.disabledShortcut = False\n self.keysLive = \"\"\n show = ShortcutsSelecter()\n closeButton = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.35, \"center_y\": 0.15}, opacity=0))\n show.add_widget(closeButton)\n popupWindow4 = Popup(title=\"Create a short cuts for tasks\", content=show, size_hint=(None, None),\n background_color=Common.LightDark(),\n size=(500, 440),\n auto_dismiss=False)\n closeButton.bind(on_press=popupWindow4.dismiss)\n popupWindow4.open()\n\n def SaveShortcut(self):\n try:\n name_short_cut = new_work_name\n if self.keysLive != \" \" and '\"' not in self.keysLive and \"<\" not in self.keysLive and \">\" not in self.keysLive and \"/\" not in self.keysLive and r\"\\\"\" not in self.keysLive and \"?\" not in self.keysLive and \"*\" not in self.keysLive and \"|\" not in self.keysLive and \":\" not in self.keysLive:\n name_is_legit = True\n else:\n name_is_legit = False\n\n if name_is_legit:\n open(f\"{path}/{getEmail()}/file_saver/{name_short_cut}_mm.txt\")\n with open(f\"{path}/{getEmail()}/short_cuts/{self.keysLive}\", 'w') as f:\n f.write(name_short_cut)\n f.close()\n self.runShortCuts()\n self.disabledShortcut = True\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=self.keysLive: self.deleteSc(x))\n icon2 = IconLeftWidget(icon=\"eye\")\n icon2.bind(on_press=lambda y, x=self.keysLive: self.View(x))\n items = OneLineIconListItem(text=\" \" + self.keysLive + \" shortcut for task\")\n items.add_widget(icon)\n items.add_widget(icon2)\n self.root.ids.container2.add_widget(items)\n else:\n self.keysLive = 'the keys can not contain >*<\"?:\\/'\n\n\n except:\n self.keysLive = \"Name is invalid\"\n\n def SctWord(self):\n self.disabledShortcut = False\n self.keysLive = \"\"\n show = ShortcutWord()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.35, \"center_y\": 0.15}, opacity=0))\n show.add_widget(but)\n\n popupWindow = Popup(title=\"Create auto complete short cut\", content=show, size_hint=(None, None),\n background_color=Common.LightDark(),\n size=(520, 420),\n auto_dismiss=False)\n but.bind(on_press=popupWindow.dismiss)\n popupWindow.open()\n\n def runShortCutsWord(self):\n try:\n for file in listdir(f\"{path}/{getEmail()}/short_cuts_words\"):\n decfile(f\"{path}/{getEmail()}/short_cuts_words/{file}\", password_for_encrypt)\n\n with open(f\"{path}/{getEmail()}/short_cuts_words/{file}\", 'r') as f:\n x = f.read()\n keyboard.add_hotkey(file, lambda y=x: keyboard.write(y))\n encfile(f\"{path}/{getEmail()}/short_cuts_words/{file}\", password_for_encrypt)\n\n except:\n pass\n\n def word_short_cuts(self, word):\n try:\n with open(f\"{path}/{getEmail()}/short_cuts_words/{self.keysLive}\", 'w') as f:\n f.write(word)\n encfile(f\"{path}/{getEmail()}/short_cuts_words/{self.keysLive}\", password_for_encrypt)\n self.runShortCutsWord()\n self.disabledShortcut = True\n icon = IconLeftWidget(icon=\"delete\")\n icon.bind(on_press=lambda y, x=self.keysLive: self.deleteSc(x))\n icon2 = IconLeftWidget(icon=\"eye\")\n icon2.bind(on_press=lambda y, x=self.keysLive: self.View(x))\n items = OneLineIconListItem(text=\" \" + self.keysLive + \" shortcut for word\")\n items.add_widget(icon)\n items.add_widget(icon2)\n self.root.ids.container2.add_widget(items)\n\n except:\n pass\n\n def getDirection(self):\n with open(f\"{path}/{getEmail()}/Direction.txt\", 'r') as f:\n return f.read()\n\n def saveDirection(self, direction):\n left = ['l', 'left', 'no']\n right = ['r', 'right', 'yes']\n if direction.lower() in left:\n Common.Setlanguages(\"left\")\n if direction.lower() in right:\n Common.Setlanguages(\"right\")\n self.root.ids.Direction123.text = \"\"\n self.root.ids.Direction123.hint_text = self.getDirection()\n\n def getKey(self):\n f = open(rf\"{path}/key.txt\", 'r')\n MyKey = f.read()\n f.close()\n return MyKey\n\n def getOpenDesktop(self):\n if ReadOpenWindow() == \"True\":\n return \"Yes\"\n else:\n return \"No\"\n\n def getCloseDesktop(self):\n if ReadCloseWindow() == \"True\":\n return \"Yes\"\n else:\n return \"No\"\n\n def saveOpenDesktop(self, state):\n list = [\"true\", \"yes\", \"y\"]\n list2 = [\"false\", \"no\", \"n\"]\n if state.lower() in list:\n SetOpenWindow(\"True\")\n elif state.lower() in list2:\n SetOpenWindow(\"False\")\n self.root.ids.open.text = \"\"\n self.root.ids.open.hint_text = self.getOpenDesktop()\n\n def saveCloseDesktop(self, state):\n list = [\"true\", \"yes\", \"y\"]\n list2 = [\"false\", \"no\", \"n\"]\n if state.lower() in list:\n SetCloseWindow(\"True\")\n elif state.lower() in list2:\n SetCloseWindow(\"False\")\n self.root.ids.close.text = \"\"\n self.root.ids.close.hint_text = self.getCloseDesktop()\n\n def getName(self):\n with open(rf\"{path}/{getEmail()}/name.txt\", 'r') as f:\n return f.read()\n\n def show_time_picker(self):\n\n # Define default time\n now = datetime.datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n default_time = datetime.datetime.strptime(current_time, '%H:%M:%S').time()\n\n time_dialog = MDTimePicker()\n # Set default Time\n time_dialog.set_time(default_time)\n time_dialog.bind(on_save=self.on_save1, on_cancel=self.on_cancel)\n time_dialog.open()\n\n def on_save1(self, instance, value):\n self.UserTimeChoice = str(value)\n self.opacityDisable = \"1\"\n self.scheduleDisable = True\n self.ScheduledLabel = \"Your task scheduled successfully\"\n self.ScheduledTask()\n try:\n mkdir(rf\"{path}/{getEmail()}/schedule1\")\n except:\n pass\n\n with open(rf\"{path}/{getEmail()}/schedule1/{self.taskToScheduled[:-1]}.txt\", \"w\") as f:\n f.write(self.UserTimeChoice + \"&\" + self.UserDateChoice[:-1])\n\n def on_save(self, instance, value, date_range):\n self.UserDateChoice = str(value)\n self.show_time_picker()\n\n def show_date_picker(self, name):\n try:\n open(f\"{path}/{getEmail()}/file_saver/{name[:-1]}_mm.txt\", \"r\")\n self.taskToScheduled = name\n date_dialog = MDDatePicker(mode=\"picker\")\n date_dialog.bind(on_save=self.on_save, on_cancel=self.on_cancel)\n date_dialog.open()\n except:\n self.Delete()\n\n def show_date_picker_via_main_screen(self):\n try:\n self.taskToScheduled = new_work_name + \"a\"\n open(f\"{path}/{getEmail()}/file_saver/{self.taskToScheduled[:-1]}_kb.txt\").close()\n date_dialog = MDDatePicker(mode=\"picker\")\n date_dialog.bind(on_save=self.on_save, on_cancel=self.on_cancel)\n date_dialog.open()\n except:\n self.ScheduledLabel = \"Please choose a valid name\"\n\n def ScheduledTaskHelper_for_start(self):\n\n def job(name):\n global keep_maximized_run3\n\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n decfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", 'r') as f1:\n data_kb = f1.read().replace('\\n', '')\n kb = data_kb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", 'r') as f2:\n data_mb = f2.read().replace('\\n', '')\n mb = data_mb\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", 'r') as f3:\n data_mw = f3.read().replace('\\n', '')\n mw = data_mw\n\n with open(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", 'r') as f4:\n data_mm = f4.read().replace('\\n', '')\n mm = data_mm\n\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_kb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mb.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mw.txt\", password_for_encrypt)\n encfile(rf\"{path}/{getEmail()}/file_saver/{name}_mm.txt\", password_for_encrypt)\n\n all_events = back_to_events(kb.split('#'), mb.split('#'), mm.split('#'), mw.split('#'))\n kb_events, mb_events, mm_events, mw_events = all_events\n\n all_mouse_events = mb_events + mm_events + mw_events\n all_mouse_events = sorted(all_mouse_events, key=lambda event: event.time)\n\n keyboard.start_recording()\n keyboard.stop_recording()\n\n with open(f\"{path}/{getEmail()}/file_saver/{name}_max_choice.txt\", \"r\") as f:\n max_choice = f.read()\n if max_choice == \"Yes\":\n keep_maximized_run3 = True\n self.maximize_window_run3()\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n ctrlWinArrow()\n keep_maximized_run3 = False\n else:\n ctrlWinD()\n runMouseMouseKeyboardEvents(all_mouse_events, kb_events, 1, 1)\n ctrlWinArrow()\n\n while True:\n\n list_year_schedule = []\n list_month_schedule = []\n list_day_schedule = []\n list_hour_schedule = []\n list_min_schedule = []\n list_name_schedule = []\n\n for filename in listdir(f\"{path}/{getEmail()}/schedule\"):\n list_schedule_name = filename.split(\"'\")\n\n name_schedule_task = list_schedule_name[12][2:]\n\n day_to_run = int(list_schedule_name[5])\n year_to_run = int(list_schedule_name[1])\n month_to_run = int(list_schedule_name[3])\n hour_to_run = int(list_schedule_name[7])\n min_to_run = int(list_schedule_name[9])\n\n list_year_schedule.append(year_to_run)\n list_month_schedule.append(month_to_run)\n list_day_schedule.append(day_to_run)\n list_hour_schedule.append(hour_to_run)\n list_min_schedule.append(min_to_run)\n list_name_schedule.append(name_schedule_task)\n\n date = datetime.datetime.now()\n\n for x in range(len(list_year_schedule)):\n if date.day == list_day_schedule[x] and date.month == list_month_schedule[x] and date.minute == \\\n list_min_schedule[x] and date.hour == list_hour_schedule[x] and date.year == list_year_schedule[\n x]:\n name_final = list_name_schedule[x]\n job(name_final)\n\n if int(list_hour_schedule[x]) < 10:\n hour_to_remove = \"0\" + str(list_hour_schedule[x])\n else:\n hour_to_remove = list_hour_schedule[x]\n\n if int(list_min_schedule[x]) < 10:\n min_to_remove = \"0\" + str(list_min_schedule[x])\n else:\n min_to_remove = list_min_schedule[x]\n try:\n remove(\n f\"{path}/{getEmail()}/schedule/['{list_year_schedule[x]}', '{list_month_schedule[x]}', '{list_day_schedule[x]}']&['{hour_to_remove}', '{min_to_remove}', '00']&{name_final}\")\n\n # del list_day_schedule[x]\n # del list_month_schedule[x]\n # del list_hour_schedule[x]\n # del list_min_schedule[x]\n # del list_year_schedule[x]\n # del list_name_schedule[x]\n sleep(10)\n except:\n sleep(65)\n\n # this breaks out of the while loop if it's the right day.\n # break\n # else:\n sleep(1) # wait 1 second\n\n def ScheduledTaskHelper(self):\n user_time_list = self.UserTimeChoice.split(\":\")\n UserDateList = self.UserDateChoice.split('-')\n if UserDateList[1][0] == '0':\n UserDateList[1] = UserDateList[1][1]\n if UserDateList[2][0] == '0':\n UserDateList[2] = UserDateList[2][1]\n\n with open(f'{path}/{getEmail()}/schedule/{UserDateList}&{user_time_list}&{self.taskToScheduled[:-1]}',\n 'w') as f:\n f.write(self.taskToScheduled[:-1])\n\n self.scheduleDisable = True\n\n icon = IconLeftWidget(icon=\"delete\")\n task_to_delete = str(UserDateList) + \"&\" + str(user_time_list) + \"&\" + self.taskToScheduled[:-1]\n icon.bind(on_press=lambda y, x=task_to_delete: self.delete_schedule(x))\n items1 = OneLineIconListItem(\n text=\" \" + UserDateList[2] + \"/\" + UserDateList[1] + \"/\" + UserDateList[0] + \" | \" + user_time_list[\n 0] + \":\" + user_time_list[1] + \" \" + self.taskToScheduled[:-1] + \" schedule task\")\n items1.add_widget(icon)\n self.root.ids.container3.add_widget(items1)\n\n def ScheduledTask(self):\n schedule_thread = Thread(target=lambda: self.ScheduledTaskHelper(), daemon=True)\n schedule_thread.start()\n\n def Schedule(self):\n self.scheduleDisable = False\n show = ScheduleTasks()\n but = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.3, \"center_y\": 0.15}, opacity=0))\n show.add_widget(but)\n popupWindow = Popup(title=\"Create a Schedule Task\", content=show, size_hint=(None, None), size=(540, 420),\n background_color=Common.LightDark(),\n auto_dismiss=False)\n but.bind(on_press=popupWindow.dismiss)\n popupWindow.open()\n\n def get_Maximized_window_choice_state(self):\n try:\n with open(f\"{path}/{getEmail()}/maximized_choice.txt\", \"r\") as f:\n data = f.read()\n return data\n except:\n with open(f\"{path}/{getEmail()}/maximized_choice.txt\", \"w\") as f:\n f.write(\"No\")\n return \"No\"\n\n def save_Maximized_window_choice_state(self, state):\n list = [\"true\", \"yes\", \"y\"]\n list2 = [\"false\", \"no\", \"n\"]\n if state.lower() in list:\n with open(f\"{path}/{getEmail()}/maximized_choice.txt\", \"w\") as f:\n f.write(\"Yes\")\n f.close()\n elif state.lower() in list2:\n with open(f\"{path}/{getEmail()}/maximized_choice.txt\", \"w\") as f:\n f.write(\"No\")\n f.close()\n\n self.root.ids.close1.text = \"\"\n self.root.ids.close1.hint_text = self.get_Maximized_window_choice_state()\n\n def check_if_user_have_microphone(self):\n winmm = windll.winmm\n if winmm.waveInGetNumDevs() == 0:\n self.micLabel = \"We cannot detect\\nan input device\"\n return True\n else:\n return False\n\n def watch(self):\n show = Tutorial()\n closeButton = (Button(text=\"close\", size_hint=(None, None),\n width=100, height=50, pos_hint={\"center_x\": 0.35, \"center_y\": 0.2}, opacity=0))\n show.add_widget(closeButton)\n\n popupWindow0 = Popup(title=\"Welcome to Auto Tasks!\", content=show, size_hint=(None, None),\n background_color=Common.LightDark(),\n size=(600, 350),\n auto_dismiss=False)\n closeButton.bind(on_press=popupWindow0.dismiss)\n popupWindow0.open()\n\n def open_tutorial(self):\n openweb('https://www.youtube.com/watch?v=WTYO7_-7LJ4&ab_channel=AutoTasksOfficial')\n self.WatchText = (\"Close\")\n\n","repo_name":"EshqolDevelopment/Auto_Task_Premium","sub_path":"MainScreen.py","file_name":"MainScreen.py","file_ext":"py","file_size_in_byte":103143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70963903736","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Manuel Guenther \nfrom __future__ import print_function\n\nimport sys, os\nimport argparse\n\nfrom . import ToolChainExecutor\nfrom .. import toolchain\nfrom .. import utils\n\nclass ToolChainExecutorGBU (ToolChainExecutor.ToolChainExecutor):\n\n def __init__(self, args, protocol, perform_training):\n # call base class constructor\n ToolChainExecutor.ToolChainExecutor.__init__(self, args)\n\n # select the protocol\n self.m_database.protocol = protocol\n self.m_perform_training = perform_training\n\n if args.training_set:\n self.m_database.all_files_options.update({'subworld' : args.training_set})\n self.m_database.extractor_training_options.update({'subworld' : args.training_set})\n self.m_database.projector_training_options.update({'subworld' : args.training_set})\n self.m_database.enroller_training_options.update({'subworld' : args.training_set})\n\n\n # add specific configuration for ZT-normalization\n self.m_configuration.models_directory = os.path.join(self.m_configuration.temp_directory, self.m_args.models_directory, self.m_database.protocol)\n\n self.m_configuration.scores_directory = os.path.join(self.m_configuration.user_directory, self.m_args.score_sub_directory, self.m_database.protocol, args.score_directory)\n\n # specify the file selector to be used\n self.m_file_selector = toolchain.FileSelector(\n self.m_database,\n preprocessed_directory = self.m_configuration.preprocessed_directory,\n extractor_file = self.m_configuration.extractor_file,\n features_directory = self.m_configuration.features_directory,\n projector_file = self.m_configuration.projector_file,\n projected_directory = self.m_configuration.projected_directory,\n enroller_file = self.m_configuration.enroller_file,\n model_directories = (self.m_configuration.models_directory,),\n score_directories = (self.m_configuration.scores_directory,)\n )\n\n # specify the file selector and tool chain objects to be used by this class (and its base class)\n self.m_tool_chain = toolchain.ToolChain(self.m_file_selector)\n\n\n def execute_tool_chain(self):\n \"\"\"Executes the desired tool chain on the local machine\"\"\"\n utils.info(\"Executing face recognition algorithm on protocol '%s'\" % self.m_database.protocol)\n # preprocessing\n if not self.m_args.skip_preprocessing:\n if self.m_args.dry_run:\n print (\"Would have preprocessed data for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.preprocess_data(\n self.m_preprocessor,\n groups = self.groups(),\n force = self.m_args.force)\n\n # feature extraction\n if self.m_perform_training and not self.m_args.skip_extractor_training and self.m_extractor.requires_training:\n if self.m_args.dry_run:\n print (\"Would have trained the extractor ...\")\n else:\n self.m_tool_chain.train_extractor(\n self.m_extractor,\n self.m_preprocessor,\n force = self.m_args.force)\n\n if not self.m_args.skip_extraction:\n if self.m_args.dry_run:\n print (\"Would have extracted the features for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.extract_features(\n self.m_extractor,\n self.m_preprocessor,\n groups = self.groups(),\n force = self.m_args.force)\n\n # feature projection\n if self.m_perform_training and not self.m_args.skip_projector_training and self.m_tool.requires_projector_training:\n if self.m_args.dry_run:\n print (\"Would have trained the projector ...\")\n else:\n self.m_tool_chain.train_projector(\n self.m_tool,\n self.m_extractor,\n force = self.m_args.force)\n\n if not self.m_args.skip_projection and self.m_tool.performs_projection:\n if self.m_args.dry_run:\n print (\"Would have projected the features for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.project_features(\n self.m_tool,\n self.m_extractor,\n groups = self.groups(),\n force = self.m_args.force)\n\n # model enrollment\n if self.m_perform_training and not self.m_args.skip_enroller_training and self.m_tool.requires_enroller_training:\n if self.m_args.dry_run:\n print (\"Would have trained the enroller ...\")\n else:\n self.m_tool_chain.train_enroller(\n self.m_tool,\n self.m_extractor,\n force = self.m_args.force)\n\n if not self.m_args.skip_enrollment:\n if self.m_args.dry_run:\n print (\"Would have enrolled the models for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.enroll_models(\n self.m_tool,\n self.m_extractor,\n compute_zt_norm = False,\n groups = ['dev'], # only dev group\n force = self.m_args.force)\n\n # score computation\n if not self.m_args.skip_score_computation:\n if self.m_args.dry_run:\n print (\"Would have computed the scores for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.compute_scores(\n self.m_tool,\n compute_zt_norm = False,\n groups = ['dev'], # only dev group\n preload_probes = self.m_args.preload_probes,\n force = self.m_args.force)\n\n # concatenation of scores\n if not self.m_args.skip_concatenation:\n if self.m_args.dry_run:\n print (\"Would have concatenated the scores for protocol '%s' ...\" % self.m_database.protocol)\n else:\n self.m_tool_chain.concatenate(\n compute_zt_norm = False,\n groups = ['dev']) # only dev group\n\n\n def add_jobs_to_grid(self, external_dependencies, external_job_ids):\n # collect job ids\n job_ids = {}\n job_ids.update(external_job_ids)\n\n # if there are any external dependencies, we need to respect them\n deps = external_dependencies[:]\n training_deps = external_dependencies[:]\n\n default_opt = ' --protocol %s'%self.m_database.protocol\n if self.m_perform_training:\n default_opt += ' --perform-training'\n # preprocessing; never has any dependencies.\n if not self.m_args.skip_preprocessing:\n # preprocessing must be done one after each other\n # since training files are identical for all protocols\n preprocessing_deps = deps[:]\n if 'preprocessing' in job_ids:\n preprocessing_deps.append(job_ids['preprocessing'])\n job_ids['preprocessing'] = self.submit_grid_job(\n 'preprocess' + default_opt,\n name = 'pre-%s' % self.m_database.protocol,\n number_of_parallel_jobs = self.m_grid.number_of_preprocessing_jobs,\n dependencies = preprocessing_deps,\n **self.m_grid.preprocessing_queue)\n deps.append(job_ids['preprocessing'])\n if self.m_perform_training:\n training_deps.append(job_ids['preprocessing'])\n\n # feature extraction training\n if self.m_perform_training and not self.m_args.skip_extractor_training and self.m_extractor.requires_training:\n job_ids['extraction_training'] = self.submit_grid_job(\n 'train-extractor' + default_opt,\n name = 'f-train',\n dependencies = training_deps,\n **self.m_grid.training_queue)\n if 'extraction_training' in job_ids:\n deps.append(job_ids['extraction_training'])\n\n if not self.m_args.skip_extraction:\n job_ids['feature_extraction'] = self.submit_grid_job(\n 'extract' + default_opt,\n name = 'extr-%s' % self.m_database.protocol,\n number_of_parallel_jobs = self.m_grid.number_of_extraction_jobs,\n dependencies = deps,\n **self.m_grid.extraction_queue)\n deps.append(job_ids['feature_extraction'])\n if self.m_perform_training:\n training_deps.append(job_ids['feature_extraction'])\n\n # feature projection training\n if self.m_perform_training and not self.m_args.skip_projector_training and self.m_tool.requires_projector_training:\n job_ids['projector_training'] = self.submit_grid_job(\n 'train-projector' + default_opt,\n name = \"p-train\",\n dependencies = training_deps,\n **self.m_grid.training_queue)\n if 'projector_training' in job_ids:\n deps.append(job_ids['projector_training'])\n\n if not self.m_args.skip_projection and self.m_tool.performs_projection:\n job_ids['feature_projection'] = self.submit_grid_job(\n 'project' + default_opt,\n name=\"pro-%s\" % self.m_database.protocol,\n number_of_parallel_jobs = self.m_grid.number_of_projection_jobs,\n dependencies = deps,\n **self.m_grid.projection_queue)\n deps.append(job_ids['feature_projection'])\n if self.m_perform_training:\n training_deps.append(job_ids['feature_projection'])\n\n # model enrollment training\n if self.m_perform_training and not self.m_args.skip_enroller_training and self.m_tool.requires_enroller_training:\n job_ids['enrollment_training'] = self.submit_grid_job(\n 'train-enroller' + default_opt,\n name=\"e-train\",\n dependencies = training_deps,\n **self.m_grid.training_queue)\n if 'enrollment_training' in job_ids:\n deps.append(job_ids['enrollment_training'])\n\n # enroll models\n if not self.m_args.skip_enrollment:\n job_ids['enroll'] = self.submit_grid_job(\n 'enroll' + default_opt,\n name = \"enr-%s\" % self.m_database.protocol,\n number_of_parallel_jobs = self.m_grid.number_of_enrollment_jobs,\n dependencies = deps,\n **self.m_grid.enrollment_queue)\n deps.append(job_ids['enroll'])\n\n # compute scores\n if not self.m_args.skip_score_computation:\n job_ids['score'] = self.submit_grid_job(\n 'compute-scores' + default_opt,\n name = \"score-%s\" % self.m_database.protocol,\n number_of_parallel_jobs = self.m_grid.number_of_scoring_jobs,\n dependencies = deps,\n **self.m_grid.scoring_queue)\n deps.append(job_ids['score'])\n\n # concatenate results\n if not self.m_args.skip_concatenation:\n job_ids['concatenate'] = self.submit_grid_job(\n 'concatenate' + default_opt,\n dependencies = deps,\n name = \"concat-%s\" % self.m_database.protocol)\n\n # return the job ids, in case anyone wants to know them\n return job_ids\n\n\n def execute_grid_job(self):\n \"\"\"This function executes the grid job that is specified on the command line.\"\"\"\n # preprocess the data\n if self.m_args.sub_task == 'preprocess':\n self.m_tool_chain.preprocess_data(\n self.m_preprocessor,\n groups = self.groups(),\n indices = self.indices(self.m_file_selector.original_data_list(groups=self.groups()), self.m_grid.number_of_preprocessing_jobs),\n force = self.m_args.force)\n\n # train the feature extractor\n elif self.m_args.sub_task == 'train-extractor':\n self.m_tool_chain.train_extractor(\n self.m_extractor,\n self.m_preprocessor,\n force = self.m_args.force)\n\n # extract the features\n elif self.m_args.sub_task == 'extract':\n self.m_tool_chain.extract_features(\n self.m_extractor,\n self.m_preprocessor,\n groups = self.groups(),\n indices = self.indices(self.m_file_selector.preprocessed_data_list(groups=self.groups()), self.m_grid.number_of_extraction_jobs),\n force = self.m_args.force)\n\n # train the feature projector\n elif self.m_args.sub_task == 'train-projector':\n self.m_tool_chain.train_projector(\n self.m_tool,\n self.m_extractor,\n force = self.m_args.force)\n\n # project the features\n elif self.m_args.sub_task == 'project':\n self.m_tool_chain.project_features(\n self.m_tool,\n self.m_extractor,\n groups = self.groups(),\n indices = self.indices(self.m_file_selector.preprocessed_data_list(groups=self.groups()), self.m_grid.number_of_projection_jobs),\n force = self.m_args.force)\n\n # train the model enroller\n elif self.m_args.sub_task == 'train-enroller':\n self.m_tool_chain.train_enroller(\n self.m_tool,\n self.m_extractor,\n force = self.m_args.force)\n\n # enroll the models\n elif self.m_args.sub_task == 'enroll':\n self.m_tool_chain.enroll_models(\n self.m_tool,\n self.m_extractor,\n indices = self.indices(self.m_file_selector.model_ids('dev'), self.m_grid.number_of_enrollment_jobs),\n compute_zt_norm = False,\n groups = ['dev'],\n force = self.m_args.force)\n\n # compute scores\n elif self.m_args.sub_task == 'compute-scores':\n self.m_tool_chain.compute_scores(\n self.m_tool,\n indices = self.indices(self.m_file_selector.model_ids('dev'), self.m_grid.number_of_scoring_jobs),\n compute_zt_norm = False,\n groups = ['dev'],\n preload_probes = self.m_args.preload_probes,\n force = self.m_args.force)\n\n # concatenate\n elif self.m_args.sub_task == 'concatenate':\n self.m_tool_chain.concatenate(\n compute_zt_norm = False,\n groups = ['dev'])\n\n # Test if the keyword was processed\n else:\n raise ValueError(\"The given subtask '%s' could not be processed. THIS IS A BUG. Please report this to the authors.\" % self.m_args.sub_task)\n\n\ndef parse_args(command_line_parameters):\n \"\"\"This function parses the given options (which by default are the command line options)\"\"\"\n\n # set up command line parser\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n\n # add the arguments required for all tool chains\n config_group, dir_group, file_group, sub_dir_group, other_group, skip_group = ToolChainExecutorGBU.required_command_line_options(parser)\n\n # overwrite default database entry\n config_group.add_argument('-d', '--database', default = ['gbu'], nargs = '+',\n help = 'The database interface to be used. The default should work fine for the common cases.')\n\n sub_dir_group.add_argument('--models-directory', type = str, metavar = 'DIR', default = 'models',\n help = 'Subdirectories (of the --temp-directory) where the models should be stored')\n\n sub_dir_group.add_argument('--score-directory', metavar = 'DIR', default = 'nonorm',\n help = 'Sub-directory (of --user-directory) where to write the results to (used mainly to create directory structures consistent with the faceverify.py script)')\n\n #######################################################################################\n ############################ other options ############################################\n other_group.add_argument('-F', '--force', action='store_true',\n help = 'Force to erase former data if already exist')\n other_group.add_argument('-w', '--preload-probes', action='store_true', dest='preload_probes',\n help = 'Preload probe files during score computation (needs more memory, but is faster and requires fewer file accesses). WARNING! Use this flag with care!')\n other_group.add_argument('--protocols', type=str, nargs = '+', choices = ['Good', 'Bad', 'Ugly'], default = ['Good', 'Bad', 'Ugly'],\n help = 'The protocols to use, by default all three (Good, Bad, and Ugly) are executed.')\n other_group.add_argument('-x', '--training-set', choices=['x1', 'x2', 'x4', 'x8'],\n help = 'Select the training set to be used. Please do not use this option in a series of calls since this might influence other calls.')\n\n #######################################################################################\n #################### sub-tasks being executed by this script ##########################\n parser.add_argument('--sub-task',\n choices = ('preprocess', 'train-extractor', 'extract', 'train-projector', 'project', 'train-enroller', 'enroll', 'compute-scores', 'concatenate'),\n help = argparse.SUPPRESS) #'Executes a subtask (FOR INTERNAL USE ONLY!!!)'\n parser.add_argument('--protocol', type=str, choices=['Good','Bad','Ugly'],\n help = argparse.SUPPRESS) #'The protocol which should be used in this sub-task'\n parser.add_argument('--perform-training', action='store_true',\n help = argparse.SUPPRESS) #'Is this the first job that needs to perform the training?'\n\n #######################################################################################\n ####### shortcuts for the --skip-... commands #########################################\n skip_choices = ('preprocessing', 'extractor-training', 'extraction', 'projector-training', 'projection', 'enroller-training', 'enrollment', 'score-computation', 'concatenation')\n skip_group.add_argument('--execute-only', nargs = '+', choices = skip_choices,\n help = 'Executes only the given parts of the tool chain.')\n\n args = parser.parse_args(command_line_parameters)\n\n # set groups to be 'dev' only\n args.groups = ['dev',]\n\n if args.execute_only is not None:\n for skip in skip_choices:\n if skip not in args.execute_only:\n exec(\"args.skip_%s = True\" % (skip.replace(\"-\", \"_\")))\n return args\n\n\ndef face_verify(args, command_line_parameters, external_dependencies = [], external_fake_job_id = 0):\n \"\"\"This is the main entry point for computing face verification experiments.\n You just have to specify configuration scripts for any of the steps of the tool chain, which are:\n -- the database\n -- the preprocessing\n -- feature extraction\n -- the recognition tool\n -- and the grid configuration (in case, the function should be executed in the grid).\n Additionally, you can skip parts of the tool chain by selecting proper --skip-... parameters.\n If your probe files are not too big, you can also specify the --preload-probes switch to speed up the score computation.\n If files should be re-generated, please specify the --force option (might be combined with the --skip-... options)\"\"\"\n\n if args.sub_task:\n # execute the desired sub-task\n executor = ToolChainExecutorGBU(args, args.protocol, args.perform_training)\n executor.execute_grid_job()\n return {}\n\n elif args.grid:\n\n # get the name of this file\n this_file = __file__\n if this_file[-1] == 'c':\n this_file = this_file[0:-1]\n\n # for the first protocol, we do not have any own dependencies\n dependencies = external_dependencies\n job_ids = {}\n resulting_dependencies = {}\n perform_training = True\n dry_run_init = external_fake_job_id\n for protocol in args.protocols:\n # create an executor object\n executor = ToolChainExecutorGBU(args, protocol, perform_training)\n # write the info file, but only for the first protocol\n if protocol == args.protocols[0]:\n executor.write_info(command_line_parameters)\n executor.set_common_parameters(calling_file = this_file, parameters = command_line_parameters, fake_job_id = dry_run_init)\n\n # add the jobs\n new_job_ids = executor.add_jobs_to_grid(dependencies, job_ids)\n job_ids.update(new_job_ids)\n\n # perform training only in the first round since the training set is identical for all algorithms\n perform_training = False\n\n dry_run_init += 30\n\n if executor.m_grid.is_local() and args.run_local_scheduler:\n if args.dry_run:\n print (\"Would have started the local scheduler to finally run the experiments with parallel jobs\")\n else:\n # start the jman local deamon\n executor.execute_local_deamon()\n return {}\n\n # at the end of all protocols, return the list of dependencies\n return job_ids\n else:\n perform_training = True\n # not in a grid, use default tool chain sequentially\n for protocol in args.protocols:\n # generate executor for the current protocol\n executor = ToolChainExecutorGBU(args, protocol, perform_training)\n executor.write_info(command_line_parameters)\n # execute the tool chain locally\n executor.execute_tool_chain()\n perform_training = False\n\n # no dependencies since we executed the jobs locally\n return {}\n\n\ndef main(command_line_parameters = sys.argv):\n \"\"\"Executes the main function\"\"\"\n try:\n # do the command line parsing\n args = parse_args(command_line_parameters[1:])\n # perform face verification test\n face_verify(args, command_line_parameters)\n except Exception as e:\n # track any exceptions as error logs (i.e., to get a time stamp)\n utils.error(\"During the execution, an exception was raised: %s\" % e)\n raise\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ceesietopc/negev_hackathon","sub_path":"turtle_sine/src/facerec/script/faceverify_gbu.py","file_name":"faceverify_gbu.py","file_ext":"py","file_size_in_byte":20931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72260128057","text":"# -*- coding: utf-8 -*-\r\nimport logging\r\nimport logging.config\r\nimport random\r\nimport sqlite3\r\nfrom enum import Enum\r\n\r\nimport phrases\r\n\r\nlogging.config.fileConfig(\"logging.ini\")\r\nlogger = logging.getLogger(\"database\")\r\n\r\n## === Classes === ##\r\nclass Category(Enum):\r\n \"\"\"Categories in the database\"\"\"\r\n GREET = \"3\"\r\n LEFT_SERVER = \"5\"\r\n MENTION = \"6,7\"\r\n ONLINE = \"8\"\r\n SHUTDOWN = \"9\"\r\n\r\n\r\nclass Database(object):\r\n \"\"\" For reading and parsing lines in a SQLite database.\r\n\r\n Args:\r\n dbFile(unicode): The filepath of the database.\r\n \"\"\"\r\n \r\n def __init__(self, db_file):\r\n self.db = db_file\r\n\r\n def get_column(self, header, table, maximum=None):\r\n \"\"\" Gets fields under a column header.\r\n\r\n Args:\r\n header(unicode): Name of column's header.\r\n table(unicode): Name of table.\r\n maximum(int, optional): Maximum amount of fields to fetch.\r\n\r\n Returns:\r\n fields(list): List of fields under header.\r\n \"\"\"\r\n fields = []\r\n table = phrases.clean(table)\r\n connection = sqlite3.connect(self.db)\r\n connection.row_factory = lambda cursor, row: row[0]\r\n c = connection.cursor()\r\n if maximum:\r\n c.execute(f\"SELECT {header} FROM {table} LIMIT ?\", [maximum])\r\n else:\r\n c.execute(f\"SELECT {header} FROM {table}\")\r\n fields = c.fetchall()\r\n c.close()\r\n \r\n return fields\r\n\r\n def get_field(self, field_id, header, table):\r\n \"\"\" Gets the field under the specified header by its primary key value.\r\n\r\n Args:\r\n field_id(int, str): Unique ID of line the field is in.\r\n header(unicode): Header of the field to fetch.\r\n table(unicode): Name of table to look into.\r\n\r\n Returns:\r\n The desired field, or None if the lookup failed.\r\n\r\n Raises:\r\n TypeError: If field_id doesn't exist in the table.\r\n \r\n Examples:\r\n >>> get_field(123, \"firstname\", \"kings\")\r\n Adgar\r\n \"\"\"\r\n header = phrases.clean(header)\r\n table = phrases.clean(table)\r\n field = None\r\n \r\n connection = sqlite3.connect(self.db)\r\n c = connection.cursor()\r\n\r\n statement = f\"SELECT {header} FROM {table} WHERE id=?\"\r\n logger.debug(statement)\r\n c.execute(statement, [field_id])\r\n\r\n try:\r\n field = c.fetchone()[0]\r\n except TypeError:\r\n logger.exception(f\"ID '{field_id}' was not in table '{table}'\")\r\n \r\n c.close()\r\n \r\n return field\r\n\r\n def get_ids(self, table, conditions=None, splitter=\",\"):\r\n \"\"\" Gets the IDs that fit within the specified conditions.\r\n\r\n Gets all IDs if conditions is None.\r\n\r\n Args:\r\n table(unicode): Name of table to look into.\r\n conditions(list, optional): Categories you want to filter the line by:\r\n {\"header of categories 1\": \"category1,category2\",\r\n \"header of category 2\": \"category3\"}\r\n Multiple categories under a single header are separated with a comma.\r\n\r\n Returns:\r\n ids(list): List of IDs that match the categories.\r\n\r\n Raises:\r\n OperationalError: If table or header doesn't exist.\r\n TypeError: If category is neither None nor a dictionary.\r\n\r\n Examples:\r\n >>> get_ids({\"type\": \"greeting\"})\r\n [1, 2, 3, 5, 9, 15] # Any row that has the type \"greeting\".\r\n\r\n >>> get_ids({\"type\": \"nickname,quip\", \"by\": \"Varric\"})\r\n # Any row by \"Varric\" that has the type \"nickname\" or \"quip\".\r\n [23, 24, 25, 34, 37, 41, 42, 43]\r\n \"\"\"\r\n ids = []\r\n table = phrases.clean(table)\r\n clause = \"\"\r\n \r\n connection = sqlite3.connect(self.db)\r\n connection.row_factory = lambda cursor, row: row[0] # Gets first element for fetchall()\r\n\r\n c = connection.cursor()\r\n\r\n if conditions:\r\n clause = \"WHERE (\"\r\n clause_list = [clause,]\r\n substitutes = []\r\n cat_count = 1\r\n header_count = 1\r\n\r\n ## TODO: Add ability to specify comparison operator (e.g. =, <, LIKE, etc.)\r\n for con in conditions:\r\n if 1 < header_count:\r\n clause_list.append(\" AND (\")\r\n\r\n sub_count = 1\r\n subconditions = conditions[con].split(splitter)\r\n for sub in subconditions:\r\n if 1 < sub_count:\r\n clause_list.append(\" OR \")\r\n \r\n clause_list.append(f\"{phrases.clean(con)}=?\")\r\n substitutes.append(sub)\r\n sub_count += 2\r\n \r\n clause_list.append(\")\")\r\n header_count += 2\r\n cat_count = 1\r\n\r\n clause = \"\".join(clause_list)\r\n\r\n statement = f\"SELECT id FROM {table} {clause}\"\r\n logger.debug(f\"(get_ids) Substitutes: {substitutes}\")\r\n logger.debug(f\"(get_ids) SQLite statement: {statement}\")\r\n\r\n c.execute(statement, substitutes)\r\n else:\r\n c.execute(f\"SELECT id FROM {table}\")\r\n\r\n ids = c.fetchall()\r\n\r\n return ids\r\n\r\n def random_line(self, header, table, conditions=None, splitter=\",\"):\r\n \"\"\" Chooses a random line from the table under the header.\r\n\r\n Args:\r\n header(unicode): The header of the random line's column.\r\n table(unicode): Name of the table to look into.\r\n conditions(dict, optional): Categories to filter the line by:\r\n {\"header of categories 1\": \"category1,category2\",\r\n \"header of category 2\": \"category3\"}\r\n Multiple categories under a single header are separated with a comma.\r\n splitter(unicode, optional): What separates multiple categories\r\n (default is a comma).\r\n\r\n Returns:\r\n line(unicode): A random line from the database.\r\n\r\n Raises:\r\n OperationalError: If header or table doesn't exist.\r\n TypeError: If category is neither None nor a dictionary.\r\n\r\n Examples:\r\n >>> random_line(\"line\", {\"type\": \"greeting\"})\r\n Hello.\r\n \"\"\"\r\n header = phrases.clean(header)\r\n table = phrases.clean(table)\r\n line = \"\"\r\n \r\n connection = sqlite3.connect(self.db)\r\n c = connection.cursor()\r\n\r\n if conditions:\r\n ids = self.get_ids(table, conditions, splitter)\r\n if ids:\r\n line = random.choice(ids)\r\n line = self.get_field(line, header, table)\r\n else:\r\n c.execute(f\"SELECT {header} FROM {table} ORDER BY Random() LIMIT 1\") # TODO: Take categories into account.\r\n line = c.fetchone()[0]\r\n\r\n return line\r\n\r\n\r\nclass DiscordDatabase(Database):\r\n \"\"\" An extension of Database for Discord. \"\"\"\r\n\r\n def add_server(self, server):\r\n \"\"\" Adds a server record to the database.\r\n\r\n Args:\r\n server(discord.Server): Server to add.\r\n\r\n \"\"\"\r\n pass\r\n\r\n def remove_server(self, server):\r\n \"\"\" Removes a server from the database.\r\n\r\n Args:\r\n server(discord.Server): Server to remove.\r\n\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass BotDatabase(DiscordDatabase):\r\n \"\"\" An extension of DiscordDatabase for functions specific to the bot. \"\"\"\r\n\r\n def add_song(self, url):\r\n \"\"\" Adds a song to the database.\r\n\r\n Args:\r\n url(str): URL of the song.\r\n \"\"\"\r\n pass\r\n\r\n def add_playlist(self, name, user):\r\n \"\"\" Adds a playlist to the database.\r\n\r\n Playlists are bound to one user across all servers.\r\n\r\n Args:\r\n name(str): Name of the playlist.\r\n user(discord.Member/User): User who made the playlist.\r\n\r\n \"\"\"\r\n pass\r\n\r\n def add_playlist_song(self, song, playlist):\r\n \"\"\" Adds a song to a playlist.\r\n\r\n Args:\r\n song(): Song to add.\r\n playlist(): The target playlist.\r\n\r\n \"\"\"\r\n pass\r\n \r\n","repo_name":"LeicaSimile/MeatBot-Discord","sub_path":"bot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43372132011","text":"def decrypt_text_1798(text):\n decrypted_text = \"\"\n decrypt_dict = {'b':'a', 'r':'e', 'o':'i', 'w':'o', 'n':'u', 'e':'y'}\n for letter in text:\n if letter.lower() in decrypt_dict:\n decrypted_text += decrypt_dict[letter.lower()]\n else:\n decrypted_text += letter\n return decrypted_text\n\ndef encrypt_text_1798(text):\n encrypted_text = \"\"\n encrypt_dict = {'a':'b', 'e':'r', 'i':'o', 'o':'w', 'u':'n', 'y':'e'}\n for letter in text:\n if letter.lower() in encrypt_dict:\n encrypted_text += encrypt_dict[letter.lower()]\n else:\n encrypted_text += letter\n return encrypted_text\n\ndef decrypt_text_1802(text):\n decrypted_text = \"\"\n decrypt_dict = {'k':'a', 'c':'e', 'o':'i', 'l':'o', 'n':'u', 'u':'y'}\n for letter in text:\n if letter.lower() in decrypt_dict:\n decrypted_text += decrypt_dict[letter.lower()]\n else:\n decrypted_text += letter\n return decrypted_text\n\ndef encrypt_text_1802(text):\n encrypted_text = \"\"\n encrypt_dict = {'a':'k', 'e':'c', 'i':'o', 'o':'l', 'u':'n', 'y':'u'}\n for letter in text:\n if letter.lower() in encrypt_dict:\n encrypted_text += encrypt_dict[letter.lower()]\n else:\n encrypted_text += letter\n return encrypted_text\nascii_art = \"\\033[91;1m\" + \"\"\"\n \n ██████╗ ██████╗ ███╗ ██╗███████╗ ██████╗ ██╗ ██████╗ \n██╔════╝██╔═████╗████╗ ██║██╔════╝██╔═████╗██║ ╚════██╗\n██║ ██║██╔██║██╔██╗ ██║███████╗██║██╔██║██║ █████╔╝\n██║ ████╔╝██║██║╚██╗██║╚════██║████╔╝██║██║ ╚═══██╗\n╚██████╗╚██████╔╝██║ ╚████║███████║╚██████╔╝███████╗██████╔╝\n ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚══════╝╚═════╝ \n \n ___ ___ ____ _ ___ _______ \n / _ )/ _ \\/ __ \\ | /| / / |/ / __( )___ \n / _ / , _/ /_/ / |/ |/ / / _/ |/(_-< \n/____/_/|_|\\____/|__/|__/_/|_/___/ /___/ \n / ___/ _/ _ \\/ // / __/ _ \\ \n/ /___/ // ___/ _ / _// , _/ \n\\___/___/_/__/_//_/___/_/|_|\n\"\"\" + \"\\033[0m\"\n\nprint(ascii_art)\n\n# rest of the script\n\ncipher = input(\"Please enter the type of cipher you want to use (1798 or 1802): \")\n\nif cipher == \"1798\":\n operation = input(\"Do you want to encrypt or decrypt the text? (E/D): \").lower()\n text = input(\"Please enter the text you want to decrypt: \")\n if operation == \"e\":\n print(\"Encrypted text: \", encrypt_text_1798(text))\n elif operation == \"d\":\n print(\"Decrypted text: \", decrypt_text_1798(text))\n else:\n print(\"Invalid operation\")\nelif cipher == \"1802\":\n operation = input(\"Do you want to encrypt or decrypt the text? (E/D): \").lower()\n text = input(\"Please enter the text you want to decrypt: \")\n if operation == \"e\":\n print(\"Encrypted text: \", encrypt_text_1802(text))\n elif operation == \"d\":\n print(\"Decrypted text: \", decrypt_text_1802(text))\n else:\n print(\"Invalid operation\")\nelse:\n print(\"Invalid cipher type\")\n","repo_name":"codercoins/BROWNE-SCIPHERENCRYPTER-DECRYPTER","sub_path":"brownescipher.py","file_name":"brownescipher.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3128074889","text":"### ML with Gapminder dataset\n### Basic pipeline with ElasticNet, StandardScaler, Imputation and 3-fold cross-validation\n### Comparisson of models (untuned and tuned)\n\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\n# Setup the pipeline steps: steps\nsteps = [('imputation', Imputer(missing_values='NaN', strategy='mean', axis=0)),\n ('scaler', StandardScaler()),\n ('elasticnet', ElasticNet())]\n\n# Create the pipeline: pipeline \npipeline = Pipeline(steps)\n\n# Specify the hyperparameter space\nparameters = {'elasticnet__l1_ratio':np.linspace(0,1,30)}\n\n# Create train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)\n\n# Create the GridSearchCV object: gm_cv_scaled_tuned\ngm_cv_scaled_tuned = GridSearchCV(pipeline, param_grid=parameters, cv=3)\n\n# Instantiate and fit a tuned ElasticNet to the scaled data\ngm_cv_scaled_tuned.fit(X_train, y_train)\n\n# Instantiate and fit a ElasticNet to the untuned model and unscaled data with no CV\nm_untuned = ElasticNet().fit(X_train, y_train)\n\n# Compute and print the metrics\nr2_scaled_tuned = gm_cv_scaled_tuned.score(X_test, y_test)\nr2_unscaled_untuned = m_untuned.score(X_test, y_test)\nprint(\"Tuned ElasticNet Alpha: {}\".format(gm_cv_scaled_tuned.best_params_))\nprint(\"Tuned ElasticNet R squared: {}\".format(r2_scaled_tuned))\nprint(\"Untuned ElasticNet R squared: {}\".format(r2_unscaled_untuned))\n","repo_name":"macgrz/ML","sub_path":"various_models/gapminder_ElasticNet.py","file_name":"gapminder_ElasticNet.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38725721831","text":"# reversing a string\ndef reverse_string(str):\n\treturn str[::-1]\nprint(reverse_string('i want to earn billions'))\nprint \n\n# factorial of a number\ndef factorial(num):\n\tfact = 1\n\tele = 1\n\twhile (ele <= num):\n\t\tfact *= ele\n\t\tele += 1\n\treturn fact\nprint(factorial(5))\nprint \n\n# palindrome of string\ndef palindrome(str):\n\trev_str = str[::-1]\n\tif rev_str == str:\n\t\treturn \"palindrome\"\n\telse:\n\t\treturn \"not palindrome\"\nprint(palindrome('333'))\nprint(palindrome('234'))\nprint\n\n# find the longest word\ndef find_longest_word(str):\n\tsplit_string = str.split(' ')\n\t#print split_string\n\tnutshell = []\n\tfor i in split_string:\n\t\tnutshell.append(len(i))\n\tlong = sorted(nutshell)\n\tlongest_word = long[::-1]\n\treturn longest_word[0]\nprint(find_longest_word('i want to earn billions'))\nprint\n\n# test case\ndef test_case(str):\n\tlower = str.lower()\n\ttitle = lower.title()\n\treturn title\nprint(test_case('i want to earn billions'))\nprint\n\n# longest array --length\ndef longest_array(arr):\n\tindividual = []\n\tfor i in arr:\n\t\tindividual.append(len(i))\n\tsort = sorted(individual)\n\tdesc_sort = sort[::-1]\n\treturn desc_sort[0]\nprint(longest_array([[1,2,3,4],[2,3,4,5,6,7],\n\t[\"saf\",\"hds\",\"jdd\",\"chd\",\"jdh\",\"dwe\",\"dew\"]]))\nprint\n\n\"\"\"# longest array based on value\ndef longest_value(arr):\n\t#bada = [0,0,0,0]\n\tfor i in range(0,len(arr)):\n\t\ti += 1\n\t\tfor j in i:\n\t\t\tj += 1\n\treturn j\nprint(longest_value([[1,2,3,4],[2,3,4,5],[3,4,5,6],[4,5,6,7]]))\"\"\"\n\n# repeatition of string\ndef repeat_exam(str,num):\n\tif(num != 0):\n\t\treturn str * num\nprint(repeat_exam('i want to earn billions--',3))\nprint\n\n# mutation\ndef mutation(arr):\n\thakuna = arr[1]\n\tmatata = arr[0]\n\tfor i in range(0, len(matata)):\n\t\tif (matata[i] == hakuna[i]):\n\t\t\treturn 'i want to earn billions'\n\t\telse:\n\t\t\treturn False\nprint(mutation([[\"hello\"],[\"hello\"]]))\nprint(mutation([['sameer'],['loophole']]))\nprint\n\n# confirming the end\ndef end_test(str,target):\n\tif str[-len(target):] == target:\n\t\treturn True\n\telse:\n\t\treturn False\n\t#print('hdhdhdhhd'[-4:])\nprint(end_test('i want to earn billions','billions'))\nprint(end_test('whdudbcudjss','gdhsdt'))\nprint\n\n# partition of an array according to the size\ndef chunky_monkey(arr,size):\n\tnew_arr = []\n\ti = 0\n\twhile (i < len(arr)):\n\t\tnew_arr.append(arr[i:i+size])\n\t\ti += size\n\treturn new_arr\nprint(chunky_monkey([0,1,2,3,4,5],4))\nprint(chunky_monkey([1,2,3,4,4,5,6,7,8],3))\nprint\n\n# splicing the array\ndef splice_array(arr,how_many):\n\tsplice = arr[how_many:]\n\treturn splice\nprint(splice_array([1,2,3,4,5,6],4))\nprint(splice_array([8,7,6,5,4,3,2,6,4,3,7,5,4,2],4))\nprint\n\n# truncatioin\ndef truncate(str,num):\n\tif (len(str) > num and num > 3):\n\t\treturn str[0:num-3] + '...'\n\telif (len(str) > num and num <= 3):\n\t\treturn str[0:num] + '...'\n\telse:\n\t\treturn str\nprint(truncate('i want to earn billions',12))\nprint(truncate('earn billions: i want to',18))\nprint\n\n# chunk array in groups\ndef chunk_array_in_groups(arr,size):\n\tsimply = []\n\tjust = []\n\ttemp = arr\n\tresult = []\n\tfor i in range(0,size):\n\t\tsimply.append(arr[0:size])\n\tresult.append(simply[0])\n\tfor i in range(0,size):\n\t\tjust.append(temp[2:len(temp)])\n\tresult.append(just[0])\n\treturn result\nprint(chunk_array_in_groups([1,2,3,4,5,6],5))\nprint\n\n# get the index\ndef get_index(arr,num):\n\tarr = sorted(arr)\n\tfor i in range(0,len(arr)):\n\t\tif arr[i] >= num:\n\t\t\treturn i\n\t\telse:\n\t\t\tpass\nprint(get_index([1,2,3,4,5],1))\nprint(get_index([-2,-1,0,1,2,3,4],4))\nprint\n\n# ascii conversion\ndef string_ascii(str):\n\tasc = []\n\ts_a = list(str)\n\tfor i in s_a:\n\t\tasc.append(ord(i))\n\treturn asc\nprint(string_ascii('abcdefghijklmnopqrstuvwxyz'))\nprint(string_ascii('i want to earn billions!'))\nprint\n\n# string conversion\ndef ascii_string(num):\n\tn = []\n\tfor i in num:\n\t\tn.append(chr(i))\n\treturn ''.join(n)\nprint(ascii_string([105, 32, 119, 97, 110, 116, 32, 116, 111, 32, 101, 97, 114, 110, 32, 98, \n\t105, 108, 108, 105, 111, 110, 115, 33]))\nprint\n\n# searching for name\ndef search_engine(text,search):\n\tmy_search = search\n\treturn text.find(my_search)\nprint(search_engine('jnjhfu jdihfudsfh i want to earn billions hgugydb dusydgdsy','i want to earn billions'))\nprint\n\n# array destroyer\ndef array_destroyer(arr1,arr2):\n\tground_nut = []\n\tfor i in arr1:\n\t\tif i not in arr2:\n\t\t\tground_nut.append(i)\n\treturn ground_nut\nprint(array_destroyer([1,2,3,4,5,6,7,'jdhjhf'],[1,2,3]))\nprint\n\n# substrings .. return only last letter of any string\ndef sub_string(str):\n\treturn str[len(str)-1:len(str)]\nprint(sub_string('i want to earn billions'))\nprint(sub_string('loophole'))\nprint\n\n# confirmation\ndef confirm_letter(str,target):\n\tif target in str:\n\t\treturn 'i want to earn billions'\n\telse:\n\t\tpass\nprint(confirm_letter('i want to earn billions','b'))\nprint\n\ndef password_check(str, password):\n\tif str == password:\n\t\treturn 'successfully logged in'\n\telse:\n\t\treturn 'nope'\nprint(password_check('google','google'))\nprint\n","repo_name":"chaotic-enigma/python","sub_path":"python_programs/algorithms_py/algorithms_basic.py","file_name":"algorithms_basic.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5582651639","text":"# @Author: George Onoufriou \n# @Date: 2021-07-24T15:47:41+01:00\n# @Last modified by: archer\n# @Last modified time: 2021-08-23T15:25:12+01:00\n\nimport unittest\nimport time\nimport numpy as np\n\nimport seal\n\n# backward compatibility\nfrom fhez.recache import ReCache\nfrom fhez.rescheme import ReScheme\nfrom fhez.reseal import ReSeal\n\n\nclass ReSeal_tests(unittest.TestCase):\n \"\"\"Unit test class aggregating all tests for the encryption class\"\"\"\n\n def setUp(self):\n self.startTime = time.time()\n\n def tearDown(self):\n t = time.time() - self.startTime\n print('%s: %.3f' % (self.id(), t))\n\n def defaults_ckks(self):\n return {\n \"scheme\": seal.scheme_type.CKKS,\n \"poly_mod_deg\": 8192,\n \"coeff_mod\": [60, 40, 40, 60],\n \"scale\": pow(2.0, 40),\n \"cache\": True,\n }\n\n def defaults_ckks_nocache(self):\n options = self.defaults_ckks()\n options[\"cache\"] = False\n return options\n\n def gen_reseal(self, defaults):\n if defaults[\"scheme\"] == seal.scheme_type.CKKS:\n r = ReSeal(scheme=defaults[\"scheme\"],\n poly_modulus_degree=defaults[\"poly_mod_deg\"],\n coefficient_modulus=defaults[\"coeff_mod\"],\n scale=defaults[\"scale\"])\n else:\n raise NotImplementedError(\"BFV default gen_reseal not implemented\")\n return r\n\n def test_init(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r, ReSeal)\n\n def test_serialize_deserialize(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n d = r.__getstate__()\n r2 = ReSeal()\n r2.__setstate__(d)\n\n def test_param_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.parameters, seal.EncryptionParameters)\n\n def test_context_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.context, seal.SEALContext)\n\n def test_publickey_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.public_key, seal.PublicKey)\n\n def test_privatekey_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.private_key, seal.SecretKey)\n\n def test_relinkeys_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.relin_keys, seal.RelinKeys)\n\n def test_ciphertext_property(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n r.ciphertext = 100\n self.assertIsInstance(r.ciphertext, seal.Ciphertext)\n r.ciphertext = [1, 2, 3, 4, 5, 100]\n self.assertIsInstance(r.ciphertext, seal.Ciphertext)\n r.ciphertext = np.array([1, 2, 3, 4, 5, 100])\n self.assertIsInstance(r.ciphertext, seal.Ciphertext)\n\n def test_ciphertext_add_plaintext(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([1, 2, 3])\n r.ciphertext = data\n r.ciphertext = r + 2\n r = r + 4 # test return object style\n result = r.plaintext\n print(\"c+p: 6 +\", data, \"=\", np.round(result[:data.shape[0]]))\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n self.assertEqual((data+6).tolist(), rounded_reshaped_result.tolist())\n\n def test_ciphertext_add_ciphertext(self):\n import copy\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([1, 2, 3])\n r.ciphertext = data\n r2 = copy.deepcopy(r)\n r.ciphertext = r + r2\n r = r + r2 # test return object style\n result = r.plaintext\n print(\"c+c: 3 *\", data, \"=\", np.round(result[:data.shape[0]]))\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n self.assertEqual((data*3).tolist(), rounded_reshaped_result.tolist())\n\n def test_ciphertext_multiply_plaintext(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([1, 2, 3])\n r.ciphertext = data\n r.ciphertext = r * 2\n r = r * 4 # test return object style\n result = r.plaintext\n print(\"c*p: 8 *\", data, \"=\", np.round(result[:data.shape[0]]))\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n self.assertEqual((data*8).tolist(), rounded_reshaped_result.tolist())\n\n def test_ciphertext_multiply_ciphertext(self):\n import copy\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([100, 200, 300])\n r.ciphertext = data\n r2 = copy.deepcopy(r)\n r.ciphertext = r * r2\n r = r * r2 # test return object style\n result = r.plaintext\n print(\"c*c:\", data, \" ^ 3 =\", np.round(result[:data.shape[0]]))\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n # self.assertEqual((data * data * data).tolist(),\n # rounded_reshaped_result.tolist())\n np.testing.assert_array_almost_equal(result[:data.shape[0]],\n (data * data * data),\n decimal=0,\n verbose=True)\n\n def test_encrypt_decrypt(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([1, 2, 3])\n r.ciphertext = data\n result = r.plaintext\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n self.assertEqual((data).tolist(), rounded_reshaped_result.tolist())\n\n def test_complex_arithmetic(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n data = np.array([2, 3, 4, 5, 6, 0.5, 8, 9])\n r.ciphertext = data\n r2 = r.new()\n # print(\"original\", r.plaintext[:data.shape[0]])\n r2.ciphertext = 20 * r\n # print(\"20 * original\", r2.plaintext[:data.shape[0]])\n r2.ciphertext = r + r2\n r2 = r2 * r # test return object style\n expected = ((data * 20) + data) * data\n result = r2.plaintext\n rounded_reshaped_result = np.round(result[:data.shape[0]])\n # self.assertEqual(expected.tolist(),\n # rounded_reshaped_result.tolist())\n np.testing.assert_array_almost_equal(result[:data.shape[0]], expected,\n decimal=0,\n verbose=True)\n\n def test_pickle(self):\n import pickle\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n r.ciphertext = np.array([1, 2, 3])\n dump = pickle.dumps(r)\n rp = pickle.loads(dump)\n self.assertIsInstance(rp, ReSeal)\n\n def test_deepcopy(self):\n import copy\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n r.ciphertext = np.array([1, 2, 3])\n rp = copy.deepcopy(r)\n self.assertIsInstance(rp, ReSeal)\n\n def test_cache(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n self.assertIsInstance(r.cache, ReCache)\n\n def test_validity(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n r.ciphertext = np.array([1, 2, 3])\n ReScheme().validate(r.__getstate__())\n\n def test_len(self):\n defaults = self.defaults_ckks()\n r = self.gen_reseal(defaults)\n r.ciphertext = np.array([1, 2, 3])\n self.assertIsInstance(len(r), int)\n\n\nif __name__ == \"__main__\":\n # run all the unit-tests\n print(\"now testing:\", __file__, \"...\")\n unittest.main()\n","repo_name":"DreamingRaven/python-fhez","sub_path":"fhez/reseal_test.py","file_name":"reseal_test.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"22"} +{"seq_id":"4287064168","text":"import pytest\nfrom httpx import AsyncClient\n\n\n@pytest.mark.parametrize(\n \"location, date_from, date_to, status_code\",\n [\n (\"Алтай\", \"2023-06-30\", \"2023-06-25\", 400),\n (\"Алтай\", \"2023-06-25\", \"2023-07-30\", 400),\n (\"Алтай\", \"2023-06-25\", \"2023-06-30\", 200),\n ],\n)\nasync def test_get_free_hotels(\n location: str, date_from: str, date_to: str, status_code: int, ac: AsyncClient\n):\n response = await ac.get(\n f\"/v1/hotels/{location}\",\n params={\n \"date_from\": date_from,\n \"date_to\": date_to,\n },\n )\n print(response)\n assert response.status_code == status_code\n","repo_name":"SogoDavid/booking_fast_api_pet_project","sub_path":"app/tests/integration_tests/test_hotels/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"22114805045","text":"import os\nimport re\nfrom collections import OrderedDict\n\nfrom .package import Package\n\ndef parse(filepath):\n \"\"\"\n Read the file given and parse it to return\n a list of Package information.\n \"\"\"\n rawdata = _readfile(filepath)\n return _parse_rawdata(rawdata)\n\n\ndef _readfile(filepath):\n if not os.path.exists(filepath):\n raise FileNotFoundError\n with open(filepath, 'r') as f:\n return f.read()\n\ndef _parse_rawdata(rawdata):\n # Precompile the regex to not recompile it everytime\n # in the double loop. Remove everything in the parenthese inclusive.\n regex = re.compile(r'\\(.*?\\)', re.ASCII)\n\n pkgs = []\n for pkg_info in rawdata.split('\\n\\n'):\n pkg = OrderedDict()\n for line in pkg_info.split('\\n'):\n if line == '':\n continue\n\n key = ''\n if line[0] == ' ' or not line[0].isupper():\n # We have a multiline information value\n key = next(reversed(pkg))\n value = line\n # Special case for Description we add a linebreak.\n if key == 'Description':\n value = '\\n' + line.strip()\n else:\n key, value = line.split(':', 1)\n\n # Do not strip \\n for Description value\n key, value = key.strip(), value.strip(' ')\n\n # Remove the version constrains for the package\n # information.\n if key in ['Depends', 'Pre-Depends', 'Recommends',\n 'Suggests', 'Replaces', 'Provides', 'Conflicts']:\n # Remove the :any that appears on \"python:any\" which\n # means choose between python2.6 and python2.7 but\n # we will use the default python which is \"python\".\n value = value.replace(':any', '')\n value = regex.sub('', value).split(',')\n value = [val.strip() for val in value]\n \n # Check if we need to create or append value to the key.\n if key in pkg:\n pkg[key] += value\n else:\n pkg[key] = value\n\n if pkg != OrderedDict():\n pkgs.append(Package(pkg))\n return pkgs\n","repo_name":"charlesvdv/debseeker","sub_path":"debseeker/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"15070284331","text":"''' A stupid little way of generating xml\n'''\n\nimport re\n\nfrom flask import escape\n\nPRETTY = True\n\nclass Context(object):\n __slots__ = ('_node', '_name', '_kwargs')\n pattern = re.compile(r'[A-Za-z]\\w*')\n\n def __init__(self, node, name, kwargs):\n self._node = node\n self._name = name\n self._kwargs = kwargs\n\n def __enter__(self):\n _node = self._node\n _buffer = _node._buffer\n _node.nl()\n _buffer.extend(['<', escape(self._name)])\n for k, v in self._kwargs.iteritems():\n assert Context.pattern.match(k)\n _buffer.extend([' ', k, '=\"', escape(v), '\"'])\n _buffer.append('>')\n _node._indent += 1\n _node.nl()\n\n def __exit__(self, exc_type, exc_value, traceback):\n _node = self._node\n _buffer = _node._buffer\n _node._indent -= 1\n _node.nl()\n if _buffer[-1] == '>' and _buffer[-3] != ''\n else:\n _buffer.extend([''])\n _node.nl()\n\n def __call__(_self, **kwargs):\n new_kwargs = dict(_self._kwargs)\n new_kwargs.update(kwargs)\n return Context(_self._node, _self._name, new_kwargs)\n\nclass Node(object):\n __slots__ = ('_buffer', '_indent')\n\n def __init__(self):\n self._buffer = ['', '\\n', '']\n self._indent = 0\n\n def tag(_self, _name, **kwargs):\n return Context(_self, _name, kwargs)\n\n\n def put(self, text):\n self._buffer.append(escape(text))\n\n def __str__(self):\n return ''.join(self._buffer)\n\n def nl(self):\n if PRETTY:\n _buffer = self._buffer\n if _buffer[-2] == '\\n':\n _buffer.pop()\n else:\n _buffer.append('\\n')\n _buffer.extend([' ' * self._indent])\n","repo_name":"themanaworld/tmw-tools","sub_path":"web/with_xml.py","file_name":"with_xml.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"286708761","text":"num = int(input())\nstairs = [0 for _ in range(301)]\ndp = [0 for _ in range(301)]\n\nfor i in range(num):\n stairs[i] = int(input())\n\ndp[0] = stairs[0]\ndp[1] = stairs[0] + stairs[1]\ndp[2] = max(stairs[1]+stairs[2], stairs[0]+stairs[2])\n\n# 마지막 계단의 전 계단을 밟는 경우와 밟지 않는 경우\nfor i in range(3, num):\n dp[i] = max(dp[i-3]+stairs[i-1]+stairs[i], dp[i-2]+stairs[i])\nprint(dp[num-1])","repo_name":"kklee0930/Algorithm","sub_path":"백준/Silver/2579. 계단 오르기/계단 오르기.py","file_name":"계단 오르기.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10361721782","text":"from tkinter import Toplevel, Canvas, Variable\nfrom tkinter.constants import TOP, BOTH\nfrom wrappers.logging_wrapper import debug\nfrom view_model import register_vm_updates, unregister_vm_updates\n\nclass DraggableMixin:\n def __init__(self, *args, on_drag=None, **kwargs):\n super().__init__(*args, **kwargs)\n DraggableMixin.make_draggable(self, on_drag)\n\n @staticmethod\n def make_draggable(widget, on_drag=None):\n def _on_drag_start(event):\n widget = event.widget.winfo_toplevel()\n widget._drag_start_x = event.x\n widget._drag_start_y = event.y\n\n def _on_drag_motion(event):\n widget = event.widget.winfo_toplevel()\n x = widget.winfo_x() - widget._drag_start_x + event.x\n y = widget.winfo_y() - widget._drag_start_y + event.y\n widget.geometry(\"+%d+%d\" % (x, y))\n if on_drag is not None: \n on_drag(x, y)\n\n widget.bind(\"\", _on_drag_start)\n widget.bind(\"\", _on_drag_motion)\n\nclass FloatingWindow(Toplevel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.overrideredirect(True)\n\nclass RectangleWindow(DraggableMixin, FloatingWindow):\n\n _inherent_window_padding = 2\n _border_thickness = 5\n _offset_half_thickness = int(_border_thickness / 2)\n\n def __init__(self, x, y, width, height, *args, **kwargs):\n super().__init__(*args, on_drag=self._on_drag, **kwargs)\n\n #view models\n self.x, self.y, self.width, self.height = x, y, width, height\n\n # previous values\n self._x, self._y, self._width, self._height = None, None, None, None\n\n self._variable_traces = []\n\n register_vm_updates(\n self._on_vm_update,\n self.x, self.y, self.width, self.height\n )\n\n self.protocol(\"WM_DELETE_WINDOW\", self._on_closing)\n self.bind('', self._on_closing)\n\n self.resizable(True, True)\n self.attributes('-topmost', True)\n self.wm_attributes('-transparentcolor', self['bg'])\n self.deiconify()\n\n self._canvas = Canvas(self)\n self._rectangle_id = None\n\n self._redraw()\n\n def _on_closing(self, *args):\n unregister_vm_updates(\n self._variable_traces\n )\n \n def _on_vm_update(self, v):\n try:\n # check if the view actually needs to be updated\n if v.get() != v._previous_value:\n self._redraw()\n except: \n pass\n \n def _on_drag(self, x, y):\n \"\"\"A callback that handles updating the view model when the \n window position changed via DraggableMixin.\n \"\"\"\n # add the padding because padding calculation is handled internally\n # and the provided x and y values are really vm - padding.\n # manually set previous value because we don't need to redraw\n # the widget (drag handler already did that)\n self.x._previous_value = x + self._inherent_window_padding\n self.x.set(self.x._previous_value)\n\n self.y._previous_value = y + self._inherent_window_padding\n self.y.set(self.y._previous_value)\n\n def _redraw(self):\n if self._rectangle_id is not None:\n self._canvas.delete(self._rectangle_id)\n self._rectangle_id = None\n\n self.geometry(\n '%dx%d+%d+%d' % (\n self.width.get() + 2 * self._inherent_window_padding + 1, \n self.height.get() + 2 * self._inherent_window_padding + 1, \n self.x.get() - self._inherent_window_padding, \n self.y.get() - self._inherent_window_padding\n )\n )\n\n self._rectangle_id = self._canvas.create_rectangle(\n self._offset_half_thickness + 2, \n self._offset_half_thickness + 2, \n self.width.get() + self._offset_half_thickness + 2 - self._border_thickness + 1, \n self.height.get() + self._offset_half_thickness + 2 - self._border_thickness + 1, \n outline=\"green\", \n width=self._border_thickness\n )\n self._canvas.pack(side=TOP, fill=BOTH, expand=True)\n","repo_name":"bmclare19/py-fisher","sub_path":"gui/widgets/top_levels.py","file_name":"top_levels.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9164031558","text":"import os, sys, discord, platform, random, aiohttp, json, datetime, requests\nfrom discord.ext import commands\nfrom multiprocessing import context\nfrom cogs.functions import db\n\nif not os.path.isfile(\"config.py\"):\n sys.exit(\"'config.py' not found! Please add it and try again.\")\nelse:\n import config\n\nclass AdminCog(commands.Cog, name=\"admin\"):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.is_owner()\n @commands.hybrid_command(name=\"die\", aliases=[\"logout\"])\n async def die(self, context):\n await context.send(\"Beep boop boop beep!! logging out!!..\")\n await context.bot.logout()\n \n\nasync def setup(bot):\n await bot.add_cog(AdminCog(bot))\n","repo_name":"hustleer/saneora","sub_path":"cogs/admin_cog.py","file_name":"admin_cog.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32411845464","text":"# 124 나라의 숫자\n\ndef solution(n):\n q = []\n ans = ''\n dic = {1: 1, 2: 2, 0: 4}\n while n > 0:\n tmp = n % 3\n q.append(dic[tmp])\n n = n//3\n if tmp == 0:\n n -= 1\n\n while q:\n ans += str(q.pop())\n\n return ans\n\n# 다른사람 풀이\n# def change124(n):\n# num = ['1','2','4']\n# answer = \"\"\n\n\n# while n > 0:\n# n -= 1\n# answer = num[n % 3] + answer\n# n //= 3\n\n# return answer\n","repo_name":"yeon52/Problem-Solving","sub_path":"CodingTest/Implementation/programmers/124-countries.py","file_name":"124-countries.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"875592719","text":"from itertools import combinations\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nchicken_map = [list(map(int, input().split())) for _ in range(N)]\n\n# 여기서 두 번 돌아서 시간이 더 걸리는 것일까?\nchicken, houses = [], []\n\nfor r in range(N):\n for c in range(N):\n if chicken_map[r][c] == 1:\n houses.append((r,c))\n elif chicken_map[r][c] == 2:\n chicken.append((r,c))\n\n# chicken = [(i,j) for i in range(len(chicken_map)) for j in range(len(chicken_map[i])) if chicken_map[i][j] == 2]\n# houses = [(i,j) for i in range(len(chicken_map)) for j in range(len(chicken_map[i])) if chicken_map[i][j] == 1]\n\nchicken_remains = combinations(chicken, M)\n\nmin_total_chick_dist = float('inf')\nfor chicken_remain in chicken_remains:\n total_chick_dist = 0\n # 집집마다\n for hy, hx in houses:\n min_chick_dist = float('inf')\n # 치킨 집과의 거리 계산\n for cy, cx in chicken_remain:\n chick_dist = abs(cy-hy) + abs(cx-hx)\n if chick_dist < min_chick_dist:\n min_chick_dist = chick_dist\n total_chick_dist += min_chick_dist\n # total_chick_dist: 해당 치킨 집 조합에서 얻을 수 있는 도시의 치킨 거리\n if total_chick_dist < min_total_chick_dist:\n min_total_chick_dist = total_chick_dist\n\nprint(min_total_chick_dist)\n\n# for 반복문을 계속 사용하고 있는데, 이로 인해 연산 효율성이 매우 안좋게 나타나 실행 속도가 100대로 나타나는\n# 다른 답에 비해 나의 풀이는 500ms로 나타난다.\n\n# 이 원인은 map / zip, unzip을 활용하는 다른 코드들로 인한 차이라고 생각한다. ","repo_name":"Hoonst/algorithm","sub_path":"Graph/15686_chicken_delivery.py","file_name":"15686_chicken_delivery.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38291768335","text":"from __future__ import print_function\nimport numpy as np\nimport os\nimport librosa\nimport time\nimport pandas as pd\n\nimport config as cfg\nfrom utils.Logger import LOG\nfrom download_data import download\nfrom utils.utils import create_folder, read_audio\n\n\nclass DatasetDcase2019Task4:\n \"\"\"DCASE 2018 task 4 dataset\n This dataset contains multiple subsets:\n A train set divided in three subsets:\n - A weakly labeled set\n - An unlabeled-in-domain set\n - An unlabeled-out-of-domain set\n A test set\n An evaluation set\n The files should be ordered into the 'local_path' as described here:\n dataset root\n - readme.md\n - download_data.py\n - metadata\n - train\n - weak.tsv\n - unlabel_in_domain.tsv\n - synthetic_data.tsv\n - validation\n - validation.tsv\n - test_dcase2018.tsv\n - eval_dcase2018.tsv\n -eval\n - public.tsv\n - audio\n - train\n - weak\n - unlabel_in_domain\n - synthetic_data\n - validation\n - eval\n - public\n\n Args:\n local_path: str, (Default value = \"\") base directory where the dataset is, to be changed if\n dataset moved\n base_feature_dir: str, (Default value = \"features) base directory to store the features\n recompute_features: bool, (Default value = False) wether or not to recompute features\n subpart_data: int, (Default value = None) allow to take only a small part of the dataset.\n This number represents the number of data to download and use from each set\n save_log_feature: bool, (Default value = True) whether or not saving the logarithm of the feature or not\n (particularly useful to put False to apply some data augmentation)\n\n Attributes:\n local_path: str, base directory where the dataset is, to be changed if\n dataset moved\n base_feature_dir: str, base directory to store the features\n recompute_features: bool, wether or not to recompute features\n subpart_data: int, allow to take only a small part of the dataset.\n This number represents the number of data to download and use from each set\n save_log_feature: bool, whether or not saving the logarithm of the feature or not\n (particularly useful to put False to apply some data augmentation)\n feature_dir : str, directory to store the features\n\n \"\"\"\n def __init__(self, local_path=\"\", base_feature_dir=\"features\", recompute_features=False,\n save_log_feature=True):\n\n self.local_path = local_path\n self.recompute_features = recompute_features\n self.save_log_feature = save_log_feature\n\n feature_dir = os.path.join(base_feature_dir, \"sr\" + str(cfg.sample_rate) + \"_win\" + str(cfg.n_window)\n + \"_hop\" + str(cfg.hop_length) + \"_mels\" + str(cfg.n_mels))\n if not self.save_log_feature:\n feature_dir += \"_nolog\"\n\n self.feature_dir = os.path.join(feature_dir, \"features\")\n # create folder if not exist\n create_folder(self.feature_dir)\n\n def initialize_and_get_df(self, tsv_path, subpart_data=None, download=True):\n \"\"\" Initialize the dataset, extract the features dataframes\n Args:\n tsv_path: str, tsv path in the initial dataset\n subpart_data: int, the number of file to take in the dataframe if taking a small part of the dataset.\n download: bool, whether or not to download the data from the internet (youtube).\n\n Returns:\n pd.DataFrame\n The dataframe containing the right features and labels\n \"\"\"\n meta_name = os.path.join(self.local_path, tsv_path)\n if download:\n self.download_from_meta(meta_name, subpart_data)\n return self.extract_features_from_meta(meta_name, subpart_data)\n\n @staticmethod\n def get_classes(list_dfs):\n \"\"\" Get the different classes of the dataset\n Returns:\n A list containing the classes\n \"\"\"\n classes = []\n for df in list_dfs:\n if \"event_label\" in df.columns:\n classes.extend(df[\"event_label\"].dropna().unique()) # dropna avoid the issue between string and float\n elif \"event_labels\" in df.columns:\n classes.extend(df.event_labels.str.split(',', expand=True).unstack().dropna().unique())\n return list(set(classes))\n\n @staticmethod\n def get_subpart_data(df, subpart_data):\n column = \"filename\"\n if not subpart_data > len(df[column].unique()):\n filenames = df[column].drop_duplicates().sample(subpart_data, random_state=10)\n df = df[df[column].isin(filenames)].reset_index(drop=True)\n LOG.debug(\"Taking subpart of the data, len : {}, df_len: {}\".format(subpart_data, len(df)))\n return df\n\n @staticmethod\n def get_df_from_meta(meta_name, subpart_data=None):\n \"\"\"\n Extract a pandas dataframe from a tsv file\n\n Args:\n meta_name : str, path of the tsv file to extract the df\n subpart_data: int, the number of file to take in the dataframe if taking a small part of the dataset.\n\n Returns:\n dataframe\n \"\"\"\n df = pd.read_csv(meta_name, header=0, sep=\"\\t\")\n if subpart_data is not None:\n df = DatasetDcase2019Task4.get_subpart_data(df, subpart_data)\n return df\n\n @staticmethod\n def get_audio_dir_path_from_meta(filepath):\n \"\"\" Get the corresponding audio dir from a meta filepath\n\n Args:\n filepath : str, path of the meta filename (tsv)\n\n Returns:\n str\n path of the audio directory.\n \"\"\"\n base_filepath = os.path.splitext(filepath)[0]\n audio_dir = base_filepath.replace(\"metadata\", \"audio\")\n if audio_dir.split('/')[-2] in ['validation']:\n audio_dir = '/'.join(audio_dir.split('/')[:-1])\n audio_dir = os.path.abspath(audio_dir)\n return audio_dir\n\n def download_from_meta(self, filename, subpart_data=None, n_jobs=3, chunk_size=10):\n \"\"\"\n Download files contained in a meta file (tsv)\n\n Args:\n filename: str, path of the meta file containing the name of audio files to donwnload\n (tsv with column \"filename\")\n subpart_data: int, the number of files to use, if a subpart of the dataframe wanted.\n chunk_size: int, (Default value = 10) number of files to download in a chunk\n n_jobs : int, (Default value = 3) number of parallel jobs\n \"\"\"\n result_audio_directory = self.get_audio_dir_path_from_meta(filename)\n # read metadata file and get only one filename once\n df = DatasetDcase2019Task4.get_df_from_meta(filename, subpart_data)\n filenames = df.filename.drop_duplicates()\n download(filenames, result_audio_directory, n_jobs=n_jobs, chunk_size=chunk_size)\n\n def get_feature_file(self, filename):\n \"\"\"\n Get a feature file from a filename\n Args:\n filename: str, name of the file to get the feature\n\n Returns:\n numpy.array\n containing the features computed previously\n \"\"\"\n fname = os.path.join(self.feature_dir, os.path.splitext(filename)[0] + \".npy\")\n data = np.load(fname)\n return data\n\n def calculate_mel_spec(self, audio):\n \"\"\"\n Calculate a mal spectrogram from raw audio waveform\n Note: The parameters of the spectrograms are in the config.py file.\n Args:\n audio : numpy.array, raw waveform to compute the spectrogram\n\n Returns:\n numpy.array\n containing the mel spectrogram\n \"\"\"\n # Compute spectrogram\n ham_win = np.hamming(cfg.n_window)\n\n spec = librosa.stft(\n audio,\n n_fft=cfg.n_window,\n hop_length=cfg.hop_length,\n window=ham_win,\n center=True,\n pad_mode='reflect'\n )\n\n mel_spec = librosa.feature.melspectrogram(\n S=np.abs(spec), # amplitude, for energy: spec**2 but don't forget to change amplitude_to_db.\n sr=cfg.sample_rate,\n n_mels=cfg.n_mels,\n fmin=cfg.f_min, fmax=cfg.f_max,\n htk=False, norm=None)\n\n if self.save_log_feature:\n mel_spec = librosa.amplitude_to_db(mel_spec) # 10 * log10(S**2 / ref), ref default is 1\n mel_spec = mel_spec.T\n mel_spec = mel_spec.astype(np.float32)\n return mel_spec\n\n def extract_features_from_meta(self, tsv_audio, subpart_data=None):\n \"\"\"Extract log mel spectrogram features.\n\n Args:\n tsv_audio : str, file containing names, durations and labels : (name, start, end, label, label_index)\n the associated wav_filename is Yname_start_end.wav\n subpart_data: int, number of files to extract features from the tsv.\n \"\"\"\n t1 = time.time()\n df_meta = self.get_df_from_meta(tsv_audio, subpart_data)\n LOG.info(\"{} Total file number: {}\".format(tsv_audio, len(df_meta.filename.unique())))\n\n for ind, wav_name in enumerate(df_meta.filename.unique()):\n if ind % 500 == 0:\n LOG.debug(ind)\n wav_dir = self.get_audio_dir_path_from_meta(tsv_audio)\n wav_path = os.path.join(wav_dir, wav_name)\n\n out_filename = os.path.splitext(wav_name)[0] + \".npy\"\n out_path = os.path.join(self.feature_dir, out_filename)\n\n if not os.path.exists(out_path):\n if not os.path.isfile(wav_path):\n LOG.error(\"File %s is in the tsv file but the feature is not extracted!\" % wav_path)\n df_meta = df_meta.drop(df_meta[df_meta.filename == wav_name].index)\n else:\n (audio, _) = read_audio(wav_path, cfg.sample_rate)\n if audio.shape[0] == 0:\n print(\"File %s is corrupted!\" % wav_path)\n else:\n mel_spec = self.calculate_mel_spec(audio)\n\n np.save(out_path, mel_spec)\n\n LOG.debug(\"compute features time: %s\" % (time.time() - t1))\n\n return df_meta.reset_index(drop=True)\n","repo_name":"turpaultn/DCASE2019_task4","sub_path":"baseline/DatasetDcase2019Task4.py","file_name":"DatasetDcase2019Task4.py","file_ext":"py","file_size_in_byte":10502,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"22"} +{"seq_id":"20464164642","text":"def solution(array):\n dic = dict()\n for i in array:\n try:\n dic[i] += 1\n except:\n dic[i] = 1\n lst = list(dic.values())\n counts = max(lst)\n if lst.count(counts) != 1:\n return -1\n else:\n new_dict = {v:k for k, v in dic.items()}\n return new_dict[counts]","repo_name":"Minhyeok-kimm/CodingTest_practice","sub_path":"프로그래머스/lv0/120812. 최빈값 구하기/최빈값 구하기.py","file_name":"최빈값 구하기.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10253849919","text":"import math\n\ndef sphere(values):\n s = 0\n for x in values:\n s += x*x\n \n return s\n\ndef rastrigin(values):\n d = len(values)\n s = 0\n for x in values:\n s += (x**2) - 10*math.cos(2*math.pi*x)\n \n return 10*d + s\n\ndef rosenbrock(values):\n s = 0\n for i in range(len(values)-1):\n s += 100*(values[i+1]-values[i]**2)**2 + (values[i]-1)**2\n \n return s\n\ndef zakharov(values):\n s1 = 0\n s2 = 0\n for i in range(len(values)):\n x = values[i]\n s1 += x*x\n s2 += 0.5*i*x\n \n return s1 + s2**2 + s2**4\n ","repo_name":"beesuit/CE-GeneticAlgorithm","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32298389160","text":"import io\nimport base64\n\nfrom dash import Dash, html, Input, Output, dcc\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"agg\") # no GUI\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker\nimport mplhep as hep\nplt.style.use(hep.style.CMS)\nimport hist\nimport hist.plot\n\nsys.path.append(\"../..\")\nfrom HistogramLib.histogram import HistogramKind\nfrom HistogramLib.store import HistogramStore\nfrom hists.parameters import beamEnergies\nfrom hists.store import HistogramId\n\napp = Dash(__name__)\n\napp.layout = html.Div([\n html.H1(children='CLUE3D transverse profile'),\n #dcc.Dropdown(options=beamEnergies, value=100, id='beamEnergy', clearable=False),\n dcc.Slider(min=beamEnergies[0], max=beamEnergies[-1], step=None, \n marks={beamEnergy:f\"{beamEnergy} GeV\" for beamEnergy in beamEnergies},\n id=\"beamEnergy\", value=100),\n dcc.Slider(min=1, max=28, step=1, value=5, id=\"layer\"),\n html.Div(children=[\n dcc.RadioItems(options={\"default\" : \"Default\", \"ratio\" : \"Ratio\"}, value=\"default\", inline=True, id=\"ratioPlot\"),\n #dcc.RadioItems(options=[\"\"])\n dcc.Slider(min=1, max=10, step=10/30, value=6, id=\"maxDistanceToPlot\"),\n ]),\n \n html.Img(id=\"plot\")\n])\n\n\nhist_folder = '/grid_mnt/data_cms_upgrade/cuisset/testbeam18/clue3d/v33'\n#clueParams = \"single-file\"\nclueParams = \"cmssw\"\nhistStore = HistogramStore(hist_folder, HistogramId)\ndatatypeToLegendMap = {\"data\":\"Data\", \"sim_proton_v46_patchMIP\":\"Simulation\"}\n\n\ndef loadHists(layer:int, beamEnergy:int, datatypes:list[str]=[\"data\", \"sim_proton_v46_patchMIP\"]) -> tuple[list[hist.Hist], list[str]]:\n return [(histStore\n .get(HistogramId(\"Clus3DRechitsDistanceToImpact_AreaNormalized\", clueParams, datatype))\n .getHistogram(HistogramKind.WEIGHTED_PROFILE)[{\n \"beamEnergy\" : hist.loc(beamEnergy),\n \"mainOrAllTracksters\" : hist.loc(\"mainTrackster\"),\n # Project on clus3D_size\n \"layer\" : hist.loc(layer),\n }]\n .project(\"rechits_distanceToImpact\")\n ) for datatype in datatypes], [datatypeToLegendMap.get(datatype, datatype) for datatype in datatypes]\n\ndef addLumiLegend(main_ax, datatypes, layer, beamEnergy):\n if \"data\" in datatypes:\n hep.cms.text(\"Preliminary\", ax=main_ax)\n else:\n hep.cms.text(\"Simulation Preliminary\", ax=main_ax)\n hep.cms.lumitext(f\"Layer {layer} - $e^+$ {str(beamEnergy)} GeV\", ax=main_ax)\n main_ax.legend()\n\ndef makePlotMultiDatatype(layer:int, beamEnergy:int, datatypes:list[str], maxDistanceToPlot=6):\n \"\"\" Plot distribution of distance to impact on a layer \n See in custom_hists for how y is computed\"\"\"\n hists, labels = loadHists(layer, beamEnergy, datatypes)\n yerr = False\n \n #fig = plt.Figure()\n fig = plt.figure()\n ax = fig.subplots()\n ax.set_xlabel(\"Distance to extrapolated impact point (cm)\")\n ax.set_xlim(0, maxDistanceToPlot)\n ax.set_ylim(3e-4, 3)\n ax.set_ylabel(r\"$\\frac{1}{E_{cluster}} \\frac{dE_{hit}}{dA} (cm^{-2})$\")\n ax.set_yscale(\"log\")\n \n hep.histplot(hists, label=labels, yerr=yerr, ax=ax)\n\n addLumiLegend(ax, datatypes, layer, beamEnergy)\n return fig\n\ndef makePlotRatio(layer:int, beamEnergy:int, datatypes:list[str], maxDistanceToPlot=6):\n if len(datatypes) != 2:\n raise RuntimeError()\n hists, labels = loadHists(layer, beamEnergy, datatypes)\n\n #fig = plt.Figure()\n fig = plt.figure()\n grid = fig.add_gridspec(2, 1, hspace=0, height_ratios=[3, 1])\n main_ax:plt.Axes = fig.add_subplot(grid[0])\n subplot_ax:plt.Axes = fig.add_subplot(grid[1], sharex=main_ax)\n\n hep.histplot(hists, label=labels, yerr=False, ax=main_ax)\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n ratios = hists[0].values() / hists[1].values()\n #ratio_uncert = hist.intervals.ratio_uncertainty(\n # num=hists[0].values(),\n # denom=hists[1].values(),\n # uncertainty_type=\"poisson\", # Assume numerator is Poisson (ignore uncertainty on MC)\n #)\n ratio_uncert = None\n hist.plot.plot_ratio_array(hists[0], ratios, ratio_uncert, subplot_ax, ylim=(0.2,2), ylabel=\"Ratio\")\n \n #plt.gca().xaxis.set_minor_locator(matplotlib.ticker.NullLocator())\n #main_ax.xaxis.set_major_locator(matplotlib.ticker.AutoLocator())\n #main_ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n plt.setp(main_ax.get_xticklabels(), visible=False)\n main_ax.set_ylabel(r\"$\\frac{1}{E_{cluster}} \\frac{dE_{hit}}{dA} (cm^{-2})$\")\n main_ax.set_yscale(\"log\")\n main_ax.set_xlabel(\"\")\n main_ax.set_xlim(0, maxDistanceToPlot)\n subplot_ax.set_xlabel(\"Distance to extrapolated impact point (cm)\")\n\n addLumiLegend(main_ax, datatypes, layer, beamEnergy)\n return fig\n\ndef mplFigureToUrl(fig=None):\n if fig is None:\n fig = plt.gcf()\n buf = io.BytesIO() # in-memory files\n fig.savefig(buf, format=\"png\")\n plt.close(fig)\n data = base64.b64encode(buf.getbuffer()).decode(\"utf8\") # encode to html elements\n buf.close()\n return \"data:image/png;base64,{}\".format(data)\n\n\n@app.callback(\n Output(component_id='plot', component_property='src'),\n [Input(component_id = 'beamEnergy', component_property='value'),\n Input(\"layer\", \"value\"), Input(\"ratioPlot\", \"value\"), Input(\"maxDistanceToPlot\", \"value\")]\n)\ndef update_graph(beamEnergy, layer, ratioPlot:bool, maxDistanceToPlot):\n if ratioPlot == \"ratio\":\n fig = makePlotRatio(layer, beamEnergy=beamEnergy, datatypes=[\"data\", \"sim_proton_v46_patchMIP\"], maxDistanceToPlot=maxDistanceToPlot)\n else:\n fig = makePlotMultiDatatype(layer, beamEnergy=beamEnergy, datatypes=[\"data\", \"sim_proton_v46_patchMIP\"], maxDistanceToPlot=maxDistanceToPlot)\n return mplFigureToUrl(fig)\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8051)","repo_name":"tcuisset/HgcalClue3DClusteringPlotting","sub_path":"clue3D-results/transverseProfile/distanceToImpact_dash.py","file_name":"distanceToImpact_dash.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16249036776","text":"\"\"\"Module that connects and sends/receives bytes from the nRF52 SoC.\"\"\"\nimport typing\nimport asyncio\nimport logging\nimport zigpy.serial\nimport async_timeout\nimport serial # type: ignore\nimport zigpy_zboss.config as conf\nfrom zigpy_zboss import types as t\nfrom zigpy_zboss.frames import Frame\nfrom zigpy_zboss.checksum import CRC8\nfrom zigpy_zboss.logger import SERIAL_LOGGER\nfrom zigpy_zboss.exceptions import InvalidFrame\n\nLOGGER = logging.getLogger(__name__)\nACK_TIMEOUT = 1\nSEND_RETRIES = 2\nSTARTUP_TIMEOUT = 5\nRECONNECT_TIMEOUT = 10\n\n\nclass BufferTooShort(Exception):\n \"\"\"Exception when the buffer is too short.\"\"\"\n\n\nclass ZbossNcpProtocol(asyncio.Protocol):\n \"\"\"Zboss Ncp Protocol class.\"\"\"\n\n def __init__(self, config, api) -> None:\n \"\"\"Initialize the ZbossNcpProtocol object.\"\"\"\n self._api = api\n self._ack_seq = 0\n self._pack_seq = 0\n self._config = config\n self._transport = None\n self._reset_flag = False\n self._buffer = bytearray()\n self._reconnect_task = None\n self._tx_lock = asyncio.Lock()\n self._ack_received_event = None\n self._connected_event = asyncio.Event()\n\n self._port = config[conf.CONF_DEVICE_PATH]\n self._baudrate = config[conf.CONF_DEVICE_BAUDRATE]\n self._flow_control = config[conf.CONF_DEVICE_FLOW_CONTROL]\n\n @property\n def api(self):\n \"\"\"Return the owner of that object.\"\"\"\n return self._api\n\n @property\n def name(self) -> str:\n \"\"\"Return serial name.\"\"\"\n return self._transport.serial.name\n\n @property\n def baudrate(self) -> int:\n \"\"\"Return the baudrate.\"\"\"\n return self._transport.serial.baudrate\n\n @property\n def reset_flag(self) -> bool:\n \"\"\"Return True if a reset is in process.\"\"\"\n return self._reset_flag\n\n @reset_flag.setter\n def reset_flag(self, value) -> None:\n if isinstance(value, bool):\n self._reset_flag = value\n\n def connection_made(\n self, transport: asyncio.BaseTransport) -> None:\n \"\"\"Notify serial port opened.\"\"\"\n self._transport = transport\n message = f\"Opened {transport.serial.name} serial port\"\n if self._reset_flag:\n self._reset_flag = False\n return\n SERIAL_LOGGER.info(message)\n self._connected_event.set()\n\n def connection_lost(self, exc: typing.Optional[Exception]) -> None:\n \"\"\"Lost connection.\"\"\"\n if self._api is not None:\n self._api.connection_lost(exc)\n self.close()\n\n # Do not try to reconnect if no exception occured.\n if exc is None:\n return\n\n if not self._reset_flag:\n SERIAL_LOGGER.warning(\n f\"Unexpected connection lost... {exc}\")\n self._reconnect_task = asyncio.create_task(self._reconnect())\n\n async def _reconnect(self, timeout=RECONNECT_TIMEOUT):\n \"\"\"Try to reconnect the disconnected serial port.\"\"\"\n SERIAL_LOGGER.info(\"Trying to reconnect to the NCP module!\")\n assert self._api is not None\n loop = asyncio.get_running_loop()\n async with async_timeout.timeout(timeout):\n while True:\n try:\n _, proto = await zigpy.serial.create_serial_connection(\n loop=loop,\n protocol_factory=lambda: self,\n url=self._port,\n baudrate=self._baudrate,\n xonxoff=(self._flow_control == \"software\"),\n rtscts=(self._flow_control == \"hardware\"),\n )\n self._api._uart = proto\n break\n except serial.serialutil.SerialException:\n await asyncio.sleep(0.1)\n\n def close(self) -> None:\n \"\"\"Close serial connection.\"\"\"\n self._buffer.clear()\n self._ack_seq = 0\n self._pack_seq = 0\n if self._reconnect_task is not None:\n self._reconnect_task.cancel()\n self._reconnect_task = None\n # Reset transport\n if self._transport:\n message = \"Closing serial port\"\n LOGGER.debug(message)\n SERIAL_LOGGER.info(message)\n self._transport.close()\n self._transport = None\n\n def write(self, data: bytes) -> None:\n \"\"\"Write raw bytes to the transport.\n\n This method should be used instead\n of directly writing to the transport with `transport.write`.\n \"\"\"\n if self._transport is not None:\n SERIAL_LOGGER.debug(\"TX: %s\", t.Bytes.__repr__(data))\n self._transport.write(data)\n\n async def send(self, frame: Frame) -> None:\n \"\"\"Send data, and wait for acknowledgement.\"\"\"\n async with self._tx_lock:\n if isinstance(frame, Frame) and self._transport:\n self._ack_received_event = asyncio.Event()\n try:\n async with async_timeout.timeout(ACK_TIMEOUT):\n frame = self._set_frame_flag(frame)\n frame = self._ll_checksum(frame)\n self.write(frame.serialize())\n await self._ack_received_event.wait()\n except asyncio.TimeoutError:\n SERIAL_LOGGER.debug(\n f'No ACK after {ACK_TIMEOUT}s for '\n f'{t.Bytes.__repr__(frame.serialize())}'\n )\n\n def _set_frame_flag(self, frame):\n \"\"\"Return frame with required flags set.\"\"\"\n flag = t.LLFlags(self._pack_seq << 2)\n flag |= t.LLFlags.FirstFrag\n flag |= t.LLFlags.LastFrag\n frame.ll_header = frame.ll_header.with_flags(flag)\n return frame\n\n def _ll_checksum(self, frame):\n \"\"\"Return frame with new crc8 checksum calculation.\"\"\"\n crc = CRC8(frame.ll_header.serialize()[2:6]).digest()\n frame.ll_header = frame.ll_header.with_crc8(crc)\n return frame\n\n def data_received(self, data: bytes) -> None:\n \"\"\"Notify when there is data received from serial connection.\"\"\"\n self._buffer += data\n for frame in self._extract_frames():\n SERIAL_LOGGER.debug(f\"RX: {t.Bytes.__repr__(frame.serialize())}\")\n ll_header = frame.ll_header\n # Check if the frame is an ACK\n if ll_header.flags & t.LLFlags.isACK:\n ack_seq = (ll_header.flags & t.LLFlags.ACKSeq) >> 4\n if ack_seq == self._pack_seq:\n # Calculate next sequence number\n self._pack_seq = self._pack_seq % 3 + 1\n self._ack_received_event.set()\n return\n\n # Acknowledge the received frame\n self._ack_seq = (frame.ll_header.flags & t.LLFlags.PacketSeq) >> 2\n self.write(self._ack_frame().serialize())\n\n if frame.hl_packet is not None:\n try:\n self._api.frame_received(frame)\n except Exception as e:\n LOGGER.error(\n \"Received an exception while passing frame to API: %s\",\n frame,\n exc_info=e,\n )\n\n def _extract_frames(self) -> typing.Iterator[Frame]:\n \"\"\"Extract frames from the buffer until it is exhausted.\"\"\"\n while True:\n try:\n yield self._extract_frame()\n except BufferTooShort:\n # If the buffer is too short, there is nothing more we can do\n break\n except InvalidFrame:\n # If the buffer contains invalid data,\n # drop it until we find the signature\n signature_idx = self._buffer.find(\n Frame.signature.serialize(), 1)\n\n if signature_idx < 0:\n # If we don't have a signature in the buffer,\n # drop everything\n self._buffer.clear()\n else:\n del self._buffer[:signature_idx]\n\n def _extract_frame(self) -> Frame:\n \"\"\"Extract a single frame from the buffer.\"\"\"\n # The shortest possible frame is 7 bytes long\n if len(self._buffer) < 7:\n raise BufferTooShort()\n\n # The buffer must start with a SoF\n if self._buffer[0:2] != Frame.signature.serialize():\n raise InvalidFrame()\n\n length, _ = t.uint16_t.deserialize(self._buffer[2:4])\n\n # Don't bother deserializing anything if the packet is too short\n if len(self._buffer) < length + 2:\n raise BufferTooShort()\n\n # Check that the packet type is ZBOSS NCP API HL.\n if self._buffer[4] != t.TYPE_ZBOSS_NCP_API_HL:\n raise InvalidFrame()\n\n # At this point we should have a complete frame\n # If not, deserialization will fail and the error will propapate up\n frame, rest = Frame.deserialize(self._buffer)\n\n # If we get this far then we have a valid frame. Update the buffer.\n del self._buffer[: len(self._buffer) - len(rest)]\n\n return frame\n\n def _ack_frame(self):\n \"\"\"Return acknowledgement frame.\"\"\"\n ack_frame = Frame.ack(self._ack_seq)\n return ack_frame\n\n def __repr__(self) -> str:\n \"\"\"Return a string representing the class.\"\"\"\n return (\n f\"<\"\n f\"{type(self).__name__} connected to {self.name!r}\"\n f\" at {self.baudrate} baud\"\n f\" (api: {self._api})\"\n f\">\"\n )\n\n\nasync def connect(config: conf.ConfigType, api) -> ZbossNcpProtocol:\n \"\"\"Instantiate Uart object and connect to it.\"\"\"\n loop = asyncio.get_running_loop()\n\n port = config[conf.CONF_DEVICE_PATH]\n baudrate = config[conf.CONF_DEVICE_BAUDRATE]\n flow_control = config[conf.CONF_DEVICE_FLOW_CONTROL]\n\n LOGGER.info(\"Connecting to %s at %s baud\", port, baudrate)\n\n _, protocol = await zigpy.serial.create_serial_connection(\n loop=loop,\n protocol_factory=lambda: ZbossNcpProtocol(config, api),\n url=port,\n baudrate=baudrate,\n xonxoff=(flow_control == \"software\"),\n rtscts=(flow_control == \"hardware\"),\n )\n\n try:\n async with async_timeout.timeout(STARTUP_TIMEOUT):\n await protocol._connected_event.wait()\n except asyncio.TimeoutError:\n protocol.close()\n raise RuntimeError(\"Could not communicate with NCP!\")\n\n return protocol\n","repo_name":"kardia-as/zigpy-zboss","sub_path":"zigpy_zboss/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":10535,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"22"} +{"seq_id":"29397162371","text":"import tensorflow as tf\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport torch\nfrom tensorflow.keras.layers import Bidirectional, LSTM, Dense, Activation, TimeDistributed, Masking, GRU\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.utils import to_categorical\nfrom ast import literal_eval\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom collections import Counter\nfrom random import shuffle\nfrom transformers import AutoTokenizer, AutoModel\n\n\n\n# Modified tensorflow bilstm for sentence-level prediction\n\n#VECTOR_DIM = 1024\nVECTOR_DIM = 768 # bert\nMAX_SEQUENCE_LEN = 50\nIN_FILENAMES_TRAIN = ['../test_datasets/classes_elmo_1.txt',\n '../test_datasets/classes_elmo_2.txt']\nIN_FILENAMES_TEST = ['../test_datasets/classes_elmo_3.txt']\n#IN_FILENAME = '../vector_datasets/vectors_with_classes_elmo_slo_top_full.txt'\n#IN_FILENAME = '../vector_datasets/vectors_with_classes_elmo_slo_200k.txt'\n#IN_FILENAME_TEST = '../vector_datasets/vectors_with_classes_elmo_slo_avg_2_50k.txt'\n#IN_FILENAME_TEST = './vectors_with_classes_elmo_parseme_slo_top_full.txt'\nIN_FILENAME_TEST = None\nNUM_CLASSES = 5\n\n \ndef get_xy_per_expression(filenames, tokenizer, model):\n data_by_expressions = {}\n sent_wide_Y = []\n sents_X = []\n sents_Y = []\n curr_sent_X = []\n curr_sent_Y = []\n expressions = []\n curr_sent_words = []\n print('starting')\n CLS_TO_INT_DICT = {'NE': 3, 'DA': 2, '*':1, 'NEJASEN_ZGLED':4}\n classes = []\n words = []\n X = []\n Y = []\n for filename in filenames:\n print('reading file', filename)\n with open(filename, 'r', encoding='utf-8') as f:\n debug_sent = []\n for i, line in enumerate(f):\n if i % 500 == 0:\n print(i)\n #if i >= 1500:\n # break\n parts = line.split('\\t')\n word = parts[0]\n #print(len(parts))\n if len(word) == 0:\n continue\n if len(parts) != 3:\n continue\n\n #print('len of parts', len(parts))\n word = parts[0]\n cls = parts[1]\n expression = parts[2]\n debug_sent.append((word, cls, expression))\n #print(word, cls)\n classes.append(cls)\n words.append(word)\n #print(exp, vector[:10], cls, expression)\n if not (cls == 'DA' or cls == 'NE' or cls == '*'):\n continue\n curr_sent_words.append(word)\n #print(len(literal_eval(vector)))\n curr_sent_Y.append(CLS_TO_INT_DICT[cls]) \n if word[-1] == '.':\n #print('curr sent words', curr_sent_words)\n str_sentence = ' '.join([x for x in curr_sent_words])\n basic_tokens = [x for x in curr_sent_words]\n tokenized_text = tokenizer.tokenize(str_sentence)\n tokenized_text = [x for x in tokenized_text if x not in [',',':',';','!','?', '.', '\"', \"'\", '/', '\\\\']]\n if len(basic_tokens) == len(tokenized_text):\n print('tokenizer didn\\'t do anything')\n print(basic_tokens)\n print(tokenized_text)\n if len(tokenized_text) > 510:\n curr_sent = []\n curr_sent_words = []\n curr_sent_X = []\n curr_sent_Y = []\n debug_sent = []\n continue\n #print(expression)\n expanded_classes = []\n current_class_index = -1\n #print(basic_tokens)\n for w in tokenized_text:\n #print(w[0:2])\n if w[0:2] == '##':\n expanded_classes.append(curr_sent_Y[current_class_index])\n #print(w, curr_sent_Y[current_class_index], current_class_index, len(curr_sent_Y))\n else:\n current_class_index += 1\n expanded_classes.append(curr_sent_Y[current_class_index])\n #print(w, curr_sent_Y[current_class_index], current_class_index, len(curr_sent_Y))\n \n #for x, y in zip(tokenized_text, expanded_classes):\n # print(x,y)\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n tokens_tensor = torch.tensor([indexed_tokens])\n with torch.no_grad():\n outputs = model(tokens_tensor)\n predictions = outputs[0]\n vectors = predictions.numpy()[0]\n if expression not in data_by_expressions.keys():\n #print(expression)\n data_by_expressions[expression] = [(vectors, np.array(expanded_classes))]\n else:\n #print(expression)\n data_by_expressions[expression].append((vectors, np.array(expanded_classes)))\n sent_wide_cls = None\n if CLS_TO_INT_DICT['DA'] in curr_sent_Y:\n sent_wide_cls = CLS_TO_INT_DICT['DA']\n elif CLS_TO_INT_DICT['NE'] in curr_sent_Y:\n sent_wide_cls = CLS_TO_INT_DICT['NE']\n else:\n sent_wide_cls = CLS_TO_INT_DICT['NEJASEN_ZGLED']\n print('debug sent', debug_sent)\n sent_wide_Y.append(sent_wide_cls)\n debug_sent = []\n curr_sent_X = []\n curr_sent_words = []\n curr_sent_Y = []\n #if cls == '5':\n # print(line)\n X = np.array(X)\n Y = np.array(Y)\n sents_X = np.array(sents_X)\n sents_Y = np.array(sents_Y)\n \n print(Counter(classes))\n #print(Counter(words))\n #print(X.shape)\n #print(Y.shape)\n #print(sents_X.shape)\n #print(sents_Y.shape)\n #return sents_X, sents_Y\n return data_by_expressions\n\n \ndef bert_tensorflow_test(X_train, X_test, Y_train, Y_test):\n # Model\n model = Sequential()\n \n #model.add(Masking(mask_value=0.0, input_shape=(MAX_SEQUENCE_LEN,VECTOR_DIM)))\n model.add(Masking(mask_value=0.0, dtype='float64'))\n #forward_layer = LSTM(200, return_sequences=True)\n forward_layer = GRU(10, return_sequences=True, dropout=0.5)\n #backward_layer = LSTM(200, activation='relu', return_sequences=True,\n backward_layer = GRU(10, return_sequences=True, dropout=0.5,\n go_backwards=True)\n model.add(Bidirectional(forward_layer, backward_layer=backward_layer))#,\n #input_shape=(MAX_SEQUENCE_LEN,VECTOR_DIM)))\n #model.add(TimeDistributed(Dense(NUM_CLASSES)))\n # Remove TimeDistributed() so that predictions are now made for the entire sentence\n model.add(TimeDistributed(Dense(NUM_CLASSES)))\n model.add(Activation('softmax'))\n #print('preds shape', model.predict(X_train[:3]).shape)\n #print('Y_train shape', Y_train[:3].shape)\n #print(list(Y_train[:3]))\n classes = []\n for y in Y_train:\n cls = np.argmax(y)\n classes.append(cls)\n print(Counter(classes))\n\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n print('compiled model')\n model.fit(X_train, Y_train, batch_size=4, epochs=2, validation_split=0.0)\n print('fit model')\n eval = model.evaluate(X_test, Y_test, batch_size=4)\n #print('X_test[0]')\n #print(X_test[0])\n #print(X_train[0])\n preds = model.predict_proba(X_test, verbose=1, batch_size=4)\n #print(preds)\n num_correct = 0\n num_incorrect = 0\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n # idiomatic = 2, non-idiomatic = 3\n with open('preds_out_temp.txt', 'w') as tempoutf:\n for pred, y in zip(preds, Y_test):\n for token_pred, token_y in zip(pred, y):\n #print(np.argmax(token_pred), np.argmax(token_y))\n if np.argmax(token_y) == 2 or np.argmax(token_y) == 3:\n if np.argmax(token_y) == np.argmax(token_pred):\n num_correct += 1\n else:\n num_incorrect += 1\n if np.argmax(token_pred) == 2 and np.argmax(token_y) == 2:\n TP += 1\n if np.argmax(token_pred) != 2 and np.argmax(token_y) != 2:\n TN += 1\n if np.argmax(token_pred) == 2 and np.argmax(token_y) != 2:\n FP += 1\n if np.argmax(token_pred) != 2 and np.argmax(token_y) == 2:\n FN += 1\n if num_correct + num_incorrect == 0:\n custom_accuracy = 0\n else:\n custom_accuracy = num_correct/(num_correct+num_incorrect)\n print('custom accuracy is', custom_accuracy)\n for y in Y_test:\n cls = np.argmax(y)\n classes.append(cls)\n class_nums = Counter(classes)\n print(class_nums)\n default_acc = class_nums[2] / (class_nums[2] + class_nums[3])\n print('default accuracy is', default_acc, 'or', 1 - default_acc)\n return eval, custom_accuracy, default_acc, [TP, TN, FP, FN]\n\n\ndef get_already_processed(filename):\n if filename == None:\n return set([])\n already_processed = []\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n words = line.split(' ')\n if words[0] == 'EXP':\n already_processed.append(' '.join(words[1:]))\n return set(already_processed)\n\n\n\n\"\"\"\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n #logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n #print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\"\"\"\n\n#X_train, X_test, Y_train, Y_test = get_bert_data(IN_FILENAME, None)\n#bert_tensorflow_test(X_train, X_test, Y_train, Y_test)\n#tf.disable_v2_behavior()\nmodel = AutoModel.from_pretrained(\"EMBEDDIA/crosloengual-bert\")\ntokenizer = AutoTokenizer.from_pretrained(\"EMBEDDIA/crosloengual-bert\")\nprint('model loaded ok')\n\n\ndbe_train = get_xy_per_expression(IN_FILENAMES_TRAIN, tokenizer, model)\ntrain_data = []\nfor k, i in dbe_train.items():\n train_data += i\n\ndbe_test = get_xy_per_expression(IN_FILENAMES_TEST, tokenizer, model)\ntest_data = []\nfor k, i in dbe_test.items():\n test_data += i\n\nwith open('./test_results_12vs3_tokens.txt', 'w', encoding='utf-8') as outf:\n f1s = []\n accs = []\n shuffle(train_data)\n shuffle(test_data)\n train_data = train_data[:int(len(train_data)*0.8)]\n #print(k, len(train_data), len(test_data))\n \n #train_X = np.array([x[0] for x in train_data])\n train_X = [x[0] for x in train_data]\n train_Y = [x[1] for x in train_data]\n #test_X = np.array([x[0] for x in test_data])\n test_X = [x[0] for x in test_data]\n test_Y = [x[1] for x in test_data]\n \n\n #print(sent_train_Y[:10])\n #print(sent_test_Y[:10])\n \n #print(train_Y)\n for y in train_Y:\n y2 = to_categorical(y, num_classes=NUM_CLASSES) \n #print(y2) \n #train_Y = to_categorical(train_Y, num_classes=NUM_CLASSES)\n #test_Y = to_categorical(stest_Y, num_classes=NUM_CLASSES)\n train_Y = [to_categorical(y, num_classes=NUM_CLASSES) for y in train_Y]\n test_Y = [to_categorical(y, num_classes=NUM_CLASSES) for y in test_Y]\n train_Y = np.array(train_Y)\n test_Y = np.array(test_Y)\n print('training length', len(train_X))\n print('test lentgh', len(test_X))\n #for x in train_X:\n #print('x', x)\n #print('type', type(x))\n #print('len', len(x))\n #print('type x[0][0]', type(x[0][0]))\n padded_train_X = pad_sequences(train_X, padding='post', maxlen=MAX_SEQUENCE_LEN, dtype='float', value=0.0)\n padded_test_X = pad_sequences(test_X, padding='post', maxlen=MAX_SEQUENCE_LEN, dtype='float', value=0.0)\n padded_train_Y = pad_sequences(train_Y, padding='post', maxlen=MAX_SEQUENCE_LEN)\n padded_test_Y = pad_sequences(test_Y, padding='post', maxlen=MAX_SEQUENCE_LEN)\n #test_masking_layer = Masking(mask_value=0.0)\n #masked_embedding = test_masking_layer(padded_train_X)\n #np.set_printoptions(threshold=sys.maxsize)\n #print(padded_train_X.shape, file=outf)\n #print('masked layer', masked_embedding._keras_mask, file=outf)\n #print('wrote masking layer')\n #raise TypeError\n results = bert_tensorflow_test(padded_train_X, padded_test_X, padded_train_Y, padded_test_Y)\n TP, TN, FP, FN = results[3]\n print('EXP', k[:-1], file=outf)\n print('eval is', results[0], file=outf)\n print('eval is', results[0])\n accs.append(results[0][1])\n print('custom accuracy is', results[1], file=outf)\n print('custom accuracy is', results[1])\n print('default accuracy is', results[2], 'or', 1-results[2], file=outf)\n print('default accuracy is', results[2], 'or', 1-results[2])\n print('num train is', len(padded_train_X), file=outf) \n print('num train is', len(padded_train_X))\n print('num test is', len(padded_test_X), file=outf)\n print('num test is', len(padded_test_X))\n print('TP', TP, 'TN', TN, 'FP', FP, 'FN', FN, file=outf)\n print('TP', TP, 'TN', TN, 'FP', FP, 'FN', FN)\n\n if TP == 0:\n precision = 0\n recall = 0\n print('precision', 0, file=outf)\n print('recall', 0, file=outf)\n print('F1 score', 0)\n f1s.append(0)\n else:\n precision = TP/(TP+FP)\n recall = TP/(TP+FN)\n print('precision', TP/(TP+FP), file=outf)\n print('recall', TP/(TP+FN), file=outf)\n print('F1 score', (2*precision*recall)/(precision+recall))\n f1s.append((2*precision*recall)/(precision+recall))\n \nprint('acc average', sum(accs)/len(accs))\nprint('F1 average', sum(f1s)/len(f1s))\n\n \n","repo_name":"TadejSkvorc/MICE","sub_path":"models/crosloengual_bert_multiple_files.py","file_name":"crosloengual_bert_multiple_files.py","file_ext":"py","file_size_in_byte":14386,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"36755396534","text":"import glob\nimport streamlit as st\nimport wget\nfrom PIL import Image\nimport torch\nimport cv2\nimport os\nimport time\nimport numpy as np\nimport base64\nst.set_page_config(layout=\"wide\")\n\n\ncfg_safety_model_path = 'models/best_v8safety_openvino_model/'\ncfg_weapon_model_path = 'models/weaponv155spt100ep_openvino_model/'\ncfg_vehicle_person_model_path = 'models/dronev75spt100ep_openvino_model/'\ncfg_switch_model_path = 'models/coxial_openvino_model/' \n\nmodel = None\nconfidence = .25\nvideo_type = None\nvideo_src = None\nuser_input = None\n\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n\ndef video_input(data_src, data_path, key):\n vid_file = None\n # if data_path == cfg_vehicle_person_model_path:\n if data_src == 'Live data':\n vid_file = \"livewebcam\"\n # elif data_src == 'Sample data':\n # vid_file = \"data/sample_videos/5secallguns.mp4\"\n elif data_src == 'Upload data':\n vid_bytes = st.file_uploader(\"Upload a video\", type=['mp4', 'mpv', 'avi'], key=key)\n if vid_bytes:\n vid_file = vid_bytes.name.split('.')[-1]\n with open(vid_file, 'wb') as out:\n out.write(vid_bytes.read())\n elif data_src == 'Rtsp data':\n vid_file = user_input\n st.write(\"You entered: \", user_input)\n # if data_path == cfg_safety_model_path:\n # if data_src == 'Live data':\n # vid_file = \"livewebcam\"\n # # elif data_src == 'Sample data':\n # # vid_file = \"data/sample_videos/5secallguns.mp4\"\n # elif data_src == 'Upload data':\n # vid_bytes = st.sidebar.file_uploader(\"Upload a video\", type=['mp4', 'mpv', 'avi'], key=key)\n # if vid_bytes:\n # vid_file = vid_bytes.name.split('.')[-1]\n # with open(vid_file, 'wb') as out:\n # out.write(vid_bytes.read())\n # elif data_src == 'Rtsp data':\n # vid_file = user_input\n # st.write(\"You entered: \", user_input)\n \n # video_src = vid_file\n\n if vid_file:\n if vid_file == \"livewebcam\":\n vid_file = 0 #default webcam for windows machine, need to enable webcam for Linux Ubuntu VM [install virtual box extension pack]\n cap = cv2.VideoCapture(vid_file)\n video_src = cap\n custom_size = st.checkbox(\"Custom frame size\", key=key)\n # confidence slider\n # confidence = st.slider('Confidence', min_value=0.1, max_value=1.0, value=.45)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n if custom_size:\n width = st.sidebar.number_input(\"Width\", min_value=120, step=20, value=width)\n height = st.sidebar.number_input(\"Height\", min_value=120, step=20, value=height)\n else:\n width = 1000\n height = 500\n\n fps = 0\n # st1, st2, st3 = st.columns(3)\n\n #COMMENT THIS OUT //--------------------\n # with st1:\n # st.markdown(\"## Height\")\n # st1_text = st.markdown(f\"{height}\")\n # with st2:\n # st.markdown(\"## Width\")\n # st2_text = st.markdown(f\"{width}\")\n # with st3:\n # st.markdown(\"## FPS\")\n # st3_text = st.markdown(f\"{fps}\")\n #COMMENT THIS OUT //--------------------\n\n # st.markdown(\"---\")\n output = st.empty()\n prev_time = 0\n curr_time = 0\n while True:\n ret, frame = cap.read()\n if not ret:\n st.write(\"Can't read frame, stream ended? Exiting ....\")\n break\n frame = cv2.resize(frame, (width, height))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n output_img = infer_image(frame)\n output.image(output_img)\n curr_time = time.time()\n fps = 1 / (curr_time - prev_time)\n prev_time = curr_time\n\n #COMMENT THIS OUT //--------------------\n # st1_text.markdown(f\"**{height}**\")\n # st2_text.markdown(f\"**{width}**\")\n # st3_text.markdown(f\"**{fps:.2f}**\")\n #COMMENT THIS OUT //--------------------\n\n cap.release()\n\n\ndef infer_image(im, size=None):\n model.conf = confidence\n model.source = video_src\n model.iou = 0.65\n model.agnostic = True # NMS class-agnostic\n model.multi_label = False\n model.size = 640\n result = model(im, size=size) if size else model(im)\n result.render()\n image = Image.fromarray(result.ims[0])\n return image\n\n\n@st.cache_resource\ndef load_model(path, device):\n torch.hub._validate_not_a_forked_repo = lambda a, b, c: True\n model_ = torch.hub.load('ultralytics/yolov5', 'custom', path=path, force_reload=True)\n model_.to(device)\n print(\"model to \", device)\n return model_\n\n\n@st.cache_resource\ndef download_model(url):\n model_file = wget.download(url, out=\"models\")\n return model_file\n\n\ndef get_user_model():\n model_src = st.sidebar.radio(\"Model source\", [\"file upload\", \"url\"])\n model_file = None\n if model_src == \"file upload\":\n model_bytes = st.sidebar.file_uploader(\"Upload a model file\", type=['pt'])\n if model_bytes:\n model_file = \"models/uploaded_\" + model_bytes.name\n with open(model_file, 'wb') as out:\n out.write(model_bytes.read())\n else:\n url = st.sidebar.text_input(\"model url\")\n if url:\n model_file_ = download_model(url)\n if model_file_.split(\".\")[-1] == \"pt\":\n model_file = model_file_\n\n return model_file\n\ndef main():\n # global variables\n global model, confidence, cfg_vehicle_person_model_path, cfg_safety_model_path, cfg_switch_model_path, cfg_weapon_model_path, video_src, user_input\n\n st.title(\"Use Cases\")\n\n # st.write(\"This page is still being implemented / Not fully functional\")\n\n st.markdown(\"---\")\n\n\n row1_col1, row1_col2 = st.columns(2)\n\n with row1_col1:\n #with st.expander(\"Coaxial Cable Detection\"):\n #\"\"\"### Coaxial Cable Detection\"\"\"\n #st.write(\"[Coaxial Cable Detection](http://localhost:8501/Coaxial_Cable)\")\n style = f\" Coaxial Cable Detection\"\n st.write(style,unsafe_allow_html=True)\n file_ = open(\"data/sample_gifs/coaxialcable2.gif\", \"rb\")\n contents = file_.read()\n data_url = base64.b64encode(contents).decode(\"utf-8\")\n file_.close()\n st.markdown(\n f'\"coxial',\n unsafe_allow_html=True,\n )\n\n with row1_col2:\n #with st.expander(\"Safety Detection\"):\n #\"\"\"### Safety Detection\"\"\"\n #st.write(\"[Safety Detection](http://localhost:8501/Safety)\")\n style = f\" Safety Detection\"\n st.write(style,unsafe_allow_html=True)\n file_ = open(\"data/sample_gifs/7sec_safety_off_to_on.gif\", \"rb\")\n contents = file_.read()\n data_url = base64.b64encode(contents).decode(\"utf-8\")\n file_.close()\n st.markdown(\n f'\"safety',\n unsafe_allow_html=True,\n )\n\n st.text(\"\")\n st.text(\"\")\n\n row2_col1, row2_col2 = st.columns(2)\n\n with row2_col1:\n #with st.expander(\"Traffic Detection\"):\n #\"\"\"### Traffic Detection\"\"\"\n #st.write(\"[Traffic Detection](http://localhost:8501/Traffic)\")\n style = f\" Traffic Detection\"\n st.write(style,unsafe_allow_html=True)\n file_ = open(\"data/sample_gifs/3sectrafficcam.gif\", \"rb\")\n contents = file_.read()\n data_url = base64.b64encode(contents).decode(\"utf-8\")\n file_.close()\n st.markdown(\n f'\"traffic',\n unsafe_allow_html=True,\n )\n\n with row2_col2:\n #with st.expander(\"Weapon Detection\"):\n #\"\"\"### Weapon Detection\"\"\"\n #st.write(\"[Weapon Detection](http://localhost:8501/Weapon)\")\n style = f\" Weapon Detection\"\n st.write(style,unsafe_allow_html=True)\n file_ = open(\"data/sample_gifs/5secallguns.gif\", \"rb\")\n contents = file_.read()\n data_url = base64.b64encode(contents).decode(\"utf-8\")\n file_.close()\n st.markdown(\n f'\"weapon',\n unsafe_allow_html=True,\n )\n\n\n\n # st.sidebar.title(\"Settings\")\n\n # device options\n # if torch.cuda.is_available():\n # device_option = st.sidebar.radio(\"Select Device\", ['cpu', 'cuda'], disabled=False, index=0)\n # else:\n # device_option = st.sidebar.radio(\"Select Device\", ['cpu', 'cuda'], disabled=True, index=0)\n\n # with st.expander(\"Safety Detection\"):\n # # load model\n # model = load_model(cfg_safety_model_path, None)\n\n # # vid src option slider\n # #with st.sidebar:\n # video_type = st.radio(\"Choose your video type\", [\"Upload a video\", \"Rtsp\", \"Live webcam\"], key=\"key_7\") #\"Sample data\", \n\n # if video_type == \"Live webcam\":\n # video_input('Live data', cfg_vehicle_person_model_path, key=\"key_8\")\n # # elif video_type == \"Sample data\":\n # # video_input('Sample data')\n # elif video_type == \"Upload a video\":\n # video_input('Upload data', cfg_vehicle_person_model_path, key=\"key_9\")\n # elif video_type == \"Rtsp\":\n # user_input = st.text_input(\"Enter the rtsp address ( rtsp://address )\", key=\"key_29\")\n # # video_src = user_input\n # if user_input:\n # video_input('Rtsp data', cfg_vehicle_person_model_path, key=\"key_10\")\n \n # # confidence slider\n # confidence = st.slider('Confidence', min_value=0.1, max_value=1.0, value=.65, key=\"key_24\")\n \n # with st.expander(\"Coaxial Cable Detection\"):\n # # load model\n # model = load_model(cfg_safety_model_path, None)\n\n # # vid src option slider\n # #with st.sidebar:\n # video_type = st.radio(\"Choose your video type\", [\"Upload a video\", \"Rtsp\", \"Live webcam\"], key=\"key_11\") #\"Sample data\", \n\n # if video_type == \"Live webcam\":\n # video_input('Live data', cfg_switch_model_path, key=\"key_12\")\n # # elif video_type == \"Sample data\":\n # # video_input('Sample data')\n # elif video_type == \"Upload a video\":\n # video_input('Upload data', cfg_switch_model_path, key=\"key_13\")\n # elif video_type == \"Rtsp\":\n # user_input = st.text_input(\"Enter the rtsp address ( rtsp://address )\", key=\"key_28\")\n # # video_src = user_input\n # if user_input:\n # video_input('Rtsp data', cfg_switch_model_path, key=\"key_14\")\n \n # # confidence slider\n # confidence = st.slider('Confidence', min_value=0.1, max_value=1.0, value=.1, key=\"key_25\")\n \n # with st.expander(\"Traffic Detection\"): #with st.expander(\"Person Detection\"):\n # # load model\n # model = load_model(cfg_vehicle_person_model_path, None)\n\n # # vid src option slider\n # #with st.sidebar:\n # video_type = st.radio(\"Choose your video type\", [\"Upload a video\", \"Rtsp\", \"Live webcam\"], key=\"key_3\") #\"Sample data\", \n\n # if video_type == \"Live webcam\":\n # video_input('Live data', cfg_vehicle_person_model_path, key=\"key_4\")\n # # elif video_type == \"Sample data\":\n # # video_input('Sample data')\n # elif video_type == \"Upload a video\":\n # video_input('Upload data', cfg_vehicle_person_model_path, key=\"key_5\")\n # elif video_type == \"Rtsp\":\n # user_input = st.text_input(\"Enter the rtsp address ( rtsp://address )\", key=\"key_30\")\n # # video_src = user_input\n # if user_input:\n # video_input('Rtsp data', cfg_vehicle_person_model_path, key=\"key_6\")\n \n # # confidence slider\n # confidence = st.slider('Confidence', min_value=0.1, max_value=1.0, value=.5, key=\"key_23\")\n\n # with st.expander(\"Weapon Detection\"):\n # # load model\n # model = load_model(cfg_safety_model_path, None)\n\n # # vid src option slider\n # #with st.sidebar:\n # video_type = st.radio(\"Choose your video type\", [\"Upload a video\", \"Rtsp\", \"Live webcam\"], key=\"key_15\") #\"Sample data\", \n\n # if video_type == \"Live webcam\":\n # video_input('Live data', cfg_weapon_model_path, key=\"key_16\")\n # # elif video_type == \"Sample data\":\n # # video_input('Sample data')\n # elif video_type == \"Upload a video\":\n # video_input('Upload data', cfg_weapon_model_path, key=\"key_17\")\n # elif video_type == \"Rtsp\":\n # user_input = st.text_input(\"Enter the rtsp address ( rtsp://address )\", key=\"key_27\")\n # # video_src = user_input\n # if user_input:\n # video_input('Rtsp data', cfg_weapon_model_path, key=\"key_18\")\n \n # # confidence slider\n # confidence = st.slider('Confidence', min_value=0.1, max_value=1.0, value=.5, key=\"key_26\")\n\n #st.sidebar.markdown(\"---\")\n\n \n\nif __name__ == \"__main__\":\n try:\n main()\n except SystemExit:\n pass\n","repo_name":"steve-qt/streamlit-yolov5","sub_path":"pages/08_Use_Cases.py","file_name":"08_Use_Cases.py","file_ext":"py","file_size_in_byte":14129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14180399965","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n# @Time : 2019/11/23 18:25 \n# @Author : Patrick \n# @File : BulidCities.py\n# @Software: PyCharm\n\ndef fill_city_days(i, j, map, city, days):\n mi = float('inf')\n for ci in city:\n x, y = ci\n mi = min(abs(x - i) + abs(y - j), mi)\n days[i][j] = mi\n\n\ndef output(map):\n count = 0\n visit = [[False for _ in range(len(map[0]))] for _ in range(len(map))]\n days = [[0 for _ in range(len(map[0]))] for _ in range(len(map))]\n city = [(0, 0)]\n ans = 0\n dfs_city(0, 0, map, visit, city, days)\n for i in range(len(map)):\n for j in range(len(map[0])):\n if (i == 0 and j == 0) or map[i][j] == '#' or map[i][j] == '$':\n continue\n else:\n fill_city_days(i, j, map, city, days)\n for k in range(len(map)):\n ans += sum(days[k])\n return ans\n\n\ndef dfs_city(i, j, map, visited, city, days):\n if i >= len(map) or j >= len(map[0]) or i <= 0 or j <= 0 or visited[i][j]:\n return\n visited[i][j] = True\n if map[i][j] == '$':\n fill_city_days(i, j, map, city, days)\n city.append((i, j))\n\n dfs_city(i - 1, j, map, visited, city, days)\n dfs_city(i, j - 1, map, visited, city, days)\n dfs_city(i + 1, j, map, visited, city, days)\n dfs_city(i, j + 1, map, visited, city, days)\n\n\nrow = int(input())\ncoloum = int(input())\nif row == 0:\n print(0)\n exit(0)\nmap = []\nfor i in range(row):\n map.append(list(input()))\n\nprint(output(map))\n","repo_name":"hehehe47/LeetCode","sub_path":"SAP/BulidCities.py","file_name":"BulidCities.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43045988340","text":"\"\"\"This module contains the general information for BiosVfCPUPerformance ManagedObject.\"\"\"\n\nfrom ...imcmo import ManagedObject\nfrom ...imccoremeta import MoPropertyMeta, MoMeta\nfrom ...imcmeta import VersionMeta\n\n\nclass BiosVfCPUPerformanceConsts:\n VP_CPUPERFORMANCE_CUSTOM = \"custom\"\n VP_CPUPERFORMANCE_ENTERPRISE = \"enterprise\"\n VP_CPUPERFORMANCE_HIGH_THROUGHPUT = \"high-throughput\"\n VP_CPUPERFORMANCE_HPC = \"hpc\"\n VP_CPUPERFORMANCE_PLATFORM_DEFAULT = \"platform-default\"\n\n\nclass BiosVfCPUPerformance(ManagedObject):\n \"\"\"This is BiosVfCPUPerformance class.\"\"\"\n\n consts = BiosVfCPUPerformanceConsts()\n naming_props = set([])\n\n mo_meta = {\n \"classic\": MoMeta(\"BiosVfCPUPerformance\", \"biosVfCPUPerformance\", \"CPU-Performance\", VersionMeta.Version151f, \"InputOutput\", 0x1f, [], [\"admin\", \"read-only\", \"user\"], ['biosPlatformDefaults', 'biosSettings'], [], [\"Get\", \"Set\"]),\n \"modular\": MoMeta(\"BiosVfCPUPerformance\", \"biosVfCPUPerformance\", \"CPU-Performance\", VersionMeta.Version2013e, \"InputOutput\", 0x1f, [], [\"admin\", \"read-only\", \"user\"], ['biosPlatformDefaults', 'biosSettings'], [], [\"Get\", \"Set\"])\n }\n\n\n prop_meta = {\n\n \"classic\": {\n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),\n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),\n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [\"\", \"created\", \"deleted\", \"modified\", \"removed\"], []),\n \"vp_cpu_performance\": MoPropertyMeta(\"vp_cpu_performance\", \"vpCPUPerformance\", \"string\", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [\"custom\", \"enterprise\", \"high-throughput\", \"hpc\", \"platform-default\"], []),\n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),\n },\n\n \"modular\": {\n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),\n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),\n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [\"\", \"created\", \"deleted\", \"modified\", \"removed\"], []),\n \"vp_cpu_performance\": MoPropertyMeta(\"vp_cpu_performance\", \"vpCPUPerformance\", \"string\", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [\"custom\", \"enterprise\", \"high-throughput\", \"hpc\", \"platform-default\"], []),\n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),\n },\n\n }\n\n prop_map = {\n\n \"classic\": {\n \"dn\": \"dn\", \n \"rn\": \"rn\", \n \"status\": \"status\", \n \"vpCPUPerformance\": \"vp_cpu_performance\", \n \"childAction\": \"child_action\", \n },\n\n \"modular\": {\n \"dn\": \"dn\", \n \"rn\": \"rn\", \n \"status\": \"status\", \n \"vpCPUPerformance\": \"vp_cpu_performance\", \n \"childAction\": \"child_action\", \n },\n\n }\n\n def __init__(self, parent_mo_or_dn, **kwargs):\n self._dirty_mask = 0\n self.status = None\n self.vp_cpu_performance = None\n self.child_action = None\n\n ManagedObject.__init__(self, \"BiosVfCPUPerformance\", parent_mo_or_dn, **kwargs)\n\n","repo_name":"CiscoUcs/imcsdk","sub_path":"imcsdk/mometa/bios/BiosVfCPUPerformance.py","file_name":"BiosVfCPUPerformance.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"22"} +{"seq_id":"22587244451","text":"\"\"\"\n Stores the outline for the GUI via a new class and dictionaries.\n\n The `Widg` class stores positional info and adds a constructor that creates the QObject. If the \n object is a button, adds the name to the text.\n\n\"\"\"\n\nfrom PySide6.QtWidgets import (QLabel, QPushButton, QComboBox, QLineEdit)\nfrom .initialisers import *\n\nclass Widg():\n \"\"\"\n Holds information about widgets - almost a dictionary\n \"\"\"\n def __init__(self, name: str, Qtype, row_pos, col_pos, row_span, col_span, initialiser = None):\n self.name = name\n self.Qtype = Qtype\n self.pos = [row_pos, col_pos, row_span, col_span]\n self.init = initialiser\n\n def construct(self):\n \"\"\"\n Returns the QWidget object after construction\n IF self.qtype is QPushButton, add the name to the constructor\n \"\"\"\n if self.Qtype is QPushButton: return self.Qtype(self.name)\n else: return self.Qtype()\n\n\"\"\"\n OUTLINE of the GUI\n Dictionary of categories, with several objs per category\n Each object key should be unique\n\"\"\"\nOUTLINE = {\n \"config\": {\n \"config_title\": Widg(\"Configuration\", QLabel, 0, 0, 1, 2),\n\n \"mot_path_name\": Widg(\" MOT Path\", QLabel, 1, 0, 1, 1),\n \"mot_path\": Widg(\"Choose MOT Path\", QPushButton, 1, 1, 1, 1),\n\n \"folder_name\": Widg(\" Folder Name\", QLabel, 2, 0, 1, 1),\n \"folder\": Widg(\"folder\", QComboBox, 2, 1, 1, 1),\n\n \"sequence_name\":Widg(\" Sequence Num\", QLabel, 3, 0, 1, 1),\n \"sequence\": Widg(\"sequence\", QComboBox, 3, 1, 1, 1),\n\n \"max_ram_name\": Widg(\" Max RAM (GB)\", QLabel, 4, 0, 1, 1),\n \"max_ram\": Widg(\"max_ram\", QLineEdit, 4, 1, 1, 1, max_ram)\n },\n \"solutions\": {\n \"solutions_title\": Widg(\"Solutions\", QLabel, 5, 0, 1, 2),\n\n \"display_frame_id_name\": Widg(\" Frame ID\", QLabel, 6, 0, 1, 1),\n \"display_frame_id\": Widg(\"display_frame_id\", QComboBox, 6, 1, 1, 1),\n \n \"display\": Widg(\"Display\", QPushButton, 7, 0, 1, 2),\n \"clear_display\": Widg(\"Clear Display\", QPushButton, 8, 0, 1, 2)\n },\n \"hyperparams\": {\n \"hyperparam_title\": Widg(\"Hyperparameters\", QLabel, 0, 2, 1, 2),\n\n \"tau_name\": Widg(\"Tau\", QLabel, 1, 2, 1, 1),\n \"tau\": Widg(\"tau\", QLineEdit, 1, 3, 1, 1, tau),\n\n \"sigma_p_name\": Widg(\"Sigma P\", QLabel, 2, 2, 1, 1),\n \"sigma_p\": Widg(\"sigma_p\", QLineEdit, 2, 3, 1, 1),\n\n \"sigma_v_name\": Widg(\"Sigma V\", QLabel, 3, 2, 1, 1),\n \"sigma_v\": Widg(\"sigma_v\", QLineEdit, 3, 3, 1, 1),\n\n \"sigma_a_name\": Widg(\"Sigma A\", QLabel, 4, 2, 1, 1),\n \"sigma_a\": Widg(\"sigma_a\", QLineEdit, 4, 3, 1, 1),\n\n \"sigma_d_name\": Widg(\"Sigma D\", QLabel, 5, 2, 1, 1),\n \"sigma_d\": Widg(\"sigma_d\", QLineEdit, 5, 3, 1, 1, sigma)\n },\n \"tracking\": {\n \"tracking_title\": Widg(\"Tracking\", QLabel, 9, 0, 1, 4),\n\n \"start_frame_id_name\": Widg(\" First Frame ID\", QLabel, 10, 0, 1, 1),\n \"start_frame_id\": Widg(\"start_frame_id\", QComboBox, 10, 1, 1, 1),\n\n \"end_frame_id_name\": Widg(\" Final Frame ID\", QLabel, 11, 0, 1, 1),\n \"end_frame_id\": Widg(\"end_frame_id\", QComboBox, 11, 1, 1, 1),\n\n \"track\": Widg(\"Track and\\nEvaluate\", QPushButton, 10, 2, 2, 2)\n },\n \"results\": {\n \"results_title\": Widg(\"Results\", QLabel, 12, 0, 1, 4),\n \"results\": Widg(\" \", QLabel, 13, 0, 4, 4),\n \"save_results\": Widg(\"Save Results\", QPushButton, 17, 0, 1, 2, save_results),\n \"save_gif\": Widg(\"Save GIF\", QPushButton, 17, 2, 1, 2, save_gif)\n },\n \"image\": {\n \"image_title\": Widg(\"Image\", QLabel, 0, 4, 1, 7),\n \"image\": Widg(\"Image\", QLabel, 1, 4, 17, 7)\n }\n}","repo_name":"itbergl/needle-in-a-haystack","sub_path":"gui/gui_outline.py","file_name":"gui_outline.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"3245719520","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom score import Score\nimport time\nscreen=Screen()\nscreen.bgcolor(\"black\")\nscreen.title(\"pong\")\nscreen.setup(width=800,height=600)\nscreen.tracer(0)\n\n\nright_player=Paddle(360,0)\nleft_player=Paddle(-360,0)\nball=Ball()\nscore_board = Score()\n\nscreen.listen()\nscreen.onkey(right_player.go_up,\"Up\")\nscreen.onkey(right_player.go_down,\"Down\")\nscreen.onkey(left_player.go_up,\"w\")\nscreen.onkey(left_player.go_down,\"s\")\n\ngame_is_on=True\n\nwhile game_is_on:\n time.sleep(ball.movspeed)\n screen.update()\n ball.move()\n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounceOnWall()\n x_pos=ball.xcor()\n r_pos=ball.distance(right_player.player)\n l_pos=ball.distance(left_player.player)\n if (x_pos > 350 or x_pos < -350):\n if (r_pos < 59 or l_pos < 59) :\n ball.bounceOnPaddle()\n else:\n ball.resetPos()\n if (x_pos > 350):\n score_board.l_score += 1\n elif (x_pos < -350):\n score_board.r_score += 1\n score_board.updateScore()\n\n\n\nscreen.exitonclick()","repo_name":"hisamdavid/100-Days-of-python","sub_path":"day22/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13749693389","text":"from django.contrib import admin\nfrom django.urls import path\nfrom drf_yasg import openapi\nfrom netflix_2.views import MovieList, aMovie\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework.permissions import AllowAny\n\ndoc_view = get_schema_view(\n openapi.Info(\n title=\"Netflix\",\n default_version='v1',\n description=\"(REST API) Clone of Netflix using Django Rest Framework\",\n contact=openapi.Contact(\"Abdumutalib Mirzajonov \")\n ),\n public=True,\n permission_classes=(AllowAny,),\n)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('movie/', MovieCreateList.as_view()),\n path('doc/', doc_view.with_ui(\"swagger\", cache_timeout=0), name=\"swagger-doc\"),\n path('redoc/', doc_view.with_ui(\"redoc\", cache_timeout=0), name=\"redoc-doc\"),\n path('movie/', ActorCreateList.as_view()),\n path('movie/ APIClient:\n yield APIClient()\n\n\n@pytest.fixture\ndef test_user_data() -> dict:\n user_data = {\n \"username\": \"test_user\",\n \"first_name\": \"first_name\",\n \"last_name\": \"last_name\",\n \"email\": \"email@test.com\",\n \"password\": \"test_password\",\n }\n return user_data\n\n\n@pytest.fixture(scope=\"function\")\ndef test_db_user(django_user_model, test_user_data) -> User:\n try:\n user = django_user_model.objects.get(username=test_user_data[\"username\"])\n except django_user_model.DoesNotExist:\n user = django_user_model.objects.create_user(**test_user_data)\n return user\n\n\n@pytest.fixture\ndef user_api_client(api_client, test_db_user):\n client = api_client\n refresh = RefreshToken.for_user(test_db_user)\n client.credentials(HTTP_AUTHORIZATION=f\"Bearer {refresh.access_token}\")\n return client\n\n\n@pytest.fixture\ndef another_test_user_data() -> dict:\n user_data = {\n \"username\": \"another_user\",\n \"first_name\": \"another_first_name\",\n \"last_name\": \"another_last_name\",\n \"email\": \"another_email@test.com\",\n \"password\": \"antother_test_password\",\n }\n return user_data\n\n\n@pytest.fixture(scope=\"function\")\ndef another_test_db_user(django_user_model, another_test_user_data) -> User:\n try:\n user = django_user_model.objects.get(\n username=another_test_user_data[\"username\"]\n )\n except django_user_model.DoesNotExist:\n user = django_user_model.objects.create(**another_test_user_data)\n return user\n\n\n@pytest.fixture\ndef admin_user_data() -> dict:\n user_data = {\n \"username\": \"test_admin\",\n \"first_name\": \"admin_first_name\",\n \"last_name\": \"admin_last_name\",\n \"email\": \"admin_email@test.com\",\n \"password\": \"admin_test_password\",\n \"is_staff\": True,\n \"is_superuser\": False,\n }\n return user_data\n\n\n@pytest.fixture\ndef db_admin_user(db, django_user_model, admin_user_data):\n try:\n admin_user = django_user_model.objects.get(username=admin_user_data[\"username\"])\n except django_user_model.DoesNotExist:\n admin_user = django_user_model.objects.create_user(**admin_user_data)\n return admin_user\n\n\n@pytest.fixture\ndef admin_api_client(api_client, db_admin_user):\n client = api_client\n refresh = RefreshToken.for_user(db_admin_user)\n client.credentials(HTTP_AUTHORIZATION=f\"Bearer {refresh.access_token}\")\n return client\n","repo_name":"alv2017/Events-API","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12145218682","text":"#!/usr/bin/env python\nimport sys\nfrom setuptools import setup\n\nrequirements = [\n \"requests>=2.25.0\",\n \"retrying>=1.3.3\"\n]\n\nsetup(name='slacker-client',\n description='OptScale Slacker API Client',\n url='http://hystax.com',\n author='Hystax',\n author_email='info@hystax.com',\n package_dir={'slacker_client': ''},\n packages=['slacker_client'],\n install_requires=requirements,\n )\n","repo_name":"hystax/optscale","sub_path":"optscale_client/slacker_client/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":646,"dataset":"github-code","pt":"22"} +{"seq_id":"16774915727","text":"#!/usr/bin/env python3.8\nN = int(input())\nA = list(map(int, input().split()))\n\ndp = [None] * (N + 1)\ndp[0] = 0\ndp[1] = A[0]\n\nfor i in range(2, N + 1):\n a = A[i - 1]\n dp[i] = max(dp[i - 2] + a, dp[i - 1])\nprint(dp[N])","repo_name":"harukaeru/CompetitiveProgramming","sub_path":"math-and-algorithm/031/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"18982870500","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"caculator\")\ne = Entry(root,width=35,borderwidth=5)\ne.grid(row=0,column=0,columnspan=10,padx=10,pady=10)\ndef button_add():\n\tnum1=e.get()\n\tglobal fnum\n\tfnum = int(num1)\n\te.delete(0,'end')\ndef button_equal():\n\te.delete(0,'end')\n\tnum2= e.get()\n\tglobal snum\n\tsnum = int(num2)\n\te.insert(0,fnum+snum)\ndef button_clear():\n\te.delete(0,'end')\ndef button_click(number):\n\te.insert(\"end\",number)\nb1 = Button(root,text=\"1\",padx=40,pady=20,command=lambda: button_click(1))\nb2 = Button(root,text=\"2\",padx=40,pady=20,command=lambda: button_click(2))\nb3 = Button(root,text=\"3\",padx=40,pady=20,command=lambda: button_click(3))\nb4 = Button(root,text=\"4\",padx=40,pady=20,command=lambda: button_click(4))\nb5 = Button(root,text=\"5\",padx=40,pady=20,command=lambda: button_click(5))\nb6 = Button(root,text=\"6\",padx=40,pady=20,command=lambda: button_click(6))\nb7 = Button(root,text=\"7\",padx=40,pady=20,command=lambda: button_click(7))\nb8 = Button(root,text=\"8\",padx=40,pady=20,command=lambda: button_click(8))\nb9 = Button(root,text=\"9\",padx=40,pady=20,command=lambda: button_click(9))\nb0 = Button(root,text=\"0\",padx=40,pady=20,command=lambda: button_click(0))\nbclear = Button(root,text=\"Clear\",padx=79,pady=20,command=button_clear)\nbequal = Button(root,text=\"=\",padx=91,pady=20,command=button_equal)\nbadd = Button(root,text=\"+\",padx=39,pady=20,command=button_add)\nb1.grid(row=3,column=0)\nb2.grid(row=3,column=1)\nb3.grid(row=3,column=2)\n\nb4.grid(row=2,column=0)\nb5.grid(row=2,column=1)\nb6.grid(row=2,column=2)\n\nb7.grid(row=1,column=0)\nb8.grid(row=1,column=1)\nb9.grid(row=1,column=2)\n\nb0.grid(row=4,column=0)\nbequal.grid(row=4,column=1,columnspan=2)\nbclear.grid(row=5,column=0,columnspan=2)\nbadd.grid(row=5,column=2)\nroot.mainloop()\n","repo_name":"Imran-wafa/Caculator.py","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13726013321","text":"s = input()\r\nsumOdd = 0\r\nsumEven = 0\r\nfor i in range(0,len(s)):\r\n if i % 2 != 0:\r\n sumEven += int(s[i])\r\n else:\r\n sumOdd += int(s[i])\r\nif (sumEven * 3 + sumOdd) % 10 == 0:\r\n print(\"yes\")\r\nelse:\r\n print(\"no\")","repo_name":"pribishlove/Python","sub_path":"buns/mod2/task13.py","file_name":"task13.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70237554295","text":"\"\"\"empty message\n\nRevision ID: 444c5f94bdf5\nRevises: 55dbf26a2b08\nCreate Date: 2023-07-04 23:43:03.755392\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '444c5f94bdf5'\ndown_revision = '55dbf26a2b08'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('addresses', schema=None) as batch_op:\n batch_op.add_column(sa.Column('street_number', sa.String(), nullable=False))\n batch_op.drop_column('name')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('addresses', schema=None) as batch_op:\n batch_op.add_column(sa.Column('name', sa.VARCHAR(), nullable=False))\n batch_op.drop_column('street_number')\n\n # ### end Alembic commands ###\n","repo_name":"tasosxak/person_list","sub_path":"migrations/versions/444c5f94bdf5_.py","file_name":"444c5f94bdf5_.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28887318523","text":"def fibonacci():\n\n current_num = 0\n previous_num = 1\n\n for i in range(2):\n yield i\n\n while True:\n result = current_num + previous_num\n yield result\n current_num = previous_num\n previous_num = result\n\n\ngenerator = fibonacci()\nfor i in range(5):\n print(next(generator))\n\n# generator = fibonacci()\n# for i in range(1):\n# print(next(generator))\n","repo_name":"sasho132/softuni-courses","sub_path":"python-oop-jun-2022/iterators_and_generators/fibonacci_generator.py","file_name":"fibonacci_generator.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4158113636","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nx = np.arange(-10, 10, 0.01)\r\ny = 1 / (1 + np.power(np.e, -x))\r\ny1 = np.power(np.e, -x) / (1 + np.power(np.e, -x))\r\n\r\nplt.plot(x, y)\r\nplt.show()\r\n\r\nplt.plot(x, y1)\r\nplt.show()\r\n\r\nline = 0.5 * x + 3\r\n\r\nplt.plot(x, line)\r\nplt.show()\r\n\r\nsig = 1 / (1 + np.power(np.e, -line))\r\n\r\nplt.plot(x, sig)\r\nplt.show()\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"tesla-is/Weekend-Batch-Coding-Examples","sub_path":"sal_2.py","file_name":"sal_2.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"16276754538","text":"import os\n\nimport ast\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom collections import Counter\nfrom scipy.stats import entropy, skew\nfrom sklearn.decomposition import PCA\nimport joblib\nfrom tensorflow.keras import layers, optimizers, callbacks\nimport librosa\nfrom tensorflow.keras.models import Sequential, load_model\nfrom sklearn.metrics import (\n confusion_matrix, accuracy_score,\n precision_recall_fscore_support,\n roc_curve, auc\n)\nfrom sklearn.model_selection import RandomizedSearchCV\nimport numpy\nimport pandas\nfrom sklearn.pipeline import make_pipeline, FeatureUnion, Pipeline\nfrom sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler\nimport pydub\nimport matplotlib.pyplot as pyplot\nimport pywt\nimport seaborn\nfrom tensorflow.keras.wrappers.scikit_learn import KerasClassifier\nimport tensorflow as tf\nfrom pandas.api.types import CategoricalDtype\n\n# constants\nBASE_DIR_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nSYS_DIR_PATH = os.path.dirname(BASE_DIR_PATH)\nMOUNTED_DATASET_PATH = SYS_DIR_PATH + '/s3-bucket'\nSAMPLE_HIPHOP_FILE_PATH = MOUNTED_DATASET_PATH + '/gtzan/wavfiles/hiphop.00000.wav'\nSAMPLE_POP_FILE_PATH = MOUNTED_DATASET_PATH + '/gtzan/wavfiles/pop.00000.wav'\nSAMPLE_ROCK_FILE_PATH = MOUNTED_DATASET_PATH + '/gtzan/wavfiles/rock.00000.wav'\nGENRES = ['hiphop', 'rock', 'pop']\nGENRES_MAP = {\n 0: 'hiphop',\n 1: 'rock',\n 2: 'pop'\n}\n# set log directory for tensorboard logs\nroot_logdir = os.path.join(os.curdir, \"my_logs\")\n\n\n# utility classes and functions\ndef visualize_conf_metrics(X_test, y_test, model):\n '''\n visualize_conf_metrics() plot the confusion metrics\n taking as parameter the test data and model instance.\n '''\n # make prediction on test data\n predicted_y_test = model.predict_proba(X_test)\n\n # compute confusion matrix\n conf_matrix = confusion_matrix(\n numpy.argmax(y_test, 1), numpy.argmax(predicted_y_test, 1))\n conf_matrix = pandas.DataFrame(conf_matrix)\n conf_matrix = conf_matrix.rename(columns=GENRES_MAP)\n conf_matrix.index = conf_matrix.columns\n\n # plot confusion matrix\n pyplot.figure(figsize=(20, 12))\n seaborn.set(font_scale=2);\n ax = seaborn.heatmap(conf_matrix, annot=True, cmap=seaborn.cubehelix_palette(50));\n ax.set(xlabel='Predicted Values', ylabel='Actual Values');\n\n\ndef evaluate_model(X_test, y_test, model):\n \"\"\"\n evaluate_model() returns evaluation metrics to \n measure the performance of the model. Taking as\n parameter test data and model instance.\n \"\"\"\n mean_fpr = numpy.linspace(start=0, stop=1, num=100)\n\n # compute probabilistic predictiond for the evaluation set\n probabilities = model.predict_proba(X_test)[:, 1]\n\n # compute exact predictiond for the evaluation set\n predicted_values = model.predict(X_test)\n\n # compute accuracy\n accuracy = accuracy_score(y_test, predicted_values)\n\n # compute precision, recall and f1 score for class 1\n precision, recall, f1_score, _ = precision_recall_fscore_support(y_test, predicted_values, labels=[1])\n\n # # compute fpr and tpr values for various thresholds\n # # by comparing the true target values to the predicted probabilities for class 1\n # fpr, tpr, _ = roc_curve(y_test, probabilities)\n\n # # compute true positive rates for the values in the array mean_fpr\n # tpr_transformed = np.array([interp(mean_fpr, fpr, tpr)])\n\n # # compute the area under the curve\n # auc = auc(fpr, tpr)\n\n return accuracy, precision[0], recall[0], f1_score[0]\n\n\ndef get_run_logdir():\n '''\n get_run_logdir() generates subdirectory path with\n current date & time.\n '''\n import time\n run_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n return os.path.join(root_logdir, run_id)\n\n\ndef training_best_model(X_train, y_train, model_name, ncols,\n build_fn, preprocess_pipeline, params, batch_size=32,\n epochs=100):\n \"\"\"\n training_best_model() trains the best model\n from each subsets and returns an instance\n of the model. Taking as params things you \n need to train a model.\n \"\"\"\n # create early stopping callback instance\n early_stopping_cb = callbacks.EarlyStopping(\n patience=10, restore_best_weights=True)\n\n # generate log dir and create tensorboard callback instance \n run_logdir = get_run_logdir() # e.g., './my_logs/run_2019_01_16-11_28_43'\n tensorboard_cb = callbacks.TensorBoard(run_logdir)\n\n # wrap the function with keras wrapper\n clf = KerasClassifier(build_fn=build_fn(model_name, ncols))\n\n # pass-in params to be used to create model\n clf.set_params(**params)\n\n # create pipeline estimator\n estimator = Pipeline([\n ('preprocess', preprocess_pipeline),\n ('clf', clf)\n ])\n\n # learn model & validate with 20% of the training dataset\n estimator.fit(X_train, y_train, clf__batch_size=batch_size,\n clf__validation_split=0.2, clf__epochs=epochs,\n clf__callbacks=[early_stopping_cb, tensorboard_cb])\n\n return estimator\n\n\ndef create_norm_pipelines(features):\n \"\"\"\n create_norm_pipelines() returns a dict of \n normalization pipeline instances of four \n subsets of the dataset taking as parameter\n features (pandas.DataFrame).\n \"\"\"\n # break into subset\n (timbral_rhythmic_predictors, predictors_with_pos_corr,\n wavelet_predictors) = break_into_subsets(features)\n all_predictors = features.drop('genre_label', axis=1)\n\n # create dict of pipeline instances for each subset \n norm_pipelines = {\n 'all': normalization_pipeline(\n all_predictors.columns.values),\n 'tr': normalization_pipeline(\n timbral_rhythmic_predictors.columns.values),\n 'pos_corr': normalization_pipeline(\n predictors_with_pos_corr.columns.values),\n 'wavelet': normalization_pipeline(\n wavelet_predictors.columns.values)\n }\n\n return norm_pipelines\n\n\ndef break_into_subsets(features):\n \"\"\"\n break_into_subsets() returns subsets of the dataset\n taking as parameter features (pandas.DataFrame).\n \"\"\"\n\n # get wavelet subset \n wavelet_predictors = features.filter(regex=(r'.+_db[458]{1}_.+'))\n\n # get timbral & rhytmic subset\n wavelet_predictors_labels = wavelet_predictors.columns.values\n timbral_rhythmic_predictors = features.loc[:, features.columns.difference(\n numpy.append(wavelet_predictors_labels, 'genre_label'))]\n\n # get features with +ve correlation with the target subset\n corr_wf_target = features.corr()[['genre_label']].sort_values(\n by=['genre_label'], ascending=False)\n predictor_labels_with_pos_corr = corr_wf_target.loc[\n corr_wf_target.loc[:, 'genre_label'] > 0].index.values\n predictors_wf_pos_corr = features.loc[\n :, predictor_labels_with_pos_corr].drop('genre_label', axis=1)\n\n return timbral_rhythmic_predictors, predictors_wf_pos_corr, wavelet_predictors\n\n\ndef train_model(X, y, model_name, ncols, build_fn, preprocess_pipeline,\n param_dist, batch_size=32, epochs=100):\n \"\"\"\n train_model() trains a model using cv and return\n the best model score, params, & instance. Takes as\n parameter things you need to train a model.\n \"\"\"\n # create early stopping callback instance\n early_stopping_cb = callbacks.EarlyStopping(\n patience=10, restore_best_weights=True)\n\n # wrap the function with keras wrapper\n clf = KerasClassifier(build_fn=build_fn(model_name, ncols))\n\n # create pipeline estimator\n pipeline = Pipeline([\n ('preprocess', preprocess_pipeline),\n ('clf', clf)\n ])\n\n # instantiate RandomizedSearchCV\n # if you're not using a GPU, you can set n_jobs to something other than 1\n rscv = RandomizedSearchCV(pipeline, param_dist, cv=3, n_jobs=1)\n\n # learn model & validate with 20% of the training dataset\n search = rscv.fit(X, y, clf__batch_size=batch_size,\n clf__validation_split=0.2, clf__epochs=epochs,\n clf__callbacks=[early_stopping_cb])\n\n print(search.best_score_, search.best_params_)\n\n return search.best_score_, search.best_params_, search.best_estimator_\n\n\ndef visualize_wavelet(signal, waveletname, level_of_dec):\n \"\"\"\n visualize_wavelet() create original visualization for a signal and \n create visualization of the signal for a specified wavelet type for\n the range of decomposition level specified taking as parameter the\n signal, wavelet name and max level of decomposition. \n \n It was adapted from Ahmet Taspinar's blog titled:\n 'A guide for using the Wavelet Transform in Machine Learning'.\n \"\"\"\n # visualize orignal signal\n fig, ax = pyplot.subplots(figsize=(10, 5))\n ax.set_title(\"Music Signal: \")\n ax.plot(signal)\n\n # visualize specified wavelet for 1 - specified levels of decomposition\n cA = signal\n fig, axarr = pyplot.subplots(nrows=level_of_dec, ncols=2, figsize=(10, level_of_dec * 1.5))\n for level in range(level_of_dec):\n (cA, cD) = pywt.dwt(cA, waveletname)\n axarr[level, 0].plot(cA, 'r')\n axarr[level, 1].plot(cD, 'g')\n axarr[level, 0].set_ylabel(\"Level {}\".format(level + 1), fontsize=14, rotation=90)\n axarr[level, 0].set_yticklabels([])\n if level == 0:\n axarr[level, 0].set_title(\"Approximation coefficients\", fontsize=14)\n axarr[level, 1].set_title(\"Detail coefficients\", fontsize=14)\n axarr[level, 1].set_yticklabels([])\n pyplot.tight_layout()\n pyplot.show()\n\n\ndef get_optimizer(lr, optimizer):\n \"\"\"\n get_optimizer() returns a optimizer instance taking as parameter\n learning_rate (int) and optimizer (str)\n \"\"\"\n optimizer_instances = {\n 'rmsprop': optimizers.RMSprop(lr=lr),\n 'adam': optimizers.Adam(lr=lr),\n 'adagrad': optimizers.Adagrad(lr=lr)\n }\n return optimizer_instances[optimizer]\n\n\ndef set_shape_create_cnn_model(name, ncols):\n \"\"\"\n set_shape_create_model() returns a create_cnn_model function,\n taking as parameters the model name and column input shape.\n \"\"\"\n\n def create_cnn_model(n_hidden=1, activation='relu', optimizer='adam',\n kernel_initializer='glorot_uniform', units=30,\n filters=16, kernel_size=3, dropout=0.25, lr=3):\n \"\"\"\n create_cnn_model() returns a CNN model,\n taking as parameters things you want to verify\n using cross validation and model selection\n \"\"\"\n\n # initialize a random seed for replication purposes\n numpy.random.seed(23456)\n tf.random.set_seed(123)\n\n model_layers = [\n layers.Conv1D(\n filters=filters, kernel_size=kernel_size,\n input_shape=[ncols, 1]),\n ]\n\n index = 1\n for layer in range(n_hidden):\n index = index + 1\n # add a maxpooling layer\n model_layers.append(layers.MaxPooling1D())\n # add convolution layer\n model_layers.append(\n layers.Conv1D(\n filters=index * filters, kernel_size=kernel_size,\n activation=activation)\n )\n\n model_layers.append(layers.MaxPooling1D())\n model_layers.append(layers.Flatten())\n\n for layer in range(n_hidden):\n model_layers.append(\n layers.Dense(units, activation=activation,\n kernel_initializer=kernel_initializer)\n )\n model_layers.append(layers.Dropout(dropout))\n\n # add an output layer\n model_layers.append(layers.Dense(3, activation='softmax'))\n\n # Initiating an empty NN\n model = Sequential(layers=model_layers, name=name)\n\n print(model.summary())\n\n # Compiling our NN\n model.compile(loss='categorical_crossentropy',\n optimizer=get_optimizer(lr, optimizer),\n metrics=['accuracy'])\n\n return model\n\n return create_cnn_model\n\n\ndef set_shape_create_model(name, ncols):\n \"\"\"\n set_shape_create_model() returns a create_model function, \n taking as parameters the model name and column input shape.\n \"\"\"\n\n def create_model(n_hidden=1, activation='relu', optimizer='adam',\n kernel_initializer='glorot_uniform', lr=3, units=30):\n \"\"\"\n create_model() returns a FNN model, \n taking as parameters things you\n want to verify using cross-validation and model selection\n \"\"\"\n # initialize a random seed for replication purposes\n numpy.random.seed(23456)\n tf.random.set_seed(123)\n\n model_layers = [\n layers.Flatten(input_shape=[ncols, 1]),\n ]\n\n # multiplier = n_hidden + 1\n # units = 3 * n_hidden\n for layer in range(n_hidden):\n # add a full-connected layer\n # units = 3 * multiplier * multiplier\n model_layers.append(\n layers.Dense(units, activation=activation,\n kernel_initializer=kernel_initializer)\n )\n # multiplier -= 1\n\n # add an output layer\n model_layers.append(layers.Dense(3, activation='softmax'))\n\n # Initiating the NN\n model = Sequential(layers=model_layers, name=name)\n\n print(model.summary())\n\n # Compiling the NN\n model.compile(loss='categorical_crossentropy',\n optimizer=get_optimizer(lr, optimizer),\n metrics=['accuracy'])\n\n return model\n\n return create_model\n\n\nclass AddColumnNames(BaseEstimator, TransformerMixin):\n \"\"\"\n AddColumnNames is used to add columns to a pipeline.\n \"\"\"\n\n def __init__(self, columns):\n self.columns = columns\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n return pandas.DataFrame(data=X, columns=self.columns)\n\n\nclass ColumnSelector(BaseEstimator, TransformerMixin):\n \"\"\"\n ColumnSelector is used to select columns in a pipeline\n \"\"\"\n\n def __init__(self, columns):\n self.columns = columns\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n assert isinstance(X, pandas.DataFrame)\n\n try:\n return X[self.columns]\n except KeyError:\n cols_error = list(set(self.columns) - set(X.columns))\n raise KeyError(\"The DataFrame does not include the columns: %s\" % cols_error)\n\n\nclass Reshape1DTo2D(BaseEstimator, TransformerMixin):\n \"\"\"\n Reshape2D reshapes 1D input to 2D.\n \"\"\"\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n assert isinstance(X, numpy.ndarray)\n nrows, ncols = X.shape\n return X.reshape(nrows, ncols, 1)\n\n\nclass LogTransformation(BaseEstimator, TransformerMixin):\n \"\"\"\n Log Transformation performs log transformation on input dataframe\n \"\"\"\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n assert isinstance(X, pandas.core.frame.DataFrame)\n\n # silence the SettingWithCopyWarning as values are\n # being set the right way using .loc[] not [].\n # The interpreter doesnt know the difference so\n # it would've displayed the warning nonetheless\n pandas.options.mode.chained_assignment = None\n\n labels = X.columns.values\n for label in labels:\n curr_column = X.loc[:, label]\n min_val = curr_column.min()\n increment = numpy.abs(min_val) + 1\n X.loc[:, label] = numpy.log(curr_column + increment)\n\n return X\n\n\n# def standardization_pipeline(predictors_all, predictors_with_outliers,\n# predictors_without_outliers, n_components):\ndef standardization_pipeline(predictors_all, predictors_with_outliers,\n predictors_without_outliers):\n \"\"\"\n standardization_pipeline() returns a Pipeline object, \n taking as parameters the all labels in the dataset,\n labels of predictors with outliers, and ones without\n outliers.\n \"\"\"\n if len(predictors_with_outliers) == 0:\n return make_pipeline(\n AddColumnNames(columns=predictors_all),\n FeatureUnion(transformer_list=[\n ('predictors_without_outliers', make_pipeline(\n ColumnSelector(columns=predictors_without_outliers),\n StandardScaler()\n ))\n ]),\n # PCA(n_components=n_components),\n Reshape1DTo2D()\n )\n\n return make_pipeline(\n AddColumnNames(columns=predictors_all),\n FeatureUnion(transformer_list=[\n ('predictors_with_outliers', make_pipeline(\n ColumnSelector(columns=predictors_with_outliers),\n LogTransformation(),\n RobustScaler()\n )),\n ('predictors_without_outliers', make_pipeline(\n ColumnSelector(columns=predictors_without_outliers),\n StandardScaler()\n )),\n ]),\n # PCA(n_components=n_components),\n Reshape1DTo2D()\n )\n\n\n# def normalization_pipeline(predictors_all, n_components):\ndef normalization_pipeline(predictors_all):\n \"\"\"\n normalization_pipeline() returns a Pipeline object, \n taking as parameters the all labels in the dataset.\n \"\"\"\n return make_pipeline(\n AddColumnNames(columns=predictors_all),\n FeatureUnion(transformer_list=[\n ('predictors_all', make_pipeline(\n ColumnSelector(columns=predictors_all),\n MinMaxScaler()\n ))\n ]),\n # PCA(n_components=n_components),\n Reshape1DTo2D()\n )\n\n\ndef load(filepath):\n \"\"\"\n load() was adapted from the FMA: A Dataset For Music Analysis repository.\n It is used to load the tracks.csv file from the FMA dataset.\n \"\"\"\n filename = os.path.basename(filepath)\n if 'tracks' in filename:\n tracks = pandas.read_csv(filepath, index_col=0, header=[0, 1])\n\n COLUMNS = [('track', 'tags'), ('album', 'tags'), ('artist', 'tags'),\n ('track', 'genres'), ('track', 'genres_all')]\n for column in COLUMNS:\n tracks[column] = tracks[column].map(ast.literal_eval)\n\n COLUMNS = [('track', 'date_created'), ('track', 'date_recorded'),\n ('album', 'date_created'), ('album', 'date_released'),\n ('artist', 'date_created'), ('artist', 'active_year_begin'),\n ('artist', 'active_year_end')]\n for column in COLUMNS:\n tracks[column] = pandas.to_datetime(tracks[column])\n\n SUBSETS = ('small', 'medium', 'large')\n tracks['set', 'subset'] = tracks['set', 'subset'].astype(\n CategoricalDtype(categories=SUBSETS, ordered=True))\n\n COLUMNS = [('track', 'genre_top'), ('track', 'license'),\n ('album', 'type'), ('album', 'information'),\n ('artist', 'bio')]\n for column in COLUMNS:\n tracks[column] = tracks[column].astype('category')\n\n return tracks\n\n\ndef stats(feature):\n \"\"\"\n stats() returns a dict of stats: mean, median, std & variance,\n for a single feature.\n \"\"\"\n return {\n 'mean': numpy.mean(feature),\n 'median': numpy.median(feature),\n 'std': numpy.std(feature),\n 'var': numpy.var(feature)\n }\n\n\ndef calc_entropy(feature):\n \"\"\"\n calc_entropy() returns the entropy for a feature list.\n It was adapted from Ahmet Taspinar's blog titled:\n 'A guide for using the Wavelet Transform in Machine Learning'.\n \"\"\"\n counter_values = Counter(feature).most_common()\n probabilities = [elem[1] / len(feature) for elem in counter_values]\n entropy_val = entropy(probabilities)\n return entropy_val\n\n\ndef extra_stats(feature):\n \"\"\"\n extra_stats() returns a dict of stats which include;\n sub-band energy, skewness, (5th, 25th, 75th & 95th) percentile,\n root mean square, zero crossing rate, mean crossing rate,\n and entropy, for a single feature list.\n \"\"\"\n return {\n 'sb_energy': numpy.mean(numpy.abs(feature)),\n 'skewness': skew(feature),\n '5th_percentile': numpy.nanpercentile(feature, 5),\n '25th_percentile': numpy.nanpercentile(feature, 25),\n '75th_percentile': numpy.nanpercentile(feature, 75),\n '95th_percentile': numpy.nanpercentile(feature, 95),\n 'rms': numpy.nanmean(numpy.sqrt(feature ** 2)),\n 'zcr': len(numpy.nonzero(numpy.diff(numpy.array(feature) > 0))[0]),\n 'mcr': len(numpy.nonzero(numpy.diff(numpy.array(feature) > numpy.nanmean(feature)))[0]),\n 'entropy': calc_entropy(feature),\n }\n\n\ndef extract_cqt(file):\n \"\"\"\n extract_cqt() returns the log-mel value for audio signal, \n taking as parameters file object of an audio signal\n \"\"\"\n # get sample rate of audio file and load audio file as time series\n sample_rate = librosa.core.get_samplerate(file.path);\n time_series, _ = librosa.core.load(file.path, sample_rate);\n\n # compute cqt and convert from amplitude to decibels unit\n cqt = librosa.cqt(time_series, sample_rate);\n scaled_cqt = librosa.amplitude_to_db(cqt, ref=numpy.max);\n\n return scaled_cqt\n\n\ndef extract_mel_spect(file):\n \"\"\"\n extract_mel_spect() returns the mel spectogram value for an audio signal, \n taking as parameters file object of an audio signal\n \"\"\"\n # get sample rate of audio file and load audio file as time series\n sample_rate = librosa.core.get_samplerate(file.path);\n time_series, _ = librosa.core.load(file.path, sample_rate);\n\n # compute spectogram and convert spectogram to decibels unit \n mel_spect = librosa.feature.melspectrogram(time_series, sample_rate);\n scaled_mel_spect = librosa.power_to_db(mel_spect, ref=numpy.max);\n\n return scaled_mel_spect\n\n\n# store for all features to be extracted except log-mel and mel-spectrogram.\ndataframe = pandas.DataFrame({\n 'genre_label': [],\n 'data_source': [],\n\n 'mean_spec_centroid': [],\n 'median_spec_centroid': [],\n 'std_spec_centroid': [],\n 'var_spec_centroid': [],\n\n 'mean_spec_rolloff': [],\n 'median_spec_rolloff': [],\n 'std_spec_rolloff': [],\n 'var_spec_rolloff': [],\n\n 'mean_zcr': [],\n 'median_zcr': [],\n 'std_zcr': [],\n 'var_zcr': [],\n\n 'mean_spec_bw': [],\n 'median_spec_bw': [],\n 'std_spec_bw': [],\n 'var_spec_bw': [],\n\n 'mean_spec_contrast_1': [],\n 'median_spec_contrast_1': [],\n 'std_spec_contrast_1': [],\n 'var_spec_contrast_1': [],\n\n 'mean_spec_contrast_2': [],\n 'median_spec_contrast_2': [],\n 'std_spec_contrast_2': [],\n 'var_spec_contrast_2': [],\n\n 'mean_spec_contrast_3': [],\n 'median_spec_contrast_3': [],\n 'std_spec_contrast_3': [],\n 'var_spec_contrast_3': [],\n\n 'mean_spec_contrast_4': [],\n 'median_spec_contrast_4': [],\n 'std_spec_contrast_4': [],\n 'var_spec_contrast_4': [],\n\n 'mean_spec_contrast_5': [],\n 'median_spec_contrast_5': [],\n 'std_spec_contrast_5': [],\n 'var_spec_contrast_5': [],\n\n 'mean_spec_contrast_6': [],\n 'median_spec_contrast_6': [],\n 'std_spec_contrast_6': [],\n 'var_spec_contrast_6': [],\n\n 'mean_spec_contrast_7': [],\n 'median_spec_contrast_7': [],\n 'std_spec_contrast_7': [],\n 'var_spec_contrast_7': [],\n\n 'mean_mfcc_1': [],\n 'median_mfcc_1': [],\n 'std_mfcc_1': [],\n 'var_mfcc_1': [],\n\n 'mean_mfcc_2': [],\n 'median_mfcc_2': [],\n 'std_mfcc_2': [],\n 'var_mfcc_2': [],\n\n 'mean_mfcc_3': [],\n 'median_mfcc_3': [],\n 'std_mfcc_3': [],\n 'var_mfcc_3': [],\n\n 'mean_mfcc_4': [],\n 'median_mfcc_4': [],\n 'std_mfcc_4': [],\n 'var_mfcc_4': [],\n\n 'mean_mfcc_5': [],\n 'median_mfcc_5': [],\n 'std_mfcc_5': [],\n 'var_mfcc_5': [],\n\n 'mean_mfcc_6': [],\n 'median_mfcc_6': [],\n 'std_mfcc_6': [],\n 'var_mfcc_6': [],\n\n 'mean_mfcc_7': [],\n 'median_mfcc_7': [],\n 'std_mfcc_7': [],\n 'var_mfcc_7': [],\n\n 'mean_mfcc_8': [],\n 'median_mfcc_8': [],\n 'std_mfcc_8': [],\n 'var_mfcc_8': [],\n\n 'mean_mfcc_9': [],\n 'median_mfcc_9': [],\n 'std_mfcc_9': [],\n 'var_mfcc_9': [],\n\n 'mean_mfcc_10': [],\n 'median_mfcc_10': [],\n 'std_mfcc_10': [],\n 'var_mfcc_10': [],\n\n 'mean_mfcc_11': [],\n 'median_mfcc_11': [],\n 'std_mfcc_11': [],\n 'var_mfcc_11': [],\n\n 'mean_mfcc_12': [],\n 'median_mfcc_12': [],\n 'std_mfcc_12': [],\n 'var_mfcc_12': [],\n\n 'mean_mfcc_13': [],\n 'median_mfcc_13': [],\n 'std_mfcc_13': [],\n 'var_mfcc_13': [],\n\n 'lpc_1': [],\n 'lpc_2': [],\n 'lpc_3': [],\n 'lpc_4': [],\n\n 'tempo': [],\n\n 'mean_beats': [],\n 'median_beats': [],\n 'std_beats': [],\n 'var_beats': [],\n\n 'mean_beats_timestamp': [],\n 'median_beats_timestamp': [],\n 'std_beats_timestamp': [],\n 'var_beats_timestamp': [],\n\n 'mean_db4_cA4': [],\n 'median_db4_cA4': [],\n 'std_db4_cA4': [],\n 'var_db4_cA4': [],\n 'sb_energy_db4_cA4': [],\n 'skewness_db4_cA4': [],\n '5th_percentile_db4_cA4': [],\n '25th_percentile_db4_cA4': [],\n '75th_percentile_db4_cA4': [],\n '95th_percentile_db4_cA4': [],\n 'rms_db4_cA4': [],\n 'zcr_db4_cA4': [],\n 'mcr_db4_cA4': [],\n 'entropy_db4_cA4': [],\n\n 'mean_db4_cD4': [],\n 'median_db4_cD4': [],\n 'std_db4_cD4': [],\n 'var_db4_cD4': [],\n 'sb_energy_db4_cD4': [],\n 'skewness_db4_cD4': [],\n '5th_percentile_db4_cD4': [],\n '25th_percentile_db4_cD4': [],\n '75th_percentile_db4_cD4': [],\n '95th_percentile_db4_cD4': [],\n 'rms_db4_cD4': [],\n 'zcr_db4_cD4': [],\n 'mcr_db4_cD4': [],\n 'entropy_db4_cD4': [],\n\n 'mean_db4_cD3': [],\n 'median_db4_cD3': [],\n 'std_db4_cD3': [],\n 'var_db4_cD3': [],\n 'sb_energy_db4_cD3': [],\n 'skewness_db4_cD3': [],\n '5th_percentile_db4_cD3': [],\n '25th_percentile_db4_cD3': [],\n '75th_percentile_db4_cD3': [],\n '95th_percentile_db4_cD3': [],\n 'rms_db4_cD3': [],\n 'zcr_db4_cD3': [],\n 'mcr_db4_cD3': [],\n 'entropy_db4_cD3': [],\n\n 'mean_db4_cD2': [],\n 'median_db4_cD2': [],\n 'std_db4_cD2': [],\n 'var_db4_cD2': [],\n 'sb_energy_db4_cD2': [],\n 'skewness_db4_cD2': [],\n '5th_percentile_db4_cD2': [],\n '25th_percentile_db4_cD2': [],\n '75th_percentile_db4_cD2': [],\n '95th_percentile_db4_cD2': [],\n 'rms_db4_cD2': [],\n 'zcr_db4_cD2': [],\n 'mcr_db4_cD2': [],\n 'entropy_db4_cD2': [],\n\n 'mean_db4_cD1': [],\n 'median_db4_cD1': [],\n 'std_db4_cD1': [],\n 'var_db4_cD1': [],\n 'sb_energy_db4_cD1': [],\n 'skewness_db4_cD1': [],\n '5th_percentile_db4_cD1': [],\n '25th_percentile_db4_cD1': [],\n '75th_percentile_db4_cD1': [],\n '95th_percentile_db4_cD1': [],\n 'rms_db4_cD1': [],\n 'zcr_db4_cD1': [],\n 'mcr_db4_cD1': [],\n 'entropy_db4_cD1': [],\n\n 'mean_db5_cA4': [],\n 'median_db5_cA4': [],\n 'std_db5_cA4': [],\n 'var_db5_cA4': [],\n 'sb_energy_db5_cA4': [],\n 'skewness_db5_cA4': [],\n '5th_percentile_db5_cA4': [],\n '25th_percentile_db5_cA4': [],\n '75th_percentile_db5_cA4': [],\n '95th_percentile_db5_cA4': [],\n 'rms_db5_cA4': [],\n 'zcr_db5_cA4': [],\n 'mcr_db5_cA4': [],\n 'entropy_db5_cA4': [],\n\n 'mean_db5_cD4': [],\n 'median_db5_cD4': [],\n 'std_db5_cD4': [],\n 'var_db5_cD4': [],\n 'sb_energy_db5_cD4': [],\n 'skewness_db5_cD4': [],\n '5th_percentile_db5_cD4': [],\n '25th_percentile_db5_cD4': [],\n '75th_percentile_db5_cD4': [],\n '95th_percentile_db5_cD4': [],\n 'rms_db5_cD4': [],\n 'zcr_db5_cD4': [],\n 'mcr_db5_cD4': [],\n 'entropy_db5_cD4': [],\n\n 'mean_db5_cD3': [],\n 'median_db5_cD3': [],\n 'std_db5_cD3': [],\n 'var_db5_cD3': [],\n 'sb_energy_db5_cD3': [],\n 'skewness_db5_cD3': [],\n '5th_percentile_db5_cD3': [],\n '25th_percentile_db5_cD3': [],\n '75th_percentile_db5_cD3': [],\n '95th_percentile_db5_cD3': [],\n 'rms_db5_cD3': [],\n 'zcr_db5_cD3': [],\n 'mcr_db5_cD3': [],\n 'entropy_db5_cD3': [],\n\n 'mean_db5_cD2': [],\n 'median_db5_cD2': [],\n 'std_db5_cD2': [],\n 'var_db5_cD2': [],\n 'sb_energy_db5_cD2': [],\n 'skewness_db5_cD2': [],\n '5th_percentile_db5_cD2': [],\n '25th_percentile_db5_cD2': [],\n '75th_percentile_db5_cD2': [],\n '95th_percentile_db5_cD2': [],\n 'rms_db5_cD2': [],\n 'zcr_db5_cD2': [],\n 'mcr_db5_cD2': [],\n 'entropy_db5_cD2': [],\n\n 'mean_db5_cD1': [],\n 'median_db5_cD1': [],\n 'std_db5_cD1': [],\n 'var_db5_cD1': [],\n 'sb_energy_db5_cD1': [],\n 'skewness_db5_cD1': [],\n '5th_percentile_db5_cD1': [],\n '25th_percentile_db5_cD1': [],\n '75th_percentile_db5_cD1': [],\n '95th_percentile_db5_cD1': [],\n 'rms_db5_cD1': [],\n 'zcr_db5_cD1': [],\n 'mcr_db5_cD1': [],\n 'entropy_db5_cD1': [],\n\n 'mean_db8_cA7': [],\n 'median_db8_cA7': [],\n 'std_db8_cA7': [],\n 'var_db8_cA7': [],\n 'sb_energy_db8_cA7': [],\n 'skewness_db8_cA7': [],\n '5th_percentile_db8_cA7': [],\n '25th_percentile_db8_cA7': [],\n '75th_percentile_db8_cA7': [],\n '95th_percentile_db8_cA7': [],\n 'rms_db8_cA7': [],\n 'zcr_db8_cA7': [],\n 'mcr_db8_cA7': [],\n 'entropy_db8_cA7': [],\n\n 'mean_db8_cD7': [],\n 'median_db8_cD7': [],\n 'std_db8_cD7': [],\n 'var_db8_cD7': [],\n 'sb_energy_db8_cD7': [],\n 'skewness_db8_cD7': [],\n '5th_percentile_db8_cD7': [],\n '25th_percentile_db8_cD7': [],\n '75th_percentile_db8_cD7': [],\n '95th_percentile_db8_cD7': [],\n 'rms_db8_cD7': [],\n 'zcr_db8_cD7': [],\n 'mcr_db8_cD7': [],\n 'entropy_db8_cD7': [],\n\n 'mean_db8_cD6': [],\n 'median_db8_cD6': [],\n 'std_db8_cD6': [],\n 'var_db8_cD6': [],\n 'sb_energy_db8_cD6': [],\n 'skewness_db8_cD6': [],\n '5th_percentile_db8_cD6': [],\n '25th_percentile_db8_cD6': [],\n '75th_percentile_db8_cD6': [],\n '95th_percentile_db8_cD6': [],\n 'rms_db8_cD6': [],\n 'zcr_db8_cD6': [],\n 'mcr_db8_cD6': [],\n 'entropy_db8_cD6': [],\n\n 'mean_db8_cD5': [],\n 'median_db8_cD5': [],\n 'std_db8_cD5': [],\n 'var_db8_cD5': [],\n 'sb_energy_db8_cD5': [],\n 'skewness_db8_cD5': [],\n '5th_percentile_db8_cD5': [],\n '25th_percentile_db8_cD5': [],\n '75th_percentile_db8_cD5': [],\n '95th_percentile_db8_cD5': [],\n 'rms_db8_cD5': [],\n 'zcr_db8_cD5': [],\n 'mcr_db8_cD5': [],\n 'entropy_db8_cD5': [],\n\n 'mean_db8_cD4': [],\n 'median_db8_cD4': [],\n 'std_db8_cD4': [],\n 'var_db8_cD4': [],\n 'sb_energy_db8_cD4': [],\n 'skewness_db8_cD4': [],\n '5th_percentile_db8_cD4': [],\n '25th_percentile_db8_cD4': [],\n '75th_percentile_db8_cD4': [],\n '95th_percentile_db8_cD4': [],\n 'rms_db8_cD4': [],\n 'zcr_db8_cD4': [],\n 'mcr_db8_cD4': [],\n 'entropy_db8_cD4': [],\n\n 'mean_db8_cD3': [],\n 'median_db8_cD3': [],\n 'std_db8_cD3': [],\n 'var_db8_cD3': [],\n 'sb_energy_db8_cD3': [],\n 'skewness_db8_cD3': [],\n '5th_percentile_db8_cD3': [],\n '25th_percentile_db8_cD3': [],\n '75th_percentile_db8_cD3': [],\n '95th_percentile_db8_cD3': [],\n 'rms_db8_cD3': [],\n 'zcr_db8_cD3': [],\n 'mcr_db8_cD3': [],\n 'entropy_db8_cD3': [],\n\n 'mean_db8_cD2': [],\n 'median_db8_cD2': [],\n 'std_db8_cD2': [],\n 'var_db8_cD2': [],\n 'sb_energy_db8_cD2': [],\n 'skewness_db8_cD2': [],\n '5th_percentile_db8_cD2': [],\n '25th_percentile_db8_cD2': [],\n '75th_percentile_db8_cD2': [],\n '95th_percentile_db8_cD2': [],\n 'rms_db8_cD2': [],\n 'zcr_db8_cD2': [],\n 'mcr_db8_cD2': [],\n 'entropy_db8_cD2': [],\n\n 'mean_db8_cD1': [],\n 'median_db8_cD1': [],\n 'std_db8_cD1': [],\n 'var_db8_cD1': [],\n 'sb_energy_db8_cD1': [],\n 'skewness_db8_cD1': [],\n '5th_percentile_db8_cD1': [],\n '25th_percentile_db8_cD1': [],\n '75th_percentile_db8_cD1': [],\n '95th_percentile_db8_cD1': [],\n 'rms_db8_cD1': [],\n 'zcr_db8_cD1': [],\n 'mcr_db8_cD1': [],\n 'entropy_db8_cD1': [],\n\n})\n\n\ndef feedback(file, genre_label):\n \"\"\"\n feedback() is a extract function from extract_audio_features(), taking\n as parameter a file object and genre label. Prints the status of the \n feature extraction process.\n \"\"\"\n if type(file) == str:\n print('appended features extracted from ' + file + ' with genre: ' + genre_label)\n else:\n print('appended features extracted from ' + str(file.name) + ' with genre: ' + genre_label)\n\n\ndef get_time_series_sample_rate(file):\n \"\"\"\n get_time_series_sample_rate() return the sample rate & time series\n of the audio file taking as parameter the file path or file object\n \"\"\"\n if type(file) == str:\n sample_rate = librosa.core.get_samplerate(file)\n time_series, _ = librosa.core.load(file, sample_rate)\n else:\n sample_rate = librosa.core.get_samplerate(file.path)\n time_series, _ = librosa.core.load(file.path, sample_rate)\n\n return sample_rate, time_series\n\n\ndef extract_beats_time(time_series, sample_rate):\n \"\"\"\n extract_beats_time() computes the tempo, beats & time stamp of beats\n for an audio file taking as parameter the time series and sample rate of the\n audi file.\n \"\"\"\n tempo, beats = librosa.beat.beat_track(time_series, sample_rate)\n beats_timestamp = librosa.frames_to_time(beats, sample_rate)\n\n return tempo, beats, beats_timestamp\n\n\ndef extract_audio_features(dataframe, file, genre_label, data_source):\n \"\"\"\n This function takes a dataframe, an audio file (check librosa for acceptable formats),\n genre label, and data source. It extract features from the audio and returns the\n dataframe with the new row appended.\n\n Timbral, rhythmic, and wavelet features are extracted excluding log-mel and mel-spectogram.\n\n Parameters:\n dataframe (pandas.Dataframe): Dataframe to be updated with new row.\n file (File or str): an audio file or file path.\n genre_label (str): audio genre label\n data_source (str): fma or gtzan\n \"\"\"\n\n # get sample rate of audio file & load audio file as time series\n sample_rate, time_series = get_time_series_sample_rate(file)\n\n # compute timbral features\n # compute spectral centroid\n spec_centroid = librosa.feature.spectral_centroid(time_series, sample_rate)\n stats_spec_centroid = stats(spec_centroid)\n\n # compute spectral roll-off\n spec_rolloff = librosa.feature.spectral_rolloff(time_series, sample_rate)\n stats_spec_rolloff = stats(spec_rolloff)\n\n # compute zero crossing rate\n zcr = librosa.feature.zero_crossing_rate(time_series)\n stats_zcr = stats(zcr)\n\n # compute spectral bandwidth\n spec_bw = librosa.feature.spectral_bandwidth(time_series, sample_rate)\n stats_spec_bw = stats(spec_bw[0])\n\n # compute spectral contrast\n spec_contrast = librosa.feature.spectral_contrast(time_series, sample_rate)\n stats_spec_contrast_1 = stats(spec_contrast[0])\n stats_spec_contrast_2 = stats(spec_contrast[1])\n stats_spec_contrast_3 = stats(spec_contrast[2])\n stats_spec_contrast_4 = stats(spec_contrast[3])\n stats_spec_contrast_5 = stats(spec_contrast[4])\n stats_spec_contrast_6 = stats(spec_contrast[5])\n stats_spec_contrast_7 = stats(spec_contrast[6])\n\n # compute 13 mel-frequency cepstral coefficients\n mfcc = librosa.feature.mfcc(time_series, sample_rate, n_mfcc=13)\n stat_mfcc_1 = stats(mfcc[0])\n stat_mfcc_2 = stats(mfcc[1])\n stat_mfcc_3 = stats(mfcc[2])\n stat_mfcc_4 = stats(mfcc[3])\n stat_mfcc_5 = stats(mfcc[4])\n stat_mfcc_6 = stats(mfcc[5])\n stat_mfcc_7 = stats(mfcc[6])\n stat_mfcc_8 = stats(mfcc[7])\n stat_mfcc_9 = stats(mfcc[8])\n stat_mfcc_10 = stats(mfcc[9])\n stat_mfcc_11 = stats(mfcc[10])\n stat_mfcc_12 = stats(mfcc[11])\n stat_mfcc_13 = stats(mfcc[12])\n\n # compute 3rd order linear prediction coefficients\n lpc = librosa.lpc(time_series, 3)\n\n # compute rhythmic features\n # compute tempo, beats & beats' time stamp\n tempo, beats, beats_timestamp = extract_beats_time(time_series, sample_rate)\n stats_beats = stats(beats)\n stats_beats_timestamp = stats(beats_timestamp)\n\n # compute wavelet features\n # compute coefficients for Db4 at level 4 decomposition\n db4_coeffs = pywt.wavedec(time_series, 'db4', level=4)\n db4_cA4, db4_cD4, db4_cD3, db4_cD2, db4_cD1 = db4_coeffs\n stats_db4_cA4 = {**stats(db4_cA4), **extra_stats(db4_cA4)}\n stats_db4_cD4 = {**stats(db4_cD4), **extra_stats(db4_cD4)}\n stats_db4_cD3 = {**stats(db4_cD3), **extra_stats(db4_cD3)}\n stats_db4_cD2 = {**stats(db4_cD2), **extra_stats(db4_cD2)}\n stats_db4_cD1 = {**stats(db4_cD1), **extra_stats(db4_cD1)}\n\n # compute coefficients for Db5 at level 4 decomposition\n db5_coeffs = pywt.wavedec(time_series, 'db5', level=4)\n db5_cA4, db5_cD4, db5_cD3, db5_cD2, db5_cD1 = db5_coeffs\n stats_db5_cA4 = {**stats(db5_cA4), **extra_stats(db5_cA4)}\n stats_db5_cD4 = {**stats(db5_cD4), **extra_stats(db5_cD4)}\n stats_db5_cD3 = {**stats(db5_cD3), **extra_stats(db5_cD3)}\n stats_db5_cD2 = {**stats(db5_cD2), **extra_stats(db5_cD2)}\n stats_db5_cD1 = {**stats(db5_cD1), **extra_stats(db5_cD1)}\n\n # compute coefficients for Db8 at level 7 decomposition\n db8_coeffs = pywt.wavedec(time_series, 'db4', level=7)\n db8_cA7, db8_cD7, db8_cD6, db8_cD5, db8_cD4, db8_cD3, db8_cD2, db8_cD1 = db8_coeffs\n stats_db8_cA7 = {**stats(db8_cA7), **extra_stats(db8_cA7)}\n stats_db8_cD7 = {**stats(db8_cD7), **extra_stats(db8_cD7)}\n stats_db8_cD6 = {**stats(db8_cD6), **extra_stats(db8_cD6)}\n stats_db8_cD5 = {**stats(db8_cD5), **extra_stats(db8_cD5)}\n stats_db8_cD4 = {**stats(db8_cD4), **extra_stats(db8_cD4)}\n stats_db8_cD3 = {**stats(db8_cD3), **extra_stats(db8_cD3)}\n stats_db8_cD2 = {**stats(db8_cD2), **extra_stats(db8_cD2)}\n stats_db8_cD1 = {**stats(db8_cD1), **extra_stats(db8_cD1)}\n\n # create new row\n new_row = {\n 'genre_label': genre_label,\n 'data_source': data_source,\n\n 'mean_spec_centroid': stats_spec_centroid['mean'],\n 'median_spec_centroid': stats_spec_centroid['median'],\n 'std_spec_centroid': stats_spec_centroid['std'],\n 'var_spec_centroid': stats_spec_centroid['var'],\n\n 'mean_spec_rolloff': stats_spec_rolloff['mean'],\n 'median_spec_rolloff': stats_spec_rolloff['median'],\n 'std_spec_rolloff': stats_spec_rolloff['std'],\n 'var_spec_rolloff': stats_spec_rolloff['var'],\n\n 'mean_zcr': stats_zcr['mean'],\n 'median_zcr': stats_zcr['median'],\n 'std_zcr': stats_zcr['std'],\n 'var_zcr': stats_zcr['var'],\n\n 'mean_spec_bw': stats_spec_bw['mean'],\n 'median_spec_bw': stats_spec_bw['median'],\n 'std_spec_bw': stats_spec_bw['std'],\n 'var_spec_bw': stats_spec_bw['var'],\n\n 'mean_spec_contrast_1': stats_spec_contrast_1['mean'],\n 'median_spec_contrast_1': stats_spec_contrast_1['median'],\n 'std_spec_contrast_1': stats_spec_contrast_1['std'],\n 'var_spec_contrast_1': stats_spec_contrast_1['var'],\n\n 'mean_spec_contrast_2': stats_spec_contrast_2['mean'],\n 'median_spec_contrast_2': stats_spec_contrast_2['median'],\n 'std_spec_contrast_2': stats_spec_contrast_2['std'],\n 'var_spec_contrast_2': stats_spec_contrast_2['var'],\n\n 'mean_spec_contrast_3': stats_spec_contrast_3['mean'],\n 'median_spec_contrast_3': stats_spec_contrast_3['median'],\n 'std_spec_contrast_3': stats_spec_contrast_3['std'],\n 'var_spec_contrast_3': stats_spec_contrast_3['var'],\n\n 'mean_spec_contrast_4': stats_spec_contrast_4['mean'],\n 'median_spec_contrast_4': stats_spec_contrast_4['median'],\n 'std_spec_contrast_4': stats_spec_contrast_4['std'],\n 'var_spec_contrast_4': stats_spec_contrast_4['var'],\n\n 'mean_spec_contrast_5': stats_spec_contrast_5['mean'],\n 'median_spec_contrast_5': stats_spec_contrast_5['median'],\n 'std_spec_contrast_5': stats_spec_contrast_5['std'],\n 'var_spec_contrast_5': stats_spec_contrast_5['var'],\n\n 'mean_spec_contrast_6': stats_spec_contrast_6['mean'],\n 'median_spec_contrast_6': stats_spec_contrast_6['median'],\n 'std_spec_contrast_6': stats_spec_contrast_6['std'],\n 'var_spec_contrast_6': stats_spec_contrast_6['var'],\n\n 'mean_spec_contrast_7': stats_spec_contrast_7['mean'],\n 'median_spec_contrast_7': stats_spec_contrast_7['median'],\n 'std_spec_contrast_7': stats_spec_contrast_7['std'],\n 'var_spec_contrast_7': stats_spec_contrast_7['var'],\n\n 'mean_mfcc_1': stat_mfcc_1['mean'],\n 'median_mfcc_1': stat_mfcc_1['median'],\n 'std_mfcc_1': stat_mfcc_1['std'],\n 'var_mfcc_1': stat_mfcc_1['var'],\n\n 'mean_mfcc_2': stat_mfcc_2['mean'],\n 'median_mfcc_2': stat_mfcc_2['median'],\n 'std_mfcc_2': stat_mfcc_2['std'],\n 'var_mfcc_2': stat_mfcc_2['var'],\n\n 'mean_mfcc_3': stat_mfcc_3['mean'],\n 'median_mfcc_3': stat_mfcc_3['median'],\n 'std_mfcc_3': stat_mfcc_3['std'],\n 'var_mfcc_3': stat_mfcc_3['var'],\n\n 'mean_mfcc_4': stat_mfcc_4['mean'],\n 'median_mfcc_4': stat_mfcc_4['median'],\n 'std_mfcc_4': stat_mfcc_4['std'],\n 'var_mfcc_4': stat_mfcc_4['var'],\n\n 'mean_mfcc_5': stat_mfcc_5['mean'],\n 'median_mfcc_5': stat_mfcc_5['median'],\n 'std_mfcc_5': stat_mfcc_5['std'],\n 'var_mfcc_5': stat_mfcc_5['var'],\n\n 'mean_mfcc_6': stat_mfcc_6['mean'],\n 'median_mfcc_6': stat_mfcc_6['median'],\n 'std_mfcc_6': stat_mfcc_6['std'],\n 'var_mfcc_6': stat_mfcc_6['var'],\n\n 'mean_mfcc_7': stat_mfcc_7['mean'],\n 'median_mfcc_7': stat_mfcc_7['median'],\n 'std_mfcc_7': stat_mfcc_7['std'],\n 'var_mfcc_7': stat_mfcc_7['var'],\n\n 'mean_mfcc_8': stat_mfcc_8['mean'],\n 'median_mfcc_8': stat_mfcc_8['median'],\n 'std_mfcc_8': stat_mfcc_8['std'],\n 'var_mfcc_8': stat_mfcc_8['var'],\n\n 'mean_mfcc_9': stat_mfcc_9['mean'],\n 'median_mfcc_9': stat_mfcc_9['median'],\n 'std_mfcc_9': stat_mfcc_9['std'],\n 'var_mfcc_9': stat_mfcc_9['var'],\n\n 'mean_mfcc_10': stat_mfcc_10['mean'],\n 'median_mfcc_10': stat_mfcc_10['median'],\n 'std_mfcc_10': stat_mfcc_10['std'],\n 'var_mfcc_10': stat_mfcc_10['var'],\n\n 'mean_mfcc_11': stat_mfcc_11['mean'],\n 'median_mfcc_11': stat_mfcc_11['median'],\n 'std_mfcc_11': stat_mfcc_11['std'],\n 'var_mfcc_11': stat_mfcc_11['var'],\n\n 'mean_mfcc_12': stat_mfcc_12['mean'],\n 'median_mfcc_12': stat_mfcc_12['median'],\n 'std_mfcc_12': stat_mfcc_12['std'],\n 'var_mfcc_12': stat_mfcc_12['var'],\n\n 'mean_mfcc_13': stat_mfcc_13['mean'],\n 'median_mfcc_13': stat_mfcc_13['median'],\n 'std_mfcc_13': stat_mfcc_13['std'],\n 'var_mfcc_13': stat_mfcc_13['var'],\n\n 'lpc_1': lpc[0],\n 'lpc_2': lpc[1],\n 'lpc_3': lpc[2],\n 'lpc_4': lpc[3],\n\n 'tempo': tempo,\n\n 'mean_beats': stats_beats['mean'],\n 'median_beats': stats_beats['median'],\n 'std_beats': stats_beats['std'],\n 'var_beats': stats_beats['var'],\n\n 'mean_beats_timestamp': stats_beats_timestamp['mean'],\n 'median_beats_timestamp': stats_beats_timestamp['median'],\n 'std_beats_timestamp': stats_beats_timestamp['std'],\n 'var_beats_timestamp': stats_beats_timestamp['var'],\n\n 'mean_db4_cA4': stats_db4_cA4['mean'],\n 'median_db4_cA4': stats_db4_cA4['median'],\n 'std_db4_cA4': stats_db4_cA4['std'],\n 'var_db4_cA4': stats_db4_cA4['var'],\n 'sb_energy_db4_cA4': stats_db4_cA4['sb_energy'],\n 'skewness_db4_cA4': stats_db4_cA4['skewness'],\n '5th_percentile_db4_cA4': stats_db4_cA4['5th_percentile'],\n '25th_percentile_db4_cA4': stats_db4_cA4['25th_percentile'],\n '75th_percentile_db4_cA4': stats_db4_cA4['75th_percentile'],\n '95th_percentile_db4_cA4': stats_db4_cA4['95th_percentile'],\n 'rms_db4_cA4': stats_db4_cA4['rms'],\n 'zcr_db4_cA4': stats_db4_cA4['zcr'],\n 'mcr_db4_cA4': stats_db4_cA4['mcr'],\n 'entropy_db4_cA4': stats_db4_cA4['entropy'],\n\n 'mean_db4_cD4': stats_db4_cD4['mean'],\n 'median_db4_cD4': stats_db4_cD4['median'],\n 'std_db4_cD4': stats_db4_cD4['std'],\n 'var_db4_cD4': stats_db4_cD4['var'],\n 'sb_energy_db4_cD4': stats_db4_cD4['sb_energy'],\n 'skewness_db4_cD4': stats_db4_cD4['skewness'],\n '5th_percentile_db4_cD4': stats_db4_cD4['5th_percentile'],\n '25th_percentile_db4_cD4': stats_db4_cD4['25th_percentile'],\n '75th_percentile_db4_cD4': stats_db4_cD4['75th_percentile'],\n '95th_percentile_db4_cD4': stats_db4_cD4['95th_percentile'],\n 'rms_db4_cD4': stats_db4_cD4['rms'],\n 'zcr_db4_cD4': stats_db4_cD4['zcr'],\n 'mcr_db4_cD4': stats_db4_cD4['mcr'],\n 'entropy_db4_cD4': stats_db4_cD4['entropy'],\n\n 'mean_db4_cD3': stats_db4_cD3['mean'],\n 'median_db4_cD3': stats_db4_cD3['median'],\n 'std_db4_cD3': stats_db4_cD3['std'],\n 'var_db4_cD3': stats_db4_cD3['var'],\n 'sb_energy_db4_cD3': stats_db4_cD3['sb_energy'],\n 'skewness_db4_cD3': stats_db4_cD3['skewness'],\n '5th_percentile_db4_cD3': stats_db4_cD3['5th_percentile'],\n '25th_percentile_db4_cD3': stats_db4_cD3['25th_percentile'],\n '75th_percentile_db4_cD3': stats_db4_cD3['75th_percentile'],\n '95th_percentile_db4_cD3': stats_db4_cD3['95th_percentile'],\n 'rms_db4_cD3': stats_db4_cD3['rms'],\n 'zcr_db4_cD3': stats_db4_cD3['zcr'],\n 'mcr_db4_cD3': stats_db4_cD3['mcr'],\n 'entropy_db4_cD3': stats_db4_cD3['entropy'],\n\n 'mean_db4_cD2': stats_db4_cD2['mean'],\n 'median_db4_cD2': stats_db4_cD2['median'],\n 'std_db4_cD2': stats_db4_cD2['std'],\n 'var_db4_cD2': stats_db4_cD2['var'],\n 'sb_energy_db4_cD2': stats_db4_cD2['sb_energy'],\n 'skewness_db4_cD2': stats_db4_cD2['skewness'],\n '5th_percentile_db4_cD2': stats_db4_cD2['5th_percentile'],\n '25th_percentile_db4_cD2': stats_db4_cD2['25th_percentile'],\n '75th_percentile_db4_cD2': stats_db4_cD2['75th_percentile'],\n '95th_percentile_db4_cD2': stats_db4_cD2['95th_percentile'],\n 'rms_db4_cD2': stats_db4_cD2['rms'],\n 'zcr_db4_cD2': stats_db4_cD2['zcr'],\n 'mcr_db4_cD2': stats_db4_cD2['mcr'],\n 'entropy_db4_cD2': stats_db4_cD2['entropy'],\n\n 'mean_db4_cD1': stats_db4_cD1['mean'],\n 'median_db4_cD1': stats_db4_cD1['median'],\n 'std_db4_cD1': stats_db4_cD1['std'],\n 'var_db4_cD1': stats_db4_cD1['var'],\n 'sb_energy_db4_cD1': stats_db4_cD1['sb_energy'],\n 'skewness_db4_cD1': stats_db4_cD1['skewness'],\n '5th_percentile_db4_cD1': stats_db4_cD1['5th_percentile'],\n '25th_percentile_db4_cD1': stats_db4_cD1['25th_percentile'],\n '75th_percentile_db4_cD1': stats_db4_cD1['75th_percentile'],\n '95th_percentile_db4_cD1': stats_db4_cD1['95th_percentile'],\n 'rms_db4_cD1': stats_db4_cD1['rms'],\n 'zcr_db4_cD1': stats_db4_cD1['zcr'],\n 'mcr_db4_cD1': stats_db4_cD1['mcr'],\n 'entropy_db4_cD1': stats_db4_cD1['entropy'],\n\n 'mean_db5_cA4': stats_db5_cA4['mean'],\n 'median_db5_cA4': stats_db5_cA4['median'],\n 'std_db5_cA4': stats_db5_cA4['std'],\n 'var_db5_cA4': stats_db5_cA4['var'],\n 'sb_energy_db5_cA4': stats_db5_cA4['sb_energy'],\n 'skewness_db5_cA4': stats_db5_cA4['skewness'],\n '5th_percentile_db5_cA4': stats_db5_cA4['5th_percentile'],\n '25th_percentile_db5_cA4': stats_db5_cA4['25th_percentile'],\n '75th_percentile_db5_cA4': stats_db5_cA4['75th_percentile'],\n '95th_percentile_db5_cA4': stats_db5_cA4['95th_percentile'],\n 'rms_db5_cA4': stats_db5_cA4['rms'],\n 'zcr_db5_cA4': stats_db5_cA4['zcr'],\n 'mcr_db5_cA4': stats_db5_cA4['mcr'],\n 'entropy_db5_cA4': stats_db5_cA4['entropy'],\n\n 'mean_db5_cD4': stats_db5_cD4['mean'],\n 'median_db5_cD4': stats_db5_cD4['median'],\n 'std_db5_cD4': stats_db5_cD4['std'],\n 'var_db5_cD4': stats_db5_cD4['var'],\n 'sb_energy_db5_cD4': stats_db5_cD4['sb_energy'],\n 'skewness_db5_cD4': stats_db5_cD4['skewness'],\n '5th_percentile_db5_cD4': stats_db5_cD4['5th_percentile'],\n '25th_percentile_db5_cD4': stats_db5_cD4['25th_percentile'],\n '75th_percentile_db5_cD4': stats_db5_cD4['75th_percentile'],\n '95th_percentile_db5_cD4': stats_db5_cD4['95th_percentile'],\n 'rms_db5_cD4': stats_db5_cD4['rms'],\n 'zcr_db5_cD4': stats_db5_cD4['zcr'],\n 'mcr_db5_cD4': stats_db5_cD4['mcr'],\n 'entropy_db5_cD4': stats_db5_cD4['entropy'],\n\n 'mean_db5_cD3': stats_db5_cD3['mean'],\n 'median_db5_cD3': stats_db5_cD3['median'],\n 'std_db5_cD3': stats_db5_cD3['std'],\n 'var_db5_cD3': stats_db5_cD3['var'],\n 'sb_energy_db5_cD3': stats_db5_cD3['sb_energy'],\n 'skewness_db5_cD3': stats_db5_cD3['skewness'],\n '5th_percentile_db5_cD3': stats_db5_cD3['5th_percentile'],\n '25th_percentile_db5_cD3': stats_db5_cD3['25th_percentile'],\n '75th_percentile_db5_cD3': stats_db5_cD3['75th_percentile'],\n '95th_percentile_db5_cD3': stats_db5_cD3['95th_percentile'],\n 'rms_db5_cD3': stats_db5_cD3['rms'],\n 'zcr_db5_cD3': stats_db5_cD3['zcr'],\n 'mcr_db5_cD3': stats_db5_cD3['mcr'],\n 'entropy_db5_cD3': stats_db5_cD3['entropy'],\n\n 'mean_db5_cD2': stats_db5_cD2['mean'],\n 'median_db5_cD2': stats_db5_cD2['median'],\n 'std_db5_cD2': stats_db5_cD2['std'],\n 'var_db5_cD2': stats_db5_cD2['var'],\n 'sb_energy_db5_cD2': stats_db5_cD2['sb_energy'],\n 'skewness_db5_cD2': stats_db5_cD2['skewness'],\n '5th_percentile_db5_cD2': stats_db5_cD2['5th_percentile'],\n '25th_percentile_db5_cD2': stats_db5_cD2['25th_percentile'],\n '75th_percentile_db5_cD2': stats_db5_cD2['75th_percentile'],\n '95th_percentile_db5_cD2': stats_db5_cD2['95th_percentile'],\n 'rms_db5_cD2': stats_db5_cD2['rms'],\n 'zcr_db5_cD2': stats_db5_cD2['zcr'],\n 'mcr_db5_cD2': stats_db5_cD2['mcr'],\n 'entropy_db5_cD2': stats_db5_cD2['entropy'],\n\n 'mean_db5_cD1': stats_db5_cD1['mean'],\n 'median_db5_cD1': stats_db5_cD1['median'],\n 'std_db5_cD1': stats_db5_cD1['std'],\n 'var_db5_cD1': stats_db5_cD1['var'],\n 'sb_energy_db5_cD1': stats_db5_cD1['sb_energy'],\n 'skewness_db5_cD1': stats_db5_cD1['skewness'],\n '5th_percentile_db5_cD1': stats_db5_cD1['5th_percentile'],\n '25th_percentile_db5_cD1': stats_db5_cD1['25th_percentile'],\n '75th_percentile_db5_cD1': stats_db5_cD1['75th_percentile'],\n '95th_percentile_db5_cD1': stats_db5_cD1['95th_percentile'],\n 'rms_db5_cD1': stats_db5_cD1['rms'],\n 'zcr_db5_cD1': stats_db5_cD1['zcr'],\n 'mcr_db5_cD1': stats_db5_cD1['mcr'],\n 'entropy_db5_cD1': stats_db5_cD1['entropy'],\n\n 'mean_db8_cA7': stats_db8_cA7['mean'],\n 'median_db8_cA7': stats_db8_cA7['median'],\n 'std_db8_cA7': stats_db8_cA7['std'],\n 'var_db8_cA7': stats_db8_cA7['var'],\n 'sb_energy_db8_cA7': stats_db8_cA7['sb_energy'],\n 'skewness_db8_cA7': stats_db8_cA7['skewness'],\n '5th_percentile_db8_cA7': stats_db8_cA7['5th_percentile'],\n '25th_percentile_db8_cA7': stats_db8_cA7['25th_percentile'],\n '75th_percentile_db8_cA7': stats_db8_cA7['75th_percentile'],\n '95th_percentile_db8_cA7': stats_db8_cA7['95th_percentile'],\n 'rms_db8_cA7': stats_db8_cA7['rms'],\n 'zcr_db8_cA7': stats_db8_cA7['zcr'],\n 'mcr_db8_cA7': stats_db8_cA7['mcr'],\n 'entropy_db8_cA7': stats_db8_cA7['entropy'],\n\n 'mean_db8_cD7': stats_db8_cD7['mean'],\n 'median_db8_cD7': stats_db8_cD7['median'],\n 'std_db8_cD7': stats_db8_cD7['std'],\n 'var_db8_cD7': stats_db8_cD7['var'],\n 'sb_energy_db8_cD7': stats_db8_cD7['sb_energy'],\n 'skewness_db8_cD7': stats_db8_cD7['skewness'],\n '5th_percentile_db8_cD7': stats_db8_cD7['5th_percentile'],\n '25th_percentile_db8_cD7': stats_db8_cD7['25th_percentile'],\n '75th_percentile_db8_cD7': stats_db8_cD7['75th_percentile'],\n '95th_percentile_db8_cD7': stats_db8_cD7['95th_percentile'],\n 'rms_db8_cD7': stats_db8_cD7['rms'],\n 'zcr_db8_cD7': stats_db8_cD7['zcr'],\n 'mcr_db8_cD7': stats_db8_cD7['mcr'],\n 'entropy_db8_cD7': stats_db8_cD7['entropy'],\n\n 'mean_db8_cD6': stats_db8_cD6['mean'],\n 'median_db8_cD6': stats_db8_cD6['median'],\n 'std_db8_cD6': stats_db8_cD6['std'],\n 'var_db8_cD6': stats_db8_cD6['var'],\n 'sb_energy_db8_cD6': stats_db8_cD6['sb_energy'],\n 'skewness_db8_cD6': stats_db8_cD6['skewness'],\n '5th_percentile_db8_cD6': stats_db8_cD6['5th_percentile'],\n '25th_percentile_db8_cD6': stats_db8_cD6['25th_percentile'],\n '75th_percentile_db8_cD6': stats_db8_cD6['75th_percentile'],\n '95th_percentile_db8_cD6': stats_db8_cD6['95th_percentile'],\n 'rms_db8_cD6': stats_db8_cD6['rms'],\n 'zcr_db8_cD6': stats_db8_cD6['zcr'],\n 'mcr_db8_cD6': stats_db8_cD6['mcr'],\n 'entropy_db8_cD6': stats_db8_cD6['entropy'],\n\n 'mean_db8_cD5': stats_db8_cD5['mean'],\n 'median_db8_cD5': stats_db8_cD5['median'],\n 'std_db8_cD5': stats_db8_cD5['std'],\n 'var_db8_cD5': stats_db8_cD5['var'],\n 'sb_energy_db8_cD5': stats_db8_cD5['sb_energy'],\n 'skewness_db8_cD5': stats_db8_cD5['skewness'],\n '5th_percentile_db8_cD5': stats_db8_cD5['5th_percentile'],\n '25th_percentile_db8_cD5': stats_db8_cD5['25th_percentile'],\n '75th_percentile_db8_cD5': stats_db8_cD5['75th_percentile'],\n '95th_percentile_db8_cD5': stats_db8_cD5['95th_percentile'],\n 'rms_db8_cD5': stats_db8_cD5['rms'],\n 'zcr_db8_cD5': stats_db8_cD5['zcr'],\n 'mcr_db8_cD5': stats_db8_cD5['mcr'],\n 'entropy_db8_cD5': stats_db8_cD5['entropy'],\n\n 'mean_db8_cD4': stats_db8_cD4['mean'],\n 'median_db8_cD4': stats_db8_cD4['median'],\n 'std_db8_cD4': stats_db8_cD4['std'],\n 'var_db8_cD4': stats_db8_cD4['var'],\n 'sb_energy_db8_cD4': stats_db8_cD4['sb_energy'],\n 'skewness_db8_cD4': stats_db8_cD4['skewness'],\n '5th_percentile_db8_cD4': stats_db8_cD4['5th_percentile'],\n '25th_percentile_db8_cD4': stats_db8_cD4['25th_percentile'],\n '75th_percentile_db8_cD4': stats_db8_cD4['75th_percentile'],\n '95th_percentile_db8_cD4': stats_db8_cD4['95th_percentile'],\n 'rms_db8_cD4': stats_db8_cD4['rms'],\n 'zcr_db8_cD4': stats_db8_cD4['zcr'],\n 'mcr_db8_cD4': stats_db8_cD4['mcr'],\n 'entropy_db8_cD4': stats_db8_cD4['entropy'],\n\n 'mean_db8_cD3': stats_db8_cD3['mean'],\n 'median_db8_cD3': stats_db8_cD3['median'],\n 'std_db8_cD3': stats_db8_cD3['std'],\n 'var_db8_cD3': stats_db8_cD3['var'],\n 'sb_energy_db8_cD3': stats_db8_cD3['sb_energy'],\n 'skewness_db8_cD3': stats_db8_cD3['skewness'],\n '5th_percentile_db8_cD3': stats_db8_cD3['5th_percentile'],\n '25th_percentile_db8_cD3': stats_db8_cD3['25th_percentile'],\n '75th_percentile_db8_cD3': stats_db8_cD3['75th_percentile'],\n '95th_percentile_db8_cD3': stats_db8_cD3['95th_percentile'],\n 'rms_db8_cD3': stats_db8_cD3['rms'],\n 'zcr_db8_cD3': stats_db8_cD3['zcr'],\n 'mcr_db8_cD3': stats_db8_cD3['mcr'],\n 'entropy_db8_cD3': stats_db8_cD3['entropy'],\n\n 'mean_db8_cD2': stats_db8_cD2['mean'],\n 'median_db8_cD2': stats_db8_cD2['median'],\n 'std_db8_cD2': stats_db8_cD2['std'],\n 'var_db8_cD2': stats_db8_cD2['var'],\n 'sb_energy_db8_cD2': stats_db8_cD2['sb_energy'],\n 'skewness_db8_cD2': stats_db8_cD2['skewness'],\n '5th_percentile_db8_cD2': stats_db8_cD2['5th_percentile'],\n '25th_percentile_db8_cD2': stats_db8_cD2['25th_percentile'],\n '75th_percentile_db8_cD2': stats_db8_cD2['75th_percentile'],\n '95th_percentile_db8_cD2': stats_db8_cD2['95th_percentile'],\n 'rms_db8_cD2': stats_db8_cD2['rms'],\n 'zcr_db8_cD2': stats_db8_cD2['zcr'],\n 'mcr_db8_cD2': stats_db8_cD2['mcr'],\n 'entropy_db8_cD2': stats_db8_cD2['entropy'],\n\n 'mean_db8_cD1': stats_db8_cD1['mean'],\n 'median_db8_cD1': stats_db8_cD1['median'],\n 'std_db8_cD1': stats_db8_cD1['std'],\n 'var_db8_cD1': stats_db8_cD1['var'],\n 'sb_energy_db8_cD1': stats_db8_cD1['sb_energy'],\n 'skewness_db8_cD1': stats_db8_cD1['skewness'],\n '5th_percentile_db8_cD1': stats_db8_cD1['5th_percentile'],\n '25th_percentile_db8_cD1': stats_db8_cD1['25th_percentile'],\n '75th_percentile_db8_cD1': stats_db8_cD1['75th_percentile'],\n '95th_percentile_db8_cD1': stats_db8_cD1['95th_percentile'],\n 'rms_db8_cD1': stats_db8_cD1['rms'],\n 'zcr_db8_cD1': stats_db8_cD1['zcr'],\n 'mcr_db8_cD1': stats_db8_cD1['mcr'],\n 'entropy_db8_cD1': stats_db8_cD1['entropy'],\n }\n\n # append new row\n dataframe = dataframe.append(new_row, ignore_index=True)\n\n feedback(file, genre_label)\n\n return dataframe\n\n\ndef map_beats_to_timestamp(filepath):\n \"\"\"\n map_beats_to_timestamp() maps\n beats (numpy.Array) to timestamps (numpy.Array) taking\n as parameter the filepath (str) of a .wav audio file\n \"\"\"\n output = []\n\n sample_rate, time_series = get_time_series_sample_rate(filepath)\n _, beats, beats_timestamps = extract_beats_time(time_series, sample_rate)\n\n for x, y in zip(beats, beats_timestamps):\n output.append({'beats': x.item(), 'timestamps': y.item()})\n\n return output\n\n\ndef get_model_pipeline():\n \"\"\"\n get_model_pipeline() returns the preprocessing pipeline and model\n \"\"\"\n import requests\n\n pipeline_estimator_path = BASE_DIR_PATH + '/amgc_project/app/file/pipeline_estimator.pkl'\n model_path = BASE_DIR_PATH + '/amgc_project/app/file/cnn_model.h5'\n\n # check if pipeline and model has already been downloaded\n if not (os.path.isfile(pipeline_estimator_path) and os.path.isfile(model_path)):\n pipeline_estimator_url = \"https://storage.googleapis.com/music-dataset-bucket/model/pipeline_estimator_3.pkl\"\n model_url = \"https://storage.googleapis.com/music-dataset-bucket/model/cnn_model_3.h5\"\n\n # get pipeline and model from bucket & store them\n try:\n pipeline_resp = requests.get(pipeline_estimator_url)\n model_resp = requests.get(model_url)\n\n with open(pipeline_estimator_path, 'wb') as p_file, open(model_path, 'wb') as m_file:\n p_file.write(pipeline_resp.content)\n m_file.write(model_resp.content)\n\n except requests.HTTPError as err:\n print(f'Operation failed (HTTP Error): {err.strerror}')\n\n except IOError as err:\n print(f'Operation failed (IOError): {err.strerror}')\n\n # load preprocessing pipeline and model instance\n pipeline_estimator = joblib.load(pipeline_estimator_path)\n model = load_model(model_path)\n\n return pipeline_estimator, model\n\n\ndef is_silent(filepath, silence_threshold=-30):\n \"\"\"\n is_silent() returns True if the audio file is filled with\n more than 30% of silence and False if otherwise taking\n as parameter the filepath (str) of a .wav audio file &\n the silence threshold (int).\n \"\"\"\n audio_segment = pydub.AudioSegment.from_wav(filepath)\n no_of_chunk, audio_len = 3, len(audio_segment)\n silence, chunk_len = 0, audio_len/no_of_chunk\n\n for i in range(no_of_chunk):\n start_at, end_at = i * chunk_len, (i+1) * chunk_len\n chunk = audio_segment[start_at:end_at]\n silence = silence + pydub.silence.detect_leading_silence(\n chunk, silence_threshold=silence_threshold)\n\n silence_percentage = silence / len(audio_segment) * 100\n if silence_percentage >= 30:\n return True\n return False\n\n\ndef extract_features_make_prediction(filepath):\n \"\"\"\n extract_features_make_prediction() takes the path to a .wav audio\n file as input. It extract features from the audio file,\n scales these features & make prediction with them.\n\n Parameter:\n filepath (str): path to a .wav audio file\n \"\"\"\n\n if is_silent(filepath):\n return {}\n\n # extract features\n features = extract_audio_features(dataframe, filepath, '', '')\n\n # extract beats & timestamp separately for analysis\n beats_and_timestamps = map_beats_to_timestamp(filepath)\n\n # drop some columns\n features = features.drop(['data_source', 'lpc_1'], axis=1)\n\n # filter wavelet features & collect labels\n wavelet_predictors = features.filter(regex=(r'.+_db[458]{1}_.+'))\n wavelet_predictors_labels = wavelet_predictors.columns.values\n\n # collect timbral & rhythmic features\n timbral_rhythmic_predictors = features.loc[:, features.columns.difference(\n numpy.append(wavelet_predictors_labels, 'genre_label'))]\n X = timbral_rhythmic_predictors\n\n # load preprocessing pipeline and model instance\n pipeline_estimator, model = get_model_pipeline()\n\n # transform data & make predictions\n X = pipeline_estimator.transform(X)\n prediction = model.predict(X)\n\n # map predictions to genres\n map_prediction_to_genre = {}\n for i in range(3):\n map_prediction_to_genre[GENRES[i]] = prediction[0][i].item()\n\n data = {\n 'prediction': map_prediction_to_genre,\n 'analysis': beats_and_timestamps\n }\n\n return data\n","repo_name":"DKMDebugin/amgc","sub_path":"feature_extraction_deep_learning/custom_module/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":61734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20932544665","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom diting.encoding import *\n\nimport numpy as np\nimport pandas as pd\n\nfrom keras.models import Sequential\nfrom keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPooling2D\nfrom keras.layers import LSTM\nfrom keras.layers import Dense, Activation, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import plot_model\nfrom keras import backend as K\n\nfrom bayes_opt import BayesianOptimization\n\nimport subprocess\n\ndef model_CNN(dataPath, feature, target):\n\t\n\tdef model(inputShape, numClasses, n_filters, n_kernel_size, n_units):\n\t\tn_filters = int(round(n_filters))\n\t\tn_kernel_size = int(round(n_kernel_size))\n\t\tn_units = int(round(n_units))\n\t\t\n\t\tcnnModel = Sequential()\n\t\tcnnModel.add(Conv2D(input_shape=inputShape, filters=n_filters, kernel_size=(n_kernel_size, inputShape[-1]), activation=\"relu\", padding =\"same\", name=\"conv2d_1\", data_format=\"channels_first\"))\n\t\tcnnModel.add(MaxPooling2D(pool_size=(int(inputShape[1]/2), 1), strides=None, name=\"max_pooling2d_1\", data_format=\"channels_first\"))\n\t\tcnnModel.add(Flatten(name=\"flatten_1\"))\n\t\tcnnModel.add(BatchNormalization(name=\"batch_normalization_1\"))\n\t\tcnnModel.add(Dense(units=n_units, name=\"dense_1\"))\n\t\tcnnModel.add(Activation(\"relu\", name=\"activation_1\"))\n\t\tcnnModel.add(BatchNormalization(name=\"batch_normalization_2\"))\n\t\tcnnModel.add(Dense(units=numClasses, name=\"dense_2\"))\n\t\tcnnModel.add(Activation(\"softmax\", name=\"activation_2\"))\n\t\tcnnModel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n\t\treturn cnnModel\n\t\n\tdef optimize(n_filters, n_kernel_size, n_units, n_epoch, n_batch_size):\n\t\tX_train, y_train, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"train\", \"3D\")\n\t\tX_val, y_val, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"val\", \"3D\")\n\t\t\n\t\tn_model = model(inputShape, numClasses, n_filters, n_kernel_size, n_units)\n\t\tn_epoch = int(round(n_epoch))\n\t\tn_batch_size = int(round(n_batch_size))\n\t\t\n\t\tcnn = n_model.fit(X_train, y_train, batch_size=n_batch_size, validation_data=(X_val, y_val), epochs=n_epoch, verbose=0)\n\t\treturn cnn.history[\"val_acc\"][-1]\n\t\n\toptimizer = BayesianOptimization(\n\t\tf = optimize, \n\t\tpbounds = {\n\t\t\t\"n_filters\":(12, 16),\n\t\t\t\"n_kernel_size\":(12, 24),\n\t\t\t\"n_units\":(1, 24),\n\t\t\t\"n_epoch\":(1,100),\n\t\t\t\"n_batch_size\":(1,16)\n\t\t},\n\t\trandom_state = 8853\n\t)\n\toptimizer.maximize(n_iter = 2)\n#\toptimizer.maximize(n_iter = 2, alpha = 1e-2, n_restarts_optimizer = 2, acq=\"ucb\", kappa=5)\n\tprint(optimizer.max)\n\n\tn_filters = int(round(optimizer.max[\"params\"][\"n_filters\"]))\n\tn_kernel_size = int(round(optimizer.max[\"params\"][\"n_kernel_size\"]))\n\tn_units = int(round(optimizer.max[\"params\"][\"n_units\"]))\n\tn_epoch = int(round(optimizer.max[\"params\"][\"n_epoch\"]))\n\tn_batch_size = int(round(optimizer.max[\"params\"][\"n_batch_size\"]))\n\tX_train, y_train, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"train\", \"3D\")\n\tX_val, y_val, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"val\", \"3D\")\n\t\n\tcnn = model(inputShape, numClasses, n_filters, n_kernel_size, n_units)\n\tprint(cnn.summary())\n\tcnn.fit(X_train, y_train, batch_size=n_batch_size, validation_data=(X_val, y_val), epochs=n_epoch, verbose=0)\n\tcnn.save(dataPath+feature+\"_cnn.h5\")\n\tplot_model(cnn, to_file=dataPath+feature+\"_cnn.png\")\n\treturn cnn\n\n\n\ndef model_LSTM(dataPath, feature, target):\n\t\n\tdef model(inputShape, numClasses, n_lstmunits, n_units):\n\t\tn_lstmunits = int(round(n_lstmunits))\n\t\tn_units = int(round(n_units))\n\t\t\n\t\tlstmModel = Sequential()\n\t\tlstmModel.add(LSTM(units=n_lstmunits, input_shape=inputShape, name=\"lstm_1\"))\n\t\tlstmModel.add(BatchNormalization(name=\"batch_normalization_1\"))\n\t\tlstmModel.add(Dense(units=n_units, name=\"dense_1\"))\n\t\tlstmModel.add(Activation(\"relu\", name=\"activation_1\"))\n\t\tlstmModel.add(BatchNormalization(name=\"batch_normalization_2\"))\n\t\tlstmModel.add(Dense(units=numClasses, name=\"dense_2\"))\n\t\tlstmModel.add(Activation(\"softmax\", name=\"activation_2\"))\n\t\tlstmModel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n\t\treturn lstmModel\n\t\n\tdef optimize(n_lstmunits, n_units, n_epoch, n_batch_size):\n\t\tX_train, y_train, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"train\", \"2D\")\n\t\tX_val, y_val, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"val\", \"2D\")\n\t\t\n\t\tn_model = model(inputShape, numClasses, n_lstmunits, n_units)\n\t\tn_epoch = int(round(n_epoch))\n\t\tn_batch_size = int(round(n_batch_size))\n\t\t\n\t\tlstm = n_model.fit(X_train, y_train, batch_size=n_batch_size, validation_data=(X_val, y_val), epochs=n_epoch, verbose=0)\n\t\treturn lstm.history[\"val_acc\"][-1]\n\t\n\toptimizer = BayesianOptimization(\n\t\tf = optimize, \n\t\tpbounds = {\n\t\t\t\"n_lstmunits\":(1, 16),\n\t\t\t\"n_units\":(1, 16),\n\t\t\t\"n_epoch\":(1,100),\n\t\t\t\"n_batch_size\":(1,32)\n\t\t},\n\t\trandom_state = 8853\n\t)\n\toptimizer.maximize(n_iter = 2, alpha = 1e-2, n_restarts_optimizer = 2, acq=\"ucb\", kappa=5)\n\tprint(optimizer.max)\n\n\tn_lstmunits = int(round(optimizer.max[\"params\"][\"n_lstmunits\"]))\n\tn_units = int(round(optimizer.max[\"params\"][\"n_units\"]))\n\tn_epoch = int(round(optimizer.max[\"params\"][\"n_epoch\"]))\n\tn_batch_size = int(round(optimizer.max[\"params\"][\"n_batch_size\"]))\n\tX_train, y_train, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"train\", \"2D\")\n\tX_val, y_val, inputShape, numClasses = loadKmerEncodedMat(dataPath, feature, target, \"val\", \"2D\")\n\t\n\tlstm = model(inputShape, numClasses, n_lstmunits, n_units)\n\tprint(lstm.summary())\n\tlstm.fit(X_train, y_train, batch_size=n_batch_size, validation_data=(X_val, y_val), epochs=n_epoch, verbose=0)\n\tlstm.save(dataPath+feature+\"_lstm.h5\")\n\tplot_model(lstm, to_file=dataPath+feature+\"_lstm.png\")\n\treturn lstm\n\n\n\ndef getEnsembleMat(firstBin, leftBead, totalBin, npArray, beadSize, mat):\n\tif leftBead == 0:\n\t\tmat.append(np.array(npArray))\n\t\treturn\n\tfor bead in range(leftBead, -beadSize, -beadSize):\n\t\tif firstBin == totalBin:\n\t\t\treturn\n\t\tnpArray[firstBin] = bead\n\t\tgetEnsembleMat(firstBin+1, leftBead-bead, totalBin, npArray, beadSize, mat)\n\treturn\n\n\ndef getBestEnsemble(binNumber, beadNumber, beadSize, preds, y_val):\n\tnpArray = np.zeros(binNumber)\n\talphaMat = []\n\tgetEnsembleMat(0, beadNumber, binNumber, npArray, beadSize, alphaMat)\n\talphaMat = np.array(alphaMat) / 100\n\n\terror = 1\n\tbest = []\n\tfor alpha in alphaMat:\n\t\ttotal = 0\n\t\tfor i in range(len(alpha)):\n\t\t\ttotal += alpha[i]*preds[i]\n\t\ttotal = np.argmax(total, axis=1)\n\t\terr = np.sum(np.not_equal(total, np.argmax(y_val, axis=1))) / y_val.shape[0]\n\t\tif err < error:\n\t\t\terror = err\n\t\t\tbest = alpha\n\treturn error, best\n\n\ndef inSilicoMutagenesis(seq, pos):\n\tseqs = []\n\tfor i in pos:\n\t\ttmpSeq = []\n\t\ttmpSeq.append(seq[:i] + \"A\" + seq[i+1:])\n\t\ttmpSeq.append(seq[:i] + \"C\" + seq[i+1:])\n\t\ttmpSeq.append(seq[:i] + \"G\" + seq[i+1:])\n\t\ttmpSeq.append(seq[:i] + \"T\" + seq[i+1:])\n\t\tseqs.append(tmpSeq)\n\tseqs = np.array(seqs)\n\treturn seqs\n\ndef getMutationScores(seqs, pos, oriData, model, encoding, params):\n\tpred = model.predict(oriData)\n\tmutScores = []\n\tfor o, seq in enumerate(seqs): \n\t\tseqMat = inSilicoMutagenesis(seq, pos)\n\t\ttarget = np.argmax(pred[o])\n\t\toriScore = pred[o][target]\n\t\tscores = []\n\t\tfor i in range(0,seqMat.shape[0]):\n\t\t\ttmpScores = []\n\t\t\tfor j in range(0,seqMat.shape[1]):\n\t\t\t\ttmpMat = kmerEncoding3D(kmerEncoding3D(encoding(seqMat[i][j], params[0], params[1])))\n\t\t\t\ttmpScores.append(model.predict(tmpMat)[0][target]-oriScore)\n\t\t\tscores.append(tmpScores)\n\t\tmutScores.append(scores)\n\tmutScores = np.array(mutScores)\n\treturn mutScores\n\n\ndef getGradients(model, layerName, data):\n\tloss = model.get_layer(layerName).output\n\tgrads = K.gradients(loss, [model.input])[0]\n\tfn = K.function([model.input], [loss, grads])\n\treturn fn([data])\n\ndef getSeqImportance(model, layerName, data):\n\treturn getGradients(model, layerName, data)[1]\n\ndef getSeqMotifs(model, layerName, data, seqs, dataPath):\n\tmotifs = np.mean(getGradients(model, layerName, data)[0], axis=-1)\n\tmotifs = np.transpose(motifs,(1,0,2))\n\tmotifLen = model.get_layer(layerName).get_weights()[0].shape[0]\n\tmotifLen2 = (motifLen - 1) // 2\n\tfor i, motif in enumerate(motifs):\n\t\tfileName = dataPath + layerName + \"_motif_\" + str(i+1)\n\t\tfasta = open(fileName+\".fasta\", \"w\")\n\t\tfor j, pos in enumerate(motif):\n\t\t\tk = np.argmax(pos)\n\t\t\tmot = seqs[j][k-motifLen2:k+motifLen-motifLen2]\n\t\t\tif len(mot) == motifLen:\n\t\t\t\tfasta.write(\">%d_%d_%f\\n%s\\n\" % (j, k, pos[k], mot))\n\t\tfasta.close()\n\t\tweblogoCmd = 'weblogo -X NO -Y NO --errorbars NO --fineprint \"\" -C \"#008000\" A A -C \"#0000cc\" C C -C \"#ffb300\" G G -C \"#cc0000\" T T < %s.fasta > %s.eps' % (fileName, fileName)\n\t\tsubprocess.call(weblogoCmd, shell=True)\n","repo_name":"WenlongShen/Diting","sub_path":"diting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12541910294","text":"def solve():\n tests = int(input())\n for _ in range(tests):\n n, m = list(map(int, input().split()))\n\n if n % 2 == 0 and m % 2 == 0:\n print('abdullah')\n else:\n print('hasan')\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"ffekirnew/a2sv-contests","sub_path":"contest-25/c-rock_piles.py","file_name":"c-rock_piles.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18582800083","text":"def search2DMatrix(matrix, target):\n\n # uses decrease and conquer to determine potential row\n def binarySearchRow(_matrix, _target):\n start = 0\n end = len(_matrix) - 1\n\n while start <= end:\n mid = start + (end - start) // 2\n row = _matrix[mid]\n\n if row[0] <= _target <= row[-1]:\n return row\n\n if row[0] > _target:\n end = mid - 1\n elif row[0] < _target:\n start = mid + 1\n\n return None\n\n # uses binary seaarch to find target\n def binarySearch(arr, _target):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n mid = start + (end - start) // 2\n\n if arr[mid] > _target:\n end = mid - 1\n elif arr[mid] < _target:\n start = mid + 1\n else:\n return True\n\n return False\n\n potentialRow = binarySearchRow(matrix, target)\n return binarySearch(potentialRow, target) if potentialRow else False\n\n\nmatrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]\n\nprint(search2DMatrix(matrix, 16) == True)\nprint(search2DMatrix(matrix, 3) == True)\nprint(search2DMatrix(matrix, 100) == False)\n","repo_name":"Lobarr/interview-practice","sub_path":"problems/search2DMatrix.py","file_name":"search2DMatrix.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73046506935","text":"import requests\nfrom tcex import TcEx\nfrom plyara import Plyara, utils\nimport json\n\n\n\n\"\"\"Move below back to config file\"\"\"\nVT_API_KEY = \"\"\nRULESET_URL = \"https://www.virustotal.com/api/v3/intelligence/hunting_rulesets\"\nTC_CONFIG = {\n \"api_access_id\": \"\",\n \"api_default_org\": \"\",\n \"api_secret_key\": \"\",\n \"tc_api_path\": \"\",\n \"tc_log_level\": \"debug\",\n \"tc_log_path\": \"log\",\n \"tc_token\": \"\",\n \"tc_token_expires\": 0\n}\n\nDATASTORE_HOME = 'virustotal'\nDATASTORE_UPDATES_TABLE = 'last_update_table'\nDATASTORE_MAPPINGS = 'rule_mappings'\nDATASTORE_ID_LOOKUP_TABLE = 'id_lookup_table'\nOWNER = 'Research Labs'\n\ntcex = TcEx(config=TC_CONFIG)\n\n\ndef get_rulesets(limit=None, cursor=None, vt_filter=None):\n params = {}\n if limit:\n params['limit'] = limit\n if vt_filter:\n params['filter'] = vt_filter\n if cursor:\n params['cursor'] = cursor\n response = requests.get(\n RULESET_URL,\n headers={'x-apikey': VT_API_KEY},\n params=params\n ).json()\n new_cursor = response.get('meta', dict()).get('cursor')\n yield response['data']\n if new_cursor:\n yield from get_rulesets(limit, new_cursor, vt_filter)\n else:\n yield list()\n\n\ndef create_rules_in_tc(rules, raw_rules, ruleset_name, id_lookup_table, mappings):\n # TODO: Add meta fields as tags (Sandbox Restricted)\n groups = tcex.ti.group(owner=OWNER) # Owner might need to be brought up as an arg\n parameters = {'includes': ['additional', 'attributes', 'labels', 'tags']}\n for rule in rules:\n rule_name = rule['rule_name']\n rule_content = utils.rebuild_yara_rule(rule)\n lookup_context = id_lookup_table.setdefault(\n ruleset_name, dict()).setdefault(\n rule_name, dict())\n versions = lookup_context.setdefault(\n 'versions', dict())\n groups = lookup_context.setdefault(\n 'groups', list())\n group_created = False\n lookup_context['priority'] = next(filter(lambda x: 'priority' in x, rule.get('metadata', [])), None)\n lookup_context['restricted'] = next(filter(lambda x: 'sandbox_restricted' in x, rule.get('metadata', [])), None)\n version = next(filter(lambda x: 'version' in x, rule.get('metadata', [])), None) or '1.0'\n signature_name = '{} : {} V{}'.format(ruleset_name, rule_name, version)\n kwargs = {\n 'group_type': 'Signature',\n 'name': signature_name,\n 'file_name': '{}.yara'.format(rule_name),\n 'file_type': 'YARA',\n 'file_text': rule_content,\n 'owner': OWNER\n }\n try:\n existing_id = lookup_context['versions'][version]\n except KeyError:\n existing_id = False\n signature_ti = tcex.ti.group(**kwargs)\n r = signature_ti.create()\n response_id = r.json()['data']['signature']['id']\n for v in versions:\n version_ti = tcex.ti.group(group_type='Signature', unique_id=versions[v], owner=OWNER)\n signature_ti.add_association(target=version_ti)\n versions[version] = response_id\n lookup_context['latest'] = response_id\n else:\n kwargs['unique_id'] = existing_id\n signature_ti = tcex.ti.group(**kwargs)\n list(\n map(signature_ti.delete_attribute,\n [item['id'] for item in signature_ti.attributes()]))\n list(\n map(signature_ti.delete_tag,\n [item['name'] for item in signature_ti.tags()]))\n signature_ti.update()\n\n for meta in rule.get('metadata', []):\n if not meta:\n continue\n attr_type, attr_value = meta.popitem()\n if attr_type in mappings.get('associations', dict()):\n group_type = mappings['associations'][attr_type]\n for group_name in attr_value.split(','):\n filters = tcex.ti.filters()\n filters.add_filter('name', '=', group_name)\n a_groups = tcex.ti.group(group_type=group_type, owner=OWNER)\n returned_groups = list(a_groups.many(filters=filters, params=parameters))\n if not returned_groups:\n group_ti = tcex.ti.group(group_type=group_type, name=group_name, owner=OWNER)\n r = group_ti.create()\n groups.append({\n 'name': group_name,\n 'id': r.json()['data'][group_type.lower()]['id'],\n 'type': group_type\n })\n signature_ti.add_association(target=group_ti)\n continue\n for g in returned_groups:\n group_ti = tcex.ti.group(group_type=group_type, unique_id=g['id'], owner=OWNER)\n groups.append({\n 'name': group_name,\n 'id': g['id'],\n 'type': group_type\n })\n signature_ti.add_association(target=group_ti)\n group_created = True\n elif attr_type == 'tags' and mappings.get('tags'):\n list(map(\n lambda tag: signature_ti.add_tag(name='τ {}'.format(tag)),\n attr_value.split(',')))\n elif attr_type in mappings.get('boolean_tags', []) and attr_value:\n signature_ti.add_tag(name='ζ {}'.format(attr_type.replace('_', ' ').title()))\n elif mappings.get('attributes', dict()).get(attr_type):\n signature_ti.add_attribute(\n attribute_type=mappings['attributes'].get(attr_type),\n attribute_value=attr_value\n )\n if not group_created and mappings.get('default_association'):\n group_ti = tcex.ti.group(\n group_type=mappings['default_association'],\n name=rule_name,\n owner=OWNER)\n r = group_ti.create()\n groups.append({\n 'name': rule_name,\n 'id': r.json()['data'][mappings['default_association'].lower()]['id'],\n 'type': mappings['default_association']\n })\n signature_ti.add_association(target=group_ti)\n signature_ti.add_tag(name='α {}'.format(ruleset_name))\n signature_ti.add_tag(name='β {}'.format(rule_name))\n\ndef main():\n parser = Plyara()\n ds = tcex.datastore('organization', DATASTORE_HOME)\n updates_table = id_lookup_table = {}\n # ds.add(rid=DATASTORE_UPDATES_TABLE, data=updates_table) # uncomment to wipe ds\n # ds.add(rid=DATASTORE_ID_LOOKUP_TABLE, data=id_lookup_table)\n # return\n try:\n updates_table = ds.get(rid=DATASTORE_UPDATES_TABLE)['_source']\n except RuntimeError:\n ds.add(rid=DATASTORE_UPDATES_TABLE, data=updates_table)\n try:\n id_lookup_table = ds.get(rid=DATASTORE_ID_LOOKUP_TABLE)['_source']\n except RuntimeError:\n ds.add(rid=DATASTORE_ID_LOOKUP_TABLE, data=id_lookup_table)\n try:\n mappings = ds.get(rid=DATASTORE_MAPPINGS)['_source']\n if isinstance(mappings, str):\n mappings = json.loads(mappings)\n if 'data' in mappings:\n mappings = mappings['data'] if isinstance(mappings['data'], dict) else json.loads(mappings['data']) # probably not necessary but just to be safe\n except RuntimeError as e:\n print(e.args[1]) # tcex log\n return\n r = get_rulesets(limit=40)\n data = next(r)\n while data:\n for ruleset in data:\n modification_date = ruleset['attributes']['modification_date']\n ruleset_name = ruleset['attributes']['name']\n ruleset_id = ruleset['id']\n last_update = updates_table.get(ruleset_id)\n # last_update = False # uncomment when testing\n if not last_update or modification_date > last_update:\n raw_rules = ruleset['attributes']['rules']\n rules = parser.parse_string(raw_rules)\n create_rules_in_tc(rules, raw_rules.split('\\n'), ruleset_name, id_lookup_table, mappings)\n print('{} Ruleset Processed'.format(ruleset_name))\n updates_table[str(ruleset_id)] = modification_date\n parser.clear()\n data = next(r)\n ds.add(rid=DATASTORE_UPDATES_TABLE, data=updates_table)\n ds.add(rid=DATASTORE_ID_LOOKUP_TABLE, data=id_lookup_table)\n\nmain()","repo_name":"cezhunter/ThreatConnect","sub_path":"rule_sync.py","file_name":"rule_sync.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39221628882","text":"try :\r\n #x = 1/0\r\n x = open('filename','x')\r\nexcept FileExistsError as e :\r\n print(e)\r\n\r\nfinally :\r\n print('I Looks like a header')\r\n\r\ntry :\r\n x = None\r\n if x is None :\r\n raise Exception\r\n \r\nexcept Exception as e :\r\n print('I Love Exception')\r\n\r\n\r\ntry :\r\n gateway = \"Gateway:Opened\"\r\n print(gateway)\r\n x = 2 + '2'\r\n print(x)\r\n\r\nexcept Exception as e :\r\n print(f'The Exception caught is {e}')\r\n\r\nfinally :\r\n gateway = \"Gateway:Closed\"\r\n print(gateway)","repo_name":"pratushdevelopment/djangoPractice","sub_path":"prac1.py","file_name":"prac1.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17676574133","text":"import cssmin\nimport sys\n\npath = \"../dist/styles/\"\n\ndef minify(file):\n output = cssmin.cssmin(open(path + file).read())\n # print(output)\n with open(path + file[0:file.index(\".\")]+\".min.css\", \"w\") as f:\n # print(\"./static/\" + file[0:file.index(\".\")]+\".min.css\", \"w\")\n f.write(output)\n\nminify(sys.argv[1])\nprint(path + sys.argv[1][0:sys.argv[1].index(\".\")]+\".min.css\" + \" produced\")\n\n","repo_name":"5idneyD/BasicAccounting","sub_path":"bin/minifyCSS.py","file_name":"minifyCSS.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"23533320059","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @File : dir_config.py\nimport os\nimport sys\nimport time\n\n\n# Get the root path of the project under different system environments\nif \"win\" in sys.platform:\n BASEDIR = os.path.dirname(os.path.dirname(__file__))\nelif \"linux\" in sys.platform:\n BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n \n# Equipped with httprunner2 X frame; Project catalogue\nSWAGGERDIR = os.path.join(BASEDIR, \"swagger\") \nTESTSUITEDIR = os.path.join(SWAGGERDIR, \"testsuites\")\nTESTCASEDIR = os.path.join(SWAGGERDIR, \"testcases\")\nAPIDIR = os.path.join(SWAGGERDIR, \"api\")\n\n# REPORT PATH\nREPORTDIR = os.path.join(BASEDIR, \"reports\")\n\n# Profile path\nPROFILEDIR = os.path.join(BASEDIR, \"properties\") \nPROFILEPATH = os.path.join(PROFILEDIR, \"config.ini\")\n \n# Log path\nLOGDIR = os.path.join(BASEDIR, \"logs\")\nLOGFILEPATH = os.path.join(LOGDIR, \"AllServer.log\")\n \n# Backup path\nBACKUPDIR = os.path.join(BASEDIR, \"swaggerBackUp\") \n\n# Test case data file\nCSVFILEPATH = os.path.join(SWAGGERDIR, \"Api_TestCases.csv\")\nEXCELFILEPATH = os.path.join(SWAGGERDIR, \"Api_TestCases.xlsx\")\nBACKTESTCASEPATH = os.path.join(SWAGGERDIR, \"Api_TestCases_bak_{}.xlsx\".format(time.strftime('%Y-%m-%d_%H-%m')))\n\n\nif __name__ == '__main__':\n print(SWAGGERDIR)\n","repo_name":"xuping2012/httprunner_swagger","sub_path":"common/dir_config.py","file_name":"dir_config.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"22"} +{"seq_id":"2729250198","text":"from api.apis.carriers.fedex.soap_objects.common.version_id import VersionId\nfrom api.apis.carriers.fedex.soap_objects.common.web_authentication_detail import (\n WebAuthenticationDetail,\n)\nfrom api.apis.carriers.fedex.soap_objects.ship.client_detail import ClientDetail\nfrom api.apis.carriers.fedex.soap_objects.ship.requested_shipment import (\n RequestedShipment,\n)\nfrom api.apis.carriers.fedex.soap_objects.soap_object import FedExSoapObject\n\n\nclass ProcessShipmentRequest(FedExSoapObject):\n _required_keys = {\n \"WebAuthenticationDetail\",\n \"ClientDetail\",\n \"RequestedShipment\",\n \"Version\",\n }\n _optional_keys = {\"TransactionDetail\"}\n\n def __init__(\n self,\n gobox_request: dict,\n master_tracking=None,\n sequence=None,\n num_packages: int = None,\n ):\n request = RequestedShipment(\n gobox_request=gobox_request,\n master_tracking=master_tracking,\n sequence=sequence,\n num_packages=num_packages,\n )\n\n version = VersionId(\n version={\"ServiceId\": \"ship\", \"Major\": 23, \"Intermediate\": 0, \"Minor\": 0}\n )\n\n client = ClientDetail(\n account_number=gobox_request[\"account_number\"],\n meter_number=gobox_request[\"meter_number\"],\n )\n\n auth = WebAuthenticationDetail(\n key=gobox_request[\"key\"],\n password=gobox_request[\"password\"],\n )\n\n super().__init__(\n {\n \"WebAuthenticationDetail\": auth.data,\n \"ClientDetail\": client.data,\n \"Version\": version.data,\n \"RequestedShipment\": request.data,\n },\n required_keys=self._required_keys,\n optional_keys=self._optional_keys,\n )\n","repo_name":"JimRh/ubbereview","sub_path":"api/apis/carriers/fedex/soap_objects/ship/process_shipment_request.py","file_name":"process_shipment_request.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26029626695","text":"try:\n msj = \"Ingrese cantidad de horas trabajadas:\\n\"\n x = input(msj)\n val_x = int(x)\n msj = \"Ingrese el importe por hora trabajada:\\n\"\n y = input(msj)\n val_y = float(y)\n salario = val_x * val_y\n print(\"El salario que le corresponde por lo trabajado es de: \", salario)\n h=1\nexcept:\n print(\"El dato ingresado no es un numero\")\n h=2\n\nif h==1:\n print(\"Programa terminado OK\")\nelif h==2:\n print(\"El Programa termino por un error de ingreso\")\n\n\nprint(\"Chauuuuuuuuu \"*3)\n\n\n","repo_name":"cjpizarroz/Python","sub_path":"Exepciones.py","file_name":"Exepciones.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73974389816","text":"from django.urls import path\r\n\r\nfrom employees import views\r\n\r\nurlpatterns = [\r\n path('', views.dashboard, name=\"dashboard\"),\r\n path('employees/', views.employees, name=\"employees\"),\r\n path('add_employee/', views.add_employee, name=\"add_employee\"),\r\n path('suit/', views.suit, name=\"suit\"),\r\n path('add_suit/', views.add_suit, name=\"add_suit\"),\r\n path('allocate_suit/', views.allocate_suit, name=\"allocate_suit\"),\r\n path('realtime_data/', views.realtime_data, name=\"realtime_data\"),\r\n path('delete_suit/', views.delete_suit, name=\"delete_suit\"),\r\n path('delete_employee/', views.delete_employee, name=\"delete_employee\"),\r\n path('/update_employee', views.update_employee, name=\"update_employee\"),\r\n path('/update_suit', views.update_suit, name=\"update_suit\"),\r\n]\r\n","repo_name":"Rajsundar7/Design-and-Developement-of-Smart-Suit-for-Coal-Miners","sub_path":"smartsuit django project (web appilication)/employees/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29699663544","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Issues:\n# 1- currently does not capture pobj in active voice example I added milk to flower, flower is not captured ('I', 'added', 'miilk')\n# 2- it does not capture the correct sentiment for cases like I saved money by using my cleverrx card\n\n# This version includes the adverb exctraction, append_callexpand function, lexicon map and score tuples\nimport spacy\nimport pandas as pd\n\n\n# use spacy small model\nnlp = spacy.load('en_core_web_lg')\n\n# dependency markers for subjects\nSUBJECTS = {\"nsubj\", \"nsubjpass\", \"csubj\", \"csubjpass\", \"agent\", \"expl\"}\n\n# dependency markers for objects\nOBJECTS = {\"dobj\", \"dative\", \"attr\", \"oprd\", \"pobj\"}\n\n# POS tags that will break adjoining items\nBREAKER_POS = {\"CCONJ\", \"VERB\"}\n\n# words that are negations\nNEGATIONS = {\"no\", \"not\", \"n't\", \"never\", \"none\"}\n \n\n# does dependency set contain any coordinating conjunctions?\ndef contains_conj(depSet):\n return \"and\" in depSet or \"or\" in depSet or \"nor\" in depSet or \\\n \"but\" in depSet or \"yet\" in depSet or \"so\" in depSet or \"for\" in depSet\n\n\n# get subs joined by conjunctions\ndef _get_subs_from_conjunctions(subs):\n more_subs = []\n for sub in subs:\n # rights is a generator\n rights = list(sub.rights)\n rightDeps = {tok.lower_ for tok in rights}\n if contains_conj(rightDeps):\n more_subs.extend([tok for tok in rights if tok.dep_ in SUBJECTS or tok.pos_ == \"NOUN\"])\n if len(more_subs) > 0:\n more_subs.extend(_get_subs_from_conjunctions(more_subs))\n return more_subs\n\n\n# get objects joined by conjunctions\ndef _get_objs_from_conjunctions(objs):\n more_objs = []\n for obj in objs:\n # rights is a generator\n rights = list(obj.rights)\n rightDeps = {tok.lower_ for tok in rights}\n if contains_conj(rightDeps):\n more_objs.extend([tok for tok in rights if tok.dep_ in OBJECTS or tok.pos_ == \"NOUN\"])\n if len(more_objs) > 0:\n more_objs.extend(_get_objs_from_conjunctions(more_objs))\n return more_objs\n\n\n# find sub dependencies\ndef _find_subs(tok):\n head = tok.head\n while head.pos_ != \"VERB\" and head.pos_ != \"NOUN\" and head.head != head:\n head = head.head\n if head.pos_ == \"VERB\":\n subs = [tok for tok in head.lefts if tok.dep_ == \"SUB\"]\n if len(subs) > 0:\n verb_negated = _is_negated(head)\n subs.extend(_get_subs_from_conjunctions(subs))\n return subs, verb_negated\n elif head.head != head:\n return _find_subs(head)\n elif head.pos_ == \"NOUN\":\n return [head], _is_negated(tok)\n return [], False\n\n\n# is the tok set's left or right negated?\ndef _is_negated(tok):\n parts = list(tok.lefts) + list(tok.rights)\n for dep in parts:\n if dep.lower_ in NEGATIONS:\n return True\n return False\n\n\n\n# get all the verbs on tokens with negation marker\ndef _find_svs(tokens):\n svs = []\n verbs = [tok for tok in tokens if tok.pos_ == \"VERB\"]\n for v in verbs:\n subs, verbNegated = _get_all_subs(v)\n if len(subs) > 0:\n for sub in subs:\n svs.append((sub.orth_, \"!\" + v.orth_ if verbNegated else v.orth_))\n return svs\n\n\n# get grammatical objects for a given set of dependencies (including passive sentences)\ndef _get_objs_from_prepositions(deps, is_pas):\n objs = []\n for dep in deps:\n if dep.pos_ == \"ADP\" and (dep.dep_ == \"prep\" or (is_pas and dep.dep_ == \"agent\")):\n objs.extend([tok for tok in dep.rights if tok.dep_ in OBJECTS or\n (tok.pos_ == \"PRON\" and tok.lower_ == \"me\") or\n (is_pas and tok.dep_ == 'pobj')])\n return objs\n\n\n# get objects from the dependencies using the attribute dependency\ndef _get_objs_from_attrs(deps, is_pas):\n for dep in deps:\n if dep.pos_ == \"NOUN\" and dep.dep_ == \"attr\":\n verbs = [tok for tok in dep.rights if tok.pos_ == \"VERB\"]\n if len(verbs) > 0:\n for v in verbs:\n rights = list(v.rights)\n objs = [tok for tok in rights if tok.dep_ in OBJECTS]\n objs.extend(_get_objs_from_prepositions(rights, is_pas))\n if len(objs) > 0:\n return v, objs\n return None, None\n\n\n# xcomp; open complement - verb has no suject\ndef _get_obj_from_xcomp(deps, is_pas):\n for dep in deps:\n if dep.pos_ == \"VERB\" and dep.dep_ == \"xcomp\":\n v = dep\n rights = list(v.rights)\n objs = [tok for tok in rights if tok.dep_ in OBJECTS]\n objs.extend(_get_objs_from_prepositions(rights, is_pas))\n if len(objs) > 0:\n return v, objs\n return None, None\n\n\n# get all functional subjects adjacent to the verb passed in\ndef _get_all_subs(v):\n verb_negated = _is_negated(v)\n subs = [tok for tok in v.lefts if tok.dep_ in SUBJECTS and tok.pos_ != \"DET\"]\n if len(subs) > 0:\n subs.extend(_get_subs_from_conjunctions(subs))\n else:\n foundSubs, verb_negated = _find_subs(v)\n subs.extend(foundSubs)\n return subs, verb_negated\n\n\n# is the token a verb? (excluding auxiliary verbs)\ndef _is_non_aux_verb(tok):\n return tok.pos_ == \"VERB\" and (tok.dep_ != \"aux\" and tok.dep_ != \"auxpass\")\n\n\n# return the verb to the right of this verb in a CCONJ relationship if applicable\n# returns a tuple, first part True|False and second part the modified verb if True\ndef _right_of_verb_is_conj_verb(v):\n # rights is a generator\n rights = list(v.rights)\n\n # VERB CCONJ VERB (e.g. he beat and hurt me)\n if len(rights) > 1 and rights[0].pos_ == 'CCONJ':\n for tok in rights[1:]:\n if _is_non_aux_verb(tok):\n return True, tok\n\n return False, v\n\n\n\n# get all objects for an active/passive sentence\ndef _get_all_objs(v, is_pas):\n # rights is a generator\n rights = list(v.rights)\n\n objs = [tok for tok in rights if tok.dep_ in OBJECTS or (is_pas and tok.dep_ == 'pobj')]\n objs.extend(_get_objs_from_prepositions(rights, is_pas))\n\n #potentialNewVerb, potentialNewObjs = _get_objs_from_attrs(rights)\n #if potentialNewVerb is not None and potentialNewObjs is not None and len(potentialNewObjs) > 0:\n # objs.extend(potentialNewObjs)\n # v = potentialNewVerb\n\n potential_new_verb, potential_new_objs = _get_obj_from_xcomp(rights, is_pas)\n if potential_new_verb is not None and potential_new_objs is not None and len(potential_new_objs) > 0:\n objs.extend(potential_new_objs)\n v = potential_new_verb\n if len(objs) > 0:\n objs.extend(_get_objs_from_conjunctions(objs))\n return v, objs\n\n\n# return true if the sentence is passive - at he moment a sentence is assumed passive if it has an auxpass verb\ndef _is_passive(tokens):\n for tok in tokens:\n if tok.dep_ == \"auxpass\":\n return True\n return False\n\n\n# resolve a 'that' where/if appropriate\ndef _get_that_resolution(toks):\n for tok in toks:\n if 'that' in [t.orth_ for t in tok.lefts]:\n return tok.head\n return toks\n\n\n# simple stemmer using lemmas\ndef _get_lemma(word: str):\n tokens = nlp(word)\n if len(tokens) == 1:\n return tokens[0].lemma_\n return word\n\n\n# print information for displaying all kinds of things of the parse tree\ndef printDeps(toks):\n for tok in toks:\n print(tok.orth_, tok.dep_, tok.pos_, tok.head.orth_, [t.orth_ for t in tok.lefts], [t.orth_ for t in tok.rights])\n\n\n# expand an obj / subj np using its chunk\ndef expand(item, tokens, visited): # sub, tokens, visited\n if item.lower_ == 'that':\n item = _get_that_resolution(tokens)\n\n parts = []\n\n if hasattr(item, 'lefts'):\n for part in item.lefts:\n if part.pos_ in BREAKER_POS:\n break\n if not part.lower_ in NEGATIONS:\n parts.append(part)\n\n parts.append(item)\n\n if hasattr(item, 'rights'):\n for part in item.rights:\n if part.pos_ in BREAKER_POS:\n break\n if not part.lower_ in NEGATIONS:\n parts.append(part)\n\n if hasattr(parts[-1], 'rights'):\n for item2 in parts[-1].rights:\n if item2.pos_ == \"DET\" or item2.pos_ == \"NOUN\":\n if item2.i not in visited:\n visited.add(item2.i)\n parts.extend(expand(item2, tokens, visited))\n break\n\n return parts\n\n\n# convert a list of tokens to a string\ndef to_str(tokens):\n return ' '.join([item.text for item in tokens])\n\n# finds the adverbs that are attached to a verb \ndef find_adverbs_of_v(verb):\n print(\"find_adverbs_of_v:\", verb)\n expanded_v = [verb]\n is_expanded = False\n if hasattr(verb, 'lefts'):\n lefts = list(verb.lefts)\n for tok in lefts[-3:]:\n if tok.dep_ in [\"advcl\", \"advmod\"]:\n expanded_v = [tok] + expanded_v\n is_expanded = True\n \n if hasattr(verb, 'rights'):\n rights = list(verb.rights)\n for tok in rights[:3]:\n if tok.dep_ in [\"advcl\", \"advmod\"]:\n expanded_v = expanded_v + [tok]\n is_expanded = True\n\n return is_expanded, expanded_v\n\n# reads the lexicon xlsx file and creates a word sentiment map\ndef create_word_sentiment_map():\n \n xls = \"/home/maryam/sentiment_analysis/final_lexicon.xlsx\"\n df = pd.read_excel(xls, index=False)\n word_score_lexicon_dict = {}\n for i in range(len(df)):\n word_score_lexicon_dict[df.iloc[i,0]] = df.iloc[i,1]\n return word_score_lexicon_dict\n\n# Given a sub, verb, obj it calculates a score of each phrase\n# The score of words are added up together\n# In case of verb negation or object negation, the score will multiplied by -1\ndef calculate_sentiment_score (sub, verb, obj):\n #calculates the sentiment score of the (subject, verb, object)\n word_sentiment_map = create_word_sentiment_map()\n sub_score = 0\n for tok in sub:\n if tok.lemma_ in word_sentiment_map.keys():\n sub_score += word_sentiment_map[tok.lemma_] \n \n verb_score = 0\n for tok in verb:\n if tok.lemma_ in word_sentiment_map.keys():\n verb_score += word_sentiment_map[tok.lemma_]\n if verbNegated or objNegated:\n verb_score = -1*verb_score\n \n obj_score = 0\n for tok in obj:\n if tok.lemma_ in word_sentiment_map.keys():\n obj_score += word_sentiment_map[tok.lemma_]\n \n# if verb.lemma_ in word_sentiment_map.keys():\n# verb_score += word_sentiment_map[verb.lemma_]\n# if verbNegated or objNegated:\n# verb_score = -1*verb_score\n return (sub_score, verb_score, obj_score)\n\n# calles the expan function and find adverb function for given sub, verb and obj\n# appends the list of (sub, obj, verb) tuples and scores\ndef append_and_callexpand(sub, verb, obj):\n subject_extended = expand(sub, tokens, visited)\n object_extended = expand(obj, tokens, visited)\n is_expanded, expanded_v = find_adverbs_of_v(verb)\n print(\"append_and_callexpand:\", is_expanded, to_str(expanded_v))\n if is_expanded:\n verb_to_append_list = []\n for item in expanded_v:\n if item==verb:\n verb_to_append_list.append(\"!\" + verb.lemma_ if verbNegated or objNegated else verb.lemma_)\n # .join(\"!\" + verb.lemma_ if verbNegated or objNegated else verb.lemma_)\n else:\n verb_to_append_list.append(item.lemma_)\n # verb_to_append = ' '.join(item.lemma_)\n verb_to_append = ' '.join(item for item in verb_to_append_list)\n else:\n verb_to_append = \"!\" + verb.lemma_ if verbNegated or objNegated else verb.lemma_ # in active voices, v.lower_ can be used\n \n svos.append((to_str(subject_extended), verb_to_append, to_str(object_extended)))\n svos_sentiment_score.append(calculate_sentiment_score(subject_extended, expanded_v, object_extended))\n return\n \n\n# find verbs and their subjects / objects to create SVOs, detect passive/active sentences\ndef findSVOs(tokens):\n global verbNegated\n global objNegated\n global svos\n global svos_sentiment_score\n global visited\n svos = []\n svos_sentiment_score = []\n is_pas = _is_passive(tokens)\n verbs = [tok for tok in tokens if _is_non_aux_verb(tok)]\n visited = set() # recursion detection\n for v in verbs:\n# print(\"we are in the main vfor loop:\", v)\n subs, verbNegated = _get_all_subs(v)\n# print(v, \"subs, verbNegated\" , subs, verbNegated)\n # hopefully there are subs, if not, don't examine this verb any longer\n if len(subs) > 0:\n# print(\"len(subs) > 0\")\n isConjVerb, conjV = _right_of_verb_is_conj_verb(v)\n# print('isConjVerb, conjV', isConjVerb, conjV)\n if isConjVerb:\n# print(\"if isConjVerb:\", sub, v, obj)\n v2, objs = _get_all_objs(conjV, is_pas)\n for sub in subs:\n for obj in objs:\n objNegated = _is_negated(obj)\n \n if is_pas: # reverse object / subject for passive\n append_and_callexpand(obj, v, sub)\n append_and_callexpand (obj, v2, sub)\n else:\n append_and_callexpand(sub, v, obj)\n append_and_callexpand(sub, v2, obj) \n else:\n# print(\"else of isConjVerb:\", v, subs)\n v, objs = _get_all_objs(v, is_pas)\n# print(\"before for inside else\",subs, v, objs)\n for sub in subs:\n for obj in objs:\n objNegated = _is_negated(obj)\n\n if is_pas: # reverse object / subject for passive\n append_and_callexpand(obj, v, sub)\n\n else:\n print(\"right before append stage:\", subs, v, objs)\n append_and_callexpand(sub, v, obj)\n\n return svos, svos_sentiment_score\n\n\n# In[8]:\n\n\nfrom spacy import displacy\ntokens = nlp(\"I hate sympathy and flower don't like lovely happiness\")\nfor sent in tokens.sents:\n print(sent.text,\"\\n\")\n\nprint(type(' '.join([item.text for item in tokens])))\n# print(tokens[1].dep_)\nfor token in tokens:\n print(token.text, \"->\", token.tag_ ,\"->\", token.pos_, \"->\", token.dep_ , \"->\", token.orth_ , \"->\",token.lemma_,\"\" )\n\nsvos, svos_sentiment_score = findSVOs(tokens)\nprint(svos)\nprint(svos_sentiment_score)\n# displacy.serve(tokens, style='dep')\n\n# In[ ]:\n\n\n\n","repo_name":"djw1809/cips_lab","sub_path":"cleverrx_bot/maryams_code/subject_verb_object_extract_me4.py","file_name":"subject_verb_object_extract_me4.py","file_ext":"py","file_size_in_byte":14735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69954374138","text":"f = open(\"测试1.txt\",\"w\")\nf.write(\"Lycoridiata\\n\")\nf.write(\"wulei\\n\")\nf.write(\"leilei\\n\")\nf.write(\"Xingyu\\n\")\n \n#两种方法实现把每一行文件以数组元素的形式放进数组中(split/splilines)\n \n#其中spit是一个分割的作用,以'\\n'为分割点,即把每一段分割成一个元素放入数组中\n \nf = open(\"测试1.txt\",\"r\")\n# print(f.read())\nget = f.read()\nresult = get.split('\\n')\n\n\n#直接用splitlines()放法来实现行分割\nother_result = get.splitlines()\nfor i in range (len(other_result)):\n\n print(result[i])\n print(\"******\")\n #print(other_result[i])\n #print(\"******\")\n \nf.close()\n","repo_name":"STLLYM/STLLYM.work1","sub_path":"取数。。??.py","file_name":"取数。。??.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42136650074","text":"from __future__ import unicode_literals\n\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QRegExp, QFileInfo\nfrom PyQt5.QtWidgets import QWidget, QTreeWidgetItem\n\nfrom E5Gui.E5Application import e5App\nfrom E5Gui import E5FileDialog, E5MessageBox\n\nfrom .Ui_CallTraceViewer import Ui_CallTraceViewer\n\nimport UI.PixmapCache\nimport Preferences\nimport Utilities\n\n\nclass CallTraceViewer(QWidget, Ui_CallTraceViewer):\n \"\"\"\n Class implementing the Call Trace viewer widget.\n \n @signal sourceFile(str, int) emitted to show the source of a call/return\n point\n \"\"\"\n sourceFile = pyqtSignal(str, int)\n \n def __init__(self, debugServer, parent=None):\n \"\"\"\n Constructor\n \n @param debugServer reference to the debug server object (DebugServer)\n @param parent reference to the parent widget (QWidget)\n \"\"\"\n super(CallTraceViewer, self).__init__(parent)\n self.setupUi(self)\n \n self.__dbs = debugServer\n \n self.startTraceButton.setIcon(\n UI.PixmapCache.getIcon(\"callTraceStart.png\"))\n self.stopTraceButton.setIcon(\n UI.PixmapCache.getIcon(\"callTraceStop.png\"))\n self.resizeButton.setIcon(UI.PixmapCache.getIcon(\"resizeColumns.png\"))\n self.clearButton.setIcon(UI.PixmapCache.getIcon(\"editDelete.png\"))\n self.saveButton.setIcon(UI.PixmapCache.getIcon(\"fileSave.png\"))\n \n self.__headerItem = QTreeWidgetItem(\n [\"\", self.tr(\"From\"), self.tr(\"To\")])\n self.__headerItem.setIcon(0, UI.PixmapCache.getIcon(\"callReturn.png\"))\n self.callTrace.setHeaderItem(self.__headerItem)\n \n self.__callStack = []\n \n self.__entryFormat = \"{0}:{1} ({2})\"\n self.__entryRe = QRegExp(r\"\"\"(.+):(\\d+)\\s\\((.*)\\)\"\"\")\n \n self.__projectMode = False\n self.__project = None\n \n self.__callTraceEnabled = Preferences.toBool(\n Preferences.Prefs.settings.value(\"CallTrace/Enabled\", False))\n if self.__callTraceEnabled:\n self.startTraceButton.setEnabled(False)\n else:\n self.stopTraceButton.setEnabled(False)\n \n self.__dbs.callTraceInfo.connect(self.__addCallTraceInfo)\n \n def __setCallTraceEnabled(self, enabled):\n \"\"\"\n Private slot to set the call trace enabled status.\n \n @param enabled flag indicating the new state (boolean)\n \"\"\"\n self.__dbs.setCallTraceEnabled(enabled)\n self.stopTraceButton.setEnabled(enabled)\n self.startTraceButton.setEnabled(not enabled)\n self.__callTraceEnabled = enabled\n Preferences.Prefs.settings.setValue(\"CallTrace/Enabled\", enabled)\n \n @pyqtSlot()\n def on_startTraceButton_clicked(self):\n \"\"\"\n Private slot to start call tracing.\n \"\"\"\n self.__setCallTraceEnabled(True)\n \n @pyqtSlot()\n def on_stopTraceButton_clicked(self):\n \"\"\"\n Private slot to start call tracing.\n \"\"\"\n self.__setCallTraceEnabled(False)\n \n @pyqtSlot()\n def on_resizeButton_clicked(self):\n \"\"\"\n Private slot to resize the columns of the call trace to their contents.\n \"\"\"\n for column in range(self.callTrace.columnCount()):\n self.callTrace.resizeColumnToContents(column)\n \n @pyqtSlot()\n def on_clearButton_clicked(self):\n \"\"\"\n Private slot to clear the call trace.\n \"\"\"\n self.clear()\n \n @pyqtSlot()\n def on_saveButton_clicked(self):\n \"\"\"\n Private slot to save the call trace info to a file.\n \"\"\"\n if self.callTrace.topLevelItemCount() > 0:\n fname, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(\n self,\n self.tr(\"Save Call Trace Info\"),\n \"\",\n self.tr(\"Text Files (*.txt);;All Files (*)\"),\n None,\n E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))\n if fname:\n ext = QFileInfo(fname).suffix()\n if not ext:\n ex = selectedFilter.split(\"(*\")[1].split(\")\")[0]\n if ex:\n fname += ex\n if QFileInfo(fname).exists():\n res = E5MessageBox.yesNo(\n self,\n self.tr(\"Save Call Trace Info\"),\n self.tr(\"

    The file {0} already exists.\"\n \" Overwrite it?

    \").format(fname),\n icon=E5MessageBox.Warning)\n if not res:\n return\n fname = Utilities.toNativeSeparators(fname)\n \n try:\n f = open(fname, \"w\", encoding=\"utf-8\")\n itm = self.callTrace.topLevelItem(0)\n while itm is not None:\n isCall = itm.data(0, Qt.UserRole)\n if isCall:\n call = \"->\"\n else:\n call = \"<-\"\n f.write(\"{0} {1} || {2}\\n\".format(\n call,\n itm.text(1), itm.text(2)))\n itm = self.callTrace.itemBelow(itm)\n f.close()\n except IOError as err:\n E5MessageBox.critical(\n self,\n self.tr(\"Error saving Call Trace Info\"),\n self.tr(\"\"\"

    The call trace info could not\"\"\"\n \"\"\" be written to {0}

    \"\"\"\n \"\"\"

    Reason: {1}

    \"\"\")\n .format(fname, str(err)))\n \n @pyqtSlot(QTreeWidgetItem, int)\n def on_callTrace_itemDoubleClicked(self, item, column):\n \"\"\"\n Private slot to open the double clicked file in an editor.\n \n @param item reference to the double clicked item (QTreeWidgetItem)\n @param column column that was double clicked (integer)\n \"\"\"\n if item is not None and column > 0:\n columnStr = item.text(column)\n if self.__entryRe.exactMatch(columnStr.strip()):\n filename, lineno, func = self.__entryRe.capturedTexts()[1:]\n try:\n lineno = int(lineno)\n except ValueError:\n # do nothing, if the line info is not an integer\n return\n if self.__projectMode:\n filename = self.__project.getAbsolutePath(filename)\n self.sourceFile.emit(filename, lineno)\n \n def clear(self):\n \"\"\"\n Public slot to clear the call trace info.\n \"\"\"\n self.callTrace.clear()\n self.__callStack = []\n \n def setProjectMode(self, enabled):\n \"\"\"\n Public slot to set the call trace viewer to project mode.\n \n In project mode the call trace info is shown with project relative\n path names.\n \n @param enabled flag indicating to enable the project mode (boolean)\n \"\"\"\n self.__projectMode = enabled\n if enabled and self.__project is None:\n self.__project = e5App().getObject(\"Project\")\n \n def __addCallTraceInfo(self, isCall, fromFile, fromLine, fromFunction,\n toFile, toLine, toFunction):\n \"\"\"\n Private method to add an entry to the call trace viewer.\n \n @param isCall flag indicating a 'call' (boolean)\n @param fromFile name of the originating file (string)\n @param fromLine line number in the originating file (string)\n @param fromFunction name of the originating function (string)\n @param toFile name of the target file (string)\n @param toLine line number in the target file (string)\n @param toFunction name of the target function (string)\n \"\"\"\n if isCall:\n icon = UI.PixmapCache.getIcon(\"forward.png\")\n else:\n icon = UI.PixmapCache.getIcon(\"back.png\")\n parentItem = \\\n self.__callStack[-1] if self.__callStack else self.callTrace\n \n if self.__projectMode:\n fromFile = self.__project.getRelativePath(fromFile)\n toFile = self.__project.getRelativePath(toFile)\n \n itm = QTreeWidgetItem(\n parentItem,\n [\"\",\n self.__entryFormat.format(fromFile, fromLine, fromFunction),\n self.__entryFormat.format(toFile, toLine, toFunction)])\n itm.setIcon(0, icon)\n itm.setData(0, Qt.UserRole, isCall)\n itm.setExpanded(True)\n \n if isCall:\n self.__callStack.append(itm)\n else:\n if self.__callStack:\n self.__callStack.pop(-1)\n \n def isCallTraceEnabled(self):\n \"\"\"\n Public method to get the state of the call trace function.\n \n @return flag indicating the state of the call trace function (boolean)\n \"\"\"\n return self.__callTraceEnabled\n","repo_name":"pycom/Pymakr","sub_path":"Debugger/CallTraceViewer.py","file_name":"CallTraceViewer.py","file_ext":"py","file_size_in_byte":9169,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"22"} +{"seq_id":"25114410600","text":"import pandas as pd\nimport numpy as np\nimport os\n\ninput_data_folder = \"../orig_logs\"\noutput_data_folder = \"../labeled_logs_csv_processed\"\nfilenames = [\"Production.csv\"]\n\ncase_id_col = \"Case ID\"\nactivity_col = \"Activity\"\nresource_col = \"Resource\"\ntimestamp_col = \"Complete Timestamp\"\nlabel_col = \"label\"\npos_label = \"deviant\"\nneg_label = \"regular\"\n\nfreq_threshold = 10\ntimeunit = 'm'\n\n# features for classifier\nstatic_cat_cols = [\"Part_Desc_\", \"Rework\"]\nstatic_num_cols = [\"Work_Order_Qty\"]\ndynamic_cat_cols = [activity_col, resource_col, \"Report_Type\", \"Resource.1\"]\ndynamic_num_cols = [\"Qty_Completed\", \"Qty_for_MRB\", \"activity_duration\"]\n\nstatic_cols = static_cat_cols + static_num_cols + [case_id_col, label_col]\ndynamic_cols = dynamic_cat_cols + dynamic_num_cols + [timestamp_col]\ncat_cols = dynamic_cat_cols + static_cat_cols\n\ndef assign_label(group):\n tmp = group[\"Qty_Rejected\"] > 0\n tmp = tmp.reset_index()[\"Qty_Rejected\"]\n if sum(tmp) > 0:\n idx = tmp[tmp==True].index[0]\n group = group.iloc[:idx,:]\n group[label_col] = pos_label\n else:\n group[label_col] = neg_label\n return group\n\ndef extract_timestamp_features(group):\n \n group = group.sort_values(timestamp_col, ascending=False, kind='mergesort')\n \n tmp = group[timestamp_col] - group[timestamp_col].shift(-1)\n tmp = tmp.fillna(0)\n group[\"timesincelastevent\"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes\n\n tmp = group[timestamp_col] - group[timestamp_col].iloc[-1]\n tmp = tmp.fillna(0)\n group[\"timesincecasestart\"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes\n\n group = group.sort_values(timestamp_col, ascending=True, kind='mergesort')\n group[\"event_nr\"] = range(1, len(group) + 1)\n \n return group\n\ndef get_open_cases(date):\n return sum((dt_first_last_timestamps[\"start_time\"] <= date) & (dt_first_last_timestamps[\"end_time\"] > date))\n\n\nfor filename in filenames:\n \n data = pd.read_csv(os.path.join(input_data_folder,filename), sep=\";\")\n\n # add event duration\n data[\"Complete Timestamp\"] = pd.to_datetime(data[\"Complete Timestamp\"])\n data[\"Start Timestamp\"] = pd.to_datetime(data[\"Start Timestamp\"])\n tmp = data[\"Complete Timestamp\"] - data[\"Start Timestamp\"]\n tmp = tmp.fillna(0)\n data[\"activity_duration\"] = tmp.apply(lambda x: float(x / np.timedelta64(1, timeunit)))\n \n # assign labels\n data = data.sort_values(timestamp_col, ascending=True, kind='mergesort').groupby(case_id_col).apply(assign_label)\n \n data = data[static_cols + dynamic_cols]\n\n # add features extracted from timestamp\n data[timestamp_col] = pd.to_datetime(data[timestamp_col])\n data[\"timesincemidnight\"] = data[timestamp_col].dt.hour * 60 + data[timestamp_col].dt.minute\n data[\"month\"] = data[timestamp_col].dt.month\n data[\"weekday\"] = data[timestamp_col].dt.weekday\n data[\"hour\"] = data[timestamp_col].dt.hour\n data = data.groupby(case_id_col).apply(extract_timestamp_features)\n \n # add inter-case features\n data = data.sort_values([timestamp_col], ascending=True, kind='mergesort')\n dt_first_last_timestamps = data.groupby(case_id_col)[timestamp_col].agg([min, max])\n dt_first_last_timestamps.columns = [\"start_time\", \"end_time\"]\n data[\"open_cases\"] = data[timestamp_col].apply(get_open_cases)\n \n # impute missing values\n grouped = data.sort_values(timestamp_col, ascending=True, kind='mergesort').groupby(case_id_col)\n for col in static_cols + dynamic_cols:\n data[col] = grouped[col].transform(lambda grp: grp.fillna(method='ffill'))\n \n data[cat_cols] = data[cat_cols].fillna('missing')\n data = data.fillna(0)\n \n # set infrequent factor levels to \"other\"\n for col in cat_cols:\n counts = data[col].value_counts()\n mask = data[col].isin(counts[counts >= freq_threshold].index)\n data.loc[~mask, col] = \"other\"\n\n data.to_csv(os.path.join(output_data_folder,filename), sep=\";\", index=False)\n ","repo_name":"irhete/predictive-monitoring-benchmark","sub_path":"preprocessing/preprocess_logs_production.py","file_name":"preprocess_logs_production.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"22"} +{"seq_id":"13059151387","text":"def get_hours():\r\n hours = []\r\n for i in range(0,24):\r\n hour = str(i)+ \"h\"\r\n for m in (0,15,30,45):\r\n if (m == 0):\r\n temp = hour + \"00\"\r\n hours.append(temp)\r\n else:\r\n temp = hour + str(m)\r\n hours.append(temp)\r\n return hours\r\n\r\nh = get_hours()\r\nprint(len(h))\r\nheader =[\"Data\",\"Zona\",\"Contadores\",\"ID_Espira\",\"Nr_carros\"] + h\r\nprint (header)","repo_name":"vasco0201/tese","sub_path":"Espiras/InfoCentral_20180119_1803/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73980515895","text":"import numpy as np\nimport pandas as pd\n\ndef XYZ_to_xyz(XYZ):\n '''将XYZ转换为xyz. X+Y+Z=0时xyz为NaN.'''\n XYZ = np.asarray(XYZ)\n S = XYZ.sum(axis=-1, keepdims=True)\n S = np.where(np.isclose(S, 0), np.nan, S)\n xyz = XYZ / S\n\n return xyz\n\ndef XYZ_to_xyY(XYZ):\n '''将XYZ转换为xyY. X+Y+Z=0时xy为NaN.'''\n XYZ = np.asarray(XYZ)\n xyY = XYZ_to_xyz(XYZ)\n xyY[..., 2] = XYZ[..., 1]\n\n return xyY\n\ndef xyY_to_XYZ(xyY):\n '''将xyY转换为XYZ.'''\n xyY = np.asarray(xyY)\n x, y, Y = (xyY[..., i] for i in range(3))\n Y_y = Y / y\n X = x * Y_y\n Z = (1 - x - y) * Y_y\n XYZ = np.stack((X, Y, Z), axis=-1)\n\n return XYZ\n\ndef gamma_encoding(RGB):\n '''对线性sRGB做编码得到sRGB.'''\n RGB = np.asarray(RGB).astype(float)\n mask = RGB > 0.0031308\n RGB[~mask] *= 12.92\n RGB[mask] = 1.055 * RGB[mask]**(1 / 2.4) - 0.055\n\n return RGB\n\ndef gamma_decoding(RGB):\n '''对sRGB做解码得到线性sRGB.'''\n RGB = np.array(RGB, float)\n mask = RGB > 0.04045\n RGB[~mask] /= 12.92\n RGB[mask] = ((RGB[mask] + 0.055) / 1.055)**2.4\n\n return RGB\n\ndef move_toward_white(RGB):\n '''RGB与白色混合直至色域边界.'''\n RGB = np.array(RGB, float)\n RGB -= RGB.min(axis=-1, keepdims=True).clip(None, 0)\n\n return RGB\n\ndef normalize_by_maximum(RGB):\n '''分别用RGB里三个分量的最大值做归一化.'''\n RGB = np.array(RGB, float)\n RGB /= RGB.max(axis=-1, keepdims=True)\n\n return RGB\n\ndef XYZ_to_sRGB(XYZ):\n '''将XYZ转换为线性sRGB.'''\n M = np.array([\n [+3.2406, -1.5372, -0.4986],\n [-0.9689, +1.8758, +0.0415],\n [+0.0557, -0.2040, +1.0570]\n ])\n RGB = np.tensordot(XYZ, M, (-1, 1))\n\n return RGB\n\ndef sRGB_to_XYZ(RGB):\n '''将线性sRGB转换为XYZ.'''\n M = np.array([\n [0.4124, 0.3576, 0.1805],\n [0.2126, 0.7152, 0.0722],\n [0.0193, 0.1192, 0.9505]\n ])\n XYZ = np.tensordot(RGB, M, (-1, 1))\n\n return XYZ\n\ndef XYZ_to_RGB(XYZ):\n '''将XYZ转换为RGB(CIE 1931).'''\n M = np.array([\n [+0.41846, -0.15866, -0.08283],\n [-0.09117, +0.25243, +0.01571],\n [+0.00092, -0.00255, +0.17860]\n ])\n RGB = np.tensordot(XYZ, M, (-1, 1))\n\n return RGB\n\ndef RGB_to_XYZ(RGB):\n '''将RGB(CIE 1931)转换为XYZ.'''\n M = np.array([\n [2.76888, 1.75175, 1.13016],\n [1.00000, 4.59070, 0.06010],\n [0.00000, 0.05651, 5.59427]\n ])\n XYZ = np.tensordot(RGB, M, (-1, 1))\n\n return XYZ\n\ndef RGB_to_rgb(RGB):\n '''将RGB转换为rgb.'''\n return XYZ_to_xyz(RGB)\n\ndef load_xyz_cmf():\n '''读取1931 XYZ CMF.'''\n return pd.read_csv('./data/cie_1931_2deg_xyz_cmf.csv', index_col=0)\n\ndef load_xyz_cc():\n '''读取1931 XYZ CC.'''\n return pd.read_csv('./data/cie_1931_2deg_xyz_cc.csv', index_col=0)\n\ndef load_rgb_cmf(from_xyz=True):\n '''读取Wright-Guild RGB CMF.'''\n if from_xyz:\n xyz_cmf = load_xyz_cmf()\n rgb_cmf = pd.DataFrame(\n XYZ_to_RGB(xyz_cmf),\n index=xyz_cmf.index,\n columns=['r', 'g', 'b']\n )\n else:\n rgb_cmf = pd.read_csv(\n './data/wright_guild_1931_2deg_rgb_cmf.csv',\n index_col=0\n )\n\n return rgb_cmf\n\ndef load_rgb_cc(from_xyz=True):\n '''读取Wright-Guild RGB CC.'''\n if from_xyz:\n rgb_cmf = load_rgb_cmf(from_xyz=True)\n rgb_cc = pd.DataFrame(\n RGB_to_rgb(rgb_cmf),\n index=rgb_cmf.index,\n columns=rgb_cmf.columns\n )\n else:\n rgb_cc = pd.read_csv(\n './data/wright_guild_1931_2deg_rgb_cc.csv',\n index_col=0\n )\n\n return rgb_cc\n\ndef load_lef():\n '''读取1924 LEF.'''\n return pd.read_csv(\n './data/cie_1924_photopic_lef.csv',\n index_col=0\n )['lef']\n\ndef load_lms():\n '''读取Stiles & Burch LMS.'''\n return pd.read_csv(\n './data/stiles_burch_2deg_lms.csv',\n index_col=0\n ).fillna(0)","repo_name":"ZhaJiMan/do_color","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70919581175","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Darren Vong\n\"\"\"\nfrom collections import OrderedDict\n\nimport profiles\n\n# Attributes here are properly separated words rather than camel cased attributes\n# as in the module api.attribute\nATTR_TO_PROFILE_KEY = OrderedDict([\n (\"Points\", \"total_points\"),\n (\"Selected By\", \"selected_by_percent\"),\n (\"Price\", \"now_cost\"),\n (\"Goals\", \"goals_scored\"),\n (\"Assists\", \"assists\"),\n (\"Clean sheets\", \"clean_sheets\"),\n (\"Yellow cards\", \"yellow_cards\")\n])\n\ndef get_players_profiles(player1, player2, col):\n \"\"\"Finds the two players' profiles searched for.\n @param player1: name of the first player's profile to search for\n @param player2: name of the second player's profile to search for\n @param col: the MongoDB database collection to search the profiles from\n @return a tuple containing both players' profile data held in a dictionary.\n \"\"\"\n \n player1_profile = profiles.get_profile_contents(player1, col)\n player2_profile = profiles.get_profile_contents(player2, col)\n return player1_profile, player2_profile\n\ndef generate_table(p1_profile, p2_profile):\n \"\"\"Generates a head-to-head comparison table using the player profiles data.\n @param p1_profile: the first player's profile\n @param p2_profile: the second player's profile\n @return the HTML for generating most of the table body in the head-to-head page template\n \"\"\"\n \n table = u\"\"\n for attr in ATTR_TO_PROFILE_KEY.iterkeys():\n table += generate_row(p1_profile, p2_profile, attr)\n \n return table \n\ndef generate_row(p1_profile, p2_profile, attr):\n \"\"\"Generates a row of the head-to-head comparison table for the attribute specified.\n @param p1_profile: the first player's profile\n @param p2_profile: the second player's profile\n @param attr: the attribute to use to generate this row of the table\n @return the row in HTML for the attribute\n \"\"\"\n \n integer_val_attr = [\"Points\", \"Goals\", \"Assists\", \"Yellow cards\", \"Clean sheets\"]\n if attr in integer_val_attr:\n p1_val = int(p1_profile[ATTR_TO_PROFILE_KEY[attr]])\n p2_val = int(p2_profile[ATTR_TO_PROFILE_KEY[attr]])\n row = row_template(p1_val, p2_val, attr)\n elif attr == \"Price\":\n p1_val = p1_profile[ATTR_TO_PROFILE_KEY[attr]]/10.0 \n p2_val = p2_profile[ATTR_TO_PROFILE_KEY[attr]]/10.0\n row = row_template(p1_val, p2_val, attr, prefix=u\"£\", suffix=u\"M\")\n else:\n p1_val = float(p1_profile[ATTR_TO_PROFILE_KEY[attr]])\n p2_val = float(p2_profile[ATTR_TO_PROFILE_KEY[attr]])\n row = row_template(p1_val, p2_val, attr, suffix=u\"%\")\n return row\n\ndef row_template(p1_val, p2_val, attr, prefix=u\"\", suffix=u\"\"):\n \"\"\"Auxiliary function for generate_row above - this function contains the logic\n for deciding which value cell is highlighted (by adding the *-success CSS classes)\n depending on the value and the attribute specified.\n @param p1_val: the attribute value of the first player\n @param p2_val: the attribute value of the second player\n @param attr: the attribute to use for the table row\n @keyword prefix: the characters to insert before the value of the attribute\n @keyword suffix: the characters to insert after the value of the attribute\n @return the row in HTML for the attribute with the superior value highlighted\n \"\"\"\n \n row = u\"\"\n if p1_val > p2_val:\n row = u\"\\n\"\n if attr == \"Price\" or attr == \"Yellow cards\":\n row += u\"\"+prefix+unicode(p1_val)+suffix+u\"\\n\"\n else:\n row += u\"\"+prefix+unicode(p1_val)+suffix+u\"\\n\"\n row += u\"\"+attr+u\"\\n\"\n if attr == \"Price\" or attr == \"Yellow cards\":\n row += u\"\"+prefix+unicode(p2_val)+suffix+u\"\\n\"\n else:\n row += u\"\"+prefix+unicode(p2_val)+suffix+u\"\\n\"\n row += u\"\\n\"\n elif p1_val == p2_val:\n row = u\"\\n\"\n row += u\"\"+prefix+unicode(p1_val)+suffix+u\"\\n\"\n row += u\"\"+attr+u\"\\n\"\n row += u\"\"+prefix+unicode(p2_val)+suffix+u\"\\n\"\n row += u\"\\n\"\n else: # p2 > p1\n row = u\"\\n\"\n if attr == \"Price\" or attr == \"Yellow cards\":\n row += u\"\"+prefix+unicode(p1_val)+suffix+u\"\\n\"\n else:\n row += u\"\"+prefix+unicode(p1_val)+suffix+u\"\\n\"\n row += u\"\"+attr+u\"\\n\"\n if attr == \"Price\" or attr == \"Yellow cards\":\n row += u\"\"+prefix+unicode(p2_val)+suffix+u\"\\n\"\n else:\n row += u\"\"+prefix+unicode(p2_val)+suffix+u\"\\n\"\n row += u\"\\n\"\n return row\n","repo_name":"darrenvong/fpl-data-visualiser","sub_path":"views/head_to_head.py","file_name":"head_to_head.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26130994054","text":"# import libraries\nimport sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"Load two datasets and merge them.\n Args:\n messages_filepath: str. Filepath of messages data\n categories_filepath: str. Filepath of categories data\n \n Returns:\n df:Pandas Dataframe. Combined dataset of messages and categories\n \"\"\"\n\n # load messages dataset\n messages = pd.read_csv(messages_filepath)\n \n # load categories dataset\n categories = pd.read_csv(categories_filepath)\n \n # merge datasets\n df = messages.merge(categories, on=(\"id\"))\n return df\n \n\ndef clean_data(df):\n \"\"\"Clean the dateset by clearly defining Category Names,\n convert category columns values to binary, and drops duplicates. \n\n Args:\n df: Pandas Dataframe. Combined dataset of messages and categories\n\n Returns:\n df: Pandas Dataframe. Cleaned version of dataset\n \"\"\"\n \n # create a dataframe of the 36 individual category columns\n categories = df[\"categories\"].str.split(\";\",expand=True)\n \n # select the first row of the categories dataframe\n row = categories.iloc[[0],:]\n \n # use this row to extract a list of new column names for categories\n category_colnames = row.apply(lambda series : series[0][0:-2])\n \n # rename the columns of `categories`\n categories.columns = category_colnames\n \n #Convert category values to just numbers 0 or 1\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].astype(\"str\").apply(lambda str : str[-1])\n \n # convert column from string to binary values\n categories[column] = categories[column].astype(\"int64\")\n categories[column] = categories[column].astype(\"bool\")*1 \n \n #drop the original categories column from `df`\n df = df.drop(\"categories\",axis=1)\n \n # concatenate the original dataframe with the new `categories` dataframe\n df = pd.concat([df, categories], axis=1)\n \n #Remove duplicates\n duplicate_rows = df.duplicated()\n if True in duplicate_rows:\n df = df[~duplicate_rows]\n \n return df\n \n \ndef save_data(df, database_filename):\n \"\"\"Saves the data in SQL database.\n Args:\n df: Pandas Dataframe. Cleaned version of combined dataset of messages and categories.\n database_filename: str. Database filename to be used for storing data.\n \"\"\"\n engine = create_engine('sqlite:///{0}'.format(database_filename))\n df.to_sql('cleaned_data', engine, index=False, if_exists=\"replace\")\n\n\ndef main():\n \"\"\"Load the datasets, clean data and save the processed data\"\"\"\n \n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n \n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()","repo_name":"ankitaggarwal64/Disaster-Response-Pipelines","sub_path":"Data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16796053747","text":"#!/usr/bin/env python3.8\nN = int(input())\n*P, = map(int, input().split())\n\nQ = [None] * N\n\nfor i in range(N):\n Q[P[i] - 1] = i + 1\n\nprint(' '.join(map(str, Q)))\n","repo_name":"harukaeru/CompetitiveProgramming","sub_path":"olds/abc217/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"70649632377","text":"from auditlog.registry import auditlog\nfrom django.conf import settings\nfrom django.contrib.gis.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.utils.translation import gettext_lazy as _\nfrom enumfields import Enum, EnumField, EnumIntegerField\n\nfrom traffic_control.enums import DeviceTypeTargetModel, LaneNumber, LaneType, Reflection, Size, Surface\nfrom traffic_control.mixins.models import (\n InstalledDeviceModel,\n OwnedDeviceModel,\n SoftDeleteModel,\n SourceControlModel,\n UpdatePlanLocationMixin,\n UserControlModel,\n)\nfrom traffic_control.models.affect_area import CoverageArea\nfrom traffic_control.models.common import OperationBase, OperationType, TrafficControlDeviceType\nfrom traffic_control.models.mount import MountPlan, MountReal, MountType\nfrom traffic_control.models.plan import Plan\nfrom traffic_control.models.traffic_sign import LocationSpecifier, TrafficSignPlan, TrafficSignReal\nfrom traffic_control.validators import validate_structured_content\n\n\nclass Color(Enum):\n BLUE = 1\n YELLOW = 2\n\n class Labels:\n BLUE = _(\"Blue\")\n YELLOW = _(\"Yellow\")\n\n\nclass AbstractAdditionalSign(SourceControlModel, SoftDeleteModel, UserControlModel, OwnedDeviceModel):\n device_type = models.ForeignKey(\n TrafficControlDeviceType,\n verbose_name=_(\"Device type\"),\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n limit_choices_to=Q(Q(target_model=None) | Q(target_model=DeviceTypeTargetModel.ADDITIONAL_SIGN)),\n )\n order = models.SmallIntegerField(\n verbose_name=_(\"Order\"),\n default=1,\n blank=False,\n null=False,\n help_text=_(\n \"The order of the sign in relation to the signs at the same point. \"\n \"Order from top to bottom, from left to right starting at 1.\"\n ),\n )\n content_s = models.JSONField(\n verbose_name=_(\"Content\"),\n blank=True,\n null=True,\n help_text=_(\"Additional sign content as JSON document\"),\n )\n location = models.PointField(_(\"Location (3D)\"), dim=3, srid=settings.SRID)\n height = models.IntegerField(\n _(\"Height\"),\n blank=True,\n null=True,\n help_text=_(\"The height of the sign from the ground, measured from the top in centimeters.\"),\n )\n size = EnumField(\n Size,\n verbose_name=_(\"Size\"),\n max_length=1,\n blank=True,\n null=True,\n )\n direction = models.IntegerField(\n _(\"Direction\"),\n default=0,\n help_text=_(\n \"Direction of the sign in degrees. \"\n \"If 'road name' is entered the direction is in relation to the road. Otherwise cardinal direction is used. \"\n \"e.g. 0 = North, 45 = North East, 90 = East, 180 = South.\"\n ),\n )\n reflection_class = EnumField(\n Reflection,\n verbose_name=_(\"Reflection\"),\n max_length=2,\n blank=True,\n null=True,\n )\n surface_class = EnumField(\n Surface,\n verbose_name=_(\"Surface\"),\n max_length=6,\n blank=True,\n null=True,\n )\n color = EnumIntegerField(\n Color,\n verbose_name=_(\"Color\"),\n blank=True,\n null=True,\n )\n mount_type = models.ForeignKey(\n MountType,\n verbose_name=_(\"Mount type\"),\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n help_text=_(\"Type of the mount this sign is attached to.\"),\n )\n road_name = models.CharField(\n _(\"Road name\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"Name of the road this sign is installed at.\"),\n )\n lane_number = EnumField(\n LaneNumber,\n verbose_name=_(\"Lane number\"),\n null=True,\n blank=True,\n help_text=_(\"Describes which lane of the road this sign affects.\"),\n )\n lane_type = EnumField(\n LaneType,\n verbose_name=_(\"Lane type\"),\n null=True,\n blank=True,\n help_text=_(\"The type of lane which this sign affects.\"),\n )\n location_specifier = EnumIntegerField(\n LocationSpecifier,\n verbose_name=_(\"Location specifier\"),\n blank=True,\n null=True,\n help_text=_(\"Specifies where the sign is in relation to the road.\"),\n )\n validity_period_start = models.DateField(\n _(\"Validity period start\"),\n blank=True,\n null=True,\n help_text=_(\"Date on which this sign becomes active.\"),\n )\n validity_period_end = models.DateField(\n _(\"Validity period end\"),\n blank=True,\n null=True,\n help_text=_(\"Date after which this sign becomes inactive.\"),\n )\n seasonal_validity_period_start = models.DateField(\n _(\"Seasonal validity period start\"),\n blank=True,\n null=True,\n help_text=_(\"Date on which this sign becomes seasonally active.\"),\n )\n seasonal_validity_period_end = models.DateField(\n _(\"Seasonal validity period end\"),\n blank=True,\n null=True,\n help_text=_(\"Date after which this sign becomes seasonally inactive.\"),\n )\n\n class Meta:\n abstract = True\n\n def clean(self):\n validation_errors = {}\n\n content_s_validation_errors = validate_structured_content(self.content_s, self.device_type)\n if len(content_s_validation_errors) > 0:\n validation_errors[\"content_s\"] = content_s_validation_errors\n\n if len(validation_errors) > 0:\n raise ValidationError(validation_errors)\n\n def save(self, *args, **kwargs):\n if self.device_type and not self.device_type.validate_relation(DeviceTypeTargetModel.ADDITIONAL_SIGN):\n raise ValidationError(f'Device type \"{self.device_type}\" is not allowed for additional signs')\n\n super().save(*args, **kwargs)\n\n def __str__(self):\n return f\"{self.__class__.__name__} {self.id}\"\n\n\nclass AdditionalSignPlan(UpdatePlanLocationMixin, AbstractAdditionalSign):\n parent = models.ForeignKey(\n TrafficSignPlan,\n verbose_name=_(\"Parent Traffic Sign Plan\"),\n on_delete=models.PROTECT,\n related_name=\"additional_signs\",\n blank=True,\n null=True,\n help_text=_(\"The traffic sign to which this additional sign is associated.\"),\n )\n mount_plan = models.ForeignKey(\n MountPlan,\n verbose_name=_(\"Mount Plan\"),\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n help_text=_(\"Mount that this sign is mounted on.\"),\n )\n plan = models.ForeignKey(\n Plan,\n verbose_name=_(\"Plan\"),\n on_delete=models.PROTECT,\n related_name=\"additional_sign_plans\",\n blank=True,\n null=True,\n help_text=_(\"Plan which this Additional Sign Plan is a part of.\"),\n )\n\n class Meta:\n db_table = \"additional_sign_plan\"\n verbose_name = _(\"Additional Sign Plan\")\n verbose_name_plural = _(\"Additional Sign Plans\")\n unique_together = [\"source_name\", \"source_id\"]\n\n\nclass AdditionalSignReal(AbstractAdditionalSign, InstalledDeviceModel):\n parent = models.ForeignKey(\n TrafficSignReal,\n verbose_name=_(\"Parent Traffic Sign Real\"),\n on_delete=models.PROTECT,\n related_name=\"additional_signs\",\n blank=True,\n null=True,\n help_text=_(\"The traffic sign to which this additional sign is associated.\"),\n )\n additional_sign_plan = models.ForeignKey(\n AdditionalSignPlan,\n verbose_name=_(\"Additional Sign Plan\"),\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n help_text=_(\"The plan for this Additional Sign.\"),\n )\n mount_real = models.ForeignKey(\n MountReal,\n verbose_name=_(\"Mount Real\"),\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n help_text=_(\"Mount that this sign is mounted on.\"),\n )\n installation_id = models.CharField(\n _(\"Installation id\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"The id number of the installation record.\"),\n )\n installation_details = models.CharField(\n _(\"Installation details\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"Additional details about the installation.\"),\n )\n installed_by = models.CharField(\n _(\"Installed by\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"Name of the organization who installed this sign.\"),\n )\n manufacturer = models.CharField(\n _(\"Manufacturer\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"Name of the organization that manufactured this sign.\"),\n )\n rfid = models.CharField(\n _(\"RFID\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"RFID tag of the sign (if any).\"),\n )\n legacy_code = models.CharField(\n _(\"Legacy Traffic Sign Code\"),\n max_length=32,\n blank=True,\n null=True,\n help_text=_(\"The sign type code of the sign in the old Finnish road traffic law.\"),\n )\n permit_decision_id = models.CharField(\n _(\"Permit decision id\"),\n max_length=254,\n blank=True,\n null=True,\n help_text=_(\"The id number of the installation permit.\"),\n )\n operation = models.CharField(\n _(\"Operation\"),\n max_length=64,\n blank=True,\n null=True,\n help_text=_(\"Maintenance operations done to the sign, e.g. washing, straightening or painting.\"),\n )\n scanned_at = models.DateTimeField(\n _(\"Scanned at\"),\n blank=True,\n null=True,\n help_text=_(\"Date and time on which this sign was last scanned at.\"),\n )\n attachment_url = models.URLField(\n _(\"Attachment url\"),\n max_length=500,\n blank=True,\n null=True,\n help_text=_(\"URL to a file attachment related to this sign.\"),\n )\n coverage_area = models.ForeignKey(\n CoverageArea,\n verbose_name=_(\"Coverage area\"),\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n help_text=_(\"Coverage area that this sign belongs to.\"),\n )\n\n class Meta:\n db_table = \"additional_sign_real\"\n verbose_name = _(\"Additional Sign Real\")\n verbose_name_plural = _(\"Additional Sign Reals\")\n unique_together = [\"source_name\", \"source_id\"]\n\n\nclass AdditionalSignRealOperation(OperationBase):\n operation_type = models.ForeignKey(\n OperationType,\n limit_choices_to={\"additional_sign\": True},\n verbose_name=_(\"operation type\"),\n on_delete=models.PROTECT,\n )\n additional_sign_real = models.ForeignKey(\n AdditionalSignReal,\n verbose_name=_(\"additional sign real\"),\n on_delete=models.PROTECT,\n related_name=\"operations\",\n )\n\n class Meta:\n db_table = \"additional_sign_real_operation\"\n ordering = [\"operation_date\"]\n verbose_name = _(\"Additional sign real operation\")\n verbose_name_plural = _(\"Additional sign real operations\")\n\n\nauditlog.register(AdditionalSignPlan)\nauditlog.register(AdditionalSignReal)\n","repo_name":"City-of-Helsinki/city-infrastructure-platform","sub_path":"traffic_control/models/additional_sign.py","file_name":"additional_sign.py","file_ext":"py","file_size_in_byte":11268,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"32976511341","text":"import os\r\nimport time\r\nimport json\r\n\r\ndef getNames(numbers,init=True, device=\"07586311CO072403\"):\r\n '''Manupulating Android Using ADB'''\r\n if init:\r\n os.system(f\"adb -s {device} shell am force-stop com.truecaller\")\r\n os.system(f\"adb -s {device} shell monkey -p com.truecaller -v 1\")\r\n time.sleep(5)\r\n os.system(f\"adb -s {device} shell input tap 400 210\")\r\n time.sleep(1)\r\n Data = {}\r\n for number in numbers:\r\n os.system(f\"adb -s {device} shell input text {number}\")\r\n time.sleep(3)\r\n os.system(f\"adb -s {device} shell uiautomator dump\")\r\n os.system(f\"adb -s {device} pull /sdcard/window_dump.xml\")\r\n f = open(\"window_dump.xml\", \"r\", encoding='utf8')\r\n textList = f.read().replace(\"><\", \">\\n<\").split(\"\\n\")\r\n f.close()\r\n for line in textList:\r\n if 'resource-id=\"com.truecaller:id/title\"' in line:\r\n items = line.split(\"\\\"\")\r\n Data[number] = items[3]\r\n os.system(f\"adb -s {device} shell input tap 800 225\")\r\n return Data\r\n\r\ndef getNumbers(device=\"07586311CO072403\"):\r\n '''Getting Numbers From Whatsapp Groups'''\r\n os.system(f\"adb -s {device} shell am force-stop com.whatsapp.w4b\")\r\n os.system(f\"adb -s {device} shell monkey -p com.whatsapp.w4b -v 1\")\r\n print(\"\\n\\n\\t Open Group and Tap View all \\n\\n \")\r\n time.sleep(10)\r\n\r\n ns = []\r\n flag = 1\r\n while flag:\r\n #Taking UI Screenshot\r\n os.system(f\"adb -s {device} shell uiautomator dump\")\r\n os.system(f\"adb -s {device} pull /sdcard/window_dump.xml\")\r\n\r\n f = open(\"window_dump.xml\", \"r\", encoding='utf8')\r\n lines = f.read().replace(\"><\", \">\\n<\").split(\"\\n\")\r\n f.close()\r\n\r\n for line in lines:\r\n if \"View past participants\" in line:\r\n flag = 0\r\n elif \"com.whatsapp.w4b:id/name\" in line :\r\n numValue = line.split(\"\\\"\")[3]\r\n number01 = numValue.replace(\" \", \"\")[2:]\r\n if number01.isnumeric():\r\n #append if number is not saved Use else for saved names\r\n ns.append(numValue.replace(\" \", \"\")[3:])\r\n os.system(f\"adb -s {device} shell input swipe 550 1300 550 300 1000 \")\r\n return ns\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Contacts = getNames(getNumbers())\r\n print(Contacts)\r\n dataFile = open(\"Contacts.json\", \"w\", encoding='utf8')\r\n result = json.dumps(Contacts)\r\n dataFile.write(result)\r\n","repo_name":"iam7t9/NameFinder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14416006216","text":"import numpy as np\nimport os\nimport operator\n\nROOTDIR = '/home/prometej/Workspaces/PythonWorkspace/Resources/wikidump_pom'\nVOCABULARY_LEMMATIZED = '/home/prometej/Workspaces/PythonWorkspace/chatbot-unk/word_based_version/file_resources/words_lemmatized_vocab.pkl'\n\n# list of all allowed words\nwith open(VOCABULARY_LEMMATIZED, 'rb') as v:\n VOCAB = pickle.load(v)\n\nVOCAB = sorted(VOCAB.items(), key = operator.itemgetter(1))\n\nwith open('/home/prometej/Workspaces/PythonWorkspace/chatbot-unk/word_based_version/file_resources/vocab2', 'r') as v:\n VOCAB = eval(v.read())\n\nfor subdir, dirs, files in os.walk(ROOTDIR):\n name = str(subdir).split('/')[-1]\n print(subdir)\n for f in files:\n print('Working on file:', f)\n SOURCE = str(subdir) + '/' + str(f)\n\n # load text and covert to lowercase\n text = open(SOURCE, encoding='utf-8').read()\n text = text.lower()\n text_list = text.split()\n\n for k in range(len(text_list)):\n text_list[k] = ''.join([i for i in text_list[k] if i in VOCAB])\n\n\n for i in text_list:\n if i == '$#':\n flag = np.random.choice(2, 1, p=[0.2, 0.8])\n if flag == 1:\n text_list.remove(i)\n\n data = ' '.join(text_list)\n\n with open(SOURCE, 'w+') as s:\n s.write(data)\n","repo_name":"skansi/chatbot-unk","sub_path":"word_based_version/file_resources/format_dictionary.py","file_name":"format_dictionary.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33143382716","text":"from selenium import webdriver\r\nimport time\r\nimport os\r\nimport win32clipboard\r\nproject_name=input('your project name: ') \r\nos.mkdir(f'D:\\\\{project_name}')\r\nuname=''\r\npwd=''\r\ndriver=webdriver.Chrome()\r\ndriver.get('https://github.com')\r\nlogin_btn=driver.find_element_by_xpath('/html/body/div[1]/header/div/div[2]/div[2]/a[1]')\r\nlogin_btn.click()\r\ntime.sleep(1)\r\nusername= driver.find_element_by_xpath('/html/body/div[3]/main/div/div[4]/form/input[2]')\r\nusername.send_keys(uname)\r\npassword=driver.find_element_by_xpath('/html/body/div[3]/main/div/div[4]/form/input[3]')\r\npassword.send_keys(pwd)\r\nfinallogin=driver.find_element_by_xpath('/html/body/div[3]/main/div/div[4]/form/input[14]')\r\nfinallogin.click()\r\nnewrepobtn=driver.find_element_by_xpath('/html/body/div[4]/div/aside[1]/div[2]/div[2]/div/h2/a')\r\nnewrepobtn.click()\r\nreponame=driver.find_element_by_xpath('/html/body/div[4]/main/div/form/div[2]/auto-check/dl/dd/input')\r\nreponame.send_keys(project_name)\r\ntime.sleep(5)\r\ncreate=driver.find_element_by_xpath('//*[@id=\"new_repository\"]/div[4]/button')\r\ncreate.click()\r\ntime.sleep(3)\r\ngitkey=driver.find_element_by_xpath('/html/body/div[4]/div/main/div[2]/div/git-clone-help/div[1]/div/div[4]/div/span/span/clipboard-copy')\r\ngitkey.click()\r\nwin32clipboard.OpenClipboard()\r\ndata = win32clipboard.GetClipboardData()\r\nwin32clipboard.CloseClipboard()\r\n#print (data)\r\nproj='#'+project_name\r\nos.chdir(f\"D:\\\\{project_name}\")\r\nos.system(f'echo {project_name} >> README.md')\r\nos.system('git init')\r\nos.system('git add README.md')\r\nos.system('git commit -m \"first commit\"')\r\nos.system('git branch -M main')\r\nos.system(f'git remote add origin {data}')\r\nos.system('git push -u origin main')\r\ndriver.quit()\r\n","repo_name":"lakshaycodes/Project-creator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69947730298","text":"import pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\n\n\ntrain = pd.read_csv('../input/train.csv')\n\ntest = pd.read_csv('../input/test.csv')\ntrain.info(max_cols=250)\ntest.info(max_cols=250)\nf, ax = plt.subplots(figsize=(25,5))\n\n\n\ntrain.drop(['target', 'ID_code'], axis=1).plot.box(ax=ax, rot=90)\nlen(train.loc[train.target == 1])/len(train)\nr2 = pd.concat([train.drop(['target', 'ID_code'], axis=1), test.drop('ID_code', axis=1)]).corr()**2\n\nr2 = np.tril(r2, k=-1) # remove upper triangle and diagonal\n\nr2[r2 == 0] = np.nan # replace 0 with nan\nf, ax = plt.subplots(figsize=(20,20))\n\nsns.heatmap(np.sqrt(r2), annot=False,cmap='viridis', ax=ax)\ntarget_r2 = train.drop(['ID_code', 'target'], axis=1).corrwith(train.target).agg('square')\n\n\n\nf, ax = plt.subplots(figsize=(25,5))\n\ntarget_r2.agg('sqrt').plot.bar(ax=ax)\ntop = target_r2.loc[np.sqrt(target_r2) > 0.048].index\n\ntop\nfrom sklearn.preprocessing import PolynomialFeatures\n\n\n\npolyfeat_train = pd.DataFrame(PolynomialFeatures(2).fit_transform(train[top]))\n\npolyfeat_test = pd.DataFrame(PolynomialFeatures(2).fit_transform(test[top]))\nfrom imblearn.over_sampling import RandomOverSampler\n# additional imports\n\nfrom imblearn.pipeline import Pipeline\n\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.preprocessing import RobustScaler\nfrom imblearn.pipeline import Pipeline\n\nfrom sklearn.model_selection import GridSearchCV\n\nimport lightgbm as lgb\n\n\n\nlgbpipe = Pipeline([('resample', RandomOverSampler(random_state=42)), ('model', lgb.LGBMClassifier(random_state=42, objective='binary', metric='auc', \n\n boosting='gbdt', verbosity=1,\n\n tree_learner='serial'))])\n\n\n\nparams = { \n\n \"model__max_depth\" : [20],\n\n \"model__num_leaves\" : [30],\n\n \"model__learning_rate\" : [0.1],\n\n \"model__subsample_freq\": [5],\n\n \"model__subsample\" : [0.3],\n\n \"model__colsample_bytree\" : [0.05],\n\n \"model__min_child_samples\": [100],\n\n \"model__min_child_weight\": [10],\n\n \"model__reg_alpha\" : [0.12],\n\n \"model__reg_lambda\" : [15.5],\n\n \"model__n_estimators\" : [600]\n\n }\n\n\n\n# previous best-fit gridsearch parameters and results\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 100, 'model__num_leaves': 30, 'model__reg_alpha': 0.1, 'model__reg_lambda': 10, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8735588789424164\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 400, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 0.2, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8915905852982839\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 500, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 0.2, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8923071245054173\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 0.2, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8925518240005254\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 550, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 0.2, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8924978701504809\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 15, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8941148812638564\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.5, 'model__reg_lambda': 12, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8938169988416745\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.3, 'model__reg_lambda': 15, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8941407236592286\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.2, 'model__reg_lambda': 15, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8938875270813017\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.2, 'model__reg_lambda': 15, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8938875270813017\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 15.5, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8943001048082946\n\n# {'model__colsample_bytree': 0.05, 'model__learning_rate': 0.1, 'model__max_depth': 20, 'model__min_child_samples': 100, 'model__min_child_weight': 10, 'model__n_estimators': 600, 'model__num_leaves': 30, 'model__reg_alpha': 0.12, 'model__reg_lambda': 15.2, 'model__subsample': 0.3, 'model__subsample_freq': 5}\n\n# 0.8939732044413886\n\n\n\nlgbgrid = GridSearchCV(lgbpipe, param_grid=params, cv=10, scoring='roc_auc')\n\nlgbgrid.fit(train.drop(['ID_code', 'target'], axis=1), train.target)\n\n\n\nprint(lgbgrid.best_params_)\n\nprint(lgbgrid.best_score_)\nfrom sklearn.linear_model import RidgeClassifier\n\n\n\nridgepipe = Pipeline([('resample', RandomOverSampler(random_state=42)), ('scaler', RobustScaler()), ('model', RidgeClassifier(random_state=42))])\n\n\n\nparams = {'model__alpha': [1.0]} # between 0.5 and 2; best-fit so far: 1\n\n \n\nridgegrid = GridSearchCV(ridgepipe, param_grid=params, cv=3, scoring='roc_auc')\n\nridgegrid.fit(pd.concat([train.drop(['ID_code', 'target'], axis=1), polyfeat_train], axis=1, join='inner'), train.target)\n\n\n\nprint(ridgegrid.best_params_)\n\nprint(ridgegrid.best_score_)\npred = pd.DataFrame(lgbgrid.predict_proba(test.drop(['ID_code'], axis=1))[:, -1], columns=['target'], index=test.loc[:, 'ID_code'])\n\npred.to_csv('submission.csv', index=True)\ntest.head()","repo_name":"aorursy/new-nb-5","sub_path":"mommermi_resampling-gridsearch-lightgbm-magic.py","file_name":"mommermi_resampling-gridsearch-lightgbm-magic.py","file_ext":"py","file_size_in_byte":7276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72848828217","text":"import dataclasses\nimport functools\nfrom typing import Any\n\n\nclass ChainArgumentNotCallable(Exception):\n pass\n\n\n@dataclasses.dataclass\nclass ChainError:\n index: int\n fn: str\n args: Any\n exception: Exception\n\n\nclass ChainErrorException(Exception):\n def __init__(self, index, fn, arguments, original_exception, *args):\n self.index = index\n self.fn = fn\n self.arguments = arguments\n self.original_exception = original_exception\n self.message = f\"Exception raises on chain index: {self.index}. Fun: {self.fn}. Args: {self.arguments}\"\n super(ChainErrorException, self).__init__(self.message, *args)\n\n\nclass Chain:\n \"\"\"\n Chaining functions using pipe operators. The next function will receive\n result of the previous function call as it first argument.\n\n ```python\n @chainable\n def add(a, b):\n return a + b\n\n chain = add(1, 2) | add(3) | add(4)\n # ((1 + 2) + 3) + 4\n\n chain()\n # 10\n ```\n \"\"\"\n\n chains = []\n\n def __init__(self, fn, *args, **kwargs):\n if not callable(fn):\n raise ChainArgumentNotCallable\n\n self.fn = fn\n self.args = args\n self._on_error = None\n\n def on_error(self, fn):\n self._on_error = fn\n\n def __or__(self, other):\n left_most = len(self.chains) == 0\n if left_most:\n other.chains = [self, other]\n else:\n other.chains = [*self.chains, other]\n\n return other\n\n def __call__(self, *args, **kwargs):\n error_handler = self.get_on_error()\n index = 0\n try:\n chain = self.chains[0]\n except IndexError:\n chain = self\n\n try:\n result = chain.fn(*chain.args)\n if not self.chains:\n return result\n\n for index, chain in enumerate(self.chains[1:], 1):\n result = chain.fn(result, *chain.args)\n except Exception as e:\n error_args = [index, chain.fn.__name__, chain.args, e]\n if error_handler:\n return error_handler(ChainError(*error_args))\n raise ChainErrorException(*error_args)\n return result\n\n def __str__(self):\n string = \"chains of:\\n\"\n if not self.chains:\n string += f\" {self.fn.__name__} - {self.args},\\n\"\n else:\n for chain in self.chains:\n string += f\" {chain.fn.__name__} - {chain.args},\\n\"\n string += \"]\"\n return string\n\n def __repr__(self):\n return self.__str__()\n\n def get_on_error(self):\n return self._on_error\n\n\ndef chainable(fn):\n @functools.wraps(fn)\n def inner(*args, **kwargs):\n chain = Chain(fn, *args, **kwargs)\n return chain\n\n return inner\n","repo_name":"ihfazhillah/chainable","sub_path":"src/chain.py","file_name":"chain.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33207380506","text":"import atexit\nimport os\nimport sys\nimport time\nfrom contextlib import suppress\nfrom signal import SIGTERM\n\n\nclass Daemon:\n def __init__(self, pidfile=None):\n self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\")\n\n def start(self):\n try:\n self.get_pidfile()\n except IOError:\n pass\n finally:\n self.daemonize()\n self.run()\n\n def stop(self):\n try:\n pid = self.get_pidfile()\n except IOError:\n return\n try:\n while 1:\n os.kill(pid, SIGTERM)\n time.sleep(0.1)\n except OSError as err:\n e = str(err.args)\n if e.find(\"No such process\") > 0:\n self.delete_pidfile()\n else:\n sys.exit(1)\n\n def daemonize(self):\n self.fork()\n\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n self.fork()\n\n atexit.register(self.delete_pidfile)\n self.create_pidfile()\n\n def fork(self):\n try:\n if os.fork() > 0:\n sys.exit(0)\n except OSError as err:\n self.error(f\"failed to fork a child process. Reason: {err}\\n\")\n\n def delete_pidfile(self):\n with suppress(FileNotFoundError):\n os.remove(self.pidfile)\n\n def create_pidfile(self):\n with open(self.pidfile, \"w+\") as fh:\n fh.write(str(os.getpid()) + \"\\n\")\n\n def get_pidfile(self):\n with open(self.pidfile, \"r\") as fh:\n return int(fh.read().strip())\n\n def error(self, message):\n sys.stderr.write(f\"{message}\\n\")\n sys.exit(1)\n\n def restart(self):\n self.stop()\n self.start()\n\n def run(self):\n raise NotImplementedError\n","repo_name":"nficano/exhal","sub_path":"exhal/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70269994296","text":"# -*- coding: utf-8 -*-\n\nimport tkinter as tk\nimport subprocess\nimport os\nimport webbrowser\nimport platform\n\nfrom utils import file_exists\n\nfont_color = \"#0f425b\"\nfu_green = '#6b9e1f'\nfu_grey = '#ccc'\n\n\nclass Page(tk.Frame):\n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n def show(self):\n self.lift()\n\n\nclass Introduction(Page):\n\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"1. Rückblick\", font=\"TkHeaderFont 24 bold\", bg='white',\n fg=font_color)\n title.place(x=0, y=0)\n\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, wrap=\"word\", pady=5, padx=5)\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.23)\n text.insert(\"1.0\", \"Im letzten Teil des Tutorials haben wir die grundlegenden Strukturen und Befehle von Git gelernt.\"\n \" Die gelernten Befehle sind die, die man in der Regel am häufigsten benutzt, wenn man mit Git arbeitet. \"\n \"\\n\\nDas bisher gelernte kratzt jedoch nur an der Oberfläche dessen, was mit Git möglich ist. Git bietet noch zahlreiche nützliche Befehle, \"\n \"die wir in diesem Teil des Tutorials lernen werden. Außerdem wollen wir uns ein Remote Repository auf dem GitLab-Server \"\n \"des Instituts anlegen und die Interaktion zwischen lokalem und Remote Repository erkunden.\")\n additional = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, wrap=\"word\", pady=5, padx=5)\n additional.place(x=0, rely=0.82, relwidth=1, relheight=0.15)\n additional.insert(\"1.0\", \"Dieser Teil des Tutorials funktioniert geauso wie der erste. Du bekommst \"\n \"Informationen zu einer bestimmten Struktur oder einem Befehl und sollst diesen dann im integrierten Terminal ausführen.\"\n \"\\nDu kannst das Tutorial auch nebenbei in einem extra Terminal ausführen, musst jedoch auch hier darauf achten, \"\n \"dass einige der Befehle abhängig vom Betriebssystem sind.\")\n\n img = tk.PhotoImage(file=\"./img/generalstructure.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.15, rely=0.30)\n\n\nclass Preparation(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if command == \"mkdir new_repo\":\n if os.system(command) == 0:\n output['text'] = \"Das Verzeichnis 'new_repo' wurde angelegt.\"\n elif platform.system() == \"Windows\" and os.system(command) != 0:\n output['text'] = \"Das Verzeichnis existiert bereits!\"\n elif platform.system() != \"Windows\" and os.system(command) == 256:\n output['text'] = \"Das Projekt existiert bereits.\"\n elif command == \"cd new_repo\":\n try:\n os.chdir(\"./new_repo\")\n output['text'] = \"Gewechselt in das Verzeichnis 'new_repo'.{}\".format(os.getcwd())\n task1['bg'] = '#6b9e1f'\n except:\n if os.getcwd().endswith(\"new_repo\"):\n output['text'] = \"Du befindest dich bereits im new_repo-Verzeichnis. {}\".format(os.getcwd())\n task1['bg'] = '#6b9e1f'\n else:\n output['text'] = \"Bist du sicher, dass du einen Projektordner namens 'new_repo' angelegt hast?\"\n elif command == \"git init\":\n if os.getcwd().endswith(\"new_repo\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command == \"git status\":\n if os.getcwd().endswith(\"new_repo\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command.startswith(\"git add\"):\n if os.getcwd().endswith(\"new_repo\"):\n subprocess.check_output(command, shell=True)\n output['text'] = \"Datei wurde zur Versionierung vorgemerkt.\"\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command.startswith(\"git commit -m\"):\n if os.getcwd().endswith(\"new_repo\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task4['bg'] = fu_green\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command == \"touch first.txt\":\n if os.getcwd().endswith(\"new_repo\"):\n if platform.system() == \"Windows\":\n try:\n subprocess.check_output(\"first.txt\", shell=True)\n output['text'] = \"Die Datei first.txt wurde angelegt.\"\n except:\n pass\n else:\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei first.txt wurde erfolgreich angelegt.\"\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command == \"touch second.txt\":\n if os.getcwd().endswith(\"new_repo\"):\n if platform.system() == \"Windows\":\n try:\n subprocess.check_output(\"second.txt\", shell=True)\n output['text'] = \"Die Datei second.txt wurde angelegt.\"\n except:\n pass\n else:\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei second.txt wurde erfolgreich angelegt.\"\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n elif command == \"touch third.txt\":\n if os.getcwd().endswith(\"new_repo\"):\n if platform.system() == \"Windows\":\n try:\n subprocess.check_output(\"third.txt\", shell=True)\n output['text'] = \"Die Datei third.txt wurde angelegt.\"\n task3['bg'] = fu_green\n except:\n pass\n else:\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei third.txt wurde erfolgreich angelegt.\"\n task3['bg'] = fu_green\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo. Wechsel in das Verzeichnis!\"\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"2. Vorbereitungen\", font=\"TkHeaderFont 24 bold\", bg='white',\n fg=font_color)\n title.place(x=0, y=0)\n\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, wrap=\"word\", pady=5,\n padx=5)\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.54)\n text.insert(\"1.0\",\n \"Im vorherigen Teil des Tutorials haben wir ein Repository angelegt und in diesem zahlreiche Befehle ausprobiert. \"\n \"Das wollen wir im jetzigen Teil des Tutorials auch machen. In einem neuen Repository können wir, \"\n \"wie bereits im ersten Teil, Dateien anlegen, versionieren und unterschiedliche Git-Befehle ausprobieren.\"\n \"\\n\\nHier eine kleine Erinnerung an die im ersten Teil schon verwendeten Befehle:\"\n \"\\n- mkdir Verzeichnisname (Anlegen eines Verzeichnisses)\"\n \"\\n- cd Verzeichnisname (Wechseln in das angegebene Verzeichnis)\"\n \"\\n- git init (Anlegen eines Repositorys)\"\n \"\\n- touch Dateiname (Anlegen einer leeren Datei mit dem angegebenen Namen)\"\n \"\\n- git add (Vormerken zur Versionierung)\"\n \"\\n- git commit -m \\\"Commitmessage\\\" (Aktuelle Projektversion speichern)\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.65)\n task1 = tk.Label(description_container, text=\"1. Lege einen neuen Ordner namens 'new_repo' an und wechsel in dieses Verzeichnis..\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.72)\n task2 = tk.Label(description_container,\n text=\"2. Initiiiere ein Repository in diesem Verzeichnis\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.79)\n task3 = tk.Label(description_container,\n text=\"3. Lege 3 neue Dateien an: first.txt, second.txt und third.txt\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.86)\n task4 = tk.Label(description_container,\n text=\"4. Merke alle Dateien zur Versionierung vor und commite sie.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task4.place(x=0, rely=0.93)\n\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n\nclass GitDiff(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if command == \"echo \\\"Der erste Inhalt\\\" >> first.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Inahlt in die Datei first.txt geschrieben.\"\n task1['bg'] = fu_green\n elif command == \"git diff first.txt\":\n if os.getcwd().endswith(\"new_repo\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n else:\n output['text'] = \"Wechsel bitte zuerst in das new_repo-Verzeichnis!\"\n else:\n output['text'] = \"Überprüfe deine Syntax!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"3. Git diff\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.65)\n text.insert(\"1.0\", \"Wenn man sehr viele Änderungen macht, kann man schnell mal den Überblick verlieren welche man gemacht hat. \"\n \"Der Befehl 'git status' hilft ja bereits, um zu sehen, an welcher Datei Veränderungen vorgenommen wurden.\"\n \" Das ist schön und gut, aber man will natürlich auch wissen, was genau man in der Datei verändert hat. \"\n \"Vor allem, wenn die Änderungen nicht von einem selbst stammen. \"\n \"\\n\\nHierzu gibt es den Befehl 'git diff Dateiname'.\"\n \" Dieser zeigt einem alle gemachten Änderungen in der Datei an. Hier wird verglichen zwischen \"\n \"dem Inhalt der Datei beim letzten Commit und dem Inhalt, der seitdem dazugekommenen ist und nicht commited wurde.\"\n \" Alle von Änderungen betroffenen Codezeilen werden dann im Terminal angezeigt. Genauer gesagt, werden beide Versionen angezeigt: \"\n \"die alte und die neue Version.\"\n \" Die alte Version des Codes wird mit einem Minus gekennzecihnet, die neue Version mit einem Plus.\"\n \"\\n\\nIm vorherigen Schritt haben wir die leere Datei first.txt commited und werden nun einen neuen Satz in diese Datei schreiben, \"\n \"der dann durch 'git diff' markiert werden sollte.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.77)\n task1 = tk.Label(description_container, text=\"1. Schreibe den Text \\\"Der erste Inhalt\\\" in die Datei first.txt.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.84)\n task2 = tk.Label(description_container, text=\"2. Lass dir die Unterschiede in der first.txt anzeigen\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n\nclass GitCheckout(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"new_repo\"):\n if command == \"git checkout first.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Alle Änderungen in der Datei first.txt wurden rückgängig gemacht\"\n task1['bg'] = fu_green\n elif command == \"git status\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n else:\n output['text'] = \"Überprüfe deine Syntax!\"\n else:\n output['text'] = \"Du befindest dich nicht im new_repo-Verzeichnis! Gehe ein paar Schritte zurück \" \\\n \"und wechsel in das Verzeichnis!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"4. Git checkout\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.65)\n text.insert(\"1.0\", \"Es kann natürlich vorkommen, dass man eine Weile an seinem Projekt gearbeitet hat und irgendwann merkt,\"\n \" dass man totalen Blödsinn gemacht hat und eigentlich alles rückgängig machen will. \"\n \"Hierzu gibt es verscheidene Möglichkeiten, die speziell auf den Versionierungssstatus der Änderungen bezogen sind.\"\n \"\\n\\nZuerst wollen wir Änderungen rückgängig machen, die noch nicht zum Commit vorgemerkt sind und auch noch nicht commited wurden. \"\n \"Also Änderungen, die bisher nur im Working Directory zu finden sind.\"\n \"\\n\\nHierzu gibt es den Befehl 'git checkout' den man entweder auf eine bestimmte Datei beziehen kann 'git checkout Dateiname' oder \"\n \"auf alle gemachten Änderungen 'git checkout *'.\"\n \"\\n\\nNachdem du mit 'git checkout' die Änderungen in der first.txt rückgängig gemacht hast, kannst du den Status des Repositorys prüfen. \"\n \"Du solltest sehen, dass die Datei 'first.txt' nicht mehr angezeigt wird, da alle Änderungen rückgängig gemacht wurden.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.77)\n task1 = tk.Label(description_container, text=\"1. Mache alle Änderungen in der first.txt rückgängig.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.84)\n task2 = tk.Label(description_container, text=\"2. Prüfe den Änderungsstatus deines Repositorys.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitcheckout.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.815, rely=0.76)\n\n\nclass GitReset(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"new_repo\"):\n if file_exists(\"second.txt\"):\n if command == \"git add second.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei second.txt wurde zur Versionierung vorgemerkt!\"\n task2['bg'] = fu_green\n elif command == \"git status\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n elif command == \"git reset second.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Vormerkung der Datei second.txt rückgängig gemacht!\"\n task3['bg'] = fu_green\n elif command == \"echo \\\"Das hier wird vorgemerkt.\\\" >> second.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Der Satz wurde in die Datei second.txt geschrieben.\"\n task1['bg'] = fu_green\n else:\n output['text'] = \"Überprüfe deine Syntax!\"\n else:\n output['text'] = \"Die Datei second.txt existiert nicht.\\nGehe einige Schritte zurück und erledige alle Aufgaben!\"\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis gitcourse.\\n Gehe zurück zu Schritt ... und \" \\\n \"wechsel das Verzeichnis!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"5. Git reset\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.61)\n text.insert(\"1.0\", \"Es kann vorkommen, dass man in einem Projekt Änderungen gemacht und zur Versionierung vorgemerkt hat, \"\n \"die man eigentlich doch nicht versionieren möchte. \"\n \"Das ist natürlich ungünstig und deswegen will man diese dann wieder aus der Vormerkungen herausnehmen. \"\n \"Hierzu bietet Git den Befehl 'git reset'. Mit 'git reset Dateiname' kann man eine oder meherere \"\n \"Dateien aus der Staging Area entfernen.\"\n \"\\nIm Status-Bereich von Git werden die Änderungen also aus der Staging Area ('Zum Commit vorgemerkte Änderungen') \"\n \"zurück in das Working Directory verschoben ('Unversionierte Dateien' oder 'Nicht zum commit vorgemerkte Änderungen')\"\n \"\\n\\nDas wollen wir mal ausprobieren. Zuerst schreiben wir den Text 'Das hier wird vorgemerkt.' \"\n \"in die 'second.txt' und merken diese Änderungen zur Versionieurng vor.\"\n \" Die Vormerkung können wir uns mit 'git status' anzeigen lassen\"\n \" und mit 'git reset Dateiname' machen wir diese Vormerkung rückgängig. Mit 'git status' \"\n \"sollte man nun sehen, dass die Änderungen nicht mehr in der Staging Area (Zum Commit vorgemerkt) gelistet sind.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.72)\n task1 = tk.Label(description_container, text=\"1. Schreibe \\\"Das hier wird vorgemerkt.\\\" in die Datei second.txt.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.79)\n task2 = tk.Label(description_container, text=\"2. Merke die second.txt zur Versionierung vor.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.86)\n task3 = tk.Label(description_container, text=\"3. Mache den Staging-Prozess der second.txt-Datei rückgängig.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.93)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitreset.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.65, rely=0.75)\n\n\nclass GitLog(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"new_repo\"):\n if command == \"git log\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task1['bg'] = fu_green\n else:\n output['text'] = \"Überprüfe deine Syntax!\"\n else:\n output['text'] = \"Du befindest dich nicht im new_repo-Verzeichnis! Gehe ein paar Schritte zurück und\" \\\n \" wechsel in das Verzeichnis!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"6. Git log\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=0.6, relheight=0.4)\n text.insert(\"1.0\", \"Im Laufe der Bearbeitung eines Projektes archiviert man sehr viele Projektversionen.\"\n \" Hierzu ist es auch gut, einen Überblick über alle Versionen zu haben, vor allem, wenn man \"\n \"doch mal zu einer früheren Version zurückkehren möchte.\"\n \"\\n\\nHierzu stellt Git das Werkzeug der Commit-History bereit. Diese beinhaltet die Liste aller gemachten\"\n \"Commits, also aller versionierten Projektversionen. \")\n text2 = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5, wrap=\"word\")\n text2.place(x=0, rely=0.51, relwidth=1, relheight=0.32)\n text2.insert(\"1.0\", \"Mit dem Befehl 'git log' kann man sich diese Commit-History anzeigen lassen. Jeder Eintrag enhält den Autor des Commits, \"\n \"den Zeitstempel, die mitgelieferte Commitmessage sowie eine Referenz. Diese Referenz ist einen \"\n \"eindeutige Zahlen- und Buchtsbenkombination, die einem Commit zugeordnet ist.\"\n \"\\n\\nHier zeigt sich außerdem, warum es so wichtig ist, eine vernünftige Commitmessage mitzuliefern, \"\n \"da man sonst sehr schwer nachvollziehen kann welche Änderungen in den jeweiligen Commits gemacht \"\n \"wurden.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.84)\n task1 = tk.Label(description_container, text=\"1. Lass dir die Commit-History anzeigen.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/commitmessages.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.62, rely=0.05)\n\n\nclass GitResetSoftDescription(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"7. Git reset soft\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.05, relwidth=1, relheight=0.38)\n text.insert(\"1.0\", \"Warum haben wir uns nun die Commit-History angesehen? Um einen Überblick über alle Commits zu \"\n \"haben und so die Möglichkeit, alte Commits wiederherzustellen bzw. Commits rückgängig zu machen oder zu löschen.\"\n \"\\n\\nEs kann vorkommen, dass man eine Projektversion archiviert hat, also ins Repository committet.\"\n \" Merkt man jedoch, dass man vielleicht noch nicht fertig war mit den Änderungen, dann gibt es eine Möglichkeit,\"\n \"eine Projektversion aud dem Repository zurück in die Staging Area zu holen. Man macht also \"\n \"einen Commit rückgängig, jedoch ohne die Änderungen zu verlieren.\"\n \"\\n\\nHierzu gibt es den Befehl 'git reset --soft HEAD'.\"\n \"\\nUnd genau hier kommt die Commit-History ins Spiel. Es gibt mehrere Wege, auf einen früheren Commit zurückzusetzen.\"\n \" Man kann mit 'HEAD~1' einen Commit (oder entsprechend der Zahl mehrere Commits) zurückspringen. \"\n \"Bei einer großen Menge an Commits wird das aber irgendwann etwas schwierig, auch wenn es selten vorkommt, dass man so weit zurückgeht.\")\n\n img = tk.PhotoImage(file=\"./img/resetsoft.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.17, rely=0.48)\n\n\nclass GitResetSoft(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"new_repo\"):\n if file_exists(\"third.txt\"):\n if command == \"git log\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n elif command == \"git status\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task4['bg'] = fu_green\n\n elif command == \"git add third.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei third.txt wurde zur Archivierung vorgemerkt.\"\n elif command.startswith(\"git commit -m\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task1['bg'] = fu_green\n elif command == \"git reset --soft HEAD~1\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Der Commit wurde rückgängig gemacht und die Änderungen wurden in die Staging\" \\\n \" Area übernommen.\"\n task3['bg'] = fu_green\n elif command == \"echo \\\"Diese Änderungen gehen nicht verloren\\\" >> third.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Der Satz 'Diese Änderungen eghen nicht veloren' wurde in die third.txt geschrieben.\"\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n else:\n output['text'] = \"die datei third.txt existiert nicht.\\nGehe einige schritte zurück und arbeite die Aufgaben nacheinander ab!\"\n else:\n output['text'] = \"Du befindest dich nicht im new_repo-Verzeichnis.\\nGehe einige Schritte zurück und befolge die Aufgaben.\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"8. Git reset soft\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.37)\n text.insert(\"1.0\", \"Nun wollen wir mit 'git reset --soft HEAD~1' den letzten Commit zurücksetzen. Die dort \"\n \"gespeicherten Änderungen also zurück in die Staging Area verschieben.\"\n \"\\nDa wir bisher nur einen Commit in der History haben, speichern wir einen neuen. \"\n \"Hierzu wollen wir zuerst die Datei third.txt verändern und dann versionieren.\"\n \" Nun können wir die Commit-History checken und sollten den neuen Commit sehen.\"\n \" Diesen können wir dann rückgängig machen, was sowohl die Entfernung des Commits aus der \"\n \"Commit-History bewirkt als auch Veränderungen im Status des Repositorys.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.63)\n task1 = tk.Label(description_container, text=\"1. Schreibe 'Diese Änderungen gehen nicht verloren' in die third.txt und versioniere sie. (add & commit)\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.70)\n task2 = tk.Label(description_container, text=\"2. Lass dir die Commit-History anzeigen\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.77)\n task3 = tk.Label(description_container, text=\"3. Mach den Commit rückgängig.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.84)\n task4 = tk.Label(description_container, text=\"3. Prüfe, ob die zuvor commiteten Änderungen nun in der Staging Area vorgemerkt sind. \",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task4.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitresetsoft.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.37, rely=0.4)\n\n\nclass GitResetHardDescription(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"9. Git reset hard\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.35)\n text.insert(\"1.0\", \"Man kann also Commits rückgängig machen, ohne dass die Veränderungen verloren gehen und dann \"\n \"weiter an dieser Projektversion arbeiten und irgendwann, wenn man fertig ist, erneut committen.\"\n \"\\n\\nEs kommt aber auch vor, dass man etwas komplett nicht mehr braucht oder aus versehen etwas \"\n \"so doll kaputt gemacht hat, dass es nicht mehr funktioniert und man den Code auch wirklich \"\n \"nicht mehr haben will.\"\n \"\\n\\nHierzu gibt es dann den Befehl 'git reset --hard HEAD'. Dieser funktioniert äquivalent zum \"\n \"vorherigen Befehl, nur dass hier die archivierte Version komplett weggeschmissen wird. Will \"\n \"man also auf den vorvorletzen Commit vor dem aktuellen zurücksetzen, werden alle Commits und \"\n \"Änderungen nach dem vorvorletzen komplett gelöscht. Und hiermit ist das unwiederbringliche Löschen gemeint.\")\n\n img = tk.PhotoImage(file=\"./img/resethard.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.17, rely=0.45)\n\n\nclass GitResetHard(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"new_repo\"):\n if file_exists(\"third.txt\"):\n if command.startswith(\"git commit -m\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task1['bg'] = fu_green\n elif command == \"git status\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task4['bg'] = fu_green\n elif command == \"git log\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n elif command == \"git reset --hard HEAD~1\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Letzter Commit wurde gelöscht!\"\n task3['bg'] = fu_green\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n else:\n output['text'] = \"Die Datei third.txt existiert nicht.\\nGehe einige Schritte zurück und erledige die dort gestellten Aufgaben!\"\n else:\n output['text'] = \"Du befindest dich nicht im Verzeichnis new_repo.\\n\" \\\n \"Gehe einige Schritte zurück und erledige alle dort angegebenen Aufgaben!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"10. Git reset hard\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.38)\n text.insert(\"1.0\", \"Jetzt wollen wir einen Commit komplett löschen. Dazu wiederholen wir einen Teil der Schritte \"\n \"aus dem Teil des Soft-Resets.\"\n \"Wir merken erneut die Datei 'third.txt' vor, commiten sie \"\n \"und können uns den neuen Commit erneut in der Commit-History ansehen.\"\n \"\\n\\nNun wollen wir den Commit rückgängig machen bzw. entsorgen indem wir den Befehl 'git reset --hard HEAD~1' ausführen. \"\n \"Hiermit stellen wir den vorletzten Commit wieder her und löschen den letzten Commit komplett.\"\n \" Das bedeutet, die Änderungen des letzten Commits werden NICHT in die Staging Area verschoben sondern unwiederbringlich gelöscht!\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.63)\n task1 = tk.Label(description_container, text=\"1. Committe die Änderungen der third.txt.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.70)\n task2 = tk.Label(description_container, text=\"2. Lass dir die Commit-History anzeigen\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.77)\n task3 = tk.Label(description_container, text=\"3. Lösche den Commit.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.84)\n task4 = tk.Label(description_container,\n text=\"4. Kontrolliere, ob die zuvor versionierten Änderungen NICHT in der Staging Area gelistet sind.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task4.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitresethard.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.45, rely=0.50)\n\n#######################################################################################################################\nclass RemoteRepository(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"11. Remote Repository\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.22)\n text.insert(\"1.0\", \"Nun kennen wir die wichtigsten und am häufigsten genutzten Befehle, um lokal \"\n \"Projektsoftware oder -inhalt zu versionieren. Nun wollen wir uns mit dem Remote Repository \"\n \"und der Platform GitLab auseinandersetzen.\"\n \"\\nHierzu musst du dich mit deinem Institutsaccount auf dem GitLab-Server anmelden: gitlab.met.fu-berlin.de\"\n \"\\n\\nHier beitet dir ein kleiner grüner Button die Möglichkeit, ein neues Repository anzulegen. Was wir jetzt auch machen wollen.\")\n text3 = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text3.place(x=0, rely=0.43, relwidth=1, relheight=0.17)\n text3.insert(\"1.0\", \"Hast du diesen Button gedrückt, kannst du dein neues, leeres Repository benennen. Wir wollen es 'first_repo' nennen.\"\n \"\\n\\nWir haben nun ein neues, leeres Repository, dass sich momentan jedoch nur auf dem GitLab-Server des Instituts befindet, \"\n \"aber noch nicht auf deinem Rechner. Das wollen wir gleich ändern.\")\n\n img = tk.PhotoImage(file=\"./img/newremoterepo.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.12, rely=0.3)\n\n img = tk.PhotoImage(file=\"./img/remotereponaming.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.12, rely=0.61)\n\n\nclass SSHKey(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"12. SSH-Key\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.40)\n text.insert(\"1.0\", \"Nun haben wir ein Remote Repository auf dem GitLab-Servers des Instituts. \"\n \"Jede Interaktion mit dem Remote Remote Repository über die Konsole würde eine Abfrage deiner\"\n \" Benutzerdaten fordern, also Benutzername und Passwort. Das ist auf Dauer relativ hinderlich und so gibt es die \"\n \"Möglichkeit, einen SSH-Key zu hinterlegen. Dieser besteht aus einem öffentlichen und einem privaten Teil.\"\n \"Den Schlüssel erzeugst du auf deinem Rechner und hinterlegst den öffentlichen teil des Schlüssels im GitLab. \"\n \"So kommunizieren der GitLab-Server und dein rechner über ein gesicherte Verbindung, ohne jedes mal deine Nutzerdaen \"\n \"eingeben zu müssen.\"\n \"\\n\\nUm einen SSH-Key zu hinterlgen, musst du im GitLab auf dein Profil unter Settings nach SSH-Key suchen.\"\n \" Dort kannst du für jeden Rechner einen SSH-Schlüssel hinterlegen. \"\n \" Am oberen Ende der Seite, auf der man SSH-Schlüssel hinterlegen kann, befindet sich ein Link, der erklärt, \"\n \"wie man für sein Betriebssystem einen Schlüssel erzeugt und hinterlegt.\"\n \"\\n\\nSobald du einen Schlüssel erzegt und hinterlegt hast, können wir zum nächsten Schritt übergehen.\")\n\n\n img = tk.PhotoImage(file=\"./img/addkey.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0, rely=0.5)\n\n img = tk.PhotoImage(file=\"./img/sshguide.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.5, rely=0.5)\n\n\nclass CloneRepo(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if command == \"cd ..\":\n try:\n os.chdir(\"..\")\n output['text'] = \"Gewechselt in das Verzeichnis '..'.{}\".format(os.getcwd())\n task1['bg'] = '#6b9e1f'\n except:\n output['text'] = \"Es ist ein Fehler aufgetreten.\"\n elif command == \"cd first_repo\":\n try:\n os.chdir(\"./first_repo\")\n output['text'] = \"Gewechselt in das Verzeichnis 'first_repo'.{}\".format(os.getcwd())\n task2['bg'] = fu_green\n except:\n if os.getcwd().endswith(\"first_repo\"):\n output['text'] = \"Du befindest dich bereits im first_repo-Verzeichnis. {}\".format(os.getcwd())\n task2['bg'] = fu_green\n else:\n output['text'] = \"Bist du sicher, dass du das Repository geklont hast?\"\n elif command.startswith(\"git clone\"):\n #if not os.getcwd().endswith(\"git_tutorial\"):\n # output['text'] = \"Du befindest dich nicht im übergeordneten Verzeichnis, Wechsel bitte!\"\n #else:\n subprocess.check_output(command, shell=True)\n output['text'] = \"Respository geklont\"\n else:\n output['text'] = \"Überprüfe deine Syntax!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"13. Repository klonen\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.57)\n text.insert(\"1.0\", \"Nun haben wir einen SSH-Key an- und hinterlegt, damit dieser Rechner jederzeit ohne Passwortabfrage mit \"\n \"dem GitLab-Server des Instituts kommunizieren kann.\"\n \"\\n\\nJetzt wollen wir das leere Repository, das wir vorhin angelegt haben, auf unseren Rechner übertragen.\"\n \" Hierzu klont man dieses mit dem Befehl 'git clone'. Wir wollen das Repository von dem GitLab-Server des \"\n \"Instituts klonen und müssen im Befehl den Ort benennen, von dem das Repository geklont werden muss.\"\n \"\\nDieser Ort ist ein Link zum Remote Repository, der in deinem leeren Repository auf der GitLab-Seite angezeigt wird.\"\n \" Den dort angezeigten Befel kopierst du einfach und überträgst ihn ins Terminal, wo du ihn ausführst.\"\n \"\\n\\nIm Moment befinden wir uns noch im Verzeichnis new_repo. Hier wollen wir nun nicht mehr \"\n \"arbeiten, wir müssen also in den darübergelegenen Ordner (..) wechseln.\")\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.77)\n task1 = tk.Label(description_container, text=\"1. Wechsle in das übergeordnete Verzeichnis.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.84)\n task2 = tk.Label(description_container, text=\"2. Klone dein neues Repository und wechsel in das neue Repository.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/clonerepo.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.42, rely=0.7)\n\n\nclass GitPush(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if not os.getcwd().endswith(\"first_repo\"):\n output['text'] = \"Du befindest dich nicht im Verzeichnis first_repo. Gehe einen Schritt zurück und wechsel bitte!\"\n else:\n if command == \"git push -u origin master\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n elif command == \"touch main.txt\":\n if platform.system() == \"Windows\":\n try:\n subprocess.check_output(\"main.txt\", shell=True)\n output['text'] = \"Die Datei main.txt wurde angelegt.\"\n except:\n pass\n else:\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei main.txt wurde erfolgreich angelegt.\"\n elif command == \"git add main.txt\" or command == \"git add .\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei wurde voremerkt.\"\n elif command.startswith(\"git commit -m\"):\n subprocess.check_output(command, shell=True)\n output['text'] = \"Änderungen committet.\"\n task1['bg'] = fu_green\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"14. Git push\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.66)\n text.insert(\"1.0\", \"Nun haben wir das leere neue Repository auf unseren Rechner kopiert und wollen es befüllen. \"\n \"Dazu legen wir mit dem touch-Befehl eine neue Datei an und merken sie erst zur Versionierung \"\n \"vor und dann committen wir sie.\"\n \"\\n\\nSo haben wir dann eine neue Projektversion, die sich jedoch nur lokal auf unserem Rechner befindet. \"\n \"Wenn wir das Repository mitsamt Commit-History jetzt auf den GitLab-Server kopieren wollen,\"\n \"verwenden wir den Befehl 'git push'.\"\n \"\\nIn der Regel ist dies der Befehl, mit dem man das lokale Repository auf den Server kopiert. Beim allerersten Push zum Server jedoch,\"\n \"muss mann den Befehl ergänzen: 'git push -u origin master'.\"\n \"\\n\\nDer Befehl speichert mit '-u origin' die URL, von der wir zuvor das Repositroy geklont haben als Ursprungs-URL,\"\n \"die bei jedem Push als Adresse des Remote Repositorys auf dem Server verwendet wird. Mit 'master' wird\"\n \"ein sogenannter Branch gesetzt. Branches sind ein wichitges und mächtiges Werkzeug in Git, werden hier\"\n \"aber nicht weiter thematisiert, weil das unnötig komplex wäre.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.77)\n task1 = tk.Label(description_container, text=\"1. Lege die Datei main.txt an, merke sie vor und committe sie.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.84)\n task2 = tk.Label(description_container, text=\"2. Pushe eine Kopie deines lokalen Repositorys auf den GitLab-Server.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitpush.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.66, rely=0.77)\n\n\nclass RemoteChanges(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"15. Remote Changes\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.28)\n text.insert(\"1.0\", \"Nun wissen wir, wie man die Version des lokalen Repositorys auf den Server kopiert. \"\n \"Die Platform GitLab bietet die Möglichkeit, auch über die Online-Oberfläche den Inhalt des Projektes zu bearbeiten.\"\n \"Wir wollen jetzt in unserer main.txt-Datei ein paar Änderungen vornehmen.\"\n \"\\n\\nHierzu gibt es bei der Auswahl der Datei einen Button 'Edit'. Daraufhin öffnet sich ein Editor, in dem man die Datei bearbeiten kann.\"\n \" Wir schreiben einfach ein paar Sätze in die Datei. Sind alle Änderungen gemacht, \"\n \"kann man unter dem Editors noch eine Commit-Message eingeben und die Änderungen dann speichern.\"\n \"\\n\\nJede Änderung über die Web-GUI wird sofort als Commit gespeichert.\")\n\n img = tk.PhotoImage(file=\"./img/editfile.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.12, rely=0.35)\n\n img = tk.PhotoImage(file=\"./img/remotechanges.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.19, rely=0.48)\n\n\nclass GitPull(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if not os.getcwd().endswith(\"first_repo\"):\n output['text'] = \"Du befindest dich nicht im Verzeichnis first_repo. Gehe einige Schritte zurück und wechsel bitte!\"\n else:\n if command == \"git pull\":\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task1['bg'] = fu_green\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"16. Git pull\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.4)\n text.insert(\"1.0\", \"So nun haben wir online Änderungen an unserem Projekt vorgenommen und somit auch das Repository verändert.\"\n \"\\n\\nWie bekommen wir nun jedoch die aktuelle Version des Repositorys auf unseren Rechner, um da weiterzuarbeiten?\"\n \"\\n\\nHierzu gibt es den Befehl 'git pull'. Dieser kopiert das online gespeicherte Remote Repository mitsamt Commit-History und überträgt alle Änderungen \"\n \"auf das lokale Repository.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.84)\n task1 = tk.Label(description_container, text=\"1. Kopiere den Inhalt des Remote Repositorys in dein lokales Repository.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.92)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n img = tk.PhotoImage(file=\"./img/gitpull.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.25, rely=0.55)\n\n\nclass MergeConflict(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"17. Mergekonflikte\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.5)\n text.insert(\"1.0\", \"Es kann jedoch passieren, dass man lokal an seinem Projekt arbeitet und gleichzeitig jemand anderes\"\n \" eine neue Version ins Remote Repository gespeichert hat.\"\n \" Man arbeitet also lokal mit einer veraltetetn Version des Projektes.\"\n \"\\n\\nWill man die eigenen Änderungen nun in das Remote Repository pushen, kann es zu Problemen kommen.\"\n \" Git kann Änderungen zusammenführen aber manchmal kann es sein, dass Änderungen an der gleichen Datei \"\n \"gemacht werden und an diesem Punkt weiß Git nicht, wie diese Änderungen zusammengeführt werden sollen.\"\n \" Es kommt zu einem sogenannten Mergekonflikt.\"\n \"\\n\\nAlle Dateien, die Mergekonflikte enthalten, werden mit 'git status' unter dem Punkt 'Nicht zusammengeführt Pfade' gelistet.\"\n \" Der betroffene Code in der Datei wird gekennzeichnet.\"\n \" Der Code zwischen <<<<<<< HEAD und ======== ist die Version des Codes die sich im Remote Repsoitory befindet.\"\n \" Der Code zwischen ==== und >>>>>> Commit-Referenz (Buchstaben- und Zahlenkombination) enthält die Version deiner Änderungen.\"\n \"\\n\\nDa solche Mergekonflikte öfters mal auftreten können, wenn man mit mehreren Leuten im Team an einem Projekt arbeitet, \"\n \"muss man auch wissen, wie man einen solchen Mergekonflikt löst.\")\n\n\nclass CreateMergeConflict(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"first_repo\"):\n if command == \"git add .\" or command == \"git add main.txt\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die Datei main.txt wurde vorgemerkt.\"\n elif command.startswith(\"git commit -m\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task2['bg'] = fu_green\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n else:\n output['text'] = \"Du befindest dich nicht im first_repo-Verzeichnis.\\nGehe einige Schritte zurück und erledige die Aufgaben!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"18. Mergekonflikte provozieren\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.5)\n text.insert(\"1.0\",\"Nun wollen wir einen Megrkonflikt auslösen. Hierzu werden wir die Datei main.txt zum einen lokal verändern\"\n \" und dann außerdem im Remote Repository. Da wir dann zwei unterschiedlcihe Projektversionen haben, \"\n \"die in ein und derselben Datei Änderungen aufweisen, weiß Git nicht, wie die Änderungen zusammengeführt werden sollen.\"\n \"\\n\\nEine gute Praxis, um Mergekonflikte so gut es geht zu vermeiden ist es, bevor man mit der Arbeit am \"\n \"Projekt anfängt, jedes mal zu zu pullen. So hat man immer die neustes Version des projektes und sollten doch mal parallel \"\n \"Änderungen gemacht worden sein, kann Git sie entweder selbstständig zusammenführen oder man muss den Konflikt selbst lösen.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.72)\n task1 = tk.Label(description_container,\n text=\"1. Öffne, ändere und speichere die main.txt mit einem Editor deiner Wahl. (wird nicht markiert)\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.79)\n task2 = tk.Label(description_container,\n text=\"2. Versioniere diese Änderung: git add und git commit.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.86)\n task3 = tk.Label(description_container,\n text=\"3. Öffne die main.txt über die Web-GUI und bearbeite und speichere sie.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.93)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n\n\nclass ResolveMergeConflicts(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def run_command(command):\n if os.getcwd().endswith(\"first_repo\"):\n if command == \"git pull\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Mergekonflikt!\"\n task1['bg'] = fu_green\n elif command == \"git add main.txt\" or command == \"git add .\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Datei wurde vorgemerkt.\"\n elif command.startswith(\"git commit -m\"):\n response = subprocess.check_output(command, shell=True)\n output['text'] = response\n task3['bg'] = fu_green\n elif command == \"git push\":\n subprocess.check_output(command, shell=True)\n output['text'] = \"Die neuen Änderungen wurde auf das remote Repository übertragen.\"\n task4['bg'] = fu_green\n else:\n output['text'] = \"Prüfe deine Syntax!\"\n else:\n output['text'] = \"Du befindest dich im falschen Verzeichnis. \\nBitte gehe einige Schritte zurück und wechsel ins first-Repo-Verzeichnis!\"\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=0.6)\n\n title = tk.Label(description_container, text=\"19. Mergekonflikte lösen\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.1, relwidth=1, relheight=0.5)\n text.insert(\"1.0\", \"Wenn wir nun unsere lokalen Änderungen in das Remote Repository pushen wollen, beschwert sich Git, \"\n \"dass es einen Mergekonflikt gibt. Wie löst man diesen nun auf?\"\n \"\\n\\nMit 'git pull' holt man sich die Version des Remote Repositorys. Jetzt sind beide Versionen \"\n \"in den betroffenen Dateien vermekrt und entsprechend markiert.\"\n \"\\nNun gibt es drei Möglichkeiten, den Konflikt zu lösen. Entweder übernimmt man die eigene \"\n \"Version, die Version aus dem Remote Repository oder man verbindet beide Versionen \"\n \"zu einer neuen. \"\n \"\\nEntsprechend der Entscheidung löscht man die andere Version (oder eben nicht) \"\n \"und dann löscht man noch alle Konfliktmarkierungen. Nun muss man diese komplett neue Version \"\n \"der Datei wieder vormerken und comitten und dann kann man sie auch ins Remote Repostory pushen.\")\n\n task_title = tk.Label(description_container, text=\"Aufgaben\", font=\"TkFont 14 bold\", bg=\"white\", fg=font_color)\n task_title.place(x=0, rely=0.65)\n task1 = tk.Label(description_container,\n text=\"1. Pulle vom Remote Repository. (wird ggf. nicht grün markiert)\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task1.place(x=0, rely=0.72)\n task2 = tk.Label(description_container,\n text=\"2. Öffne einen Editor deiner Wahl und lösen den Mergekonflikt. (wird nicht grün markiert)\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task2.place(x=0, rely=0.79)\n task3 = tk.Label(description_container,\n text=\"3. Merke die Änderung zur Versionierung vor und committe sie.\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task3.place(x=0, rely=0.86)\n task4 = tk.Label(description_container,\n text=\"4. Pushe den gelösten Mergekonflikt ins Remote Repository\",\n bg=\"white\", font=\"TkFont 12 bold\", fg=font_color, bd=5)\n task4.place(x=0, rely=0.93)\n\n terminal_container = tk.Frame(self, bg=\"#464e51\")\n terminal_container.place(relwidth=1, relheight=0.4, rely=0.6)\n\n command_line = tk.Entry(terminal_container, bg=\"#464e51\", fg=\"#ccc\", font=\"TkFont 10 bold\")\n command_line.place(relwidth=0.8, relheight=0.15)\n run_button = tk.Button(terminal_container, text=\"Run\", command=lambda: run_command(command_line.get()),\n bg=fu_green, fg=\"white\")\n run_button.place(relwidth=0.2, relheight=0.15, relx=0.8)\n output = tk.Label(terminal_container, bg=\"#464e51\", bd=5, height=10, width=20, fg=\"#ccc\", justify=\"left\",\n anchor=\"nw\", font=\"TkFont 10 bold\")\n output.place(relheight=0.85, relwidth=1, rely=0.15)\n\n\nclass Summary(Page):\n def __init__(self, *args, **kwargs):\n Page.__init__(self, *args, **kwargs)\n\n def callback(filename):\n webbrowser.open('file://' + os.path.realpath(filename))\n\n description_container = tk.Frame(self, bg=\"#fff\", bd=10)\n description_container.place(relwidth=1, relheight=1)\n\n title = tk.Label(description_container, text=\"20. Zusammenfassung\", bg=\"white\", font=\"TkHeaderFont 24 bold\",\n fg=font_color)\n title.place(x=0, y=0)\n text = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n text.place(x=0, rely=0.07, relwidth=1, relheight=0.3)\n text.insert(\"1.0\", \"Großartig, du hast den zweiten Teil des Tutorials geschafft!\"\n \"\\n\\nDu solltest jetzt eine ungefähre Vorstellung von Git und seinem Nutzen haben. Außerdem solltest du\"\n \" die grundlegenden Strukturen und Befehle kennen, die man häufig verwendet. In der \"\n \"Git-Dokumentation findest du noch ausführlichere Beschreibungen der Software und ihres Gebrauchs: https://git-scm.com/\"\n \"\\n\\nAuch im zweiten Teil des Tutorials müssen Aufgaben bearbeitet und nachgewiesen werden:\"\n \"\\n1. Schicke mir den Link deines Remote Repositorys auf dem GitLab-Server\"\n \"\\n2. Bearbeite das Quiz und mache einen Screenshot von deinem Ergebnis\"\n \"\\n(Achtung beim klicken des Buttons öffen sich zwei Fenster. Das Fenster mit Fehlermeldung kann irgnoriert werden.\"\n \"\\n Den Punkt für das Quiz gibt es nur bei mindestens 10 richtigen Antworten.\")\n\n link1 = tk.Button(description_container, text=\"--> Quiz <--\", fg=\"white\", bg=fu_green, font=\"TkFont 12 bold\",\n cursor=\"hand2\")\n link1.place(relx=0.25, rely=0.4, relwidth=0.5)\n link1.bind(\"\", lambda e: callback(\"../questionaire/second_questionaire.html\") or callback(\"./questionaire/second_questionaire.html\"))\n\n img = tk.PhotoImage(file=\"./img/emergency.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0.63, rely=0.5)\n\n additional = tk.Text(description_container, font=\"TkFont 12 bold\", bg=\"white\", fg=font_color, padx=5, pady=5,\n wrap=\"word\")\n additional.place(relx=0.65, rely=0.84, relwidth=0.34, relheight=0.15)\n additional.insert(\"1.0\", \"Link und Sceenshot bis zum 06.05.2020 12:00 Uhr im Whiteboard hochladen. Fragen und Anmerkungen bitte per Mail an janaulrich@zedat.fu-berlin.de.\")\n\n img = tk.PhotoImage(file=\"./img/generalstructure2.png\")\n panel = tk.Label(description_container, image=img)\n panel.image = img\n panel.place(relx=0, rely=0.5)\n\n","repo_name":"BiancaWentzel/git-tutorial","sub_path":"second_part.py","file_name":"second_part.py","file_ext":"py","file_size_in_byte":77527,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17300571311","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # ** DALL E PROMPT GENERATOR**\n# \n# \n\n# # **Import API**\n\n\n# # Streamlit Web App\n\n# In[2]:\n\n\nimport streamlit as st\n\n\n# In[ ]:\n\n\nst.set_page_config(page_title=\"Nova SBE - DALL E Team\", page_icon=\":tada:\", layout=\"wide\")\n\nif \"page\" not in st.session_state:\n st.session_state[\"page\"] = 1\n\nempty = st.empty()\n\nif st.button(\"Next\"):\n st.session_state[\"page\"] += 1\n\nif st.button(\"Previous\"):\n st.session_state[\"page\"] -= 1\n\nif st.session_state[\"page\"] == 1:\n empty.write(\"Page 1\")\nelif st.session_state[\"page\"] == 2:\n empty.write(\"Page 2\")\nelif st.session_state[\"page\"] == 3:\n empty.write(\"Page 3\")\nelse:\n empty.write(\">3 page\")\n\n# Add a title and intro text\nst.title('DALL-E 2 WEB APP')\nst.text('This is a web app to allow Nova students to easily play around with AI image generation!')\nst.text('Simply answer our questions and we will help you to create your first AI image')\n\n\n# In[ ]:\n\nif st.session_state[\"page\"] == 1:\n theme_list = ['Select Theme', \"Oil painting\", \"Unreal Engine\", \"Photorealistic\"]\n theme_result = st.selectbox(\"Select your image theme:\", theme_list)\n st.write(f'You have picked {theme_result}')\n\n\n# In[ ]:\n\nif st.session_state[\"page\"] == 2:\n subject_list = ['Select Subject', \"Panda\", \"Astronaut\", \"Racing Car\"]\n subject_result = st.selectbox(\"Select your image subject:\", subject_list)\n st.write(f'You have picked {subject_result}')\n\n# In[ ]:\n\n\n#action_list = ['Select Action', \"Eat\", \"Sleep\", \"Compete\", \"Fight\"]\n\n#action_result = st.selectbox(\"Select your image action:\", action_list)\n\n#st.write(f'You have picked {action_result}')\n\n\n","repo_name":"Jannik098/novasbe-streamlit","sub_path":"dalle.py","file_name":"dalle.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73311316856","text":"import numpy as np\nimport os, argparse, math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm \n\nfrom linearizer import Interpolator\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument('-fn', '--file_name', required=True, help=' file name without extension')\narg_parser.add_argument('-roi', '--roi', required=True, help=' version of ROI')\n\nargs = vars(arg_parser.parse_args())\nfile_name = args['file_name']\nversion = int(args['roi'])\n\nos.chdir(\"..\")\noutput_path = os.path.abspath(os.curdir) + '/images/'\ndirectory_1 = output_path + file_name + '/ROI_' + str(version)\ndirectory_2 = directory_1 + '/linear'\ndirectory_3 = directory_2 + '/iterations'\ndirectory_4 = directory_2 + '/updatedMask'\n\nimage_path = directory_1 + '/masked_image.npy'\nmask_path = directory_1 +'/mask.npy'\ncoord_path = directory_1 +'/contour_coord.npy'\n\nfor dir in [directory_3, directory_4]:\n if not os.path.exists(dir):\n os.makedirs(dir)\ntry:\n originalImage = np.load(image_path)\n mask = np.load(mask_path)\n [xcoord, ycoord] = np.load(coord_path)\nexcept:\n print('Error: file not available')\n exit(1)\n\ncx, cy = int(np.mean(xcoord)), int(np.mean(ycoord))\n\nmodel = Interpolator(originalImage, mask, None)\nmodel.linear_interpolator(file_name, directory_2)\nresult = model.result\n\nfloat_result = (result*(np.nanmax(originalImage)-np.nanmin(originalImage)))/(2**16 - 1)+np.nanmin(originalImage)\nmean, std = np.nanmean(float_result), np.nanstd(float_result)\nfor x in range(originalImage.shape[0]):\n for y in range(originalImage.shape[1]):\n if math.isnan(originalImage[x, y]):\n if mask[x, y] == 0:\n float_result[x, y] = 'nan'\n\nf = plt.figure()\nax = f.add_subplot(111)\nax.axis('off')\nsh = 100\nif version == 0:\n co = np.load(coord_path)\n xc , yc = co[0], co[1]\n [x, y] = [[0,float_result.shape[0]], [float_result.shape[1], 0]]\n xm, ym = np.mean(xc), np.mean(yc)\n center_x = int(np.mean(xc-xm+float(x[0]+x[1])/2))\n center_y = int(np.mean(yc-ym+float(y[0]+y[1])/2))\n\n zoom_float = float_result[center_x-sh:center_x+sh+1, center_y-sh:center_y+sh+1]\n\nelse:\n zoom_float = float_result[cy-sh:cy+sh, cx-sh:cx+sh]\n\n mu, std = np.nanmedian(zoom_float), np.nanstd(zoom_float)\n for x in range(zoom_float.shape[0]):\n for y in range(zoom_float.shape[1]):\n if np.absolute(zoom_float[x][y]-mu) >= 3*std:\n zoom_float[x][y] = np.nan\n\nplt.imshow(zoom_float, cmap=cm.seismic)\nplt.tight_layout()\nplt.savefig(directory_2 + '/linear_interpol_image.png')\nplt.close(f)\nnp.save(directory_2 + '/linear_interpol_image.npy', float_result)\n","repo_name":"sakokassounian/background","sub_path":"src/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42554423213","text":"from classes import Player, PlayerStat\nfrom match.classes import Match\nimport modules.config as cfg\nfrom display import AllStrings as disp, ContextWrapper\nfrom logging import getLogger\nimport modules.stat_processor as stat_processor\nimport modules.spam_checker as spam_checker\n\nlog = getLogger(\"pog_bot\")\n\n\nasync def on_dm(message):\n # Check if too many requests from this user:\n if await spam_checker.is_spam(message.author, message.channel):\n return\n if message.content[:1] == \"=\":\n message.content = message.content[1:]\n if message.content.lower().startswith((\"stat\", \"stats\", \"s\")):\n await on_stats(message.author)\n elif message.content.lower().startswith((\"modmail \", \"dm \", \"staff \")):\n i = message.content.index(' ')\n message.content = message.content[i+1:]\n player = Player.get(message.author.id)\n await disp.BOT_DM.send(ContextWrapper.channel(cfg.channels[\"staff\"]), player=player, msg=message)\n await disp.BOT_DM_RECEIVED.send(message.author)\n elif message.content.lower().startswith((\"help\", \"h\")):\n await disp.HELP.send(message.author, is_dm=True)\n spam_checker.unlock(message.author.id)\n\n\nasync def on_stats(user):\n player = Player.get(user.id)\n if not player:\n await disp.NO_RULE.send(user, \"stats\", cfg.channels[\"rules\"])\n return\n log.info(f\"Stats request from player id: [{player.id}], name: [{player.name}]\")\n stat_player = await PlayerStat.get_from_database(player.id, player.name)\n recent_stats = await stat_processor.get_new_stats(Match, stat_player)\n await disp.DISPLAY_STATS.send(user, stats=stat_player, recent_stats=recent_stats)\n","repo_name":"yakMM/POG-bot","sub_path":"bot/modules/dm_handler.py","file_name":"dm_handler.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"10554852930","text":"from time import sleep\n\nclass Servr:\n\n def __init__( self ) :\n\n self.miniServer = { 'msg': { 1 : [ 'chat online' , [ 'Admin' , None ] , 'send' ] } }\n self.threadList = { 'Admin' : 0 }\n\n self.msgCount = 1\n self.lista = []\n self.wait_sec = 0.2\n\n def adduser ( self, user ) :\n\n self.miniServer [ user ] = 0\n self.sendy ( 'Admin' , 'added ' + user , None )\n\n # ---------------------------------------------------------------------------\n # ---------------------------------------------------------------------------\n\n def wait ( self , user , type_of_request , is_thread = False ) :\n\n print ( type_of_request , 'wait received for:' , user , 'as thread:' , is_thread )\n\n if not is_thread :\n\n self.lista += [ [ user , type_of_request ] ]\n\n while not ( sum ( [ self.threadList [ key ] for key in self.threadList ] ) == 0 ) : sleep ( self.wait_sec )\n while not ( len ( self.lista ) and self.lista [ 0 ] [ 0 ] == user ) : sleep ( self.wait_sec )\n\n else: self.threadList [ user ] = 1\n\n print ( type_of_request , 'wait done for:' , user , 'as thread:' , is_thread )\n\n def close ( self , user , type_of_request , is_thread = False ) :\n\n print ( type_of_request , 'close received for:' , user , 'as thread:' , is_thread )\n\n if not is_thread :\n\n self.lista = self.lista [ 1 : ]\n\n else: self.threadList [ user ] = 0\n\n print ( type_of_request , 'close done for:' , user , 'as thread:' , is_thread )\n\n # ---------------------------------------------------------------------------\n # ---------------------------------------------------------------------------\n\n def sendy( self , iam , sms , you ):\n\n type_of_action = 'send'\n\n print ( type_of_action , 'received for:' , iam )\n\n if sms and len( sms ) > 1:\n\n self.wait ( iam , type_of_action , False )\n\n if sms[ -1 ] == '\\n': sms = sms[ :-1 ]\n msg = [ sms +' from: '+ iam +' to: '+ str( you ), [ iam, you ] , type_of_action ]\n\n self.miniServer[ 'msg' ][ self.msgCount+1 ] = msg\n self.msgCount += 1\n\n print ( type_of_action , 'done for:' , iam )\n\n self.close ( iam , type_of_action , False )\n\n return 'None'\n\n def Itake ( self , iam , list ) :\n\n type_of_action = 'take'\n\n print ( type_of_action , 'received for:' , iam )\n\n list_values = [ l [ 1 ] for l in self.lista ]\n is_thread = ( len ( list_values ) == 0 ) or ( not 'send' in list_values )\n\n if self.miniServer[ iam ] < self.msgCount:\n\n self.wait ( iam , type_of_action , is_thread )\n\n for key in reversed( range( self.miniServer[ iam ], self.msgCount ) ):\n msg = self.miniServer[ 'msg' ][ key+1 ]\n\n if ( msg and msg [ 1 ] and msg [ 2 ] ) and \\\n ( None in msg [ 1 ] or iam in msg [ 1 ] ) and \\\n ( msg [ 2 ] == 'send' ) : list += [ msg [ 0 ] ]\n\n self.miniServer[ iam ] = self.msgCount\n\n print ( type_of_action , 'done for:' , iam )\n \n self.close ( iam , type_of_action , is_thread )\n\n return '\\n'.join( list ).replace( '.', '\\n' ) if ''.join( list ) else ''\n\n# ---------------------------------------------------------------------------\n# ---------------------------------------------------------------------------\n\ndef comandi ( myServr , i , user , sms , user2 ) :\n\n if i and myServr:\n\n if i == '0' : return myServr.Itake ( user, [] )\n elif i == '1' : return myServr.sendy ( user, sms, user2 )\n elif i == '2' : return myServr.adduser ( user )\n\n return 'None'\n","repo_name":"AmalLight/Avalonx_SoSupersonic_Vision","sub_path":"AvalonX_Chat/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73293136057","text":"import re\n\n__copying__ = \"\"\"\nThis Source Code Form is subject to the terms of the Mozilla Public\nLicense, v. 2.0. If a copy of the MPL was not distributed with this\nfile, You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"\n\n#Ought to have at least one of these three\ntry:\n from json import dumps as _strquote\nexcept:\n try:\n from simplejson import dumps as _strquote\n except:\n _strquote=repr\n\nfrom mdplay import nodes, mdputil\n\ndef html_out_body(nodel,flags=()):\n in_list=()\n r=\"\"\n for node in nodel:\n _r=_html_out_body(node,in_list,flags=flags)\n if len(_r)==2 and type(_r)==type(()):\n _r,in_list=_r\n r+=_r\n while len(in_list)>0:\n if in_list[0]==\"ul\":\n r+=\"\"\n else:\n r+=\"\"\n in_list=in_list[1:]\n return r.strip(\"\\r\\n\")\n\n#import htmlentitydefs\nfrom mdplay import htmlentitydefs_latest as htmlentitydefs\ndef _escape(text,html5=0):\n text=text.decode(\"utf-8\").replace(\"&\",\"&\") #must be done first, else others get broken.\n if not html5:\n keys=list(htmlentitydefs.name2codepoint.keys())\n else:\n keys=list(htmlentitydefs.html5.keys())\n for name in keys:\n if name!=\"amp\":\n if not html5:\n codept=chr(htmlentitydefs.name2codepoint[name])\n else:\n codept=htmlentitydefs.html5[name]\n if (ord(codept)<0xff) and (name not in htmlentitydefs.name2codepoint):\n continue #or face insanity.\n text=text.replace(codept,(\"&\"+name.rstrip(\";\")+\";\").decode(\"ascii\"))\n return text.encode(\"utf-8\")\n\ndef _html_out_body(node,in_list,flags):\n html5=(\"html5\" in flags)\n if in_list and (in_list[0]==\"ul\"):\n if (not isinstance(node,nodes.LiNode)) or ((node.depth+1)\"+_r,in_list[1:]\n elif isinstance(node,nodes.OlliNode) and ((node.depth+1)==len(in_list)):\n in_list=in_list[1:]\n _r=_html_out_body(node,in_list,flags=flags)\n if len(_r)==2 and type(_r)==type(()):\n _r,in_list=_r\n return \"\"+_r,in_list\n if in_list and (in_list[0]==\"ol\"):\n if (not isinstance(node,nodes.LiNode)) or ((node.depth+1)\"+_r,in_list[1:]\n elif isinstance(node,nodes.UlliNode) and ((node.depth+1)==len(in_list)):\n in_list=in_list[1:]\n _r=_html_out_body(node,in_list,flags=flags)\n if len(_r)==2 and type(_r)==type(()):\n _r,in_list=_r\n return \"\"+_r,in_list\n if not isinstance(node,nodes.Node): #i.e. is a string\n return _escape(node,html5).replace(\"\\x20\\x20\",\"  \")\n elif isinstance(node,nodes.TitleNode):\n if node.depth>6: node.depth=6\n return (\"\"%node.depth)+html_out_body(node.content,flags=flags)+(\"\"%node.depth)\n elif isinstance(node,nodes.ParagraphNode):\n return \"

    \"+html_out_body(node.content,flags=flags)+\"

    \"\n elif isinstance(node,nodes.BlockQuoteNode):\n return \"
    \"+html_out_body(node.content,flags=flags)+\"
    \\n\"\n elif isinstance(node,nodes.SpoilerNode):\n if \"ipsspoilers\" in flags:\n # TODO: Does this actually set the title or does IPBoard override it?\n return '
    '+(\"Spoiler\" if not node.label else html_out_body(node.label,flags=flags))+'
    '+html_out_body(node.content,flags=flags)+\"
    \"\n else:\n return \"

    %s

    \"\n elif isinstance(node,nodes.CodeBlockNode):\n return \"
    \"+\"\".join(node.content)+\"
    \"\n elif isinstance(node,nodes.CodeSpanNode):\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.UlliNode):\n r=\"\"\n if (node.depth+1)>len(in_list):\n while (node.depth+1)>len(in_list):\n r+=\"
    • \"\n in_list=(\"ul\",)+in_list\n else:\n r+=\"
    • \"\n r+=html_out_body(node.content,flags=flags)\n return r,in_list\n elif isinstance(node,nodes.OlliNode):\n r=\"\"\n def gen_liopen(bullet, flags):\n if (\"autonumberonly\" not in flags):\n return \"
    • \"%_strquote(str(bullet))\n else:\n return \"
    • \"\n if (node.depth+1)>len(in_list):\n while (node.depth+1)>len(in_list):\n r+=\"
        \"+gen_liopen(node.bullet, flags)\n in_list=(\"ol\",)+in_list\n else:\n r+=\"\"+gen_liopen(node.bullet, flags)\n r+=html_out_body(node.content,flags=flags)\n return r,in_list\n elif isinstance(node,nodes.BoldNode):\n if node.emphatic:\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n else:\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.UnderlineNode):\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.ItalicNode):\n if node.emphatic:\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n else:\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.SuperNode):\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.SubscrNode):\n return \"\"+html_out_body(node.content,flags=flags)+\"\"\n elif isinstance(node,nodes.RubiNode):\n label=html_out_body(node.label)\n content=node.content\n return \"\"+content+\" (\"+label+\") \" # lang='jp'\n elif isinstance(node,nodes.HrefNode):\n label=html_out_body(node.label)\n ht=node.hreftype\n content=node.content\n if ht==\"url\":\n if (\"showtropes\" in flags) and re.match(\"https?://(www\\.)?tvtropes.org\",content):\n return \"\"+label+(\"(TVTropes)\"%_strquote(content))\n return (\"\"%_strquote(content))+label+\"\"\n else: #Including img\n label=label.strip()\n attar=\"\"\n if \"//twemoji.maxcdn.com\" in content:\n # Acceptable attribution per https://github.com/twitter/twemoji/blob/b33c30e78db45be787410567ad6f4c7b56c137a0/README.md#attribution-requirements\n attar=\"\"\n if label:\n return attar+\"<%s alt=%s src=%s />\"%(ht,_strquote(_escape(label,html5)),_strquote(content))\n return attar+\"<%s src=%s />\"%(ht,_strquote(content))\n elif isinstance(node,nodes.NewlineNode):\n return \"
        \"\n elif isinstance(node,nodes.RuleNode):\n return \"
        \"\n elif isinstance(node,nodes.TableNode):\n r=''\n for row in node.table_head:\n r+=\"\"\n for colno,cell in enumerate(row):\n if node.aligns and (len(node.aligns)>colno) and node.aligns[colno]:\n r+='\"\n r+=\"\"\n r+=\"\"\n for row in node.table_body:\n r+=\"\"\n for colno,cell in enumerate(row):\n if node.aligns and (len(node.aligns)>colno) and node.aligns[colno]:\n r+='\"\n r+=\"\"\n return r+\"
        '\n else:\n r+=\"\"\n r+=html_out_body(list(cell))+\"
        '\n else:\n r+=\"\"\n r+=html_out_body(list(cell))+\"
        \"\n elif isinstance(node,nodes.EmptyInterrupterNode):\n return \"\"\n elif isinstance(node,nodes.EmojiNode):\n if (\"notwemoji\" not in flags) and node.emphatic:\n hexcode = node.label[2]\n altcode = node.content\n # Acceptable attribution per https://github.com/twitter/twemoji/blob/b33c30e78db45be787410567ad6f4c7b56c137a0/README.md#attribution-requirements\n return \"%s\"%(altcode,hexcode)\n return _escape(node.content,html5)\n elif isinstance(node,nodes.DirectiveNode) and node.type.startswith(\"html-\") and (\"directive\" in flags):\n r = \"<\"+node.type[len(\"html-\"):]\n for i,j in node.opts:\n r += \" \"+i+\"=\"+_strquote(j)\n for i in node.args:\n r += \" \"+i\n return r + \">\" + html_out_body(node.content) + \"\"\n else:\n return \"ERROR\"+repr(node)\n\ndef html_out(nodes,titl=\"\",flags=()):\n if \"fragment\" in flags:\n return html_out_body(nodes,flags)\n html5=(\"html5\" in flags)\n if not html5:\n return '\\n'+_escape(titl,html5)+''+html_out_body(nodes,flags)+\"\"\n else:\n return '\\n'+_escape(titl,html5)+''+html_out_body(nodes,flags)+\"\"\n\n__mdplay_renderer__=\"html_out\"\n__mdplay_snippet_renderer__=\"html_out_body\"\n","repo_name":"harjitmoe/mdplay","sub_path":"mdplay/writers/attic/out_html_nondom.py","file_name":"out_html_nondom.py","file_ext":"py","file_size_in_byte":11048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"811081139","text":"from cvxpy.utilities.performance_utils import compute_once, lazyprop\nimport networkx as nx\nimport src.geom.r2 as r2\nfrom .mesh2d import *\nfrom collections import OrderedDict as odcit\nfrom src.cvopt import utils as u\nimport matplotlib.pyplot as plt\n\n\nclass Mapping(object):\n @lazyprop\n def faces(self):\n \"\"\" list of [ transformed_face_index ... ] elements of self.space \"\"\"\n return\n\n @lazyprop\n def edges(self):\n \"\"\" list of [ transformed_edge_index ... ] elements of self.space \"\"\"\n return\n\n @lazyprop\n def half_edges(self):\n \"\"\" list of [ transformed_half_edge_index ... ] elements of self.space \"\"\"\n return\n\n @lazyprop\n def vertices(self):\n \"\"\" [ transformed_vert_index ... ]\"\"\"\n return\n\n\nclass MeshMapping(Mapping):\n def __init__(self, space: Mesh2d, tile: Mesh2d, xform, tgt=None, ttype=None):\n \"\"\"\n map tile mesh onto space mesh with affine transformation\n stores a transformed copy of the tile, the space, and the original tile\n\n attributes:\n space: Mesh2d - the base space\n base: Mesh2d - the mesh to be transformed\n tile: Mesh2d - the mesh after transform is applied\n\n # todo -> transformed should return the state of a subset of 'space'\n # todo -> including all metadata\n\n \"\"\"\n self.tgt = tgt\n self.transform_type = ttype\n self._data = odcit()\n self.space = space\n self.transform = xform\n self.base = tile\n # -----------------------------------------------\n base_verts = tile.vertices.geom\n hverts = np.dot(self.transform, r2.to_homo(base_verts, dtype=int).T).T # .T\n new_vert = r2.from_homo(hverts, dtype=int, round=0)\n # all that SHOULD need to happen is the vertices get relabeled\n #\n self._data = [(base_verts[i], new_vert[i]) for i in range(len(base_verts))]\n self.tile = tile.relabel(self._data)\n\n @lazyprop\n def _data_inv(self):\n \"\"\" {new_vert_geom : base_vert_geom}\"\"\"\n return [(v, k) for k, v in self._data]\n\n def _mapping(self, fn, m2t=None, geom=None, mgeom=None, tgeom=None):\n \"\"\"\n\n opts:\n default: tile_index -> index_in_space\n m2t: index_in_space -> tile_index\n geom: return geometry of both indixes instead of elements\n mgeom: instead of index of space element, use geometry\n tgeom: instead of index of transformed element, use geometry\n\n examples:\n default (t2m):\n {0:3, ... }\n geom:\n {((0, 0), (0, 1)) : ((1, 3), (2, 3)) ... }\n\n \"\"\"\n geom_base = fn.fget(self.base).geom\n geom_main = fn.fget(self.space).geom\n geom_transformed = fn.fget(self.transformed).geom\n mapping = {}\n for trns_ix, trns_geom in enumerate(geom_transformed):\n if trns_geom in geom_main:\n main_ix = geom_main.index(trns_geom)\n if geom:\n # returning base tile geom -> geom after transform\n el_trns, el_main = geom_base[trns_ix], trns_geom\n elif mgeom:\n el_trns, el_main = trns_ix, trns_geom\n elif tgeom:\n el_trns, el_main = geom_base[trns_ix], main_ix\n else:\n el_trns, el_main = trns_ix, main_ix\n\n k, v = (el_main, el_trns) if m2t is True else (el_trns, el_main)\n mapping[k] = v\n else:\n return None\n return mapping\n\n def vertex_map(self, **kwargs) -> dict:\n \"\"\" dict { base_edge_index : transformed_edge_index ... } \"\"\"\n return self._mapping(Mesh2d.vertices, **kwargs)\n\n def edge_map(self, **kwargs) -> dict:\n \"\"\" { base_edge_index : transformed_edge_index ... } \"\"\"\n return self._mapping(Mesh2d.edges, **kwargs)\n\n def face_map(self, **kwargs) -> dict:\n \"\"\" { base_face_index : transformed_face_index ... } \"\"\"\n return self._mapping(Mesh2d.faces, **kwargs)\n\n def half_edge_map(self, **kwargs) -> dict:\n return self._mapping(Mesh2d.half_edges, **kwargs)\n\n @lazyprop\n def faces(self):\n \"\"\" list of [ transformed_face_index ... ] elements of self.space \"\"\"\n return list(map(lambda x: x[1], sorted([(k, v) for k, v in self.face_map().items()])))\n\n @lazyprop\n def edges(self):\n \"\"\" list of [ transformed_edge_index ... ] elements of self.space \"\"\"\n return list(map(lambda x: x[1], sorted([(k, v) for k, v in self.edge_map().items()])))\n\n @lazyprop\n def half_edges(self):\n \"\"\" list of [ transformed_half_edge_index ... ] elements of self.space \"\"\"\n return list(map(lambda x: x[1], sorted([(k, v) for k, v in self.half_edges().items()])))\n\n @lazyprop\n def vertices(self):\n \"\"\" [ transformed_vert_index ... ]\"\"\"\n return list(map(lambda x: x[1], sorted([(k, v) for k, v in self.vertex_map().items()])))\n\n def show(self, save=None, size=7, he=False):\n \"\"\" show the initial and transformed\n opts:\n he: (boolean) if True, then label half edges instead of edges\n \"\"\"\n if self.is_valid() is False:\n raise Exception('cannot display invalid maapping')\n if isinstance(size, int):\n size = (size, size)\n fig, ax = plt.subplots(1, figsize=size)\n ax = u.draw_edges(self.space, ax, color='gray')\n\n if he:\n ax = u.draw_half_edges(self.base, ax, label=True, color='black')\n ax = u.draw_half_edges(self.transformed, ax, label=True, color='red')\n else:\n ax = u.draw_edges(self.base, ax, label=True, color='black')\n ax = u.draw_edges(self.transformed, ax, label=True, color='red')\n\n u.finalize(ax, save=save)\n print(self.__repr__())\n\n def match_col(self, he=None):\n \"\"\"\n return a map of edge colors given action in mapping\n dict { edge_index, signed int }\n \"\"\"\n template_colors = {}\n colors = self.base._half_edge_meta\n for local_edge, he_index in self.half_edge_map().items():\n # convert half edges to edges\n if he:\n edge_index = he_index\n else:\n edge_index = self.space.half_edges.to_edges_index[he_index]\n if local_edge in colors and colors[local_edge].get('color', None):\n template_colors[edge_index] = colors[local_edge]['color']\n # print(template_colors)\n return template_colors\n\n def describe(self, v=None, e=None, f=None):\n \"\"\" printing utility \"\"\"\n def _desc(s, l, kvs):\n for k, v in kvs.items():\n s += '\\n{}.{} -> {}'.format(l, k, v)\n return s\n\n st = 'transform {} {} to {}\\n'.format(\n self.transform_type, self.tgt, self.tile.anchor(half_edge=True)\n )\n\n if v:\n st = _desc(st, 'v', self.vertex_map())\n if e:\n st = _desc(st, 'v', self.edge_map())\n if f:\n st = _desc(st, 'v', self.face_map())\n return st\n\n def __repr__(self):\n return self.describe(True, True, True)\n\n @compute_once\n def is_valid(self):\n m1 = self.vertex_map()\n if m1 is None:\n return False\n m2 = self.face_map()\n m3 = self.edge_map()\n m4 = self.half_edge_map()\n return all([x is not None for x in [m1, m2, m3, m4]])\n\n @property\n def boundary(self):\n return self.tile.boundary\n\n @property\n def transformed(self):\n return self.tile\n\n\nclass Facemap(Mapping):\n def __init__(self, index):\n self.value = index\n\n @property\n def faces(self):\n return self.value\n\n","repo_name":"psavine42/juststuff","sub_path":"src/cvopt/mesh/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":7803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3468486495","text":"from collections import defaultdict\nfrom typing import Collection, Dict, Iterable, Optional, Set, cast\nfrom spinn_utilities.ordered_set import OrderedSet\nfrom spinn_machine import CoreSubsets, FrozenCoreSubsets\nfrom spinnman.exceptions import SpinnmanInvalidParameterException\nfrom .enums import ExecutableType\n\n\nclass ExecutableTargets(object):\n \"\"\"\n Encapsulate the binaries and cores on which to execute them.\n \"\"\"\n __slots__ = [\n \"_all_core_subsets\",\n \"_targets\",\n \"_total_processors\",\n \"_binary_type_map\"]\n\n __EMPTY_SUBSET = FrozenCoreSubsets()\n\n def __init__(self) -> None:\n self._targets: Dict[str, CoreSubsets] = dict()\n self._total_processors = 0\n self._all_core_subsets = CoreSubsets()\n self._binary_type_map: Dict[\n ExecutableType, Set[str]] = defaultdict(\n # Need to pretend!\n lambda: cast(Set, OrderedSet()))\n\n def add_subsets(\n self, binary: str, subsets: CoreSubsets,\n executable_type: Optional[ExecutableType] = None):\n \"\"\"\n Add core subsets to a binary.\n\n :param str binary: the path to the binary needed to be executed\n :param ~spinn_machine.CoreSubsets subsets:\n the subset of cores that the binary needs to be loaded on\n :param ~spinnman.model.enum.ExecutableType executable_type:\n The type of this executable.\n ``None`` means don't record it.\n \"\"\"\n try:\n for subset in subsets.core_subsets:\n for p in subset.processor_ids:\n self.add_processor(binary, subset.x, subset.y, p)\n except AttributeError:\n if subsets is not None:\n raise\n if executable_type is not None:\n self._binary_type_map[executable_type].add(binary)\n\n def add_processor(\n self, binary: str, chip_x: int, chip_y: int, chip_p: int,\n executable_type: Optional[ExecutableType] = None):\n \"\"\"\n Add a processor to the executable targets\n\n :param str binary: the binary path for executable\n :param int chip_x:\n the coordinate on the machine in terms of x for the chip\n :param int chip_y:\n the coordinate on the machine in terms of y for the chip\n :param int chip_p: the processor ID to place this executable on\n :param ~spinnman.model.enum.ExecutableType executable_type:\n the executable type for locating n cores of\n \"\"\"\n if self.known(binary, chip_x, chip_y, chip_p):\n return\n if binary not in self._targets:\n self._targets[binary] = CoreSubsets()\n if executable_type is not None:\n self._binary_type_map[executable_type].add(binary)\n self._targets[binary].add_processor(chip_x, chip_y, chip_p)\n self._all_core_subsets.add_processor(chip_x, chip_y, chip_p)\n self._total_processors += 1\n\n def get_n_cores_for_executable_type(\n self, executable_type: ExecutableType) -> int:\n \"\"\"\n Get the number of cores that the executable type is using.\n\n :param ~spinnman.model.enum.ExecutableType executable_type:\n :return: the number of cores using this executable type\n :rtype: int\n \"\"\"\n return sum(\n len(self.get_cores_for_binary(aplx))\n for aplx in self._binary_type_map[executable_type])\n\n def get_binaries_of_executable_type(\n self, executable_type: ExecutableType) -> Iterable[str]:\n \"\"\"\n Get the binaries of a given a executable type.\n\n :param ~spinnman.model.enum.ExecutableType executable_type:\n the executable type enum value\n :return: iterable of binaries with that executable type\n :rtype: iterable(str)\n \"\"\"\n return self._binary_type_map[executable_type]\n\n def executable_types_in_binary_set(self) -> Iterable[ExecutableType]:\n \"\"\"\n Get the executable types in the set of binaries.\n\n :return: iterable of the executable types in this binary set.\n :rtype:\n iterable(~spinnman.model.enum.ExecutableType)\n \"\"\"\n return self._binary_type_map.keys()\n\n def get_cores_for_binary(self, binary: str) -> CoreSubsets:\n \"\"\"\n Get the cores that a binary is to run on.\n\n :param str binary: The binary to find the cores for\n :rtype: ~spinn_machine.CoreSubsets\n \"\"\"\n return self._targets.get(binary, self.__EMPTY_SUBSET)\n\n @property\n def binaries(self) -> Collection[str]:\n \"\"\"\n The binaries of the executables.\n\n :rtype: iterable(str)\n \"\"\"\n return self._targets.keys()\n\n @property\n def total_processors(self) -> int:\n \"\"\"\n The total number of cores to be loaded.\n\n :rtype: int\n \"\"\"\n return self._total_processors\n\n @property\n def all_core_subsets(self) -> CoreSubsets:\n \"\"\"\n All the core subsets for all the binaries.\n\n :rtype: ~spinn_machine.CoreSubsets\n \"\"\"\n return self._all_core_subsets\n\n def known(self, binary, chip_x, chip_y, chip_p) -> bool:\n \"\"\"\n :param str binary:\n :param int chip_x:\n :param int chip_y:\n :param int chip_p:\n :rtype: bool\n \"\"\"\n if not self._all_core_subsets.is_core(chip_x, chip_y, chip_p):\n return False\n # OK if and only if the chip is in this binary already\n if binary in self._targets:\n if self._targets[binary].is_core(chip_x, chip_y, chip_p):\n return True\n\n raise SpinnmanInvalidParameterException(\n f\"x:{chip_x} y:{chip_y} p:{chip_p}\", binary,\n \"Already associated with a different binary\")\n","repo_name":"SpiNNakerManchester/SpiNNMan","sub_path":"spinnman/model/executable_targets.py","file_name":"executable_targets.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"21723259411","text":"#!/usr/bin/env python\n# author: lwang107@ucsc.edu\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport math\n\np = argparse.ArgumentParser(\n usage='plot statistic CSV output from perf stat -r in bar chart',\n description='''\nperf stat -e cpu-clock -x, -o file ...\nbar-plot.py -n 100 file (or stdin)\ndelimeter must be ,\nthis is for data that is not normalized.''')\np.add_argument('file', help='CSV file to plot (or stdin)', nargs='?')\np.add_argument('--num', '-n', help='Number of output columns. 20 by default', \n nargs='?')\np.add_argument('--output', '-o', help='Output to file. Otherwise show.', \n nargs='?')\nargs = p.parse_args()\n\nif args.file:\n inf = open(args.file, \"r\")\nelse:\n inf = sys.stdin\n\nts = []\nfor line in inf:\n t = line.split(',')[0]\n try:\n ts.append(float(t))\n except ValueError:\n pass\nts.sort()\nnum = int(args.num) if args.num else 20\ndelta = math.ceil((ts[-1]-ts[0])/num)\nstep = 0\nstat = [0]\nstart = math.floor(ts[0])\nfor t in ts:\n if(t=start+(step+1)*delta):\n step+=1\n stat.append(0)\n stat[step]=stat[step]+1\n \nlabel = [str((start+(s+0.5)*delta)) for s in range(step+1)]\n\ndef plot_bar_x():\n # this is for plotting purpose\n index = np.arange(len(label))\n plt.bar(index, stat)\n plt.xlabel('cpu-clock(ms)', fontsize=10)\n plt.ylabel('count', fontsize=10)\n plt.xticks(index, label, fontsize=7, rotation=0)\n plt.title('Response time distribution of chatterbot')\n if args.output:\n plt.savefig(args.output)\n else:\n plt.show()\n\nplot_bar_x()","repo_name":"wlwlw/achatbot","sub_path":"pmu-tools/bar-plot.py","file_name":"bar-plot.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12405723572","text":"import argparse\nimport json\nimport logging\nimport sys, os\nfrom datetime import datetime\nimport uuid\n\ntry:\n import paho.mqtt.client as mqtt\n import psycopg2\nexcept:\n sys.exit(\"Please use your favorite method to install the following module paho.mqtt.client and psycopg2 to use this program\")\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\nargs = argparse.Namespace\nts_connection: str = \"\"\nconnection: str = \"\"\n\ndesc=\"\"\"This program subscribe to an MQTT topic and insert message into a TimescaleDB.\"\"\"\n\nepilog=\"\"\"Subscribe to an MQTT topic.\nInsert message into a TimescaleDB.\"\"\"\n\ndef main():\n global args\n global ts_connection\n global connection\n\n args = parse_args()\n\n try:\n database = os.environ['DATABASE']\n except KeyError:\n print('Environment variable %s does not exist' % ('DATABASE'))\n\n ts_connection = \"postgres://{}:{}@{}:{}/{}\".format(args.ts_username, args.ts_password, args.ts_host,\n args.ts_port, args.ts_database)\n mqtt_connection = \"mqtt://{}:{}@{}:{}/{}\".format(args.mqqt_username, args.mqqt_password, args.mqqt_host,\n args.mqqt_port, args.mqqt_topic)\n logger.debug(\"TimescaleDB connection: {}\".format(ts_connection))\n logger.debug(\"MQTT connection: {}\".format(mqtt_connection))\n\n client_id = f'python-mqtt-{args.ts_database}-{uuid.uuid4()}'\n client = mqtt.Client(client_id)\n client.on_connect = on_connect\n client.on_message = on_message\n\n client.connect(args.mqqt_host, args.mqqt_port, 60)\n #client.username_pw_set(args.mqqt_username, args.mqqt_password)\n\n # Create table if not exist\n try:\n connection = psycopg2.connect(ts_connection, connect_timeout=3)\n cursor = connection.cursor()\n # SQL query to create a new table\n create_table_query = \"\"\"-- Step 1: Define regular table\n CREATE TABLE IF NOT EXISTS sensor_metrics (\n time TIMESTAMP WITHOUT TIME ZONE NOT NULL,\n device_id text NOT NULL,\n path text NOT NULL,\n value DOUBLE PRECISION NULL\n );\n -- Step 2: Turn into hypertable\n SELECT create_hypertable('sensor_metrics','time');\"\"\"\n # Execute a command: this creates a new table\n cursor.execute(create_table_query)\n connection.commit()\n print(\"Table created successfully in PostgreSQL \")\n\n except psycopg2.DatabaseError as error:\n logger.warning(\"Exception: {}\".format(error.pgerror))\n except psycopg2.OperationalError as error:\n logger.error(\"Exception: {}\".format(error.pgerror))\n finally:\n if connection:\n cursor.close()\n connection.close()\n logger.debug(\"PostgreSQL TABLE connection is closed\")\n\n # MQTT loop\n # Blocking call that processes network traffic, dispatches callbacks and\n # handles reconnecting.\n # Other loop*() functions are available that give a threaded interface and a\n # manual interface.\n try:\n with psycopg2.connect(ts_connection, connect_timeout=3) as connection:\n client.loop_forever()\n except psycopg2.OperationalError as error:\n logger.error(\"Exception: {}\".format(error.pgerror))\n finally:\n if connection:\n connection.close()\n logger.debug(\"PostgreSQL data ingest connection is closed\")\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n logger.debug(\"MQTT - Connected with result code {}\".format(str(rc)))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(args.mqqt_topic)\n\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n logger.debug(\"MQTT - Topic: {}, Message Payload: {}\".format(msg.topic, str(msg.payload)))\n publish_message_to_db(msg)\n\ndef date_converter(o):\n if isinstance(o, datetime):\n return o.__str__()\n\ndef publish_message_to_db(message):\n global connection\n message_payload = json.loads(message.payload)\n #logger.debug(\"message.payload: {}\".format(json.dumps(message_payload, default=date_converter)))\n\n sql = \"\"\"INSERT INTO sensor_metrics(time, device_id, path, value)\n VALUES (%s, %s, %s, %s);\"\"\"\n\n data = (\n message_payload[\"time\"],\n message_payload[\"context\"], \n message_payload[\"path\"],\n message_payload[\"value\"]\n )\n\n #logger.debug(\"PostgreSQL - sql.insert: {} {}\".format(sql, data))\n try:\n with connection.cursor() as curs:\n try:\n curs.execute(sql, data)\n logger.debug(\"PostgreSQL - sql.insert: {} {}\".format(sql, data))\n except psycopg2.Error as error:\n logger.error(\"Exception: {}\".format(error.pgerror))\n except Exception as error:\n logger.error(\"Exception: {}\".format(error))\n except psycopg2.OperationalError as error:\n logger.error(\"Exception: {}\".format(error.pgerror))\n finally:\n connection.commit()\n\n\n# Read in command-line parameters\ndef parse_args():\n parser = argparse.ArgumentParser(description=desc, epilog=epilog)\n parser.add_argument('--mqqt_topic', help='MQTT topic', default='+/signalk/delta')\n parser.add_argument('--mqqt_host', help='MQTT host', default='172.30.0.1')\n parser.add_argument('--mqqt_port', help='MQTT port', type=int, default=1883)\n parser.add_argument('--mqqt_username', help='MQTT username', default='')\n parser.add_argument('--mqqt_password', help='MQTT password', default='')\n parser.add_argument('--ts_host', help='TimescaleDB host', default='172.30.0.1')\n parser.add_argument('--ts_port', help='TimescaleDB port', type=int, default=5432)\n parser.add_argument('--ts_username', help='TimescaleDB username', default='username')\n parser.add_argument('--ts_password', help='TimescaleDB password', default='password')\n parser.add_argument('--ts_database', help='TimescaleDB database', default='example')\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"xbgmsharp/mqtt_to_timescaledb","sub_path":"mqtt_to_timescaledb.py","file_name":"mqtt_to_timescaledb.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10714181550","text":"import numpy as np \r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\ndef f1(x):\r\n\tif x>0:\r\n\t\treturn 1\r\n\telse :\r\n\t\treturn 0\r\ndef f2(x):\r\n\treturn [1/(1+math.exp(x))]\r\ndef f3(x):\r\n\treturn 1/(1+math.exp(-x))\r\n\r\n\r\nx = np.arange(-20, 20., 0.01)\r\ny1 = np.zeros(len(x))\r\ny2 = np.zeros(len(x))\r\n\r\nfor i in range(0,len(x)):\r\n\ty1[i] = 1/(1+math.exp(x[i]))\r\n\ty2[i] = 1/(1+math.exp(-x[i]))\r\n\r\nplt.plot(x, np.piecewise(x, [x > 0, x <= 0], [0, 1]), 'r', label = 'CS-LDP threshold function')\r\nplt.plot(x,y1, label='Modified sigmoid function')\r\nplt.plot(x,y2, label = 'Sigmoid function')\r\nplt.legend(loc=\"lower right\")\r\n\r\nplt.axis([-5, 5, -2, 3])\r\n\r\nplt.show()","repo_name":"bhargav265/Local-descriptors","sub_path":"plotpy.py","file_name":"plotpy.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"41615182808","text":"# Author: Drew Byron\n# Date: 04/07/2023\n\n# Imports.\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lmfit import minimize, Parameters, fit_report\nfrom pathlib import Path\n\n# Path to local imports.\nsys.path.append(\"/home/drew/He6CRES/he6-cres-spec-sims/\")\nsys.path.append(\"/home/drew/He6CRES/rocks_analysis_notebooks/\")\n\n# Local imports.\nimport he6_cres_spec_sims.spec_tools.spec_calc.spec_calc as sc\nimport he6_cres_spec_sims.experiment as exp\nimport he6_cres_spec_sims.spec_tools.beta_source.beta_spectrum as bs\n\n# Local imports for plotting ratios and such.\nimport analysis_functions.ratio_experiment as re\nimport analysis_functions.ratio_prediction as rp\nimport analysis_functions.plotting_methods as pm\nimport mc_functions.simple_mc as mc\nimport mc_functions.const_prop_vs_counts as cpc\nimport mc_functions.const_prop_vs_mon as cpm\nimport mc_functions.B_sensitivity as btest\nimport mc_functions.mon_drift_sensitivity as montest\nimport mc_functions.wall_effect as we\nimport mc_functions.energy_domain as ed\nimport mc_functions.from_below as fb\n\n# Set plot parameters.\nparams = {\n \"axes.titlesize\": 15,\n \"legend.fontsize\": 12,\n \"axes.labelsize\": 12,\n \"xtick.labelsize\": 12,\n \"ytick.labelsize\": 12,\n}\nplt.rcParams.update(params)\n\n# Set fig path.\nfig_dir = Path(\"/media/drew/T7 Shield/thesis_figures/monte_carlo\")\nfig_name = Path(\"MC_fig_0_simple_illustration.png\")\nfig_path = fig_dir / fig_name\nprint(fig_path)\n\n\n# Plotting functions.\ndef plot_sim_exp_ratio(ratio_exp, ax):\n\n label = f\"Monte Carlo\"\n ax.errorbar(\n ratio_exp.index,\n ratio_exp.Ratio,\n yerr=ratio_exp[\"sRatio\"],\n label=label,\n marker=\"o\",\n ms=6,\n color=\"tab:blue\",\n )\n\n return None\n\n\ndef plot_predicted_ratio(ratio_pre, ax, label=None):\n\n if label is None:\n label = f\"Prediction\"\n ax.plot(\n ratio_pre.index,\n ratio_pre.Ratio,\n label=label,\n marker=\"o\",\n ms=6,\n color=\"tab:orange\",\n )\n\n return None\n\n\n# Select set fields.\nset_fields = np.arange(0.75, 3.5, 0.25)\n# Freq BW.\nfreq_BW = np.array([19.0e9, 19.1e9])\n# Tile freq_BW.\nfreq_BWs = np.tile(freq_BW, (len(set_fields), 1))\n\n# C, relationship between he and ne monitor.\nC_exp = np.random.uniform(0.5, 1.5)\n\n# Number of counts:\nN = 10**4\n# monitor rate tot:\nmon = 10**8\n# Set little b.\nb = 0\n\n# Simulate simple experiment.\nratio_exp, spectra_ne_exp, spectra_he_exp = mc.simple_MC(\n set_fields,\n freq_BWs,\n C_exp,\n b,\n counts_per_isotope=N,\n monitor_rate=mon,\n counts_pois=True,\n mon_pois=True,\n)\n\nratio_pred = rp.AUC_expectation(set_fields, freq_BWs, b=b, plot=False)\n\n# Conduct fit.\nmy_pars = Parameters()\nmy_pars.add(\"C\", value=1, min=0, max=10, vary=True)\nmy_pars.add(\"b\", value=0.1, min=-10, max=10, vary=True)\n\nresult = minimize(mc.objfunc_chisq, my_pars, args=(freq_BWs, set_fields, ratio_exp))\n\n# Fit report.\nprint(fit_report(result.params))\n\n# Plot results.\nf, (ax0, ax1) = plt.subplots(\n 2, 1, gridspec_kw={\"height_ratios\": [3, 1]}, figsize=(12, 7)\n)\n\nC = result.params[\"C\"].value\n\nratio_exp_cp = ratio_exp.copy()\nratio_exp_cp[\"Ratio\"] = C * ratio_exp_cp[\"Ratio\"]\nratio_exp_cp[\"sRatio\"] = C * ratio_exp_cp[\"sRatio\"]\n\nplot_sim_exp_ratio(ratio_exp_cp, ax0)\nplot_predicted_ratio(ratio_pred, ax0)\n\n# ax0.set_yscale(\"log\")\nax0.set_ylabel(\"ratio\")\nax1.set_ylabel(r\"$\\sigma$\")\nax0.set_title(f\"Simulated Experiment. Counts per isotope: 10^4\")\nax0.legend()\n\nax0.set_ylabel(\"ratio\")\nax1.set_xlabel(\"Set Field (T)\")\nax1.set_ylim(-2,2)\n\n\nax1.plot(\n ratio_pred.index,\n (ratio_exp_cp.Ratio - ratio_pred.Ratio) / ratio_exp_cp.sRatio,\n label=f\"residuals\",\n marker=\"o\",\n ls=\"None\",\n ms=6,\n color=\"tab:blue\",\n)\nax1.legend()\n\n# Save and display the figure.\nplt.savefig(fig_path, bbox_inches=\"tight\", dpi=300)\nplt.show()\n","repo_name":"drewbyron/rocks-analysis-notebooks","sub_path":"thesis_figure_scripts/MC_fig_0.py","file_name":"MC_fig_0.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17964003409","text":"from typing import NamedTuple\nfrom dataclasses import dataclass\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport ipywidgets as widgets\n\nfrom matplotlib.colors import ListedColormap, BoundaryNorm\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Patch\n\n\ndef in_notebook() -> bool:\n try:\n from IPython import get_ipython # pylint: disable=import-outside-toplevel\n if 'IPKernelApp' not in get_ipython().config: # pragma: no cover\n return False\n except ImportError:\n return False\n except AttributeError:\n return False\n return True\n\n\nGRAPH_SMALL = np.array([\n [0, 0, 0, 0, 0, 0],\n [1, 0, 1, 1, 1, 0],\n [1, 0, 1, 1, 1, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 1, 1, 0, 1, 0],\n [0, 0, 0, 0, 1, 0],\n [1, 1, 1, 1, 1, 0],\n])\n\nGRAPH_LARGE = np.zeros((29, 49))\nGRAPH_LARGE[14, 9:20] = 1\nGRAPH_LARGE[14:28, 19] = 1\nGRAPH_LARGE[1:15, 29] = 1\nGRAPH_LARGE[14:29, 39] = 1\n\n\nclass State(NamedTuple):\n \"\"\"A state in a graph represented by a 2D array.\"\"\"\n x: int\n y: int\n\n\n@dataclass\nclass ARAStar_State:\n \"\"\"A snapshot of the different States being tracked by ARA*,\n as well as the current path being expanded.\"\"\"\n OPEN: dict\n CLOSED: set\n INCONS: set\n current_path: list[State]\n\n\nclass ARAStar_Plotter:\n CELL_TYPES = ['CLEAR', 'OBSTACLE', 'OPEN', 'INCONS', 'CLOSED']\n CELL_COLORS = ['white', 'black', 'dodgerblue', 'chartreuse', 'silver']\n UNKNOWN_CELL_COLOR = 'red'\n PATH_COLOR = 'red'\n GOAL_COLOR = 'magenta'\n PATH_MARKER = 'x'\n COLORMAP = ListedColormap(CELL_COLORS, 'ARA*_planner').with_extremes(\n under=UNKNOWN_CELL_COLOR, over=UNKNOWN_CELL_COLOR)\n COLOR_BOUNDS = BoundaryNorm(np.arange(len(CELL_TYPES) + 1), COLORMAP.N)\n\n def __init__(self, graph: np.ndarray, start: State, goal: State):\n self.graph = graph\n self.start = start\n self.goal = goal\n self._selected_axes = {}\n\n def visualize_graph(self):\n fig, ax = plt.subplots()\n fig.canvas.toolbar_visible = False\n fig.canvas.footer_visible = False\n\n ax.pcolormesh(self.graph, cmap=self.COLORMAP, norm=self.COLOR_BOUNDS,\n edgecolors='k', linewidth=0.5)\n ax.invert_yaxis() # do this otherwise graph looks upside down\n ax.xaxis.set_tick_params(labeltop=True, labelbottom=False, bottom=False)\n\n # plot start and goal markers\n ax.plot(self.start.y + 0.5, self.start.x + 0.5, color=self.PATH_COLOR,\n marker=self.PATH_MARKER, linestyle='none')\n ax.plot(self.goal.y + 0.5, self.goal.x + 0.5, color=self.GOAL_COLOR,\n marker=self.PATH_MARKER, linestyle='none')\n\n # add start/goal to legend\n handles = []\n handles.append(Line2D([], [], color=self.PATH_COLOR, linestyle='none',\n marker=self.PATH_MARKER,label='Start'))\n handles.append(Line2D([], [], color=self.GOAL_COLOR, linestyle='none',\n marker=self.PATH_MARKER, label='Goal'))\n ax.legend(handles=handles)\n\n plt.show()\n\n\n def _add_legend(self, ax: plt.Axes):\n handles = []\n for cell_type, color in zip(self.CELL_TYPES, self.CELL_COLORS):\n p = Patch(facecolor=color, label=cell_type, edgecolor='k')\n handles.append(p)\n\n handles.append(Line2D([], [], color=self.PATH_COLOR,\n marker=self.PATH_MARKER,label='Path'))\n handles.append(Line2D([], [], color=self.GOAL_COLOR, linestyle='none',\n marker=self.PATH_MARKER, label='Goal'))\n ax.legend(handles=handles)\n\n def _path_to_xyvals(self, path: list[State]) -> tuple[list[float], list[float]]:\n # reverse x/y order since graph is inverted\n xvals, yvals = [], []\n for x, y in path:\n xvals.append(y + 0.5)\n yvals.append(x + 0.5)\n return xvals, yvals\n\n def _add_path(self, ax: plt.Axes, path: list[State]):\n xvals, yvals = self._path_to_xyvals(path)\n ax.plot(xvals, yvals, color=self.PATH_COLOR)\n ax.plot(xvals[0], yvals[0], xvals[-1], yvals[-1], color=self.PATH_COLOR,\n marker=self.PATH_MARKER, linestyle='none') # plot path start/end markers\n ax.plot(self.goal.y + 0.5, self.goal.x + 0.5, color=self.GOAL_COLOR,\n marker=self.PATH_MARKER, linestyle='none') # plot goal marker\n\n def plot_episode(self, epsilon: float, history: list[ARAStar_State]):\n with plt.ioff():\n fig = plt.figure()\n fig.canvas.toolbar_visible = False\n fig.canvas.footer_visible = False\n axs = []\n\n for i, alg_state in enumerate(history):\n ax = fig.add_axes([0.125, 0.12, .8, 0.75], label=i, visible=i == 0)\n graph = self.graph.copy()\n for s in alg_state.OPEN:\n graph[s] = self.CELL_TYPES.index('OPEN')\n for s in alg_state.INCONS:\n graph[s] = self.CELL_TYPES.index('INCONS')\n for s in alg_state.CLOSED:\n graph[s] = self.CELL_TYPES.index('CLOSED')\n\n ax.pcolormesh(graph, cmap=self.COLORMAP, norm=self.COLOR_BOUNDS,\n edgecolors='k', linewidth=0.5)\n self._add_path(ax, alg_state.current_path)\n\n ax.set_title(rf'Anytime Repairing A*, $\\epsilon={epsilon}$, '\n f'Iteration {i}')\n ax.invert_yaxis() # do this otherwise graph looks upside down\n ax.xaxis.set_tick_params(labeltop=True, labelbottom=False, bottom=False)\n self._add_legend(ax)\n axs.append(ax)\n\n self._selected_axes[epsilon] = 0\n def select_ax(new_ax):\n current_ax = self._selected_axes[epsilon]\n new_ax %= len(history)\n if new_ax != current_ax:\n axs[current_ax].set_visible(False)\n axs[new_ax].set_visible(True)\n self._selected_axes[epsilon] = new_ax\n fig.canvas.draw_idle()\n\n bforward = widgets.Button(\n disabled=False,\n button_style='',\n icon='caret-right'\n )\n bbackward = widgets.Button(\n disabled=False,\n button_style='',\n icon='caret-left'\n )\n bbackward.on_click(lambda _: select_ax(self._selected_axes[epsilon] - 1))\n bforward.on_click(lambda _: select_ax(self._selected_axes[epsilon] + 1))\n footer = widgets.HBox([bbackward, bforward])\n return widgets.VBox([fig.canvas, footer])\n\n def plot_final_state(self, epsilon: float, history: list[ARAStar_State]):\n fig, ax = plt.subplots()\n fig.canvas.toolbar_visible = False\n fig.canvas.footer_visible = False\n\n alg_state = history[-1] # only care about final expansion\n graph = self.graph.copy()\n for s in alg_state.OPEN:\n graph[s] = self.CELL_TYPES.index('OPEN')\n for s in alg_state.INCONS:\n graph[s] = self.CELL_TYPES.index('INCONS')\n for s in alg_state.CLOSED:\n graph[s] = self.CELL_TYPES.index('CLOSED')\n\n ax.pcolormesh(graph, cmap=self.COLORMAP, norm=self.COLOR_BOUNDS,\n edgecolors='k', linewidth=0.5)\n self._add_path(ax, alg_state.current_path)\n\n ax.set_title(rf'Anytime Repairing A*, $\\epsilon={epsilon}$')\n ax.invert_yaxis() # do this otherwise graph looks upside down\n ax.xaxis.set_tick_params(labeltop=True, labelbottom=False, bottom=False)\n self._add_legend(ax)\n\n plt.show()\n\n def plot_paths_found(self, paths_found: dict[float, list[State]]):\n fig, ax = plt.subplots()\n fig.canvas.toolbar_visible = False\n fig.canvas.footer_visible = False\n\n ax.pcolormesh(self.graph, cmap=self.COLORMAP, norm=self.COLOR_BOUNDS,\n edgecolors='k', linewidth=0.5)\n ax.set_title('Anytime Repairing A*: All Paths Found')\n ax.invert_yaxis() # do this otherwise graph looks upside down\n ax.xaxis.set_tick_params(labeltop=True, labelbottom=False, bottom=False)\n\n for epsilon, path in paths_found.items():\n xvals, yvals = self._path_to_xyvals(path)\n ax.plot(xvals, yvals, label=rf'$\\epsilon={round(epsilon, 2)}$')\n\n ax.legend()\n plt.show()\n","repo_name":"rlargaespada/incremental_alg_pset","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"47608542989","text":"from tables import Doctor,Patient,Treatment,Grade,Device,Music,SessionClass\nfrom tools import timeStampToYMD, patientInfo2List, treatmentInfo2List, gradeInfo2List,writeRowExcel\nimport time, xlsxwriter\n\ndef getPatientIDDeduplicate(device_mac,*args):\n session = SessionClass()\n device=session.query(Device).filter(Device.device_mac==device_mac).first()\n if len(args)==0:\n patient_id_value=session.query(Patient).filter(Patient.device_id==device.device_id).with_entities(Patient.patient_id).all()\n else:\n patient_id_value = session.query(Patient).filter(Patient.device_id == device.device_id, Patient.patient_name==args[0]).with_entities(Patient.patient_id).all()\n value_list=[]\n for index in range(len(patient_id_value)):\n value_list.append(patient_id_value[index][0])\n session.close()\n return value_list\n\n\ndef getDoctorIDDedulicate(device_mac):\n session = SessionClass()\n device = session.query(Device).filter(Device.device_mac == device_mac).first()\n doctor_id_value = session.query(Doctor).filter(Doctor.device_id == device.device_id).with_entities(Doctor.doctor_id).all()\n value_list = []\n for index in range(len(doctor_id_value)):\n value_list.append(doctor_id_value[index][0])\n session.close()\n return value_list\n\n\ndef getSinglePatientInfo(patient_id):\n session=SessionClass()\n patient=session.query(Patient).filter(Patient.patient_id==patient_id).first()\n treatment_count=session.query(Treatment).filter(Treatment.patient_id==patient_id).count()\n grade=session.query(Grade).order_by(Grade.grade_time.desc()).filter(Grade.patient_id==patient_id).first()\n # 处理None情况\n if (grade == None):\n grade_level = ''\n grade_score=''\n grade_time=0\n else:\n grade_level = grade.grade_level\n grade_score=grade.grade_score\n grade_time=grade.grade_time\n #处理None情况结束\n per_patient={\n 'patient_id':patient.patient_id,\n 'patient_name':patient.patient_name,\n 'patient_gender':patient.patient_gender,\n 'patient_age':patient.patient_age,\n 'patient_self_reproted': patient.patient_self_reported,\n 'patient_medical_history':patient.patient_medical_history,\n 'patient_examination':patient.patient_examination,\n 'patient_doctor_category1':patient.patient_doctor_category1,\n 'patient_doctor_category2':patient.patient_doctor_category2,\n 'doctor':patient.doctor.doctor_name,\n 'treatment_count':treatment_count,\n 'grade_level':grade_level,\n 'grade_score':grade_score,\n 'grade_time':grade_time,\n 'device_id':patient.device_id\n }\n session.close()\n return per_patient\n\n#单个病人疗效评估\ndef getSinglePatirntGradeLevelChange(patient_id):\n session = SessionClass()\n grade_latest = session.query(Grade).order_by(Grade.grade_time.desc()).filter(Grade.patient_id == patient_id).first()\n grade_oldest = session.query(Grade).order_by(Grade.grade_time).filter(Grade.patient_id == patient_id).first()\n if grade_latest !=None:\n difference = grade_oldest.grade_level-grade_latest.grade_level\n session.close()\n if grade_latest.grade_score==0: # 痊愈\n return 0\n elif difference >= 2: # 显效\n return 1\n elif difference == 1: # 有效\n return 2\n else:\n return 3 # 无效\n\n#获取所有病人信息\ndef getAllPatientsInfo(device_mac):\n patients_id=getPatientIDDeduplicate(device_mac)\n patients_info=[]\n for id in patients_id:\n patients_info.append(getSinglePatientInfo(id))\n patients_info.sort(key=lambda patient: -patient['grade_time'])\n return patients_info\n\n#获取每天治疗人数\ndef getTreatmentPatientNumber(device_mac):\n session=SessionClass()\n patients_id=getPatientIDDeduplicate(device_mac)\n timestamps_string=[]\n date_nums=[]\n for patient_id in patients_id:\n treatments=session.query(Treatment).filter(Treatment.patient_id==patient_id).all()\n for treatment in treatments:\n timestamps_string.append(timeStampToYMD(treatment.treatment_time))\n for i in set(timestamps_string):\n counts={}\n counts['date']=i\n counts['nums']=timestamps_string.count(i)\n date_nums.append(counts)\n session.close()\n return date_nums\n\n#获取性别占比\ndef getGenderPatientProportion(device_mac):\n session = SessionClass()\n patients_id = getPatientIDDeduplicate(device_mac)\n gender_num = [0, 0]\n gender_name = ['男', '女']\n type_percent = []\n for patient_id in patients_id:\n patient = session.query(Patient).filter(Patient.patient_id == patient_id).first()\n if patient.patient_gender == 1: #1为男,2为女\n gender_num[0] += 1\n elif patient.patient_gender == 2:\n gender_num[1] += 1\n all_nums = sum(gender_num)\n for i in range(len(gender_num)):\n counts = {}\n counts['name'] = gender_name[i]\n counts['percent'] = round((gender_num[i] / all_nums), 2)\n counts['a'] = '1'\n type_percent.append(counts)\n session.close()\n return type_percent\n\n#获取每种分型占比\ndef getTypePatientProportion(device_mac):\n session = SessionClass()\n patients_id = getPatientIDDeduplicate(device_mac)\n types_num = [0,0,0,0,0]\n types_name = ['风热侵袭','肝火上扰','痰火郁结','肾精亏损','脾胃虚弱']\n type_percent = []\n for patient_id in patients_id:\n patient = session.query(Patient).filter(Patient.patient_id==patient_id).first()\n if patient.patient_doctor_category1 == 1:\n types_num[0] += 1\n elif patient.patient_doctor_category1==2:\n types_num[1] += 1\n elif patient.patient_doctor_category1==3:\n types_num[2] += 1\n elif patient.patient_doctor_category1==4:\n types_num[3] += 1\n elif patient.patient_doctor_category1==5:\n types_num[4] += 1\n all_nums=sum(types_num)\n for i in range(len(types_num)):\n counts = {}\n counts['name'] = types_name[i]\n counts['percent'] = round((types_num[i]/all_nums),2)\n counts['a']='1'\n type_percent.append(counts)\n session.close()\n return type_percent\n\n\n#获取每个年龄段人数 小于等于18、大于18小于等于44、大于44小于等于60、大于60\ndef getAgePatientProportion(device_mac):\n session = SessionClass()\n patients_id = getPatientIDDeduplicate(device_mac)\n age_stages = [0,0,0,0]\n age_stages_name=['18岁以下','18岁-44岁','44岁-60岁','60岁以上']\n age_nums=[]\n for patient_id in patients_id:\n patient = session.query(Patient).filter(Patient.patient_id == patient_id).first()\n if patient.patient_age <= 18:\n age_stages[0] += 1\n elif (patient.patient_age > 18) and (patient.patient_age<=44):\n age_stages[1]+=1\n elif (patient.patient_age > 44) and (patient.patient_age <= 60):\n age_stages[2] += 1\n elif patient.patient_age > 60:\n age_stages[3] += 1\n for i in range(len(age_stages)):\n per_count = {}\n per_count['name'] = age_stages_name[i]\n per_count['percent'] = round((age_stages[i]/sum(age_stages)),2)\n per_count['a']='1'\n age_nums.append(per_count)\n return age_nums\n\n\n#获取每个医生的患者数目\ndef getPerDoctorPatientNumber(device_mac):\n session=SessionClass()\n doctors_id=getDoctorIDDedulicate(device_mac)\n perdoctor_nums=[]\n for doctor_id in doctors_id:\n counts={}\n doctor=session.query(Doctor).filter(Doctor.doctor_id==doctor_id).first()\n patient_count=session.query(Patient).filter(Patient.doctor_id==doctor_id).count()\n counts['name']=doctor.doctor_name\n counts['num']=patient_count\n perdoctor_nums.append(counts)\n session.close()\n return perdoctor_nums\n\n\n#获取每种音乐类型数目\ndef getPerMusicNumber():\n session=SessionClass()\n permusic_nums=[]\n music_types_name=['宫','商','角', '徵','羽','阿是乐']\n for i in range(6):\n count={}\n count['name']=music_types_name[i]\n count['num']=session.query(Music).filter(Music.music_group==str(i+1)).count()\n permusic_nums.append(count)\n session.close()\n return permusic_nums\n\n\n#获取整体疗效(痊愈——无耳鸣、显效——降低2个等级及以上,有效——降低1个等级,无效——等级不变化,甚至更糟)\ndef getResultAll(device_mac):\n nums=[0,0,0,0]\n patients_id = getPatientIDDeduplicate(device_mac)\n result_name=['痊愈','显效','有效','无效']\n result_nums=[]\n for patient_id in patients_id:\n result=getSinglePatirntGradeLevelChange(patient_id)\n if result == 0:\n nums[0]+=1\n elif result == 1:\n nums[1] +=1\n elif result == 2:\n nums[2] += 1\n elif result == 3:\n nums[3] += 1\n for i in range(4):\n count={}\n count['name']=result_name[i]\n count['nums']=nums[i]\n result_nums.append(count)\n return result_nums\n\n\n#单个病人信息转换成excel\ndef singlePatientToExcel(patient_id):\n session=SessionClass()\n\n patient_col_name = ['编号','姓名','性别','年龄','主诉','既往史','检查','设备分型','医生第一分型','医生第二分型','医生姓名']\n treatment_col_name = ['编号','治疗时间','患者编号']\n grade_col_name =['编号','等级','分数','评分时间','患者编号']\n\n now_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n filename = 'singlePatient_'+ now_time + '.xlsx'\n workbook_filepath = 'emailData/' + filename\n\n with xlsxwriter.Workbook(workbook_filepath) as workbook:\n worksheet_patient = workbook.add_worksheet('患者信息')\n writeRowExcel(0, patient_col_name, worksheet_patient)\n worksheet_treatment=workbook.add_worksheet('治疗信息')\n writeRowExcel(0, treatment_col_name, worksheet_treatment)\n worksheet_grade=workbook.add_worksheet('评分信息')\n writeRowExcel(0, grade_col_name, worksheet_grade)\n\n patient = session.query(Patient).filter(Patient.patient_id == patient_id).first()\n doctor = session.query(Doctor).filter(Doctor.doctor_id == patient.doctor_id).first()\n treatments = session.query(Treatment).filter(Treatment.patient_id == patient_id).all()\n grades = session.query(Grade).filter(Grade.patient_id == patient_id).all()\n\n #患者信息sheet表\n patient_info = patientInfo2List(patient, doctor)\n writeRowExcel(1, patient_info, worksheet_patient)\n\n #治疗信息sheet表\n for i in range(len(treatments)):\n treatment_info=treatmentInfo2List(treatments[i])\n writeRowExcel(i+1,treatment_info,worksheet_treatment)\n\n # 评分信息sheet表\n for i in range(len(grades)):\n grade_info=gradeInfo2List(grades[i])\n writeRowExcel(i+1,grade_info,worksheet_grade)\n session.close()\n return filename, workbook_filepath\n\n#多个病人信息转换成excel,传入为patient_id list\ndef allPatientToExcel(device_mac):\n patients_id=getPatientIDDeduplicate(device_mac)\n session = SessionClass()\n patient_col_name = ['编号', '姓名', '性别', '年龄', '主诉', '既往史', '检查', '设备分型', '医生第一分型', '医生第二分型', '医生姓名']\n treatment_col_name = ['编号', '治疗时间','患者编号','音乐编号']\n grade_col_name = ['编号', '等级', '分数', '评分时间','患者编号']\n\n now_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n filename = 'allPatients_' + now_time + '.xlsx'\n workbook_filepath ='./emailData/' + filename\n\n with xlsxwriter.Workbook(workbook_filepath) as workbook:\n worksheet_patient = workbook.add_worksheet('患者信息') # 创建患者信息sheet表\n worksheet_treatment = workbook.add_worksheet('治疗信息') # 创建治疗信息sheet表\n worksheet_grade = workbook.add_worksheet('评分信息') # 创建评分信息sheet表\n writeRowExcel(0, patient_col_name, worksheet_patient)\n writeRowExcel(0, treatment_col_name, worksheet_treatment)\n writeRowExcel(0, grade_col_name, worksheet_grade)\n len_treatment = 0\n len_grade = 0\n for index in range(len(patients_id)):\n #从数据库获取单个患者数据\n patient = session.query(Patient).filter(Patient.patient_id == patients_id[index]).first()\n doctor = session.query(Doctor).filter(Doctor.doctor_id == patient.doctor_id).first()\n treatments = session.query(Treatment).filter(Treatment.patient_id == patients_id[index]).all()\n grades = session.query(Grade).filter(Grade.patient_id == patients_id[index]).all()\n\n #填充患者信息表\n patient_info = patientInfo2List(patient, doctor)\n writeRowExcel(index + 1, patient_info, worksheet_patient)\n\n # 填充治疗信息表\n for i in range(len(treatments)):\n treatment_info = treatmentInfo2List(treatments[i])\n writeRowExcel(len_treatment + i + 1, treatment_info, worksheet_treatment)\n len_treatment += len(treatments)\n\n # 填充评分信息表\n for i in range(len(grades)):\n grade_info = gradeInfo2List(grades[i])\n writeRowExcel(len_grade + i + 1, grade_info, worksheet_grade)\n len_grade += len(grades)\n session.close()\n return filename,workbook_filepath\n\n\n#根据患者姓名和设备mac查询\ndef getPatientInfoDependName(device_mac,patient_name):\n patients_id = getPatientIDDeduplicate(device_mac,patient_name)\n patients_info = []\n for id in patients_id:\n patients_info.append(getSinglePatientInfo(id))\n patients_info.sort(key=lambda patient: -patient['grade_time'])\n return patients_info\n\n#根据device_mac查询设备是否存在\ndef getDevice(device_mac):\n session = SessionClass()\n device = session.query(Device).filter(Device.device_mac == device_mac).first()\n session.close()\n if device == None:\n return None\n else:\n return device\n\n\n#查询所有音乐信息,返回字段音乐名,编号和时间戳\ndef getAllMusicInfo():\n session = SessionClass()\n musics_info = []\n musics = session.query(Music).all()\n for music in musics:\n per_music_info ={}\n per_music_info['musicName'] = music.music_name\n per_music_info['musicId'] = music.music_human_no_and_group\n per_music_info['musicType'] = music.music_group\n #per_music_info['timestamp'] = music.music_insert_time\n musics_info.append(per_music_info)\n session.close()\n return musics_info\n\n\n#查询所有音乐music_human_no_and_group\ndef getCertainMusic():\n session = SessionClass()\n musics = session.query(Music).filter(Music.music_insert_time != 0).all()\n #musics = session.query(Music).all()\n a = []\n for music in musics:\n a.append(music.music_human_no_and_group + '.' +'mp3')\n return a\n\nif __name__=='__main__':\n # patients_info=getAllPatientsInfo('63:8D:56:86:A1:6B')\n # print(patients_info)\n # for a in patients_info['patients_info']:\n # print(a['grade_time'])\n # timestamps=getTreatmentPatientNumber('5A:D7:5E:52:2F:6E')\n # print(timestamps)\n # a = getAllMusicInfo()\n # print(a)\n a = getCertainMusic()\n print(a)\n print(len(a))\n\n\n","repo_name":"liguang-ops/FiveNotesSqlalchemy","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":15640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24770122961","text":"from weighted_G import G,printG\nimport heapq\nprintG()\n\ndef spf(G,s,d):\n visited=set()\n heap = [(0,s)]\n while heap:\n cost,u = heapq.heappop(heap)\n if u in visited:\n continue\n visited.add(u)\n if u == d:\n return cost\n for v,c in G[u]:\n if v in visited:\n continue\n else:\n next = cost+c\n heapq.heappush(heap,(next,v))\n return -1\nprint(spf(G,'E','F'))\n\n\n\n","repo_name":"thinkamin/MyDailyExercise","sub_path":"2021-6-3.py","file_name":"2021-6-3.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36340141234","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017-06-25\n@summary: app\n@author: YangHaitao\n'''\n\nimport os\nimport logging\n\nimport tornado.web\n\nfrom config import CONFIG\nimport modules.bootstrap as bootstrap\nimport handlers.search as search\nimport handlers.view as view\nimport handlers.login as login\nimport handlers.static as static\nimport handlers.note as note\nimport handlers.rich as rich\nimport handlers.help as help\nimport handlers.picture as picture\n\nLOG = logging.getLogger(__name__)\n\ncwd = CONFIG[\"APP_PATH\"]\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/\", login.RedirectHandler),\n (r\"/home\", search.IndexHandler),\n (r\"/delete\", login.DeleteCookiesHandler),\n (r\"/login\", login.LoginHandler),\n (r\"/register\", login.RegisterHandler),\n (r\"/settings\", login.SettingsHandler),\n (r\"/delete_user\", login.DeleteUserHandler),\n (r\"/logout\", login.LogoutHandler),\n (r\"/search\", search.SearchHandler),\n (r\"/view/html/(?P[a-fA-F\\d]{40})\", view.ViewHandler),\n (r\"/getstatic/(?P[a-fA-F\\d]{40})/(?P.*)\", static.StaticHandler),\n (r\"/note\", note.NoteHandler),\n (r\"/note/\", note.NoteHandler),\n (r\"/note/websocket\", note.NoteSocketHandler),\n (r\"/note/websocket/\", note.NoteSocketHandler),\n (r\"/deletenotes\", note.DeleteHandler),\n (r\"/exportnotes\", note.ExportHandler),\n (r\"/uploadnotesajax\", note.UploadAjaxHandler),\n (r\"/importnotesajax\", note.ImportAjaxHandler),\n (r\"/indexnotesajax\", note.IndexAjaxHandler),\n (r\"/exportnotesajax\", note.ExportAjaxHandler),\n (r\"/archivenotesajax\", note.ArchiveAjaxHandler),\n (r\"/rich\", rich.RichHandler),\n (r\"/rich/\", rich.RichHandler),\n (r\"/rich/websocket\", rich.RichSocketHandler),\n (r\"/rich/websocket/\", rich.RichSocketHandler),\n (r\"/exportrichnotes\", rich.ExportHandler),\n (r\"/deleterichnotes\", rich.DeleteHandler),\n (r\"/uploadrichnotesajax\", rich.UploadAjaxStreamHandler),\n (r\"/importrichnotesajax\", rich.ImportAjaxHandler),\n (r\"/indexrichnotesajax\", rich.IndexAjaxHandler),\n (r\"/exportrichnotesajax\", rich.ExportAjaxHandler),\n (r\"/archiverichnotesajax\", rich.ArchiveAjaxHandler),\n (r\"/picture\", picture.PictureHandler),\n (r\"/picture/\", picture.PictureHandler),\n (r\"/picture/(?P[a-fA-F\\d]{40})\", picture.PictureHandler),\n (r\"/picture/(?P[a-fA-F\\d]{40})/\", picture.PictureHandler),\n (r\"/help\", help.HelpHandler),\n (r\"/help/\", help.HelpHandler),\n ]\n settings = dict(template_path = os.path.join(cwd, \"templates\"),\n static_path = os.path.join(cwd, \"static\"),\n ui_modules = [bootstrap,],\n debug = CONFIG[\"APP_DEBUG\"],\n cookie_secret=\"yhtx4GsTTzuyOP6ja/HpLGFWOK8hI0dwueN+VwQvxVs=\",\n login_url=\"/login\",\n xsrf_cookies = True)\n tornado.web.Application.__init__(self, handlers, **settings)\n","repo_name":"fiefdx/MyNote","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"74636237494","text":"#!/usr/bin/env python\n# -*-coding:utf8-*-\n# author = felicitychou\n# date = 2017/03/14\n# support python 2\n\nimport sys\nimport csv\n\ndef main():\n\n with open(sys.argv[1], 'rb') as fr:\n reader = csv.reader(fr)\n \n first_row = True\n with open(sys.argv[2], 'wb') as fw:\n for row in reader:\n if first_row:\n fw.write('|%s|\\n' % (\"|\".join(row)))\n fw.write('|%s\\n' % ('---|'*len(row)))\n first_row = False\n else:\n fw.write('|%s|\\n' % (\"|\".join(row)))\n\nif __name__ == '__main__':\n main()\n\n'''\nusage: python csv2md.py *.csv *.md\n'''","repo_name":"felicitychou/useful_code","sub_path":"csv2md.py","file_name":"csv2md.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11839241939","text":"\nimport matplotlib.pyplot as plt\nimport pickle\nimport numpy\n\n\nchar_spaces = pickle.load(open(\"char_spaces.pkl\", \"rb\" ) )\n\nfig = plt.figure()\n\n# plt.hist(x=char_spaces, bins=[0, 1, 2, 3, 4, 5, 6], color='#0504aa', alpha=0.7, rwidth=1)\n\nplt.hist(x=char_spaces, bins=numpy.arange(-0.45,6.05,0.1))\n\nplt.title(\"Histogram of gap distances\", fontsize=15)\nplt.xlabel(\"gap distance in pixels\", fontsize=15)\nplt.tick_params(axis='x', labelsize=15)\nplt.tick_params(axis='y', labelsize=15)\n\nplt.xticks(numpy.arange(0, 6, step=1))\n\nplt.show()\n\n\n# plt.savefig('E:/hist.png')\n# fig.savefig('E:/plot.png')","repo_name":"senyalin/MEExtraction","sub_path":"plotting/plot_hist.py","file_name":"plot_hist.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25448072820","text":"\"\"\"\nProvides urls to access the mocked views\n\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import testsviews as views\n\nurlpatterns = [\n url(r'^collection/$', views.collection),\n url(r'^domain.json$', views.domain),\n url(r'^viewsets/(?P\\d+).xml$', views.viewsets),\n url(r'^schema_localization.json$', views.schema_localization),\n url(r'^app.resource$', views.app_resource),\n url(r'^available_related_searches.json$', views.available_related_searches),\n]\n","repo_name":"specify/specify7","sub_path":"specifyweb/context/testurls.py","file_name":"testurls.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"22"} +{"seq_id":"33665826452","text":"from tkinter import *\nfrom tkinter import ttk\nimport subprocess\n\n\ndef options():\n from PIL import Image, ImageTk\n tela = Tk()\n tela.title(\"Sofisticão Petshop\")\n tela.config(background='#FFFFFF')\n tela.resizable(False, False)\n tela.maxsize(width=1280, height=720)\n tela.minsize(width=1280, height=720)\n\n photo = PhotoImage(file=\"assets\\\\favicon.png\")\n tela.iconphoto(False, photo)\n\n def home_open():\n tela.destroy()\n subprocess.run([\"python\", \"index.py\"])\n\n def register_open():\n tela.destroy()\n subprocess.run([\"python\", \"register.py\"])\n\n def animal_register_open():\n tela.destroy()\n subprocess.run([\"python\", \"addPet.py\"])\n\n def services_open():\n tela.destroy()\n subprocess.run([\"python\", \"options.py\"])\n\n def login_open():\n tela.destroy()\n subprocess.run([\"python\", \"login.py\"])\n\n font = \"Inter 13 bold\"\n home_option = Button(\n tela, text=\"Home\", font=font, fg=\"#18191F\", bg=\"#FFFFFF\", padx=15, bd=0, border=0, command=home_open)\n home_option.grid(row=0, column=0)\n\n signup_option = Button(\n tela, text=\"Client Register\", font=font, fg=\"#18191F\", bg=\"#FFFFFF\", padx=15, bd=0, border=0, command=register_open)\n signup_option.grid(row=0, column=1)\n\n animal_register_option = Button(\n tela, text=\"Animal register\", font=font, fg=\"#18191F\", bg=\"#FFFFFF\", padx=15, bd=0, border=0, command=animal_register_open)\n animal_register_option.grid(row=0, column=2)\n\n services_option = Button(\n tela, text=\"Services\", font=font, fg=\"#18191F\", bg=\"#FFFFFF\", padx=15, bd=0, border=0, command=services_open)\n services_option.grid(row=0, column=3)\n\n login_option = Button(\n tela, text=\"Login\", font=font, fg=\"#18191F\", bg=\"#FFFFFF\", padx=15, bd=0, border=0, command=login_open)\n login_option.grid(row=0, column=4)\n\n containerForm = Frame(tela, width=500, height=500)\n containerForm.place(x=450, y=130)\n\n title_page = Label(tela, text=\"Services\",\n font=\"Inter 25 bold\")\n title_page.place(x=635, y=155)\n\n codeEntry = Entry(tela, width=55, bg=\"white\")\n codeEntry.place(x=550, y=290)\n codeLabel = Label(tela, text=\"Code: \", font=\"Inter 10 bold\",)\n codeLabel.place(x=495, y=287)\n\n nameEntry = Entry(tela, width=55, bg=\"white\")\n nameEntry.place(x=550, y=320)\n nameLabel = Label(tela, text=\"Name: \", font=\"Inter 10 bold\",)\n nameLabel.place(x=495, y=317)\n\n serviceBox = ttk.Combobox(tela, values=[\"Health\", \"Education\", \"Dog Bath\"])\n serviceBox.place(x=550, y=350)\n serviceLabel = Label(tela, text=\"Service type: \", font=\"Inter 10 bold\",)\n serviceLabel.place(x=455, y=347)\n\n valueEntry = Entry(tela, width=55, bg=\"white\")\n valueEntry.place(x=550, y=380)\n valueLabel = Label(tela, text=\"Value: \", font=\"Inter 10 bold\",)\n valueLabel.place(x=495, y=377)\n\n descriptionEntry = Entry(tela, width=55, bg=\"white\")\n descriptionEntry.place(x=550, y=410)\n descriptionLabel = Label(tela, text=\"Description: \", font=\"Inter 10 bold\",)\n descriptionLabel.place(x=460, y=407)\n\n durationEntry = Entry(tela, width=55, bg=\"white\")\n durationEntry.place(x=550, y=440)\n durationLabel = Label(tela, text=\"Duration: \", font=\"Inter 10 bold\",)\n durationLabel.place(x=475, y=437)\n\n btnSignUp = Button(tela, text=\"Register service\",\n font=\"Inter 10 bold\", fg=\"white\", bg=\"#8C30F5\", border=5, background=\"#8C30F5\", bd=0, width=48)\n btnSignUp.place(x=495, y=475)\n\n btnDelete = Button(tela, text=\"Delete\",\n font=\"Inter 10 bold\", fg=\"white\", bg=\"red\", border=5, background=\"red\", bd=0, width=48)\n btnDelete.place(x=495, y=505)\n\n btnUpdate = Button(tela, text=\"Update\",\n font=\"Inter 10 bold\", fg=\"white\", bg=\"green\", border=5, background=\"green\", bd=0, width=48)\n btnUpdate.place(x=495, y=535)\n\n btnSearch = Button(tela, text=\"Search\",\n font=\"Inter 10 bold\", fg=\"white\", bg=\"blue\", border=5, background=\"blue\", bd=0, width=48)\n btnSearch.place(x=495, y=565)\n\n tela.mainloop()\n\n\noptions()\n","repo_name":"luizlopes12/Petshop-py","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"14983238448","text":"a = int(input('Введите число '))\n\nfib1 = 0\nfib2 = 1\ni = 0\nfibonachi_1 = []\nfibonachi_2 = []\nwhile i < a - 1:\n fib_sum = fib1 + fib2\n fib1 = fib2\n fib2 = fib_sum\n i = i + 1\n fibonachi_1.append(-fib2)\n fibonachi_2.append(fib2)\nfibonachi = fibonachi_1[::-1] + [1, 0, 1] + fibonachi_2\nprint(fibonachi)\n","repo_name":"AlekseyMuzyukin/seminar_3-PYTHON","sub_path":"Task/Example 5.py","file_name":"Example 5.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"54973246","text":"import os\nfrom datetime import datetime\n\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import status\n\nfrom rest_framework.decorators import action\n\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.response import Response\n\nfrom Learn_Django import settings\nfrom apps.configures.models import Configures\nfrom apps.interfaces.models import Interfaces\nfrom apps.interfaces.serializers import InterfacesModelSerializer\nfrom apps.projects.serializers import InterfaceNameSerializer\nfrom apps.interfaces.utils import get_count_by_project\nfrom apps.testcases.models import Testcases\nfrom apps.envs.models import Envs\nfrom utils import common\nfrom apps.interfaces import serializers\n\n\nclass InterfacesViewSet(ModelViewSet):\n \"\"\"\n create:\n 创建接口\n\n retrieve:\n 获取接口详情数据\n\n update:\n 更新接口\n\n partial_update:\n 部分更新接口\n\n destroy:\n 删除接口\n\n list:\n 获取接口列表数据\n\n names:\n 获取所有接口名称\n \"\"\"\n queryset = Interfaces.objects.all()\n serializer_class = InterfacesModelSerializer\n ordering_fields = ['name']\n filterset_fields = ['id','name']\n\n # methods默认get detail 指定该动作处理的是否为详情资源对象(url是否需要传递pk键值)\n @action(methods=['get'],detail=False)\n def names(self,request,*args,**kwargs):\n queryset = self.get_queryset()\n serializer = InterfaceNameSerializer(instance=queryset,many=True)\n return Response(serializer.data)\n\n @action(methods=['get'],detail=True,url_path='configs')\n def configures(self,request,pk=None):\n configures_models = Configures.objects.filter(interface_id=pk,is_delete=False)\n one_list = []\n for obj in configures_models:\n one_list.append({\n 'id':obj.id,\n 'name':obj.name\n })\n return Response(data=one_list)\n\n @action(methods=['get'], detail=True, url_path='testcases')\n def testcases(self,request,pk=None):\n testceses_models = Testcases.objects.filter(interface_id=pk,is_delete=False)\n one_list = []\n for obj in testceses_models:\n one_list.append({\n 'id':obj.id,\n 'name':obj.name,\n 'status_code':200\n })\n return Response(data=one_list)\n\n def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page,many=True)\n datas = serializer.data\n datas = get_count_by_project(datas)\n return self.get_paginated_response(datas)\n\n serializer = self.get_serializer(queryset,many=True)\n datas = serializer.data\n datas = get_count_by_project(datas)\n return Response(datas)\n\n def perform_destory(self,instance):\n instance.is_delete = True\n instance.save()\n\n @action(methods=['post'], detail=True)\n def run(self, request, pk=None):\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data)\n serializer.is_valid(raise_exception=True)\n datas = serializer.validated_data\n\n env_id = datas.get('env_id')\n # 指定测试文件存放目录 suites+时间\n testcase_dir_path = os.path.join(settings.SUITES_DIR, datetime.strftime(datetime.now(), '%Y%m%d%H%M%S%f'))\n # 创建文件夹\n if not os.path.exists(testcase_dir_path):\n os.mkdir(testcase_dir_path)\n # 获取环境\n env = Envs.objects.filter(id=env_id, is_delete=False).first()\n testcase_objs = Testcases.objects.filter(is_delete=False,interface=instance)\n if not testcase_objs.exists(): #如果接口下没有接口则无法运行\n data_dict = {\n \"detail\":\"此接口下无接口,无法运行!\"\n }\n return Response(data_dict,status=status.HTTP_400_BAD_REQUEST)\n\n for one_obj in testcase_objs:\n\n # 生成yaml用例文件\n common.generate_testcase_files(one_obj, env, testcase_dir_path)\n # 运行用例\n return common.run_testcase(instance, testcase_dir_path)\n\n def get_serializer_class(self):\n if self.action=='run':\n\n return serializers.InterfacesRunSerializer\n else:\n return self.serializer_class","repo_name":"liqi629/learn_nm_drf","sub_path":"apps/interfaces/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38061964329","text":"import time, datetime, os, threading, sys, asyncio\nfrom livestreamer import Livestreamer\nfrom mfcauto import Client, Model, FCTYPE, STATE\n\n\n#specify path to save to ie \"/Users/Joe/MFC\"\nsave_directory = \"/Users/Joe/MFC\"\n#specify the path to the wishlist file ie \"/Users/Joe/MFC/wanted.txt\"\nwishlist = \"/Users/Joe/MFC/wanted.txt\"\nonline = []\nif not os.path.exists(\"{path}\".format(path=save_directory)):\n os.makedirs(\"{path}\".format(path=save_directory))\n\nrecording = []\nrecordingNames = []\n\ndef getOnlineModels():\n try:\n wanted = []\n loop = asyncio.get_event_loop()\n client = Client(loop)\n with open(wishlist) as f:\n for model in f:\n models = model.split()\n for theModel in models:\n wanted.append(int(theModel))\n f.close()\n\n def query():\n try:\n MFConline = Model.find_models(lambda m: m.bestsession[\"vs\"] == STATE.FreeChat.value)\n client.disconnect()\n for model in MFConline:\n if model.bestsession['uid'] in wanted and model.bestsession['uid'] not in recording:\n thread = threading.Thread(target=startRecording, args=(model.bestsession,))\n thread.start()\n\n except:\n client.disconnect()\n pass\n\n client.on(FCTYPE.CLIENT_MODELSLOADED, query)\n loop.run_until_complete(client.connect())\n loop.run_forever()\n except:\n pass\n\ndef startRecording(model):\n try:\n session = Livestreamer()\n streams = session.streams(\"hlsvariant://http://video{srv}.myfreecams.com:1935/NxServer/ngrp:mfc_{id}.f4v_mobile/playlist.m3u8\"\n .format(id=(int(model['uid']) + 100000000),\n srv=(int(model['camserv']) - 500)))\n stream = streams[\"best\"]\n fd = stream.open()\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime(\"%Y.%m.%d_%H.%M.%S\")\n if not os.path.exists(\"{path}/{model}\".format(path=save_directory, model=model['uid'])):\n os.makedirs(\"{path}/{model}\".format(path=save_directory, model=model['uid']))\n with open(\"{path}/{uid}/{st}_{model}.mp4\".format(path=save_directory, uid=model['uid'], model=model['nm'],\n st=st), 'wb') as f:\n recording.append(model['uid'])\n recordingNames.append(model['nm'])\n while True:\n try:\n data = fd.read(1024)\n f.write(data)\n except:\n f.close()\n recording.remove(model['uid'])\n recordingNames.remove(model['nm'])\n return\n\n if model in recording:\n recording.remove(model['uid'])\n recordingNames.remove(model['nm'])\n except:\n if model in recording:\n recording.remove(model['uid'])\n recordingNames.remove(model['nm'])\n\n\nif __name__ == '__main__':\n print(\"____________________Connection Status____________________\")\n while True:\n getOnlineModels()\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[K\")\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[K\")\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[F\")\n print()\n print()\n print(\"Disconnected:\")\n print(\"Waiting for next check\")\n print(\"____________________Recording Status_____________________\")\n for i in range(20, 0, -1):\n sys.stdout.write(\"\\033[K\")\n print(\"{} model(s) are being recorded. Next check in {} seconds\".format(len(recording), i))\n sys.stdout.write(\"\\033[K\")\n print(\"the following models are being recorded: {}\".format(recordingNames), end=\"\\r\")\n time.sleep(1)\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[F\")\n sys.stdout.write(\"\\033[F\")\n","repo_name":"wangroot/MFCRecorder","sub_path":"MFCRecorder.py","file_name":"MFCRecorder.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"24249040286","text":"import torch\nfrom moment_detr.model import build_transformer, build_position_encoding, MomentDETR\n\n\ndef build_inference_model(ckpt_path, **kwargs):\n ckpt = torch.load(ckpt_path, map_location=\"cpu\")\n args = ckpt[\"opt\"]\n if len(kwargs) > 0: # used to overwrite default args\n args.update(kwargs)\n transformer = build_transformer(args)\n position_embedding, txt_position_embedding = build_position_encoding(args)\n\n model = MomentDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n )\n\n model.load_state_dict(ckpt[\"model\"])\n return model\n\n\n","repo_name":"jayleicn/moment_detr","sub_path":"run_on_video/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"22"} +{"seq_id":"24090954539","text":"import cv2\nimport numpy as np\nimport mediapipe as mp\nimport apriltag\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial.transform import Rotation as R\nfrom ekf import *\n\nfrom kf import *\nfrom utils import *\n\n\ndef bhattacharyya(mean1, cov1, mean2, cov2):\n cov=(1/2)*(cov1+cov2)\n t1=(1/8)*np.sqrt((mean1-mean2)@np.linalg.inv(cov)@(mean1-mean2).T)\n t2=(1/2)*np.log(np.linalg.det(cov)/np.sqrt(np.linalg.det(cov1)*np.linalg.det(cov2)))\n return t1+t2\n\n\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\nmtx = np.array([[1.66016657e+03, 0.00000000e+00, 5.45114552e+02],\n [0.00000000e+00, 1.66228213e+03, 9.94387658e+02],\n [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n\nimage_path = '/Users/yuhaoyou/PycharmProjects/pythonProject1/output_frame_100.jpg'\nimage = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n\noptions_1 = apriltag.DetectorOptions(families='tag36h11',\n border=1,\n nthreads=4,\n quad_decimate=5.0,\n quad_blur=0.0,\n refine_edges=True,\n refine_decode=False,\n refine_pose=False,\n debug=False,\n quad_contours=True)\n\n\ndef main():\n video_path = \"/Users/yuhaoyou/PycharmProjects/pythonProject1/IMG_6003.MOV\"\n # video_path = \"/Users/yuhaoyou/PycharmProjects/pythonProject1/IMG_5940.MOV\"\n # read_videos(video_path)\n cap = cv2.VideoCapture(video_path) # Use 0 for the default camera# 创建AprilTag检测器\n tag_trajectories = {}\n ekf_trajectories = {}\n hand_traj = []\n # flag for initialize\n flag_initial = False\n # initial setting\n epi = 0.00001\n tag_states = np.zeros((5, 4)) # target number x state dimension\n tag_states_p = np.zeros((5, 4))\n error_cov = np.array([np.eye(4), np.eye(4), np.eye(4), np.eye(4), np.eye(4)]) * epi\n hand_world_coor_p = np.zeros((3, 1))\n # define observation covirance\n obs_cov = np.eye(3) * 0.0005\n with mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands:\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n print(\"Unable to read the frame.\")\n break\n\n fimage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = hands.process(frame_rgb)\n # results = hands.process(frame_rgb)\n\n detector_1 = apriltag.Detector(options_1)\n\n # 对图像进行AprilTag检测\n detections_1 = detector_1.detect(fimage)\n\n # define tag size\n tag_size = 3.1 * 0.01\n camera_params = (mtx[0, 0], mtx[1, 1], mtx[0, 2], mtx[1, 2])\n pose, e0, e1 = detector_1.detection_pose(detections_1[0], camera_params, tag_size)\n T = pose[:3, -1].reshape(-1, 1) # T^C_W\n R_base = pose[:3, :3] # R^C_W\n # pixel_world = np.zeros(())\n pixel_world = {}\n\n target = {0, 1, 2, 3, 4}\n detected = []\n\n for detection in detections_1:\n pose_cur, e0, e1 = detector_1.detection_pose(detection, camera_params, tag_size)\n tag_id = detection.tag_id\n vec_1 = pose_cur[:3, -1].reshape(-1, 1)\n\n R_cur = R_base[:3, :3].T @ pose_cur[:3, :3]\n # 从旋转矩阵创建一个Rotation对象\n rotation = R.from_matrix(R_cur)\n # 将旋转对象转换为欧拉角(使用ZYX顺序,也称为yaw-pitch-roll顺序)\n euler_angles = rotation.as_euler('ZYX', degrees=False)\n theta = euler_angles[0]\n pixel_world[tag_id] = (R_base.T @ (vec_1 - T))\n\n if not flag_initial:\n tag_states[tag_id][:2] = pixel_world[tag_id][:2].reshape(-1)\n tag_states[tag_id][2] = theta\n if tag_id == 4:\n flag_initial = True # means that we've finished the initialization.\n else:\n v = np.linalg.norm((tag_states[tag_id][:2] - tag_states_p[tag_id][:2])) / (1 / 30)\n cur_obs = np.hstack((pixel_world[tag_id][:2].reshape(-1), theta, np.array(v))).squeeze()\n tag_states_p[tag_id] = tag_states[tag_id]\n tag_states[tag_id], error_cov[tag_id] = kf(cur_obs, tag_states[tag_id], 1 / 30, error_cov[tag_id])\n\n detected.append(tag_id)\n if tag_id not in tag_trajectories:\n tag_trajectories[tag_id] = []\n\n if tag_id not in ekf_trajectories:\n ekf_trajectories[tag_id] = []\n\n tag_trajectories[tag_id].append([int(detection.center[0]), int(detection.center[1])])\n ekf_trajectories[tag_id].append(\n world_to_image_coordinates([tag_states[tag_id][0], tag_states[tag_id][1], 0], mtx, pose))\n\n detected = set(detected)\n # missed is missed tag_id\n missed = target - detected\n\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n middle_finger_mcp = hand_landmarks.landmark[9]\n\n # Extract normalized x and y coordinates\n normalized_x = middle_finger_mcp.x\n normalized_y = middle_finger_mcp.y\n image_height, image_width, _ = frame.shape\n x = int(normalized_x * image_width)\n y = int(normalized_y * image_height)\n\n hand_homo_coor = np.vstack((x, y, 1))\n hand_world_coor = np.linalg.inv(mtx) @ hand_homo_coor * 0.7\n hand_world_coor = np.linalg.inv(pose[0:3, 0:3]) @ (hand_world_coor - pose[0:3, -1].reshape(3, 1))\n hand_traj.append([x, y])\n\n v_hand = np.linalg.norm((hand_world_coor[:2] - hand_world_coor_p[:2])) / (1 / 30)\n hand_world_coor_p = hand_world_coor\n\n if len(missed) != 0:\n # for tag_id in missed:\n # cur_obs = np.hstack((hand_world_coor[:2].reshape(-1), np.array(v_hand))).squeeze()\n # tag_states[tag_id], error_cov[tag_id] = ekf_missed(cur_obs, tag_states[tag_id], 1 / 30, error_cov[tag_id])\n # ekf_trajectories[tag_id].append(world_to_image_coordinates([tag_states[tag_id][0], tag_states[tag_id][1], 0], mtx, pose))\n dis_collection = []\n for tag_id in missed: # 3, 5, 6\n dis_collection.append(\n bhattacharyya(tag_states[tag_id][:3], error_cov[tag_id][:3, :3], hand_world_coor.reshape(-1),\n obs_cov))\n # check the tag id blocked by hand\n missed = list(missed)\n blocked_index = missed[np.argmin(np.array(dis_collection))]\n print('tag ' + str(blocked_index) + ' has been blocked by hand')\n for tag_id in missed:\n if tag_id == blocked_index:\n # print('tag ' + str(blocked_index) + ' has been blocked by hand')\n cur_obs = np.hstack((hand_world_coor[:2].reshape(-1), np.array(v_hand))).squeeze()\n tag_states[blocked_index], error_cov[blocked_index] = kf(cur_obs, tag_states[blocked_index], 1 / 30, error_cov[blocked_index])\n ekf_trajectories[blocked_index].append(world_to_image_coordinates([tag_states[tag_id][0], tag_states[tag_id][1], 0], mtx, pose))\n else:\n ekf_trajectories[tag_id].append(world_to_image_coordinates([tag_states[tag_id][0], tag_states[tag_id][1], 0], mtx, pose))\n\n plt.figure(figsize=(10, 6))\n\n for tag_id, trajectory in ekf_trajectories.items():\n for idx in range(len(trajectory) - 1):\n ekf_last_frame_pixel = trajectory[idx]\n ekf_cur_frame_pixel = trajectory[idx + 1]\n cv2.line(frame, ekf_last_frame_pixel, ekf_cur_frame_pixel, (0, 255, 0), 2)\n\n for tag_id, trajectory in tag_trajectories.items():\n for idx in range(len(trajectory) - 1):\n tag_last_frame_pixel = trajectory[idx]\n tag_cur_frame_pixel = trajectory[idx + 1]\n cv2.line(frame, tag_last_frame_pixel, tag_cur_frame_pixel, (136, 20, 8), 2)\n\n # for idx in range(len(hand_traj) - 1):\n # hand_last_frame_pixel = hand_traj[idx]\n # hand_cur_frame_pixel = hand_traj[idx + 1]\n # cv2.line(frame, hand_last_frame_pixel, hand_cur_frame_pixel, (255, 255, 255), 2)\n\n # Display the video frame with the trajectories\n cv2.imshow('Trajectories', frame)\n\n if cv2.waitKey(5) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"UpennGitttt/Multiple-Object-Tracking-Under-Noise-and-Occlusion","sub_path":"object_tracking_2d.py","file_name":"object_tracking_2d.py","file_ext":"py","file_size_in_byte":9281,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"13888561863","text":"def Patternn(pattern):\r\n for i in range(largest):\r\n\t for j in range(len(numbers)):\r\n\t\t print(pattern[j][i], end=' ')\r\n\t print()\r\n\t\t\r\ndef Patterrn(arr):\r\n\tpattern = []\r\n\tfor number in numbers:\r\n\t\tcol = []\r\n\t\tfor i in range(largest-1, -1, -1):\r\n\t\t\tif int(number) < largest:\r\n\t\t\t\tif i > int(number) - 1:\r\n\t\t\t\t\tcol.append(' ')\r\n\t\t\t\telse:\r\n\t\t\t\t\tcol.append('*')\r\n\t\t\telse:\r\n\t\t\t\tcol.append('*')\r\n\t\tpattern.append(col)\r\n\treturn(pattern)\r\n\r\nnumbers = input('Enter the Numbers:').split(' ')\r\nlargest = int(max(numbers))\r\nPatternn(Patterrn(numbers))\r\n","repo_name":"salima-kpv/Dockode_python","sub_path":"pattern1.py","file_name":"pattern1.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71451154296","text":"import time\nfrom typing import Tuple, List, Callable\nfrom pathlib import Path\n\nfrom functools import partial\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom split_learn.modifiers import Modifier\nfrom split_learn.nn.resnet import make_resnet20_modules\nfrom split_learn.nn.efficient_net import make_efficient_net_b0_64x64_modules\nfrom split_learn.nn.gru4rec import make_gru4rec_modules\nfrom split_learn.nn.wide_and_deep import WideAndDeepNet\nfrom split_learn.nn.textcnn import make_textcnn_modules\n\n\nclass SplitModelFactory(nn.Module):\n def __init__(self):\n super(SplitModelFactory, self).__init__()\n\n def get_splits(self) -> List[List[int]]:\n \"\"\"\n :return: output shapes of different splits. Notice: split position starts from 1\n \"\"\"\n raise NotImplementedError()\n\n def get_modified_model(self, split_position: int, modifier: Modifier, seed: int = int(time.time())) -> nn.Module:\n raise NotImplementedError()\n\n def get_bottom_model(self) -> nn.Module:\n raise NotImplementedError()\n\n\nclass SequentialSplitFactory(SplitModelFactory):\n def __init__(self, get_modules: Callable[[], List[nn.Module]], output_shapes: List[List[int]]):\n super(SequentialSplitFactory, self).__init__()\n self.get_modules = get_modules\n self.output_shapes = output_shapes\n\n self.module_list = None\n self.split_position = -1\n\n def get_splits(self) -> List[List[int]]:\n return self.output_shapes\n\n def get_modified_model(self, split_position: int, modifier: Modifier, seed: int = None) -> nn.Module:\n self.split_position = split_position\n if seed is not None:\n torch.manual_seed(seed)\n self.module_list = self.get_modules()\n return nn.Sequential(\n *self.module_list[:split_position],\n modifier,\n *self.module_list[split_position:]\n )\n\n def get_bottom_model(self):\n return nn.Sequential(*self.module_list[:self.split_position])\n\n\nclass MnistDNNFactory(SequentialSplitFactory):\n def __init__(self):\n super(MnistDNNFactory, self).__init__(lambda: [\n nn.Sequential(nn.Linear(784, 128), nn.LeakyReLU()),\n nn.Sequential(nn.Linear(128, 32), nn.Tanh()),\n nn.Linear(32, 10)\n ], [[128], [32]])\n\n\nclass Resnet20Factory(SequentialSplitFactory):\n def __init__(self, outdim: int = 10):\n super(Resnet20Factory, self).__init__(\n lambda: make_resnet20_modules(outdim),\n [\n [16, 32, 32],\n [32, 32, 32],\n [64, 16, 16],\n [128, 8, 8],\n [128],\n ]\n )\n\n\nclass EfficientNetFactory(SequentialSplitFactory):\n def __init__(self, outdim: int=200, pretrained: bool=False):\n super(EfficientNetFactory, self).__init__(\n lambda: make_efficient_net_b0_64x64_modules(outdim, pretrained=pretrained),\n [\n [32, 32, 32], # 1\n [16, 32, 32], # 2\n [24, 16, 16], # 3\n [40, 8, 8], # 4\n [80, 4, 4], # 5\n [112, 4, 4], # 6\n [192, 2, 2], # 7\n [320, 2, 2], # 8\n [1280, 2, 2], # 9\n [1280] # 10\n ]\n )\n\n\nclass TextCNNFactory(SequentialSplitFactory):\n def __init__(self, vocab_size: int, output_dim: int, input_len: int = 100, embedding_dim: int = 50, n_channels: int = 200,\n kernel_sizes: List[int] = None, initial_embedding: dict = None, word_map: dict = None):\n kernel_sizes = kernel_sizes or [3, 4, 5]\n super(TextCNNFactory, self).__init__(\n lambda: make_textcnn_modules(input_len, output_dim, vocab_size, embedding_dim, n_channels, kernel_sizes,\n initial_embedding, word_map),\n [[n_channels * len(kernel_sizes)]]\n )\n\n\nclass GRU4RecFactory(SequentialSplitFactory):\n def __init__(self, n_items: int, embedding_dim: int, gru_hidden_size: int, additional_linear: int = None):\n split_layer_size = [gru_hidden_size]\n if additional_linear:\n split_layer_size = [additional_linear]\n super(GRU4RecFactory, self).__init__(lambda: make_gru4rec_modules(\n n_items, embedding_dim, gru_hidden_size, n_grus=1, additional_linear=additional_linear\n ), [split_layer_size])\n\n\nclass WideAndDeepFactory(SplitModelFactory):\n def __init__(self, n_numeric_features: int, categorical_features: List[int], dnn_layer_sizes: List[int],\n embedding_dim: int = 16):\n super(WideAndDeepFactory, self).__init__()\n self.get_wide_and_deep = lambda: WideAndDeepNet(n_numeric_features, categorical_features, dnn_layer_sizes, embedding_dim)\n self.deep_factory = SequentialSplitFactory(\n lambda: self.wide_and_deep.deep_model.sequential_modules,\n [39 * embedding_dim] + [[d] for d in dnn_layer_sizes],\n )\n self.wide_and_deep = None\n\n def get_splits(self) -> List[List[int]]:\n return self.deep_factory.get_splits()\n\n def get_modified_model(self, split_position: int, modifier: Modifier, seed: int = int(time.time())) -> nn.Module:\n torch.random.manual_seed(seed)\n self.wide_and_deep = self.get_wide_and_deep()\n self.wide_and_deep.deep_model.mlp = self.deep_factory.get_modified_model(split_position, modifier, seed)\n return self.wide_and_deep\n\n def get_bottom_model(self):\n return self.deep_factory.get_bottom_model()\n\n\nclass LambdaModule(nn.Module):\n def __init__(self, original_module: nn.Module, forward_transform: Callable):\n super(LambdaModule, self).__init__()\n self.original_module = original_module\n self.forward_transform = forward_transform\n \n def forward(self, x):\n y = self.original_module(x)\n return self.forward_transform(x, y)\n \n\nclass InputPreservingModule(LambdaModule):\n def __init__(self, original_module):\n super(InputPreservingModule, self).__init__(original_module, (lambda x, y: (y, x)))\n\n\n\nclass MultiOutputModel(nn.Module):\n def __init__(self, modules: List[nn.Module], split_position: int):\n super(MultiOutputModel, self).__init__()\n self.modules = modules\n self.bottom_model = nn.Sequential(*modules[:split_position])\n self.top_model = InputPreservingModule(nn.Sequential(*modules[split_position:]))\n self.split_position = split_position\n\n def forward(self, x):\n h = self.bottom_model(x)\n y = self.top_model(h)\n return y\n\n\nclass MultiOutputModelFactory(SplitModelFactory):\n def __init__(self, get_modules: Callable[[], List[nn.Module]], output_shapes: List[List[int]]):\n super(MultiOutputModelFactory, self).__init__()\n self.get_modules = get_modules\n self.output_shapes = output_shapes\n\n self.module_list = None\n self.split_position = -1\n\n def get_splits(self) -> List[List[int]]:\n return self.output_shapes\n\n def get_modified_model(self, split_position: int, modifier: Modifier, seed: int = None) -> nn.Module:\n self.split_position = split_position\n torch.manual_seed(seed)\n self.module_list = self.get_modules()\n self.multi_output_model = MultiOutputModel(self.module_list, self.split_position)\n return nn.Sequential(\n self.multi_output_model.bottom_model,\n modifier,\n self.multi_output_model.top_model\n )\n\n def get_bottom_model(self):\n return self.multi_output_model.bottom_model\n\n\nclass MultiOutputResnet20Factory(MultiOutputModelFactory):\n def __init__(self, outdim: int = 10):\n super(MultiOutputResnet20Factory, self).__init__(\n partial(make_resnet20_modules, outdim=outdim),\n [\n [16, 32, 32],\n [32, 32, 32],\n [64, 16, 16],\n [128, 8, 8],\n [128],\n ])\n\n\nclass MultiOutputGRU4RecFactory(MultiOutputModelFactory):\n def __init__(self, n_items: int, embedding_dim: int, gru_hidden_size: int):\n super(MultiOutputGRU4RecFactory, self).__init__(\n partial(make_gru4rec_modules,\n n_items=n_items, embedding_dim=embedding_dim, gru_hidden_size=gru_hidden_size),\n [\n [gru_hidden_size]\n ])\n\n\nclass MultiOutputTextCNNFactory(MultiOutputModelFactory):\n def __init__(self, vocab_size: int, output_dim: int, input_len: int = 100, \n embedding_dim: int = 50, n_channels: int = 200,\n kernel_sizes: List[int] = None, initial_embedding: dict = None, word_map: dict = None):\n kernel_sizes = kernel_sizes or [3, 4, 5]\n super(MultiOutputTextCNNFactory, self).__init__(\n partial(make_textcnn_modules, vocab_size=vocab_size, output_dim=output_dim, input_len=input_len,\n embedding_dim=embedding_dim, n_channels=n_channels, initial_embedding=initial_embedding,\n word_map=word_map, kernel_sizes=kernel_sizes),\n [[n_channels * len(kernel_sizes)]])\n\n\nclass MultiOutputEfficientNetFactory(MultiOutputModelFactory):\n def __init__(self, outdim: int=200, pretrained: bool=False):\n super(MultiOutputEfficientNetFactory, self).__init__(\n partial(make_efficient_net_b0_64x64_modules, num_classes=outdim, pretrained=pretrained),\n [\n [32, 32, 32], # 1\n [16, 32, 32], # 2\n [24, 16, 16], # 3\n [40, 8, 8], # 4\n [80, 4, 4], # 5\n [112, 4, 4], # 6\n [192, 2, 2], # 7\n [320, 2, 2], # 8\n [1280, 2, 2], # 9\n [1280] # 10\n ]\n )\n\n","repo_name":"zfscgy/SplitLearning","sub_path":"split_learn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"38579794883","text":"#!/usr/bin/env python\n\nimport os\nimport joblib\nimport argparse\nfrom PIL import Image\nfrom inference.util import draw_bb_on_img\nfrom inference.constants import MODEL_PATH\nfrom face_recognition import preprocessing\nfrom PIL import ImageDraw, ImageFont\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n 'Script for detecting and classifying faces on user-provided image. This script will process image, draw '\n 'bounding boxes and labels on image and display it. It will also optionally save that image.')\n parser.add_argument('--image-path' , required=True, help='Path to image file.')\n parser.add_argument('--save-dir', help='If save dir is provided image will be saved to specified directory.')\n return parser.parse_args()\n\n\ndef recognise_faces(img):\n faces = joblib.load(MODEL_PATH)(img)\n print ((faces))\n if faces :\n draw_bb_on_img(faces, img)\n return faces, img\n\n\ndef main():\n args = parse_args()\n #print (type (args))\n preprocess = preprocessing.ExifOrientationNormalize()\n img = Image.open(args.image_path)\n filename = img.filename\n img = preprocess(img)\n img = img.convert('RGB')\n\n faces, img = recognise_faces(img)\n for face in faces:\n top_prediction = face.top_prediction\n bb = face.bb\n all_predictions = face.all_predictions\n\n #print(\"Top prediction:\", top_prediction.label)\n print(\"Confidence:\", top_prediction.confidence)\n #print(\"BoundingBox Left:\", bb.left)\n #print(\"BoundingBox Top:\", bb.top)\n #print(\"BoundingBox Right:\", bb.right)\n #print(\"BoundingBox Bottom:\", bb.bottom)\n #print(\"All predictions:\")\n for prediction in all_predictions:\n print(\"\\tLabel:\", prediction.label)\n print(\"\\tConfidence:\", prediction.confidence)\n\n print(\"====================================\")\n if not faces:\n print('No faces found in this image.')\n\n if args.save_dir:\n basename = os.path.basename(filename)\n name = basename.split('.')[0]\n ext = basename.split('.')[1]\n img.save('{}_tagged.{}'.format(name, ext))\n\n img.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"VuongNguyen-yb/vuongnguyen.github.io","sub_path":"face-recognition-master/inference/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6370692437","text":"puzzle =\"\"\"\nbe cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe\nedbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc\nfgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg\nfbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb\naecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea\nfgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb\ndbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe\nbdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef\negadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb\ngcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce\n\"\"\".splitlines()\npuzzle = [[[set(z) for z in y.split(\" \")] for y in x.split(\" | \")] for x in puzzle if x]\n\nuniquedigits = {2:1, 4:4, 3:7, 7:8}\npart1ans = 0\nfor _, code in puzzle:\n for c in code:\n if len(c) in uniquedigits:\n part1ans += 1\nprint(f'part 1: {part1ans}')\n\npart2ans = 0\nfor enc, code in puzzle:\n enc = sorted(enc, key=len)\n one, four, seven, eight = enc.pop(0), enc.pop(1), enc.pop(0), enc.pop()\n dm = {1:one, 4:four, 7:seven, 8:eight}\n up = (dm[7] - dm[4]).pop()\n\n lo5s = enc[:3]\n lo6s = enc[-3:]\n\n r1, r2 = None, None\n for lo6 in lo6s:\n diff = dm[1] - lo6\n if len(diff) == 1:\n dm[6] = lo6\n r1 = diff.pop()\n r2 = (dm[1] - set(r1)).pop()\n elif len(diff) == 0:\n leftmid = dm[4] - dm[1]\n if leftmid <= lo6:\n dm[9] = lo6\n else:\n dm[0] = lo6\n\n for lo5 in lo5s:\n diff = dm[1] - lo5\n if len(diff) == 0:\n dm[3] = lo5\n continue\n diff = lo5 - set(r1)\n if len(diff) == 5:\n dm[5] = lo5\n elif len(diff) == 4:\n dm[2] = lo5\n\n md = {''.join(sorted(list(v), key=str.lower)):str(k) for k,v in dm.items()}\n\n digit = ''\n for c in [''.join(sorted(list(cset), key=str.lower)) for cset in code]:\n digit += md[c]\n part2ans += int(digit)\nprint(f'part 2: {part2ans}')\n\n \n","repo_name":"thomasfernsatencompass/aoc2021","sub_path":"solutions/aoc_d08.py","file_name":"aoc_d08.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13464063722","text":"#! /usr/bin/env python\n\nimport argparse\nimport os\nimport subprocess\nimport errno\nimport multiprocessing\nimport glob\nimport sys\n\ndef fullPath(path):\n return os.path.realpath(os.path.abspath(os.path.expanduser(path)))\n\ndef ensureDir(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\ndef updateSource(source_dir):\n orig_dir = os.getcwd()\n os.chdir(source_dir)\n try: subprocess.check_call([\"git\",\"pull\"])\n finally: os.chdir(orig_dir)\n\ndef includeDir():\n base_path = os.path.join(sys.prefix,\"include\")\n version = sys.version_info\n subdir = \"python\"+str(version.major)+\".\"+str(version.minor)+\"*\"\n return glob.glob(os.path.join(base_path,subdir))[-1]\n\ndef libFile():\n base_path = os.path.join(sys.prefix,\"lib\")\n version = sys.version_info\n filename = \"libpython\"+str(version.major)+\".\"+str(version.minor)+\"*.dylib\"\n return glob.glob(os.path.join(base_path,filename))[-1]\n\ndef build(source_dir, build_dir):\n ensureDir(build_dir)\n orig_dir = os.getcwd()\n os.chdir(build_dir)\n try:\n config = [\"cmake\"]\n if sys.version_info.major >= 3 and sys.version_info.minor >= 1:\n config.append(\"-Dpython3:BOOL=ON\")\n config.extend([\"-DPYTHON_EXECUTABLE:PATH=\"+sys.executable,\n \"-DPYTHON_INCLUDE_DIR:PATH=\"+includeDir(),\n \"-DPYTHON_LIBRARY:PATH=\"+libFile(),\n \"-Droofit:BOOL=ON\",\"-Dminuit2:BOOL=ON\",\n \"-Drpath:BOOL=ON\",\"-Dlibcxx:BOOL=ON\",\n \"-Dmathmore:BOOL=ON\",\n source_dir])\n\n subprocess.check_call(config)\n subprocess.call([\"cmake\",\"--build\",\".\",\"--\",\"-k\",\"-j\",str(multiprocessing.cpu_count())])\n finally:\n os.chdir(build_dir)\n\ndef fixLinks(build_dir):\n lib_dir = os.path.join(sys.prefix,\"lib\")\n for f in glob.glob(os.path.join(build_dir, \"lib/*.so\")):\n out=subprocess.check_output([\"otool\",\"-L\",f])\n for line in out.decode(\"utf-8\").splitlines():\n if not line.startswith(\"\\t\"): continue\n if line.startswith(\"\\t@rpath\") or line.startswith(\"\\t/\"): continue\n libname = line.split()[0]\n subprocess.call([\"install_name_tool\",\"-change\",\n libname, os.path.join(lib_dir, libname), f])\n\ndef upROOT(source_dir, build_dir):\n source_dir = fullPath(source_dir)\n build_dir = fullPath(build_dir)\n\n try: subprocess.check_call([\"git\",\"clone\",\"http://root.cern.ch/git/root.git\", source_dir])\n except subprocess.CalledProcessError as e:\n updateSource(source_dir)\n\n build(source_dir, build_dir)\n fixLinks(build_dir)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Updates ROOT, installing if necessary\",\n formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--source\", default=\"~/root/source\",\n help=\"Directory for ROOT source code\")\n parser.add_argument(\"--build\", default=\"~/root/build\",\n help=\"Directory for ROOT build files\")\n args = parser.parse_args()\n\n upROOT(args.source, args.build)\n","repo_name":"ald77/scripts","sub_path":"upROOT.py","file_name":"upROOT.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"681100331","text":"# ../weapons/manager.py\n\n\"\"\"Provides weapon information for the current game.\"\"\"\n\n# =============================================================================\n# >> IMPORTS\n# =============================================================================\n# Site-Package Imports\n# Configobj\nfrom configobj import ConfigObj\n\n# Source.Python Imports\n# Core\nfrom core import GAME_NAME\n# Paths\nfrom paths import SP_DATA_PATH\n# Weapons\nfrom weapons.default import NoWeaponManager\nfrom weapons.instance import WeaponClass\n\n\n# =============================================================================\n# >> ALL DECLARATION\n# =============================================================================\n__all__ = ('_WeaponManager',\n 'weapon_manager',\n )\n\n\n# =============================================================================\n# >> GLOBAL VARIABLES\n# =============================================================================\n# Get the game's ini path\n_gamepath = SP_DATA_PATH / 'weapons' / GAME_NAME + '.ini'\n\n\n# =============================================================================\n# >> CLASSES\n# =============================================================================\nclass _WeaponManager(dict):\n \"\"\"Dictionary class to store basic weapon information.\"\"\"\n\n def __init__(self):\n \"\"\"Load the ini file into the dictionary.\"\"\"\n # Initialize the dictionary\n super().__init__()\n\n # Get the ConfigObj instance of the file\n self.ini = ConfigObj(_gamepath, unrepr=True)\n\n # Get the \"properties\"\n properties = self.ini['properties']\n\n # Get the game's weapon prefix\n self._prefix = properties['prefix']\n\n # Get the game's m_iAmmo property\n self._ammoprop = properties['ammoprop']\n\n # Get the game's m_hMyWeapons property\n self._myweapons = properties['myweapons']\n\n # Store any special names\n self._special_names = self.ini.get('special names', {})\n\n # Store projectile names\n self._projectiles = self.ini.get('projectiles', {})\n\n # Store tags as a set\n self._tags = set()\n\n # Loop through all weapons\n for basename in self.ini['weapons']:\n\n # Get the weapon's full name\n name = self._format_name(basename)\n\n # Add the weapon to the dictionary\n self[name] = WeaponClass(\n name, basename, self.ini['weapons'][basename]\n )\n\n # Add the weapon's tags to the set of tags\n self._tags.update(self[name].tags)\n\n def __getitem__(self, item):\n \"\"\"Return the :class:`weapons.instance.WeaponClass` for the weapon.\n\n :param str item: The weapon to retrieve the instance of.\n :rtype: WeaponClass\n \"\"\"\n name = self._format_name(item)\n return super().__getitem__(name)\n\n def __contains__(self, item):\n \"\"\"Return whether the weapon is in the manager.\n\n :param str item: The weapon to retrieve the instance of.\n :rtype: WeaponClass\n \"\"\"\n name = self._format_name(item)\n return super().__contains__(name)\n\n def get(self, item, default=None):\n \"\"\"Return the :class:`weapons.instance.WeaponClass` for the weapon.\n\n :param str item: The weapon to retrieve the instance of.\n :param default: The value to return if the item is not found.\n :rtype: WeaponClass\n \"\"\"\n name = self._format_name(item)\n return super().get(name, default)\n\n def _format_name(self, item):\n \"\"\"Format the name to include the game's weapon prefix.\"\"\"\n # Set the weapon to lower-case\n name = item.lower()\n\n # Is the item a member of the special names?\n if name in self.special_names:\n\n # Get the value of the special name\n name = self.special_names[name]\n\n # Is the item a member of the projectiles?\n if name in self.projectiles:\n\n # Get the value of the projectile name\n name = self.projectiles[name]\n\n # Does the weapon start with the prefix?\n if not name.startswith(self.prefix):\n\n # Add the prefix to the name\n name = self.prefix + name\n\n # Return the name\n return name\n\n @property\n def prefix(self):\n \"\"\"Return the weapon prefix value for the server.\"\"\"\n return self._prefix\n\n @property\n def ammoprop(self):\n \"\"\"Return the ammoprop property for the server.\"\"\"\n return self._ammoprop\n\n @property\n def myweapons(self):\n \"\"\"Return the myweapons property for the server.\"\"\"\n return self._myweapons\n\n @property\n def special_names(self):\n \"\"\"Return the special_names for the server.\"\"\"\n return self._special_names\n\n @property\n def projectiles(self):\n \"\"\"Return the projectiles for the server.\"\"\"\n return self._projectiles\n\n @property\n def tags(self):\n \"\"\"Return the weapon tags for the server.\"\"\"\n return self._tags\n\n# Does the current game have an ini file?\nif _gamepath.isfile():\n\n # Get the _WeaponManager instance\n weapon_manager = _WeaponManager()\n\n# Is there no ini file for the current game?\nelse:\n\n # Store weapon_manager as a NoWeaponManager instance\n # to raise an error anytime the manager is utilized\n weapon_manager = NoWeaponManager()\n","repo_name":"Source-Python-Dev-Team/Source.Python","sub_path":"addons/source-python/packages/source-python/weapons/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"22"} +{"seq_id":"7417201965","text":"# Created by Leon Hunter at 11:23 AM 10/24/2020\r\nfrom unittest import TestCase\r\n\r\nfrom src.main.predicator import Predicator\r\n\r\n\r\nclass PredicatorTester(TestCase):\r\n def _test(self, method_to_be_tested, value_sets):\r\n for value_set in value_sets:\r\n # given\r\n first_value = value_set[0]\r\n expected_calculation = value_set[1]\r\n\r\n # when\r\n actual_calculation = method_to_be_tested(first_value)\r\n\r\n calculation_error_message = '''\r\n first_value = {}\r\n expected_calculation = {}\r\n actual_calculation = {}\r\n '''.format(first_value, expected_calculation, actual_calculation)\r\n\r\n return_type_error_message = '''\r\n expected return value of `{}` to be of type `bool`\r\n instead was of type `{}`\r\n '''.format(method_to_be_tested.__name__, type(actual_calculation))\r\n\r\n # then\r\n self.assertTrue(isinstance(actual_calculation, bool), return_type_error_message)\r\n self.assertAlmostEqual(expected_calculation, actual_calculation, calculation_error_message)\r\n\r\n def test_is_greater_than_5(self):\r\n self._test(Predicator().is_greater_than_5, [\r\n (1, False),\r\n (5, False),\r\n (6, True),\r\n (7, True)\r\n ])\r\n\r\n def test_is_greater_than_8(self):\r\n self._test(Predicator().is_greater_than_8, [\r\n (1, False),\r\n (8, False),\r\n (10, True),\r\n (17, True)\r\n ])\r\n\r\n def test_is_less_than_1(self):\r\n self._test(Predicator().is_less_than_1, [\r\n (5, False),\r\n (1, False),\r\n (-6, True),\r\n (-7, True)\r\n ])\r\n\r\n def test_is_less_than_4(self):\r\n self._test(Predicator().is_less_than_4, [\r\n (5, False),\r\n (4, False),\r\n (2, True),\r\n (-7, True)\r\n ])\r\n","repo_name":"curriculeon/jenkins.python.unittest_python-fundamentals","sub_path":"src/test/predicator_test.py","file_name":"predicator_test.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73757517815","text":"import torch\nfrom torch import Tensor\nfrom torch.nn.modules.loss import _Loss, L1Loss\n\nSECONDS_IN_HOUR = 60 * 60\nPADDING_VALUE = -100\n\n\ndef calculate_corridor_func(device, video_length, mean_video_length, d=5):\n \"\"\"\n Calculates the corridor function, presented in the paper:\n https://arxiv.org/pdf/2002.11367.pdf. \n This implementation follows the same notions as the paper.\n \"\"\"\n c_x = torch.arange(video_length, device=device)\n\n g_t = video_length - c_x\n n_t = torch.maximum(mean_video_length - c_x, torch.zeros_like(c_x))\n\n a_t = 1 - (2 / (1 + torch.exp((c_x / video_length) * d)))\n\n c_t = (a_t * g_t) + ((1 - a_t) * n_t)\n\n return c_t\n\n\ndef calculate_corridor_mask(preds, labels, mean_video_length, d=5, tolerance=0):\n \"\"\"\n Calculates mask of which pred lays between the corridor function and label.\n Following https://arxiv.org/pdf/2002.11367.pdf.\n This implementation follows the same notions as the paper\n \"\"\"\n c_t = calculate_corridor_func(\n device=preds.device,\n video_length=len(preds),\n mean_video_length=mean_video_length,\n d=d,\n )\n\n mask = torch.logical_or(\n torch.logical_and(c_t <= preds, preds <= labels + tolerance),\n torch.logical_and(labels - tolerance <= preds, preds <= c_t),\n )\n\n return mask, c_t\n\n\ndef calculate_corridor_weights(\n preds,\n labels,\n video_length,\n mean_video_length,\n d=5,\n tolerance=0,\n off_corridor_penalty=1,\n):\n \"\"\"\n Calculates the loss weight for each index.\n Following https://arxiv.org/pdf/2002.11367.pdf.\n This implementation follows the same notions as the paper.\n \"\"\"\n w = torch.ones(len(preds) - video_length, device=preds.device) * PADDING_VALUE\n\n p = preds[:video_length]\n l = labels[:video_length]\n mask, c_t = calculate_corridor_mask(\n preds=p, labels=l, mean_video_length=mean_video_length, d=d, tolerance=tolerance\n )\n\n weights = torch.pow(torch.abs(p - l) / torch.abs(c_t - l), 2)\n\n weights[torch.logical_not(mask)] = off_corridor_penalty\n weights = torch.cat([weights, w])\n\n return weights\n\n\nclass ETCLoss(_Loss):\n \"\"\"\n The input of this loss is B X S X 2, where the vector in zero dim is the ETC normalized\n by max hours and the vector in 1 dim is the Progress (0-1) of this second.\n S is the sequence (video) length.\n The labels are given as dict with two keys 'etc' and 'progress'. Each holding matrix of shape (B X S).\n This is because the batch can hold videos with different sizes,\n for example:\n Batch Size is 1 and sequence (video) length is 5 \n pred: [[[0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.],\n [0., 0.]]]\n labels:\n {\n \"etc\": [[0., 0., 0., 0., 0.]]\n \"progress\": [[0., 0., 0., 0., 0.]]\n }\n S will be the size of the longest video in the batch and the extra sequence for each video\n will padded using PADDING_VALUE and be ignored\n \"\"\"\n\n def __init__(\n self,\n device,\n mean_length,\n max_hours=3,\n off_corridor_penalty=1,\n alpha=1,\n beta=1,\n gamma=1,\n delta=1,\n d=5,\n **rest\n ):\n super().__init__(reduction=\"none\")\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.delta = delta\n self.device = device\n self.mean_length = mean_length\n self.d = d\n self.off_corridor_penalty = off_corridor_penalty\n self.max_hours = max_hours\n\n def seq_mae(self, preds: Tensor, labels: Tensor, mask, weights=None):\n if weights is None:\n weights = torch.ones_like(preds)\n\n abs_error = torch.abs(preds - labels) * weights\n\n lengths = torch.sum(mask, dim=1)\n\n abs_error[mask == False] = 0\n\n mses = torch.sum(abs_error, dim=1) / lengths\n return torch.mean(mses)\n\n def seq_smape(self, preds, labels, mask, weights):\n lengths = torch.sum(mask, dim=1)\n\n smape_pp = (\n torch.abs(preds - labels) / (torch.abs(preds) + torch.abs(labels))\n ) * weights\n\n smape_pp[mask == False] = 0\n\n smape = torch.sum(smape_pp, dim=1) / lengths\n\n return torch.mean(smape)\n\n def seq_var_loss(self, preds, labels):\n rolled_preds = torch.roll(preds, 1, dims=1)\n rolled_preds[:, 0] = preds[:, 0]\n return self.seq_mae(preds, rolled_preds, labels != PADDING_VALUE)\n\n def corridor_weights(self, preds, labels):\n weights = torch.zeros_like(preds)\n for i in range(len(preds)):\n lengths = torch.sum(labels != PADDING_VALUE, dim=1)\n weights[i] = calculate_corridor_weights(\n preds=preds[i] * SECONDS_IN_HOUR * self.max_hours,\n labels=labels[i] * SECONDS_IN_HOUR * self.max_hours,\n video_length=lengths[i],\n mean_video_length=self.mean_length,\n d=self.d,\n off_corridor_penalty=self.off_corridor_penalty,\n )\n return weights\n\n def forward(self, preds: Tensor, labels: Tensor):\n etc_preds = preds[:, :, 0]\n prog_preds = preds[:, :, 1]\n\n weights = self.corridor_weights(etc_preds, labels[\"etc\"])\n\n if not self.weighted_var_loss:\n interval_loss_weights = torch.ones_like(weights)\n\n loss = (\n (\n self.alpha\n * self.seq_mae(\n etc_preds, labels[\"etc\"], labels[\"etc\"] != PADDING_VALUE, weights\n )\n )\n + (\n self.beta\n * self.seq_smape(\n etc_preds, labels[\"etc\"], labels[\"etc\"] != PADDING_VALUE, weights\n )\n )\n + (\n self.gamma\n * self.seq_mae(\n prog_preds,\n labels[\"progress\"],\n labels[\"etc\"] != PADDING_VALUE,\n weights,\n )\n )\n + (\n self.delta\n * self.seq_var_loss(etc_preds, labels[\"etc\"], interval_loss_weights)\n )\n )\n return loss\n\n\nclass TotalVarLoss(_Loss):\n \"\"\"\n Loss for the ETCouple model.\n When using a model like ETCouple, we would like to smooth the result by total var.\n Given the wanted loss (L1/L2), averaging each couple losses, then adding the real diff (which is negative)\n in order to allow the loss to be 0.\n\n penalty_type: allowing to penalize the result by progress / late stage\n\n \"\"\"\n\n def __init__(self, device, reduction: str = \"mean\") -> None:\n super().__init__(reduction=reduction)\n self.device = device\n\n def forward(\n self,\n preds0: Tensor,\n preds1: Tensor,\n labels0: Tensor,\n labels1: Tensor,\n ) -> float:\n total_var = 0\n for i in range(len(labels0)):\n var_loss = (\n torch.pow((labels0[i] - preds0[i]), 2)\n + torch.pow((labels1[i] - preds1[i]), 2)\n ) / 2\n diff = torch.abs(\n torch.abs(preds0[i] - preds1[i]) - torch.abs(labels0[i] - labels1[i])\n )\n total_var += var_loss + diff\n\n total_var /= len(labels0)\n\n return total_var\n\n\nclass SMAPELoss(_Loss):\n def __init__(self, device, **rest):\n super().__init__(reduction=\"none\")\n self.l1_loss = L1Loss(reduction=\"none\")\n self.device = device\n\n def forward(\n self,\n preds0: Tensor,\n preds1: Tensor,\n labels0: Tensor,\n labels1: Tensor,\n ):\n l1_loss_0 = self.l1_loss(preds0, labels0)\n x = preds0 + labels0\n l1_loss_1 = self.l1_loss(preds1, labels1)\n y = preds1 + labels1\n\n return torch.mean(l1_loss_0 / x) + torch.mean(l1_loss_1 / y)\n\n\nclass ETCoupleLoss(_Loss):\n \"\"\"\n Loss for ETCouple model. Calculate the loss given two points (current second and interval second before).\n \"\"\"\n\n def __init__(\n self,\n device,\n reduction: str = \"mean\",\n mae_weight=1,\n smape_weight=1,\n total_var_weight=1,\n ) -> None:\n super().__init__(reduction=reduction)\n\n self.total_var = TotalVarLoss(device, reduction=reduction)\n self.smape_loss = SMAPELoss(device)\n self.l1_loss = L1Loss()\n self.mae_weight = mae_weight\n self.smape_weight = smape_weight\n self.total_var_weight = total_var_weight\n\n def forward(self, preds: Tensor, labels: Tensor):\n prog_preds = preds[0]\n prog_labels = labels[\"progress\"]\n\n etc_preds = preds[1]\n etc_labels = labels[\"etc\"]\n\n prog_mae = self.l1_loss.forward(prog_preds, prog_labels)\n etc_mae = self.l1_loss.forward(etc_preds, etc_labels)\n mae = self.mae_weight * (prog_mae + etc_mae)\n\n etc_first_preds = etc_preds[0::2]\n etc_second_preds = etc_preds[1::2]\n etc_first_labels = etc_labels[0::2]\n etc_second_labels = etc_labels[1::2]\n\n smape = self.smape_weight * self.smape_loss.forward(\n etc_first_preds, etc_second_preds, etc_first_labels, etc_second_labels\n )\n\n total_var = self.total_var_weight * self.total_var.forward(\n etc_first_preds, etc_second_preds, etc_first_labels, etc_second_labels\n )\n\n return [mae, smape, total_var]\n","repo_name":"theator/etc","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"31898157683","text":"import config\r\nimport cubeHandler\r\nimport sys\r\nimport traceback\r\nfrom telegram import ParseMode\r\nfrom telegram.ext import Updater, CommandHandler\r\nfrom telegram.utils.helpers import mention_html\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import create_engine\r\n\r\n# Set up basic logging\r\nimport logging\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n filename=config.log_file,\r\n level=config.log_level)\r\n\r\n\r\ndef main():\r\n # Create the EventHandler and pass it your bot's token.\r\n updater = Updater(config.telegram_token,\r\n use_context=True,\r\n request_kwargs={'read_timeout': 10, 'connect_timeout': 7})\r\n \r\n # Get the dispatcher to register handlers\r\n dp = updater.dispatcher\r\n \r\n # bH = botHandlers.BotHandlers(dp)\r\n cube_h = cubeHandler.CubeHandler(dp)\r\n \r\n # log all errors\r\n # dp.add_error_handler(error)\r\n\r\n # help handler\r\n dp.add_handler(CommandHandler(\"help\", send_help))\r\n \r\n # Start the Bot\r\n updater.start_polling()\r\n logging.info(\"Game Bot Started\")\r\n \r\n # Run the bot until the you presses Ctrl-C or the process receives SIGINT,\r\n # SIGTERM or SIGABRT. This should be used most of the time, since\r\n # start_polling() is non-blocking and will stop the bot gracefully.\r\n updater.idle()\r\n\r\n\r\n# this is a general error handler function. If you need more information about specific type of update, add it to the\r\n# payload in the respective if clause\r\ndef error(update, context):\r\n # add all the dev user_ids in this list. You can also add ids of channels or groups.\r\n devs = [config.admin_id]\r\n # we want to notify the user of this problem. This will always work, but not notify users if the update is an \r\n # callback or inline query, or a poll update. In case you want this, keep in mind that sending the message \r\n # could fail\r\n if update.effective_message:\r\n text = \"Hey. I'm sorry to inform you that an error happened while I tried to handle your update. \" \\\r\n \"My developer(s) will be notified.\"\r\n update.effective_message.reply_text(text)\r\n # This traceback is created with accessing the traceback object from the sys.exc_info, which is returned as the\r\n # third value of the returned tuple. Then we use the traceback.format_tb to get the traceback as a string, which\r\n # for a weird reason separates the line breaks in a list, but keeps the linebreaks itself. So just joining an\r\n # empty string works fine.\r\n trace = \"\".join(traceback.format_tb(sys.exc_info()[2]))\r\n # lets try to get as much information from the telegram update as possible\r\n payload = \"\"\r\n # normally, we always have an user. If not, its either a channel or a poll update.\r\n if update.effective_user:\r\n payload += f' with the user {mention_html(update.effective_user.id, update.effective_user.first_name)}'\r\n # there are more situations when you don't get a chat\r\n if update.effective_chat:\r\n payload += f' within the chat {update.effective_chat.title}'\r\n if update.effective_chat.username:\r\n payload += f' (@{update.effective_chat.username})'\r\n # but only one where you have an empty payload by now: A poll (buuuh)\r\n if update.poll:\r\n payload += f' with the poll id {update.poll.id}.'\r\n # lets put this in a \"well\" formatted text\r\n text = f\"Hey.\\n The error {context.error} happened{payload}. The full traceback:\\n\\n{trace}\" \\\r\n f\"\"\r\n # and send it to the dev(s)\r\n for dev_id in devs:\r\n context.bot.send_message(dev_id, text, parse_mode=ParseMode.HTML)\r\n # we raise the error again, so the logger module catches it. If you don't use the logger module, use it.\r\n raise\r\n\r\n\r\ndef send_help(update, context):\r\n \"\"\"Send commands and if admin request send additionnals admin commands\"\"\"\r\n text = \"\"\r\n if update.effective_user.id == config.admin_id:\r\n # ADMIN: Send all commands\r\n text += f\"Pour inviter de nouveaux joueurs envoie leur {config.share_url}\\n\"\\\r\n \"/init - initialize game\\n\"\\\r\n \"/play [mode]- start playing game mode\\n\"\\\r\n \"/win - stop game\\n\"\\\r\n \"/sealed - send sealed pool\\n\"\\\r\n \"/draft - start draft\\n\"\\\r\n \"/rematch - reload last decks\\n\"\r\n\r\n text += \"/scan - scanner ses cartes\\n\"\\\r\n \"/load_deck - charger son dernier deck\\n\"\\\r\n \"/load_deckstats [url]- load deck from url\\n\"\\\r\n \"/mydeck - editer son deck\\n\"\\\r\n \"/sign - signer une carte\\n\"\\\r\n \"Tips:\\n- Pour supprimer toutes les cartes de son deck, \"\\\r\n \"taper 'REMOVE ALL CARDS' dans le menu d'édition des cartes\"\r\n\r\n if update.effective_user.id == config.admin_id:\r\n context.bot.send_message(update.effective_user.id, text=text)\r\n else:\r\n context.bot.send_message(chat_id=update.effective_chat.id,\r\n text=text)\r\n\r\n\r\nif __name__ == '__main__': \r\n main()\r\n","repo_name":"NicolasCapon/cubebot","sub_path":"cubebot/startBot.py","file_name":"startBot.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70828981496","text":"# Kaggle exercise for Python - external libraries and other codes\n# Link: https://www.kaggle.com/akshitgupta29/exercise-working-with-external-libraries/edit\n# @author: Akshit Gupta\n\n\n#Problem 3\ndef blackjack_hand_greater_than(hand_1, hand_2):\n \"\"\"\n Return True if hand_1 beats hand_2, and False otherwise.\n \n In order for hand_1 to beat hand_2 the following must be true:\n - The total of hand_1 must not exceed 21\n - The total of hand_1 must exceed the total of hand_2 OR hand_2's total must exceed 21\n \n Hands are represented as a list of cards. Each card is represented by a string.\n \n When adding up a hand's total, cards with numbers count for that many points. Face\n cards ('J', 'Q', and 'K') are worth 10 points. 'A' can count for 1 or 11.\n \n When determining a hand's total, you should try to count aces in the way that \n maximizes the hand's total without going over 21. e.g. the total of ['A', 'A', '9'] is 21,\n the total of ['A', 'A', '9', '3'] is 14.\n \n Examples:\n >>> blackjack_hand_greater_than(['K'], ['3', '4'])\n True\n >>> blackjack_hand_greater_than(['K'], ['10'])\n False\n >>> blackjack_hand_greater_than(['K', 'K', '2'], ['3'])\n False\n \"\"\"\n h1_sum = 0\n h2_sum = 0\n \n for card1 in hand_1:\n if card1 in ['J', 'Q', 'K']:\n h1_sum +=10\n elif card1 != 'A':\n h1_sum += int(card1)\n \n if card1 == 'A':\n if h1_sum < 10 :\n h1_sum += 11\n else:\n h1_sum += 1\n \n for card2 in hand_2:\n if card2 in ['J', 'Q', 'K']:\n h2_sum +=10\n elif card2 != 'A':\n h2_sum += int(card2)\n \n if card2 == 'A':\n if h2_sum < 10:\n h2_sum += 11\n else:\n h2_sum += 1\n \n if (h1_sum <= 21) and (h1_sum > h2_sum or h2_sum > 21):\n return True\n return False\n \n \n \nif __name__ == \"__main__\":\n hand_1=['2', '8', '3']\n hand_2=['A', '4', '4', '7']\n result = blackjack_hand_greater_than(hand_1, hand_2)\n print (result)","repo_name":"akshitgupta29/DS-ML","sub_path":"Kaggle/working_external_lib.py","file_name":"working_external_lib.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72198932217","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.naive_bayes import GaussianNB\n\nX = np.array([[-1, 1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\ny = np.array([1, 1, 1, 2, 2, 2])\n# y = np.array([1,2,2,1,2,2])\n# y = np.array([1,1,2,1,1,2])\n\nX_min = -4\nX_max = 4\ny_min = -4\ny_max = 4\nh = .005\nxx, yy = np.meshgrid(np.arange(X_min, X_max, h),\n np.arange(y_min, y_max, h))\n\nclf1 = GaussianNB()\nclf1.fit(X, y)\nZ = clf1.predict(np.c_[xx.ravel(), yy.ravel()])\n\nZ = Z.reshape(xx.shape)\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.pcolormesh(xx, yy, Z)\n\nXB, YB, XR, YR = [], [], [], []\nindex = 0\nfor index in range(len(y)):\n if y[index] == 1:\n print(\"B equal to: \", X[index, :])\n XB.append(X[index, 0])\n YB.append(X[index, 1])\n if y[index] == 2:\n print(\"R equal to: \", X[index, :])\n XR.append(X[index, 0])\n YR.append(X[index, 1])\n pass\nplt.scatter(XB, YB, color='b', label='blue, type1')\nplt.scatter(XR, YR, color='r', label='red, type2')\nplt.legend()\nplt.xlabel('variable 1')\nplt.ylabel('variable 2')\nplt.show()","repo_name":"leviliangtw/PYKT-MLLab","sub_path":"demo25.py","file_name":"demo25.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"70045571258","text":"from ..models import lekert_adatok\n\ncurrent = 0\nonPage = 100\nshowndata = lekert_adatok.objects.none()\ngeneralt = False\n\n\ndef init(db, gen=False):\n global showndata, current, generalt, onPage\n current = 0\n showndata = db\n generalt = gen\n\n\ndef show():\n global current, showndata\n if (showndata.count() < onPage):\n return showndata\n return showndata[current:current + onPage]\n\n\ndef forward():\n global current\n if ((current + onPage) >= showndata.count()):\n return show()\n else:\n current += onPage\n return show()\n\n\ndef forwardMore():\n global current\n if ((current + (onPage * 10)) >= showndata.count()):\n return show()\n else:\n current += (onPage * 10)\n return show()\n\n\ndef back():\n global current\n if ((current - onPage) < 0):\n current = 0\n return show()\n else:\n current -= onPage\n return show()\n\n\ndef backMore():\n global current\n if ((current - (onPage * 10)) < 0):\n current = 0\n return show()\n else:\n current -= (onPage * 10)\n return show()\n\n\ndef getPage():\n return int(current / onPage) + 1\n\n\ndef getMaxPage():\n return int(showndata.count() / onPage) + 1\n\n\ndef isGen():\n return generalt\n\n\ndef setOnPage(ps):\n if not ps.isnumeric():\n return\n ps = int(ps)\n if ps not in [25, 50, 100, 250]:\n ps = 25\n if ps <= 0:\n return\n global onPage\n onPage = ps\n\n\ndef getOnPage():\n return onPage\n","repo_name":"zsoltkovacs94/koffein-flu-prediction","sub_path":"koffein_flu_prediction/website/static/pager.py","file_name":"pager.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"1419642526","text":"# https://leetcode.com/problems/course-schedule-iv/\n\n\n\nclass Solution:\n def checkIfPrerequisite(self, numCourses: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:\n indegree = [0] * numCourses\n graph = [[] for _ in range(numCourses)]\n for pre in prerequisites:\n indegree[pre[1]] += 1\n graph[pre[0]].append(pre[1])\n queue = []\n for i in range(numCourses):\n if indegree[i] == 0:\n queue.append(i)\n while queue:\n node = queue.pop(0)\n for i in graph[node]:\n indegree[i] -= 1\n if indegree[i] == 0:\n queue.append(i)\n res = []\n for q in queries:\n if q[1] in graph[q[0]]:\n res.append(True)\n else:\n res.append(False)\n return res\n","repo_name":"zsegla/Camp-II","sub_path":"course-schedule-iv.py","file_name":"course-schedule-iv.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4604742406","text":"import requests\nimport json\n\nfrom secret_key import KEY\n\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\n# from kivy.config import Config\n\nfrom kivy.uix.popup import Popup\n\n\n# Config.set('graphics', 'resizable', '0')\n# Config.set('graphics', 'width', '640')\n# Config.set('graphics', 'heigth', '480')\n\ndata = requests.get(KEY)\nmovie = data.text\nmovie_list = json.loads(movie)\nmovie_list_result = []\nmovie_list_overview = []\nmovie_list_lang = []\nmovie_list_popularity = []\nmovie_list_release_date = []\n\n\ndef movie_popular(movie_list_result): \n \n for i in range(len(movie_list['results'])):\n movie_list_result.append(movie_list['results'][i]['original_title'])\n movie_list_overview.append(movie_list['results'][i]['overview'])\n movie_list_lang.append(movie_list['results'][i]['original_language'])\n movie_list_popularity.append(movie_list['results'][i]['popularity'])\n movie_list_release_date.append(movie_list['results'][i]['release_date'])\n\n\nclass MobileApp(App):\n bl = BoxLayout(orientation='vertical', padding=[20, 40])\n def build(self):\n bl = self.bl\n if movie_list:\n for i in range(len(movie_list['results'])):\n bl.add_widget(Button(text=movie_list_result[i], on_press=self.btn_press))\n else:\n for i in range(20):\n bl.add_widget(Button(text='данные не получены', on_press=self.btn_press))\n return bl\n\n\n\n\n def btn_press(self, instance):\n key = 0\n for i in range(len(movie_list['results'])):\n if movie_list_result[i] == instance.text:\n key = i\n content = Button(\n text=f'''\n Описание: {movie_list_overview[key]}\\n\n Дата выхода: {movie_list_release_date[key]}\\n\n Основной язык: {movie_list_lang[key]}\\n\n Рейтинг популярности: {movie_list_popularity[key]}\n ''',\n font_size = 16,\n halign ='left',\n valign = 'middle',\n \n split_str = '.',\n size_hint=(1, 1),\n text_size = (500, 700)\n )\n popup = Popup(title=instance.text, content=content, auto_dismiss=False)\n content.bind(on_press=popup.dismiss)\n popup.open()\n \n\n\nif __name__ == \"__main__\":\n movie_popular(movie_list_result)\n MobileApp().run()","repo_name":"Mikhail-1985/data_prime","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2068335942","text":"import importlib\nimport json\nimport os\nimport shutil\nimport sqlite3\nfrom contextlib import contextmanager\nfrom operator import itemgetter\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport langchain\nimport numpy as np\nimport openai\nimport pytest\nimport transformers\nfrom langchain import SQLDatabase\nfrom langchain.agents import AgentType, initialize_agent\nfrom langchain.chains import (\n APIChain,\n ConversationChain,\n HypotheticalDocumentEmbedder,\n LLMChain,\n RetrievalQA,\n)\nfrom langchain.chains.api import open_meteo_docs\nfrom langchain.chains.base import Chain\nfrom langchain.chains.qa_with_sources import load_qa_with_sources_chain\nfrom langchain.document_loaders import TextLoader\nfrom langchain.embeddings.base import Embeddings\nfrom langchain.embeddings.fake import FakeEmbeddings\nfrom langchain.evaluation.qa import QAEvalChain\nfrom langchain.llms import HuggingFacePipeline, OpenAI\nfrom langchain.llms.base import LLM\nfrom langchain.memory import ConversationBufferMemory\nfrom langchain.prompts import PromptTemplate\nfrom langchain.requests import TextRequestsWrapper\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.tools import Tool\nfrom langchain.vectorstores import FAISS\nfrom langchain_experimental.sql import SQLDatabaseChain\nfrom packaging import version\nfrom packaging.version import Version\nfrom pydantic import BaseModel\nfrom pyspark.sql import SparkSession\n\nimport mlflow\nimport mlflow.pyfunc.scoring_server as pyfunc_scoring_server\nfrom mlflow.deployments import PredictionsResponse\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models.signature import ModelSignature, Schema\nfrom mlflow.types.schema import ColSpec\nfrom mlflow.utils.openai_utils import (\n TEST_CONTENT,\n TEST_INTERMEDIATE_STEPS,\n TEST_SOURCE_DOCUMENTS,\n _mock_chat_completion_response,\n _mock_request,\n _MockResponse,\n)\n\nfrom tests.helper_functions import pyfunc_serve_and_score_model\n\n\n@contextmanager\ndef _mock_async_request(content=TEST_CONTENT):\n with _mock_request(return_value=_mock_chat_completion_response(content)) as m:\n yield m\n\n\n@pytest.fixture\ndef model_path(tmp_path):\n return tmp_path / \"model\"\n\n\n@pytest.fixture(scope=\"module\")\ndef spark():\n with SparkSession.builder.master(\"local[*]\").getOrCreate() as s:\n yield s\n\n\n@pytest.fixture(autouse=True)\ndef set_envs(monkeypatch):\n monkeypatch.setenvs(\n {\n \"MLFLOW_TESTING\": \"true\",\n \"OPENAI_API_KEY\": \"test\",\n \"SERPAPI_API_KEY\": \"test\",\n }\n )\n importlib.reload(openai)\n\n\ndef create_huggingface_model(model_path):\n architecture = \"lordtt13/emo-mobilebert\"\n mlflow.transformers.save_model(\n transformers_model={\n \"model\": transformers.TFMobileBertForSequenceClassification.from_pretrained(\n architecture\n ),\n \"tokenizer\": transformers.AutoTokenizer.from_pretrained(architecture),\n },\n path=model_path,\n )\n llm = mlflow.transformers.load_model(model_path)\n prompt = PromptTemplate(\n input_variables=[\"product\"],\n template=\"What is a good name for a company that makes {product}?\",\n )\n hf_pipe = HuggingFacePipeline(pipeline=llm)\n return LLMChain(llm=hf_pipe, prompt=prompt)\n\n\ndef create_openai_llmchain():\n llm = OpenAI(temperature=0.9)\n prompt = PromptTemplate(\n input_variables=[\"product\"],\n template=\"What is a good name for a company that makes {product}?\",\n )\n return LLMChain(llm=llm, prompt=prompt)\n\n\ndef create_qa_eval_chain():\n llm = OpenAI(temperature=0)\n return QAEvalChain.from_llm(llm)\n\n\ndef create_qa_with_sources_chain():\n # StuffDocumentsChain\n return load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n\n\ndef create_openai_llmagent(return_intermediate_steps=False):\n from langchain.agents import AgentType, initialize_agent, load_tools\n\n # First, let's load the language model we're going to use to control the agent.\n llm = OpenAI(temperature=0)\n\n # Next, let's load some tools to use.\n tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n\n # Finally, let's initialize an agent with the tools.\n return initialize_agent(\n tools,\n llm,\n agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=True,\n return_intermediate_steps=return_intermediate_steps,\n )\n\n\nclass FakeLLM(LLM):\n \"\"\"Fake LLM wrapper for testing purposes.\"\"\"\n\n queries: Optional[Mapping] = None\n\n @property\n def _llm_type(self) -> str:\n \"\"\"Return type of llm.\"\"\"\n return \"fake\"\n\n # pylint: disable=arguments-differ\n def _call(self, prompt: str, stop: Optional[List[str]] = None, run_manager=None) -> str:\n \"\"\"First try to lookup in queries, else return 'foo' or 'bar'.\"\"\"\n if self.queries is not None:\n return self.queries[prompt]\n if stop is None:\n return \"foo\"\n else:\n return \"bar\"\n\n @property\n def _identifying_params(self) -> Mapping[str, Any]:\n return {}\n\n\nclass FakeChain(Chain):\n \"\"\"Fake chain class for testing purposes.\"\"\"\n\n be_correct: bool = True\n the_input_keys: List[str] = [\"foo\"]\n the_output_keys: List[str] = [\"bar\"]\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Input keys.\"\"\"\n return self.the_input_keys\n\n @property\n def output_keys(self) -> List[str]:\n \"\"\"Output key of bar.\"\"\"\n return self.the_output_keys\n\n # pylint: disable=arguments-differ\n def _call(self, inputs: Dict[str, str], run_manager=None) -> Dict[str, str]:\n if self.be_correct:\n return {\"bar\": \"baz\"}\n else:\n return {\"baz\": \"bar\"}\n\n\ndef test_langchain_native_save_and_load_model(model_path):\n model = create_openai_llmchain()\n mlflow.langchain.save_model(model, model_path)\n\n loaded_model = mlflow.langchain.load_model(model_path)\n assert type(loaded_model) == langchain.chains.llm.LLMChain\n assert type(loaded_model.llm) == langchain.llms.openai.OpenAI\n assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate\n assert loaded_model.prompt.template == \"What is a good name for a company that makes {product}?\"\n\n\ndef test_langchain_native_log_and_load_model():\n model = create_openai_llmchain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n\n assert \"langchain\" in logged_model.flavors\n assert str(logged_model.signature.inputs) == \"['product': string]\"\n assert str(logged_model.signature.outputs) == \"['text': string]\"\n\n assert type(loaded_model) == langchain.chains.llm.LLMChain\n assert type(loaded_model.llm) == langchain.llms.openai.OpenAI\n assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate\n assert loaded_model.prompt.template == \"What is a good name for a company that makes {product}?\"\n\n\ndef test_pyfunc_load_openai_model():\n model = create_openai_llmchain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n\n loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n\n assert \"langchain\" in logged_model.flavors\n assert type(loaded_model) == mlflow.pyfunc.PyFuncModel\n\n\ndef test_langchain_model_predict():\n with _mock_request(return_value=_mock_chat_completion_response()):\n model = create_openai_llmchain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n result = loaded_model.predict([{\"product\": \"MLflow\"}])\n assert result == [TEST_CONTENT]\n\n\ndef test_pyfunc_spark_udf_with_langchain_model(spark):\n model = create_openai_llmchain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n loaded_model = mlflow.pyfunc.spark_udf(spark, logged_model.model_uri, result_type=\"string\")\n df = spark.createDataFrame([(\"MLflow\",), (\"Spark\",)], [\"product\"])\n df = df.withColumn(\"answer\", loaded_model())\n pdf = df.toPandas()\n assert pdf[\"answer\"].tolist() == [TEST_CONTENT, TEST_CONTENT]\n\n\ndef test_langchain_log_huggingface_hub_model_metadata(model_path):\n model = create_huggingface_model(model_path)\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n\n assert \"langchain\" in logged_model.flavors\n assert str(logged_model.signature.inputs) == \"['product': string]\"\n assert str(logged_model.signature.outputs) == \"['text': string]\"\n\n assert type(loaded_model) == langchain.chains.llm.LLMChain\n assert type(loaded_model.llm) == langchain.llms.huggingface_pipeline.HuggingFacePipeline\n assert type(loaded_model.prompt) == langchain.prompts.PromptTemplate\n assert loaded_model.prompt.template == \"What is a good name for a company that makes {product}?\"\n\n\n@pytest.mark.parametrize(\"return_intermediate_steps\", [False, True])\ndef test_langchain_agent_model_predict(return_intermediate_steps):\n langchain_agent_output = {\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"choices\": [\n {\n \"index\": 0,\n \"finish_reason\": \"stop\",\n \"text\": f\"Final Answer: {TEST_CONTENT}\",\n }\n ],\n \"usage\": {\"prompt_tokens\": 9, \"completion_tokens\": 12, \"total_tokens\": 21},\n }\n model = create_openai_llmagent(return_intermediate_steps=return_intermediate_steps)\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n loaded_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n langchain_input = {\n \"input\": \"What was the high temperature in SF yesterday in Fahrenheit?\"\n \"What is that number raised to the .023 power?\"\n }\n\n if return_intermediate_steps:\n langchain_output = [{\"output\": TEST_CONTENT, \"intermediate_steps\": TEST_INTERMEDIATE_STEPS}]\n # hardcoded output key because that is the default for an agent\n # but it is not an attribute of the agent or anything that we log\n else:\n langchain_output = [TEST_CONTENT]\n\n with _mock_request(return_value=_MockResponse(200, langchain_agent_output)):\n result = loaded_model.predict([langchain_input])\n assert result == langchain_output\n\n inference_payload = json.dumps({\"inputs\": langchain_input})\n langchain_agent_output_serving = {\"predictions\": langchain_agent_output}\n with _mock_request(return_value=_MockResponse(200, langchain_agent_output_serving)):\n response = pyfunc_serve_and_score_model(\n logged_model.model_uri,\n data=inference_payload,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n\n assert (\n PredictionsResponse.from_json(response.content.decode(\"utf-8\"))\n == langchain_agent_output_serving\n )\n\n\ndef test_langchain_native_log_and_load_qaevalchain():\n # QAEvalChain is a subclass of LLMChain\n model = create_qa_eval_chain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert model == loaded_model\n\n\ndef test_langchain_native_log_and_load_qa_with_sources_chain():\n # StuffDocumentsChain is a subclass of Chain\n model = create_qa_with_sources_chain()\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(model, \"langchain_model\")\n\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert model == loaded_model\n\n\n@pytest.mark.skipif(\n version.parse(langchain.__version__) < version.parse(\"0.0.194\"),\n reason=\"Saving RetrievalQA chains requires langchain>=0.0.194\",\n)\ndef test_log_and_load_retrieval_qa_chain(tmp_path):\n # Create the vector db, persist the db to a local fs folder\n loader = TextLoader(\"tests/langchain/state_of_the_union.txt\")\n documents = loader.load()\n text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n docs = text_splitter.split_documents(documents)\n embeddings = FakeEmbeddings(size=5)\n db = FAISS.from_documents(docs, embeddings)\n persist_dir = str(tmp_path / \"faiss_index\")\n db.save_local(persist_dir)\n\n # Create the RetrievalQA chain\n retrievalQA = RetrievalQA.from_llm(llm=OpenAI(), retriever=db.as_retriever())\n\n # Log the RetrievalQA chain\n def load_retriever(persist_directory):\n embeddings = FakeEmbeddings(size=5)\n vectorstore = FAISS.load_local(persist_directory, embeddings)\n return vectorstore.as_retriever()\n\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n retrievalQA,\n \"retrieval_qa_chain\",\n loader_fn=load_retriever,\n persist_dir=persist_dir,\n )\n\n # Remove the persist_dir\n shutil.rmtree(persist_dir)\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == retrievalQA\n\n loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n langchain_input = {\"query\": \"What did the president say about Ketanji Brown Jackson\"}\n langchain_output = [TEST_CONTENT]\n result = loaded_pyfunc_model.predict([langchain_input])\n assert result == langchain_output\n\n # Serve the chain\n inference_payload = json.dumps({\"inputs\": langchain_input})\n langchain_output_serving = {\"predictions\": langchain_output}\n\n response = pyfunc_serve_and_score_model(\n logged_model.model_uri,\n data=inference_payload,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n\n assert (\n PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == langchain_output_serving\n )\n\n\n@pytest.mark.skipif(\n version.parse(langchain.__version__) < version.parse(\"0.0.194\"),\n reason=\"Saving RetrievalQA chains requires langchain>=0.0.194\",\n)\ndef test_log_and_load_retrieval_qa_chain_multiple_output(tmp_path):\n # Create the vector db, persist the db to a local fs folder\n loader = TextLoader(\"tests/langchain/state_of_the_union.txt\")\n documents = loader.load()\n text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n docs = text_splitter.split_documents(documents)\n embeddings = FakeEmbeddings(size=5)\n db = FAISS.from_documents(docs, embeddings)\n persist_dir = str(tmp_path / \"faiss_index\")\n db.save_local(persist_dir)\n\n # Create the RetrievalQA chain\n retrievalQA = RetrievalQA.from_llm(\n llm=OpenAI(), retriever=db.as_retriever(), return_source_documents=True\n )\n\n # Log the RetrievalQA chain\n def load_retriever(persist_directory):\n embeddings = FakeEmbeddings(size=5)\n vectorstore = FAISS.load_local(persist_directory, embeddings)\n return vectorstore.as_retriever()\n\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n retrievalQA,\n \"retrieval_qa_chain\",\n loader_fn=load_retriever,\n persist_dir=persist_dir,\n )\n\n # Remove the persist_dir\n shutil.rmtree(persist_dir)\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == retrievalQA\n\n loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n langchain_input = {\"query\": \"What did the president say about Ketanji Brown Jackson\"}\n langchain_output = [\n {loaded_model.output_key: TEST_CONTENT, \"source_documents\": TEST_SOURCE_DOCUMENTS}\n ]\n result = loaded_pyfunc_model.predict([langchain_input])\n\n assert result == langchain_output\n\n # Serve the chain\n inference_payload = json.dumps({\"inputs\": langchain_input})\n langchain_output_serving = {\"predictions\": langchain_output}\n\n response = pyfunc_serve_and_score_model(\n logged_model.model_uri,\n data=inference_payload,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n\n assert (\n PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == langchain_output_serving\n )\n\n\n# Define a special embedding for testing\nclass DeterministicDummyEmbeddings(Embeddings, BaseModel):\n size: int\n\n def _get_embedding(self, text: str) -> List[float]:\n seed = abs(hash(text)) % (10**8)\n np.random.seed(seed)\n return list(np.random.normal(size=self.size))\n\n def embed_documents(self, texts: List[str]) -> List[List[float]]:\n return [self._get_embedding(t) for t in texts]\n\n def embed_query(self, text: str) -> List[float]:\n return self._get_embedding(text)\n\n\ndef assert_equal_retrievers(retriever, expected_retreiver):\n from langchain.schema.retriever import BaseRetriever\n\n assert isinstance(retriever, BaseRetriever)\n assert isinstance(retriever, type(expected_retreiver))\n assert isinstance(retriever.vectorstore, type(expected_retreiver.vectorstore))\n assert retriever.tags == expected_retreiver.tags\n assert retriever.metadata == expected_retreiver.metadata\n assert retriever.search_type == expected_retreiver.search_type\n assert retriever.search_kwargs == expected_retreiver.search_kwargs\n\n\ndef test_log_and_load_retriever_chain(tmp_path):\n # Create the vector db, persist the db to a local fs folder\n loader = TextLoader(\"tests/langchain/state_of_the_union.txt\")\n documents = loader.load()\n text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)\n docs = text_splitter.split_documents(documents)\n embeddings = DeterministicDummyEmbeddings(size=5)\n db = FAISS.from_documents(docs, embeddings)\n persist_dir = str(tmp_path / \"faiss_index\")\n db.save_local(persist_dir)\n\n # Define the loader_fn\n def load_retriever(persist_directory):\n from typing import List # pylint: disable=lazy-builtin-import\n\n import numpy as np\n from langchain.embeddings.base import Embeddings\n from pydantic import BaseModel\n\n class DeterministicDummyEmbeddings(Embeddings, BaseModel):\n size: int\n\n def _get_embedding(self, text: str) -> List[float]:\n if isinstance(text, np.ndarray):\n text = text.item()\n seed = abs(hash(text)) % (10**8)\n np.random.seed(seed)\n return list(np.random.normal(size=self.size))\n\n def embed_documents(self, texts: List[str]) -> List[List[float]]:\n return [self._get_embedding(t) for t in texts]\n\n def embed_query(self, text: str) -> List[float]:\n return self._get_embedding(text)\n\n embeddings = DeterministicDummyEmbeddings(size=5)\n vectorstore = FAISS.load_local(persist_directory, embeddings)\n return vectorstore.as_retriever()\n\n # Log the retriever\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n db.as_retriever(),\n \"retriever\",\n loader_fn=load_retriever,\n persist_dir=persist_dir,\n )\n\n # Remove the persist_dir\n shutil.rmtree(persist_dir)\n\n # Load the retriever\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert_equal_retrievers(loaded_model, db.as_retriever())\n\n loaded_pyfunc_model = mlflow.pyfunc.load_model(logged_model.model_uri)\n query = \"What did the president say about Ketanji Brown Jackson\"\n langchain_input = {\"query\": query}\n result = loaded_pyfunc_model.predict([langchain_input])\n expected_result = [\n {\"page_content\": doc.page_content, \"metadata\": doc.metadata}\n for doc in db.as_retriever().get_relevant_documents(query)\n ]\n assert result == [expected_result]\n\n # Serve the retriever\n inference_payload = json.dumps({\"inputs\": langchain_input})\n response = pyfunc_serve_and_score_model(\n logged_model.model_uri,\n data=inference_payload,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n pred = PredictionsResponse.from_json(response.content.decode(\"utf-8\"))[\"predictions\"]\n assert type(pred) == list\n assert len(pred) == 1\n docs_list = pred[0]\n assert type(docs_list) == list\n assert len(docs_list) == 4\n # The returned docs are non-deterministic when used with dummy embeddings,\n # so we cannot assert pred == {\"predictions\": [expected_result]}\n\n\ndef load_requests_wrapper(_):\n return TextRequestsWrapper(headers=None, aiosession=None)\n\n\ndef test_log_and_load_api_chain():\n llm = OpenAI(temperature=0)\n apichain = APIChain.from_llm_and_api_docs(\n llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=[\"test.com\"]\n )\n\n # Log the APIChain\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n apichain,\n \"api_chain\",\n loader_fn=load_requests_wrapper,\n )\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == apichain\n\n\ndef test_log_and_load_subclass_of_specialized_chain():\n class APIChainSubclass(APIChain):\n pass\n\n llm = OpenAI(temperature=0)\n apichain_subclass = APIChainSubclass.from_llm_and_api_docs(\n llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True, limit_to_domains=[\"test.com\"]\n )\n\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n apichain_subclass,\n \"apichain_subclass\",\n loader_fn=load_requests_wrapper,\n )\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == apichain_subclass\n\n\ndef load_base_embeddings(_):\n return FakeEmbeddings(size=32)\n\n\n@pytest.mark.skip(reason=\"This fails due to https://github.com/hwchase17/langchain/issues/5131\")\ndef test_log_and_load_hyde_chain():\n # Create the HypotheticalDocumentEmbedder chain\n base_embeddings = FakeEmbeddings(size=32)\n llm = OpenAI()\n # Load with `web_search` prompt\n embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, \"web_search\")\n\n # Log the hyde chain\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n embeddings,\n \"hyde_chain\",\n loader_fn=load_base_embeddings,\n )\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == embeddings\n\n\ndef create_sqlite_db_file(db_dir):\n # Connect to SQLite database (or create it if it doesn't exist)\n with sqlite3.connect(db_dir) as conn:\n # Create a cursor\n c = conn.cursor()\n\n # Create a dummy table\n c.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS employees(\n id INTEGER PRIMARY KEY,\n name TEXT,\n salary REAL,\n department TEXT,\n position TEXT,\n hireDate TEXT);\n \"\"\"\n )\n\n # Insert dummy data into the table\n c.execute(\n \"\"\"\n INSERT INTO employees (name, salary, department, position, hireDate)\n VALUES ('John Doe', 80000, 'IT', 'Engineer', '2023-06-26');\n \"\"\"\n )\n\n\ndef load_db(persist_dir):\n db_file_path = os.path.join(persist_dir, \"my_database.db\")\n sqlite_uri = f\"sqlite:///{db_file_path}\"\n return SQLDatabase.from_uri(sqlite_uri)\n\n\n@pytest.mark.skipif(\n version.parse(langchain.__version__) < version.parse(\"0.0.297\"),\n reason=\"Saving SQLDatabaseChain chains requires langchain>=0.0.297\",\n)\ndef test_log_and_load_sql_database_chain(tmp_path):\n # Create the SQLDatabaseChain\n db_file_path = tmp_path / \"my_database.db\"\n sqlite_uri = f\"sqlite:///{db_file_path}\"\n llm = OpenAI(temperature=0)\n create_sqlite_db_file(db_file_path)\n db = SQLDatabase.from_uri(sqlite_uri)\n db_chain = SQLDatabaseChain.from_llm(llm, db)\n\n # Log the SQLDatabaseChain\n with mlflow.start_run():\n logged_model = mlflow.langchain.log_model(\n db_chain,\n \"sql_database_chain\",\n loader_fn=load_db,\n persist_dir=tmp_path,\n )\n\n # Load the chain\n loaded_model = mlflow.langchain.load_model(logged_model.model_uri)\n assert loaded_model == db_chain\n\n\ndef test_saving_not_implemented_for_memory():\n conversation = ConversationChain(llm=OpenAI(temperature=0), memory=ConversationBufferMemory())\n with pytest.raises(\n ValueError,\n match=\"Saving of memory is not yet supported.\",\n ):\n with mlflow.start_run():\n mlflow.langchain.log_model(conversation, \"conversation_model\")\n\n\ndef test_saving_not_implemented_chain_type():\n chain = FakeChain()\n if version.parse(langchain.__version__) < version.parse(\"0.0.309\"):\n error_message = \"Saving not supported for this chain type\"\n else:\n error_message = f\"Chain {chain} does not support saving.\"\n with pytest.raises(\n NotImplementedError,\n match=error_message,\n ):\n with mlflow.start_run():\n mlflow.langchain.log_model(chain, \"fake_chain\")\n\n\ndef test_unsupported_class():\n llm = FakeLLM()\n with pytest.raises(\n MlflowException,\n match=\"MLflow langchain flavor only supports subclasses of \"\n + \"langchain.chains.base.Chain\",\n ):\n with mlflow.start_run():\n mlflow.langchain.log_model(llm, \"fake_llm\")\n\n\ndef test_agent_with_unpicklable_tools(tmp_path):\n tmp_file = tmp_path / \"temp_file.txt\"\n with open(tmp_file, mode=\"w\") as temp_file:\n # files that aren't opened for reading cannot be pickled\n tools = [\n Tool.from_function(\n func=lambda: temp_file,\n name=\"Write 0\",\n description=\"If you need to write 0 to a file\",\n )\n ]\n agent = initialize_agent(\n llm=OpenAI(temperature=0), tools=tools, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION\n )\n\n with pytest.raises(\n MlflowException,\n match=(\n \"Error when attempting to pickle the AgentExecutor tools. \"\n \"This model likely does not support serialization.\"\n ),\n ):\n with mlflow.start_run():\n mlflow.langchain.log_model(agent, \"unpicklable_tools\")\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_runnable_passthrough():\n from langchain.schema.runnable import RunnablePassthrough\n\n runnable = RunnablePassthrough()\n assert runnable.invoke(\"hello\") == \"hello\"\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(runnable, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(\"hello\") == \"hello\"\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict([\"hello\"]) == [\"hello\"]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [\"hello\"]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [\"hello\"]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_runnable_lambda():\n from langchain.schema.runnable import RunnableLambda\n\n def add_one(x: int) -> int:\n return x + 1\n\n runnable = RunnableLambda(add_one)\n\n assert runnable.invoke(1) == 2\n assert runnable.batch([1, 2, 3]) == [2, 3, 4]\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(runnable, \"runnable_lambda\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(1) == 2\n assert loaded_model.batch([1, 2, 3]) == [2, 3, 4]\n\n loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert loaded_model.predict(1) == 2\n assert loaded_model.predict([1, 2, 3]) == [2, 3, 4]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [1, 2, 3]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [2, 3, 4]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_runnable_lambda_in_sequence():\n from langchain.schema.runnable import RunnableLambda\n\n def add_one(x):\n return x + 1\n\n def mul_two(x):\n return x * 2\n\n runnable_1 = RunnableLambda(add_one)\n runnable_2 = RunnableLambda(mul_two)\n sequence = runnable_1 | runnable_2\n assert sequence.invoke(1) == 4\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(sequence, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(1) == 4\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict(1) == 4\n assert pyfunc_loaded_model.predict([1, 2, 3]) == [4, 6, 8]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [1, 2, 3]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [4, 6, 8]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_runnable_parallel():\n from langchain.schema.runnable import RunnableParallel\n\n def fake_llm(prompt: str) -> str:\n return \"completion\"\n\n runnable = RunnableParallel({\"llm\": fake_llm})\n assert runnable.invoke(\"hello\") == {\"llm\": \"completion\"}\n assert runnable.batch([\"hello\", \"world\"]) == [{\"llm\": \"completion\"}, {\"llm\": \"completion\"}]\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(runnable, \"model_path\")\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(\"hello\") == {\"llm\": \"completion\"}\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict(\"hello\") == {\"llm\": \"completion\"}\n assert pyfunc_loaded_model.predict([\"hello\", \"world\"]) == [\n {\"llm\": \"completion\"},\n {\"llm\": \"completion\"},\n ]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [\"hello\", \"world\"]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [{\"llm\": \"completion\"}, {\"llm\": \"completion\"}]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef tests_save_load_complex_runnable_parallel():\n from langchain.schema.runnable import RunnableParallel\n\n with _mock_request(return_value=_mock_chat_completion_response()):\n chain = create_openai_llmchain()\n runnable = RunnableParallel({\"llm\": chain})\n expected_result = {\"llm\": {\"product\": \"MLflow\", \"text\": TEST_CONTENT}}\n assert runnable.invoke({\"product\": \"MLflow\"}) == expected_result\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(runnable, \"model_path\")\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(\"MLflow\") == expected_result\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict([{\"product\": \"MLflow\"}]) == [expected_result]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [{\"product\": \"MLflow\"}, {\"product\": \"MLflow\"}]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [expected_result, expected_result]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_runnable_parallel_and_assign_in_sequence():\n from langchain.schema.runnable import RunnablePassthrough\n\n def fake_llm(prompt: str) -> str:\n return \"completion\"\n\n runnable = {\n \"llm1\": fake_llm,\n \"llm2\": fake_llm,\n } | RunnablePassthrough.assign(total_chars=lambda inputs: len(inputs[\"llm1\"] + inputs[\"llm2\"]))\n expected_result = {\n \"llm1\": \"completion\",\n \"llm2\": \"completion\",\n \"total_chars\": 20,\n }\n assert runnable.invoke(\"hello\") == expected_result\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(runnable, \"model_path\")\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(\"hello\") == expected_result\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict([\"hello\"]) == [expected_result]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [\"hello\", \"world\"]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [expected_result, expected_result]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_save_load_runnable_sequence():\n from langchain.schema.output_parser import StrOutputParser\n from langchain.schema.runnable import RunnableSequence\n\n prompt1 = PromptTemplate.from_template(\"what is the city {person} is from?\")\n llm = OpenAI(temperature=0.9)\n model = prompt1 | llm | StrOutputParser()\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(model, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert type(loaded_model) == RunnableSequence\n assert type(loaded_model.steps[0]) == PromptTemplate\n assert type(loaded_model.steps[1]) == OpenAI\n assert type(loaded_model.steps[2]) == StrOutputParser\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_save_load_long_runnable_sequence(model_path):\n from langchain.schema.output_parser import StrOutputParser\n from langchain.schema.runnable import RunnablePassthrough, RunnableSequence\n\n prompt1 = PromptTemplate.from_template(\"what is the city {person} is from?\")\n llm = OpenAI(temperature=0.9)\n model = prompt1 | llm | StrOutputParser()\n for _ in range(10):\n model = model | RunnablePassthrough()\n\n with mlflow.start_run():\n mlflow.langchain.save_model(model, model_path)\n\n loaded_model = mlflow.langchain.load_model(model_path)\n assert type(loaded_model) == RunnableSequence\n assert type(loaded_model.steps[0]) == PromptTemplate\n assert type(loaded_model.steps[1]) == OpenAI\n assert type(loaded_model.steps[2]) == StrOutputParser\n for i in range(3, 13):\n assert type(loaded_model.steps[i]) == RunnablePassthrough\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"),\n reason=\"feature not existing\",\n)\ndef test_save_load_complex_runnable_sequence():\n from langchain.schema.runnable import RunnablePassthrough\n\n with _mock_request(return_value=_mock_chat_completion_response()):\n llm_chain = create_openai_llmchain()\n chain = llm_chain | RunnablePassthrough()\n expected_result = {\"product\": \"MLflow\", \"text\": TEST_CONTENT}\n assert chain.invoke({\"product\": \"MLflow\"}) == expected_result\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(chain, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n result = loaded_model.invoke({\"product\": \"MLflow\"})\n assert result == expected_result\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict([{\"product\": \"MLflow\"}]) == [expected_result]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [{\"product\": \"MLflow\"}]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [expected_result]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_save_load_simple_chat_model(spark):\n from langchain.prompts import ChatPromptTemplate\n from langchain.schema.output_parser import StrOutputParser\n\n from mlflow.langchain.utils import _fake_simple_chat_model\n\n prompt = ChatPromptTemplate.from_template(\n \"What is a good name for a company that makes {product}?\"\n )\n chat_model = _fake_simple_chat_model()()\n chain = prompt | chat_model | StrOutputParser()\n assert chain.invoke({\"product\": \"MLflow\"}) == \"Databricks\"\n # signature is required for spark_udf\n # TODO: support inferring signature from runnables\n signature = ModelSignature(inputs=Schema([ColSpec(\"string\", \"product\")]))\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(chain, \"model_path\", signature=signature)\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke({\"product\": \"MLflow\"}) == \"Databricks\"\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict([{\"product\": \"MLflow\"}]) == [\"Databricks\"]\n\n udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type=\"string\")\n df = spark.createDataFrame([(\"MLflow\",), (\"Spark\",)], [\"product\"])\n df = df.withColumn(\"answer\", udf(\"product\"))\n pdf = df.toPandas()\n assert pdf[\"answer\"].tolist() == [\"Databricks\", \"Databricks\"]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": {\"product\": \"MLflow\"}}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n # Because of the schema enforcement converts input to pandas dataframe\n # the prediction result is wrapped in a list in api_request_parallel_processor\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [\"Databricks\"]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_save_load_rag(tmp_path, spark):\n from langchain.prompts import ChatPromptTemplate\n from langchain.schema.output_parser import StrOutputParser\n from langchain.schema.runnable import RunnablePassthrough\n\n from mlflow.langchain.utils import _fake_simple_chat_model\n\n chat_model = _fake_simple_chat_model()()\n\n # Create the vector db, persist the db to a local fs folder\n loader = TextLoader(\"tests/langchain/state_of_the_union.txt\")\n documents = loader.load()\n text_splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=0)\n docs = text_splitter.split_documents(documents)\n embeddings = DeterministicDummyEmbeddings(size=5)\n db = FAISS.from_documents(docs, embeddings)\n persist_dir = str(tmp_path / \"faiss_index\")\n db.save_local(persist_dir)\n retriever = db.as_retriever()\n\n def load_retriever(persist_directory):\n embeddings = FakeEmbeddings(size=5)\n vectorstore = FAISS.load_local(persist_directory, embeddings)\n return vectorstore.as_retriever()\n\n prompt = ChatPromptTemplate.from_template(\n \"Answer the following question based on the context: {context}\\nQuestion: {question}\"\n )\n retrieval_chain = (\n {\n \"context\": retriever,\n \"question\": RunnablePassthrough(),\n }\n | prompt\n | chat_model\n | StrOutputParser()\n )\n question = \"What is a good name for a company that makes MLflow?\"\n answer = \"Databricks\"\n assert retrieval_chain.invoke(question) == answer\n signature = ModelSignature(inputs=Schema([ColSpec(\"string\", \"question\")]))\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(\n retrieval_chain,\n \"model_path\",\n loader_fn=load_retriever,\n persist_dir=persist_dir,\n signature=signature,\n )\n\n # Remove the persist_dir\n shutil.rmtree(persist_dir)\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(question) == answer\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict({\"question\": [question]}) == [answer]\n\n udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, result_type=\"string\")\n df = spark.createDataFrame([(question,), (question,)], [\"question\"])\n df = df.withColumn(\"answer\", udf(\"question\"))\n pdf = df.toPandas()\n assert pdf[\"answer\"].tolist() == [answer, answer]\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": [question]}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": [answer]\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_runnable_branch_save_load():\n from langchain.schema.runnable import RunnableBranch\n\n branch = RunnableBranch(\n (lambda x: isinstance(x, str), lambda x: x.upper()),\n (lambda x: isinstance(x, int), lambda x: x + 1),\n (lambda x: isinstance(x, float), lambda x: x * 2),\n lambda x: \"goodbye\",\n )\n\n assert branch.invoke(\"hello\") == \"HELLO\"\n assert branch.invoke({}) == \"goodbye\"\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(branch, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke(\"hello\") == \"HELLO\"\n assert loaded_model.invoke({}) == \"goodbye\"\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict(\"hello\") == \"HELLO\"\n assert pyfunc_loaded_model.predict({}) == \"goodbye\"\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": \"hello\"}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": \"HELLO\"\n }\n\n\n@pytest.mark.skipif(\n Version(langchain.__version__) < Version(\"0.0.311\"), reason=\"feature not existing\"\n)\ndef test_complex_runnable_branch_save_load():\n from langchain.prompts import ChatPromptTemplate\n from langchain.schema.output_parser import StrOutputParser\n from langchain.schema.runnable import RunnableBranch, RunnableLambda\n\n from mlflow.langchain.utils import _fake_mlflow_question_classifier, _fake_simple_chat_model\n\n chat_model = _fake_mlflow_question_classifier()()\n prompt = ChatPromptTemplate.from_template(\"{question_is_relevant}\\n{query}\")\n # Need to add prompt here as the chat model doesn't accept dict input\n answer_model = prompt | _fake_simple_chat_model()()\n\n decline_to_answer = RunnableLambda(\n lambda x: \"I cannot answer questions that are not about MLflow.\"\n )\n something_went_wrong = RunnableLambda(lambda x: \"Something went wrong.\")\n\n is_question_about_mlflow_prompt = ChatPromptTemplate.from_template(\n \"You are classifying documents to know if this question \"\n \"is related with MLflow. Only answer with yes or no. The question is: {query}\"\n )\n\n branch_node = RunnableBranch(\n (lambda x: x[\"question_is_relevant\"].lower() == \"yes\", answer_model),\n (lambda x: x[\"question_is_relevant\"].lower() == \"no\", decline_to_answer),\n something_went_wrong,\n )\n\n chain = (\n {\n \"question_is_relevant\": is_question_about_mlflow_prompt\n | chat_model\n | StrOutputParser(),\n \"query\": itemgetter(\"query\"),\n }\n | branch_node\n | StrOutputParser()\n )\n\n assert chain.invoke({\"query\": \"Who owns MLflow?\"}) == \"Databricks\"\n assert (\n chain.invoke({\"query\": \"Do you like cat?\"})\n == \"I cannot answer questions that are not about MLflow.\"\n )\n assert chain.invoke({\"query\": \"Are you happy today?\"}) == \"Something went wrong.\"\n\n with mlflow.start_run():\n model_info = mlflow.langchain.log_model(chain, \"model_path\")\n\n loaded_model = mlflow.langchain.load_model(model_info.model_uri)\n assert loaded_model.invoke({\"query\": \"Who owns MLflow?\"}) == \"Databricks\"\n assert (\n loaded_model.invoke({\"query\": \"Do you like cat?\"})\n == \"I cannot answer questions that are not about MLflow.\"\n )\n assert loaded_model.invoke({\"query\": \"Are you happy today?\"}) == \"Something went wrong.\"\n pyfunc_loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)\n assert pyfunc_loaded_model.predict({\"query\": \"Who owns MLflow?\"}) == \"Databricks\"\n assert (\n pyfunc_loaded_model.predict({\"query\": \"Do you like cat?\"})\n == \"I cannot answer questions that are not about MLflow.\"\n )\n assert pyfunc_loaded_model.predict({\"query\": \"Are you happy today?\"}) == \"Something went wrong.\"\n\n response = pyfunc_serve_and_score_model(\n model_info.model_uri,\n data=json.dumps({\"inputs\": {\"query\": \"Who owns MLflow?\"}}),\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,\n extra_args=[\"--env-manager\", \"local\"],\n )\n assert PredictionsResponse.from_json(response.content.decode(\"utf-8\")) == {\n \"predictions\": \"Databricks\"\n }\n","repo_name":"mlflow/mlflow","sub_path":"tests/langchain/test_langchain_model_export.py","file_name":"test_langchain_model_export.py","file_ext":"py","file_size_in_byte":47146,"program_lang":"python","lang":"en","doc_type":"code","stars":15878,"dataset":"github-code","pt":"22"} +{"seq_id":"73504670135","text":"# idapython\r\n# raw = get_bytes(here(), 0x10)\r\n# print(''.join('\\\\x{:02x}'.format(ord(c)) for c in raw))\r\n\r\nct = b'\\x4a\\x82\\x43\\xab\\x95\\xed\\x8f\\x7e\\x9c\\xbc\\xad\\x84\\x17\\x91\\x06\\x15'\r\n#cpu = b'cpufreq_'\r\nfs = b'fs'\r\nsig = b'Microsoft'\r\n################### these values are wrong, I use gef-gdb on WSL to get correct values.\r\naux = b'\\x70\\xff\\xff\\xff\\xff'*3\r\ndirn = b'\\0\\0\\0\\0' \r\n\r\nct1 = bytearray([i^ord('O') for i in ct])\r\n\r\nsumm = fs+sig+aux+dirn\r\nfor i in range(len(summ)):\r\n ct1[i%0xf] ^= summ[i]\r\n\r\nprint(\"ct1:\", ct1)\r\n#print(\"ct2:\", ct2)\r\n\r\n# https://stackoverflow.com/questions/49071746/how-to-get-the-size-of-the-vdso-on-a-linux-x86-64-system\r\n\r\n# flag: c1ArF/P2CjiDXQIZ@flare-on.com","repo_name":"enderdzz/ReverseThings","sub_path":"2020/flareon7/8/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"35473622205","text":"#Get user input using input(“Enter your age: ”). If user is 18 or older, give feedback: You are old enough to drive. If below 18 give feedback to wait for the missing amount of years. Output: Enter your age: 30 You are old enough to learn to drive. Output: Enter your age: 15 You need 3 more years to learn to drive.\n#Compare the values of my_age and your_age using if … else. Who is older (me or you)? Use input(“Enter your age: ”) to get the age as input. You can use a nested condition to print 'year' for 1 year difference in age, 'years' for bigger differences, and a custom text if my_age = your_age. Output: Enter your age: 30 You are 5 years older than me.\n\nuser=int(input(\"Enter the user age: \"))\n\nyear=18-user\nif user>=18:\n print(\"You are old enough to drive.\")\n\nelse:\n print(f\"You need {year} year to learn to drive\")\n\n#age check\nmy_age=int(input(\"Enter your age: \"))\nage=21\n\na=age-my_age\nb=my_age-age\nif my_age>age:\n print(f\"You are {b} years younger than me\")\n\nelif my_age':\n return # Nothing to do\n if self.conflict_path == '':\n path_to_create = self.path\n revid = tt._tree.get_parent_ids()[0]\n elif winner == 'other':\n if self.conflict_path == '':\n return # Nothing to do\n if self.path == '':\n path_to_create = self.conflict_path\n # FIXME: If there are more than two parents we may need to\n # iterate. Taking the last parent is the safer bet in the mean\n # time. -- vila 20100309\n revid = tt._tree.get_parent_ids()[-1]\n else:\n # Programmer error\n raise AssertionError(f'bad winner: {winner!r}')\n if path_to_create is not None:\n tid = tt.trans_id_tree_path(path_to_create)\n tree = self._revision_tree(tt._tree, revid)\n transform.create_from_tree(\n tt, tid, tree, tree.id2path(file_id))\n tt.version_file(tid, file_id=file_id)\n else:\n tid = tt.trans_id_file_id(file_id)\n # Adjust the path for the retained file id\n parent_tid = tt.get_tree_parent(tid)\n tt.adjust_path(osutils.basename(path), parent_tid, tid)\n tt.apply()\n\n def _revision_tree(self, tree, revid):\n return tree.branch.repository.revision_tree(revid)\n\n def _infer_file_id(self, tree):\n # Prior to bug #531967, file_id wasn't always set, there may still be\n # conflict files in the wild so we need to cope with them\n # Establish which path we should use to find back the file-id\n possible_paths = []\n for p in (self.path, self.conflict_path):\n if p == '':\n # special hard-coded path\n continue\n if p is not None:\n possible_paths.append(p)\n # Search the file-id in the parents with any path available\n file_id = None\n for revid in tree.get_parent_ids():\n revtree = self._revision_tree(tree, revid)\n for p in possible_paths:\n file_id = revtree.path2id(p)\n if file_id is not None:\n return revtree, file_id\n return None, None\n\n def action_take_this(self, tree):\n if self.file_id is not None:\n self._resolve_with_cleanups(tree, self.file_id, self.path,\n winner='this')\n else:\n # Prior to bug #531967 we need to find back the file_id and restore\n # the content from there\n revtree, file_id = self._infer_file_id(tree)\n tree.revert([revtree.id2path(file_id)],\n old_tree=revtree, backups=False)\n\n def action_take_other(self, tree):\n if self.file_id is not None:\n self._resolve_with_cleanups(tree, self.file_id,\n self.conflict_path,\n winner='other')\n else:\n # Prior to bug #531967 we need to find back the file_id and restore\n # the content from there\n revtree, file_id = self._infer_file_id(tree)\n tree.revert([revtree.id2path(file_id)],\n old_tree=revtree, backups=False)\n\n\nclass ContentsConflict(PathConflict):\n \"\"\"The files are of different types (or both binary), or not present.\"\"\"\n\n has_files = True\n\n typestring = 'contents conflict'\n\n format = 'Contents conflict in %(path)s'\n\n def associated_filenames(self):\n return [self.path + suffix for suffix in ('.BASE', '.OTHER')]\n\n def _resolve(self, tt, suffix_to_remove):\n \"\"\"Resolve the conflict.\n\n :param tt: The TreeTransform where the conflict is resolved.\n :param suffix_to_remove: Either 'THIS' or 'OTHER'\n\n The resolution is symmetric: when taking THIS, OTHER is deleted and\n item.THIS is renamed into item and vice-versa.\n \"\"\"\n try:\n # Delete 'item.THIS' or 'item.OTHER' depending on\n # suffix_to_remove\n tt.delete_contents(\n tt.trans_id_tree_path(self.path + '.' + suffix_to_remove))\n except _mod_transport.NoSuchFile:\n # There are valid cases where 'item.suffix_to_remove' either\n # never existed or was already deleted (including the case\n # where the user deleted it)\n pass\n try:\n this_path = tt._tree.id2path(self.file_id)\n except errors.NoSuchId:\n # The file is not present anymore. This may happen if the user\n # deleted the file either manually or when resolving a conflict on\n # the parent. We may raise some exception to indicate that the\n # conflict doesn't exist anymore and as such doesn't need to be\n # resolved ? -- vila 20110615\n this_tid = None\n else:\n this_tid = tt.trans_id_tree_path(this_path)\n if this_tid is not None:\n # Rename 'item.suffix_to_remove' (note that if\n # 'item.suffix_to_remove' has been deleted, this is a no-op)\n parent_tid = tt.get_tree_parent(this_tid)\n tt.adjust_path(osutils.basename(self.path), parent_tid, this_tid)\n tt.apply()\n\n def action_take_this(self, tree):\n self._resolve_with_cleanups(tree, 'OTHER')\n\n def action_take_other(self, tree):\n self._resolve_with_cleanups(tree, 'THIS')\n\n\n# TODO: There should be a base revid attribute to better inform the user about\n# how the conflicts were generated.\nclass TextConflict(Conflict):\n \"\"\"The merge algorithm could not resolve all differences encountered.\"\"\"\n\n has_files = True\n\n typestring = 'text conflict'\n\n format = 'Text conflict in %(path)s'\n\n rformat = '%(class)s(%(path)r, %(file_id)r)'\n\n _conflict_re = re.compile(b'^(<{7}|={7}|>{7})')\n\n def associated_filenames(self):\n return [self.path + suffix for suffix in CONFLICT_SUFFIXES]\n\n def _resolve(self, tt, winner_suffix):\n \"\"\"Resolve the conflict by copying one of .THIS or .OTHER into file.\n\n :param tt: The TreeTransform where the conflict is resolved.\n :param winner_suffix: Either 'THIS' or 'OTHER'\n\n The resolution is symmetric, when taking THIS, item.THIS is renamed\n into item and vice-versa. This takes one of the files as a whole\n ignoring every difference that could have been merged cleanly.\n \"\"\"\n # To avoid useless copies, we switch item and item.winner_suffix, only\n # item will exist after the conflict has been resolved anyway.\n item_tid = tt.trans_id_file_id(self.file_id)\n item_parent_tid = tt.get_tree_parent(item_tid)\n winner_path = self.path + '.' + winner_suffix\n winner_tid = tt.trans_id_tree_path(winner_path)\n winner_parent_tid = tt.get_tree_parent(winner_tid)\n # Switch the paths to preserve the content\n tt.adjust_path(osutils.basename(self.path),\n winner_parent_tid, winner_tid)\n tt.adjust_path(osutils.basename(winner_path),\n item_parent_tid, item_tid)\n # Associate the file_id to the right content\n tt.unversion_file(item_tid)\n tt.version_file(winner_tid, file_id=self.file_id)\n tt.apply()\n\n def action_auto(self, tree):\n # GZ 2012-07-27: Using NotImplementedError to signal that a conflict\n # can't be auto resolved does not seem ideal.\n try:\n kind = tree.kind(self.path)\n except _mod_transport.NoSuchFile:\n return\n if kind != 'file':\n raise NotImplementedError(\"Conflict is not a file\")\n conflict_markers_in_line = self._conflict_re.search\n # GZ 2012-07-27: What if not tree.has_id(self.file_id) due to removal?\n with tree.get_file(self.path) as f:\n for line in f:\n if conflict_markers_in_line(line):\n raise NotImplementedError(\"Conflict markers present\")\n\n def action_take_this(self, tree):\n self._resolve_with_cleanups(tree, 'THIS')\n\n def action_take_other(self, tree):\n self._resolve_with_cleanups(tree, 'OTHER')\n\n\nclass HandledConflict(Conflict):\n \"\"\"A path problem that has been provisionally resolved.\n This is intended to be a base class.\n \"\"\"\n\n rformat = \"%(class)s(%(action)r, %(path)r, %(file_id)r)\"\n\n def __init__(self, action, path, file_id=None):\n Conflict.__init__(self, path, file_id)\n self.action = action\n\n def _cmp_list(self):\n return Conflict._cmp_list(self) + [self.action]\n\n def as_stanza(self):\n s = Conflict.as_stanza(self)\n s.add('action', self.action)\n return s\n\n def associated_filenames(self):\n # Nothing has been generated here\n return []\n\n\nclass HandledPathConflict(HandledConflict):\n \"\"\"A provisionally-resolved path problem involving two paths.\n This is intended to be a base class.\n \"\"\"\n\n rformat = \"%(class)s(%(action)r, %(path)r, %(conflict_path)r,\"\\\n \" %(file_id)r, %(conflict_file_id)r)\"\n\n def __init__(self, action, path, conflict_path, file_id=None,\n conflict_file_id=None):\n HandledConflict.__init__(self, action, path, file_id)\n self.conflict_path = conflict_path\n # the factory blindly transfers the Stanza values to __init__,\n # so they can be unicode.\n if isinstance(conflict_file_id, str):\n conflict_file_id = cache_utf8.encode(conflict_file_id)\n self.conflict_file_id = conflict_file_id\n\n def _cmp_list(self):\n return HandledConflict._cmp_list(self) + [self.conflict_path,\n self.conflict_file_id]\n\n def as_stanza(self):\n s = HandledConflict.as_stanza(self)\n s.add('conflict_path', self.conflict_path)\n if self.conflict_file_id is not None:\n s.add('conflict_file_id', self.conflict_file_id.decode('utf8'))\n\n return s\n\n\nclass DuplicateID(HandledPathConflict):\n \"\"\"Two files want the same file_id.\"\"\"\n\n typestring = 'duplicate id'\n\n format = 'Conflict adding id to %(conflict_path)s. %(action)s %(path)s.'\n\n\nclass DuplicateEntry(HandledPathConflict):\n \"\"\"Two directory entries want to have the same name.\"\"\"\n\n typestring = 'duplicate'\n\n format = 'Conflict adding file %(conflict_path)s. %(action)s %(path)s.'\n\n def action_take_this(self, tree):\n tree.remove([self.conflict_path], force=True, keep_files=False)\n tree.rename_one(self.path, self.conflict_path)\n\n def action_take_other(self, tree):\n tree.remove([self.path], force=True, keep_files=False)\n\n\nclass ParentLoop(HandledPathConflict):\n \"\"\"An attempt to create an infinitely-looping directory structure.\n\n This is rare, but can be produced like so:\n\n tree A:\n mv foo bar\n tree B:\n mv bar foo\n merge A and B\n \"\"\"\n\n typestring = 'parent loop'\n\n format = 'Conflict moving %(path)s into %(conflict_path)s. %(action)s.'\n\n def action_take_this(self, tree):\n # just acccept brz proposal\n pass\n\n def action_take_other(self, tree):\n with tree.transform() as tt:\n p_tid = tt.trans_id_file_id(self.file_id)\n parent_tid = tt.get_tree_parent(p_tid)\n cp_tid = tt.trans_id_file_id(self.conflict_file_id)\n cparent_tid = tt.get_tree_parent(cp_tid)\n tt.adjust_path(osutils.basename(self.path), cparent_tid, cp_tid)\n tt.adjust_path(osutils.basename(self.conflict_path),\n parent_tid, p_tid)\n tt.apply()\n\n\nclass UnversionedParent(HandledConflict):\n \"\"\"An attempt to version a file whose parent directory is not versioned.\n Typically, the result of a merge where one tree unversioned the directory\n and the other added a versioned file to it.\n \"\"\"\n\n typestring = 'unversioned parent'\n\n format = 'Conflict because %(path)s is not versioned, but has versioned'\\\n ' children. %(action)s.'\n\n # FIXME: We silently do nothing to make tests pass, but most probably the\n # conflict shouldn't exist (the long story is that the conflict is\n # generated with another one that can be resolved properly) -- vila 091224\n def action_take_this(self, tree):\n pass\n\n def action_take_other(self, tree):\n pass\n\n\nclass MissingParent(HandledConflict):\n \"\"\"An attempt to add files to a directory that is not present.\n Typically, the result of a merge where THIS deleted the directory and\n the OTHER added a file to it.\n See also: DeletingParent (same situation, THIS and OTHER reversed).\n \"\"\"\n\n typestring = 'missing parent'\n\n format = 'Conflict adding files to %(path)s. %(action)s.'\n\n def action_take_this(self, tree):\n tree.remove([self.path], force=True, keep_files=False)\n\n def action_take_other(self, tree):\n # just acccept brz proposal\n pass\n\n\nclass DeletingParent(HandledConflict):\n \"\"\"An attempt to add files to a directory that is not present.\n Typically, the result of a merge where one OTHER deleted the directory and\n the THIS added a file to it.\n \"\"\"\n\n typestring = 'deleting parent'\n\n format = \"Conflict: can't delete %(path)s because it is not empty. \"\\\n \"%(action)s.\"\n\n # FIXME: It's a bit strange that the default action is not coherent with\n # MissingParent from the *user* pov.\n\n def action_take_this(self, tree):\n # just acccept brz proposal\n pass\n\n def action_take_other(self, tree):\n tree.remove([self.path], force=True, keep_files=False)\n\n\nclass NonDirectoryParent(HandledConflict):\n \"\"\"An attempt to add files to a directory that is not a directory or\n an attempt to change the kind of a directory with files.\n \"\"\"\n\n typestring = 'non-directory parent'\n\n format = \"Conflict: %(path)s is not a directory, but has files in it.\"\\\n \" %(action)s.\"\n\n # FIXME: .OTHER should be used instead of .new when the conflict is created\n\n def action_take_this(self, tree):\n # FIXME: we should preserve that path when the conflict is generated !\n if self.path.endswith('.new'):\n conflict_path = self.path[:-(len('.new'))]\n tree.remove([self.path], force=True, keep_files=False)\n tree.add(conflict_path)\n else:\n raise NotImplementedError(self.action_take_this)\n\n def action_take_other(self, tree):\n # FIXME: we should preserve that path when the conflict is generated !\n if self.path.endswith('.new'):\n conflict_path = self.path[:-(len('.new'))]\n tree.remove([conflict_path], force=True, keep_files=False)\n tree.rename_one(self.path, conflict_path)\n else:\n raise NotImplementedError(self.action_take_other)\n\n\nctype = {}\n\n\ndef register_types(*conflict_types):\n \"\"\"Register a Conflict subclass for serialization purposes.\"\"\"\n global ctype\n for conflict_type in conflict_types:\n ctype[conflict_type.typestring] = conflict_type\n\n\nregister_types(ContentsConflict, TextConflict, PathConflict, DuplicateID,\n DuplicateEntry, ParentLoop, UnversionedParent, MissingParent,\n DeletingParent, NonDirectoryParent)\n","repo_name":"breezy-team/breezy","sub_path":"breezy/bzr/conflicts.py","file_name":"conflicts.py","file_ext":"py","file_size_in_byte":22269,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"22"} +{"seq_id":"26500726393","text":"\"\"\"Oracle target class.\"\"\"\n\nfrom __future__ import annotations\n\nfrom singer_sdk.target_base import SQLTarget\nfrom singer_sdk import typing as th\n\nfrom target_oracle.sinks import (\n OracleSink,\n)\n\n\nclass TargetOracle(SQLTarget):\n \"\"\"Sample target for Oracle.\"\"\"\n\n name = \"target-oracle\"\n config_jsonschema = th.PropertiesList(\n th.Property(\n \"sqlalchemy_url\",\n th.StringType,\n secret=True, # Flag config as protected.\n description=\"SQLAlchemy connection string\",\n ),\n th.Property(\n \"driver_name\",\n th.StringType,\n default=\"oracle+cx_oracle\",\n description=\"SQLAlchemy driver name\",\n ),\n th.Property(\n \"username\",\n th.StringType,\n secret=True, # Flag config as protected.\n description=\"Oracle username\",\n ),\n th.Property(\n \"password\",\n th.StringType,\n secret=True, # Flag config as protected.\n description=\"Oracle password\",\n ),\n th.Property(\n \"host\",\n th.StringType,\n description=\"Oracle host\",\n ),\n th.Property(\n \"port\",\n th.StringType,\n description=\"Oracle port\",\n ),\n th.Property(\n \"database\",\n th.StringType,\n description=\"Oracle database\",\n ),\n th.Property(\n \"prefer_float_over_numeric\",\n th.BooleanType,\n description=\"Use float data type for numbers (otherwise number type is used)\",\n default=False\n ),\n ).to_dict()\n\n default_sink_class = OracleSink\n\n\nif __name__ == \"__main__\":\n TargetOracle.cli()\n","repo_name":"radbrt/target-oracle","sub_path":"target_oracle/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25154830624","text":"import copy\n\n\ndef print_graph(g):\n print(\"=============\")\n for i in g:\n print(i)\n\n\ndef dfs(x, y, g):\n global D, visited\n cur_color = g[x][y]\n for i, j in D:\n n_x, n_y = x+i, y+j\n if n_x < 0 or n_y < 0 or n_x >= N or n_y >= N:\n continue\n if visited[n_x][n_y]:\n continue\n if g[n_x][n_y] == cur_color:\n visited[n_x][n_y] = True\n dfs(n_x, n_y, g)\n\n\nN = int(input())\ngraph = []\nfor _ in range(N):\n graph.append(list(input()))\n\nngraph = copy.deepcopy(graph)\nfor i in range(N):\n for j in range(N):\n if ngraph[i][j] == 'G':\n ngraph[i][j] = 'R'\n\nprint(graph)\nprint(ngraph)\nvisited = [[False for _ in range(N)] for _ in range(N)]\nD = [(1,0),(0,1),(-1,0),(0,-1)]\na, na = 0, 0\n\nfor i in range(N):\n for j in range(N):\n if visited[i][j] == False:\n dfs(i,j, graph)\n a += 1\nvisited = [[False for _ in range(N)] for _ in range(N)]\nfor i in range(N):\n for j in range(N):\n if visited[i][j] == False:\n print_graph(ngraph)\n print_graph(visited)\n dfs(i,j, ngraph)\n na += 1\n\nprint(a, na)\n\n\n\n\n# 5\n# RRRBB\n# GGBBB\n# BBBRR\n# BBRRR\n# RRRRR","repo_name":"moonsumhi/bj-algorithm","sub_path":"Backjoon/etc/10026.py","file_name":"10026.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70412767095","text":"from AhpAnpLib import structs_AHPLib as str\n\nmyModel=str.Model(\"my model name\") \nclusterX=str.Cluster(\"cluster x\",1)\nclusterY=str.Cluster(\"cluster y\",2)\n\nnodeA=str.Node(\"node A\", 0)\nclusterX.addNode2Cluster(nodeA)\nnodeB=str.Node(\"node B\", 1)\nnodeC=str.Node(\"node C\", 2)\nclusterY.addNode2Cluster(nodeB)\nclusterY.addNode2Cluster(nodeC)\n\nmyModel.addCluster2Model(clusterX)\nmyModel.addCluster2Model(clusterY)\n\nclusterYID=myModel.getClusterIDByName(\"cluster y\")\n\nmyObjCluster = myModel.getClusterObjByID(clusterYID) \n\nprint(f\"Nodes of cluster y in the model’s cluster list: {myObjCluster.nodes}\")\n","repo_name":"CreativeDecisions/AhpAnpLib","sub_path":"Examples/Dictionary/Str_Model/dictExp7_getclusterbyID.py","file_name":"dictExp7_getclusterbyID.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20556563539","text":"def a_to_i(a):\n ascii = ord(a)\n if 97 <= ascii <= 122:\n return ascii - 96\n if 65 <= ascii <= 90:\n return ascii - 38\n\n\ndef priority_of_common_item(things):\n n_things = len(things)\n left = set()\n right = set()\n for i in range(n_things // 2):\n left.add(things[i])\n for i in range(n_things // 2, n_things):\n right.add(things[i])\n common_item = left & right\n return a_to_i(common_item.pop())\n\n\nwith open(\"day3/input.txt\", \"r\") as f:\n total = 0\n lines = f.readlines()\n for line in lines:\n total += priority_of_common_item(line.strip())\n print(total)\n","repo_name":"perryizgr8/aoc22","sub_path":"day3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15870793192","text":"from manim import *\r\nimport numpy as np\r\n\r\nquality_factor = 1\r\n\r\nconfig['pixel_height'] = int(1080/quality_factor)\r\nconfig['pixel_width'] = int(1080/quality_factor)\r\nconfig['frame_height'] = 8\r\nconfig['frame_width'] = 8\r\n\r\nconfig[\"tex_template\"] = TexFontTemplates.libertine\r\n\r\nclass trianguloPascalFB(Scene):\r\n def construct(self):\r\n\r\n ##------------------------------\r\n ## Logo LME en la esquina\r\n ##------------------------------\r\n brand = Text(\r\n \"LED Me Explain\",\r\n fill_opacity = 0.5,\r\n color = WHITE,\r\n font = \"Arial Rounded MT Bold\",\r\n t2c = {\"[:1]\":LME_A,\"[3:4]\":LME_A,\"[5:6]\":LME_A} ## Los espacios no cuentan como caracteres\r\n ).scale(0.4).to_edge(DR*.8).set_z_index(2)\r\n # Añade el logo\r\n self.add(brand)\r\n ##---------------------------------\r\n ##---------------------------------\r\n\r\n def Pascal(rows = 5, colors = ['#236B8E', '#83C167', '#FFFF00', '#FC6255'], height = None, width = None):\r\n pas_tri = VGroup()\r\n color_array = color_gradient(colors,rows)\r\n for n in range(rows):\r\n for k in range(n+1):\r\n hex = RegularPolygon(n=6, color = color_array[n], fill_opacity=0.7, stroke_width = DEFAULT_STROKE_WIDTH*6/(rows+1)).rotate(PI/2).shift(DOWN*n*(1+np.sin(PI/6))+RIGHT*(n-k*2)*np.cos(PI/6))\r\n num = int(np.math.factorial(n)/np.math.factorial(k)/np.math.factorial(n-k))\r\n lbl = Text(str(num)).rescale_to_fit(max(hex.width,hex.height)*0.4,[0,1]).move_to(hex)\r\n pas_tri.add(VGroup(hex,lbl))\r\n\r\n if width is not None:\r\n pas_tri.scale_to_fit_width(width)\r\n elif height is not None:\r\n pas_tri.scale_to_fit_height(height)\r\n else:\r\n pas_tri.scale_to_fit_width(config['frame_width']*0.9)\r\n\r\n pas_tri.move_to(ORIGIN)\r\n return pas_tri\r\n\r\n pascal = Pascal(11)\r\n\r\n self.add(pascal)\r\n","repo_name":"LedMeExplain/LedMeExplain","sub_path":"TrianguloPascal.py","file_name":"TrianguloPascal.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"7305090749","text":"# class Dog:\n#\n# legs_no=4 # variabila de clasa/ atribut de clasa\n#\n# def __init__(self, name):\n# self.__name=name\n#\n# @property\n# def nume(self):\n# return self.__name\n#\n# @nume_de_familie.setter\n# def nume_de_familie(self,prenume):\n# self.__name=prenume\n#\n# @nume_de_familie.deleter\n# def nume_de_familie(self):\n# del self.__name\n#\n# # @staticmethod\n# # def speak():\n# # return \"ham ham\"\n#\n# def __str__(self):\n# return str(self.name)\n#\n# # def change_name(self,name):\n# # self.name=name\n# # return self.name\n#\n# caine=Dog(\"rex\")\n# rasa=Dog(\"max\")\n# print(caine.nume_de_familie)\n# caine.nume='Jon'\n# print(caine.nume)\n# del caine.nume_de_familie\n# print(caine.nume_de_familie)\n# print(caine)\n# print(caine.legs_no, Dog.legs_no)\n# print(rasa)\n# print(caine.change_name(\"rex1\"))\n# Dog.legs_no=2\n# caine.legs_no=3\n# #Dog.legs_no=3\n# print('rasa', rasa.legs_no,'legs no class', Dog.legs_no)\n# print('caine', caine.legs_no, 'legs no class', Dog.legs_no)\n# print(caine.name)\n# print(caine.speak())\n# print(caine._Dog__nume)\n\n# def decorator_simplu(parametru):\n# print(f\"apelam functia {parametru.__name__}\")\n# return parametru\n#\n# @decorator_simplu\n# def functie_simpla():\n# return \"buna seara\"\n#\n# @decorator_simplu\n# def functie_complexa():\n# return\"noapte buna\"\n#\n# # print (functie_simpla())\n#\n# print(functie_complexa())\n\n# def decorator_depozit(functia_noastra):\n# def ambalaj(carti):\n# return f\"ambalam produse din {functia_noastra.__name__} cu {carti}\"\n# return ambalaj\n#\n# @decorator_depozit\n# def impachetare_carti(args):\n# return args\n\n\n# def decorator_depozit(material):\n# def ambalaj(functia_noastra):\n# def ambalaj_intern(*carte):\n# return f\"ambalam produse din {functia_noastra.__name__} cu {material} care contine cartea {carte}\"\n# return ambalaj_intern\n# return ambalaj\n#\n#\n# @decorator_depozit(\"hartie\")\n# def impachetare_carti(*nume):\n# return nume\n#\n# print(impachetare_carti(\"amintiri din copilarie\",\"baltagul\"))\n# print(impachetare_carti(\"amintiri din copilarie\",\"baltagul\"))\n\n\n# def decorator(simbol):\n# def adauga_simbol(functie):\n# def functie_upper(parametru):\n# return parametru.upper() + simbol\n# return functie_upper\n# return adauga_simbol\n#\n# @decorator(\".\")\n# def functie(propozitie):\n# return propozitie\n#\n# print(functie(\"ana are mere\"))\nimport time\n\ndef calculeaza_timpul(functia):\n def functie_interioara(*param):\n inceput=time.time()\n functia(*param)\n sfarsit=time.time()\n return f\"timp total de executie:{sfarsit-inceput}\"\n return functie_interioara\n\n@calculeaza_timpul\ndef adunare(*args):\n suma=0\n for i in args:\n suma+=i\n print(suma)\n return suma\n\nprint(adunare(1,2,3))\n\n\n\n\n","repo_name":"SamuelArdelean/python-Sami","sub_path":"SCRIPT_PYTHON/7. decoratori.py","file_name":"7. decoratori.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30525509688","text":"from __future__ import absolute_import\n\nfrom django.core.urlresolvers import reverse\n\nfrom sentry.api.base import Endpoint\nfrom sentry.utils.http import absolute_uri\n\nfrom .client import JIRA_KEY\n\n\nclass JiraDescriptorEndpoint(Endpoint):\n authentication_classes = ()\n permission_classes = ()\n\n def get(self, request):\n return self.respond(\n {\n \"name\": \"Sentry\",\n \"description\": \"Sentry\",\n \"key\": JIRA_KEY,\n \"baseUrl\": absolute_uri(),\n \"vendor\": {\"name\": \"Sentry\", \"url\": \"https://sentry.io\"},\n \"authentication\": {\"type\": \"jwt\"},\n \"lifecycle\": {\n \"installed\": \"/extensions/jira/installed/\",\n \"uninstalled\": \"/extensions/jira/uninstalled/\",\n },\n \"apiVersion\": 1,\n \"modules\": {\n \"postInstallPage\": {\n \"url\": \"/extensions/jira/configure\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"post-install-sentry\",\n },\n \"configurePage\": {\n \"url\": \"/extensions/jira/configure\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"configure-sentry\",\n },\n \"webhooks\": [\n {\n \"event\": \"jira:issue_updated\",\n \"url\": reverse(\"sentry-extensions-jira-issue-updated\"),\n \"excludeBody\": False,\n }\n ],\n },\n \"apiMigrations\": {\"gdpr\": True},\n \"scopes\": [\"read\", \"write\", \"act_as_user\"],\n }\n )\n","repo_name":"lizardkinggg/sentry-cicd","sub_path":"src/sentry/integrations/jira/descriptor.py","file_name":"descriptor.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"25960889043","text":"class Solution(object):\n def kClosest(self, points, K):\n \"\"\"\n :type points: List[List[int]]\n :type K: int\n :rtype: List[List[int]]\n \"\"\"\n items = []\n for point in points:\n items.append((-(point[0]**2 + point[1]**2)**.5, point))\n heap = items[:K]\n heapq.heapify(heap)\n for i in range(K, len(items)):\n heapq.heappushpop(heap, items[i])\n return [item[1] for item in heap]\n ","repo_name":"emily1749/Leetcode","sub_path":"973_k_closest_points_to_origin.py","file_name":"973_k_closest_points_to_origin.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22034772253","text":"import discord\nimport os\n\nintents = discord.Intents.default()\nintents.message_content = True\n\nclient = discord.Client(intents=intents)\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content.startswith('$hello'):\n await message.channel.send('Hello!')\n\nclient.run(os.getenv('TOKEN'))\n","repo_name":"SadmanYasar/Discord-Bot-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"2062684252","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef _cumulative_gain_curve(y_true, y_score, pos_label=None):\n \"\"\"\n This method is copied from scikit-plot package.\n See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/helpers.py#L157\n\n This function generates the points necessary to plot the Cumulative Gain\n\n Note: This implementation is restricted to the binary classification task.\n\n Args:\n y_true (array-like, shape (n_samples)): True labels of the data.\n\n y_score (array-like, shape (n_samples)): Target scores, can either be\n probability estimates of the positive class, confidence values, or\n non-thresholded measure of decisions (as returned by\n decision_function on some classifiers).\n\n pos_label (int or str, default=None): Label considered as positive and\n others are considered negative\n\n Returns:\n percentages (numpy.ndarray): An array containing the X-axis values for\n plotting the Cumulative Gains chart.\n\n gains (numpy.ndarray): An array containing the Y-axis values for one\n curve of the Cumulative Gains chart.\n\n Raises:\n ValueError: If `y_true` is not composed of 2 classes. The Cumulative\n Gain Chart is only relevant in binary classification.\n \"\"\"\n y_true, y_score = np.asarray(y_true), np.asarray(y_score)\n\n # ensure binary classification if pos_label is not specified\n classes = np.unique(y_true)\n if pos_label is None and not (\n np.array_equal(classes, [0, 1])\n or np.array_equal(classes, [-1, 1])\n or np.array_equal(classes, [0])\n or np.array_equal(classes, [-1])\n or np.array_equal(classes, [1])\n ):\n raise ValueError(\"Data is not binary and pos_label is not specified\")\n elif pos_label is None:\n pos_label = 1.0\n\n # make y_true a boolean vector\n y_true = y_true == pos_label\n\n sorted_indices = np.argsort(y_score)[::-1]\n y_true = y_true[sorted_indices]\n gains = np.cumsum(y_true)\n\n percentages = np.arange(start=1, stop=len(y_true) + 1)\n\n gains = gains / float(np.sum(y_true))\n percentages = percentages / float(len(y_true))\n\n gains = np.insert(gains, 0, [0])\n percentages = np.insert(percentages, 0, [0])\n\n return percentages, gains\n\n\ndef plot_lift_curve(\n y_true,\n y_probas,\n title=\"Lift Curve\",\n ax=None,\n figsize=None,\n title_fontsize=\"large\",\n text_fontsize=\"medium\",\n pos_label=None,\n):\n \"\"\"\n This method is copied from scikit-plot package.\n See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/metrics.py#L1133\n\n Generates the Lift Curve from labels and scores/probabilities\n\n The lift curve is used to determine the effectiveness of a\n binary classifier. A detailed explanation can be found at\n http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html.\n The implementation here works only for binary classification.\n\n Args:\n y_true (array-like, shape (n_samples)):\n Ground truth (correct) target values.\n\n y_probas (array-like, shape (n_samples, n_classes)):\n Prediction probabilities for each class returned by a classifier.\n\n title (string, optional): Title of the generated plot. Defaults to\n \"Lift Curve\".\n\n ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to\n plot the learning curve. If None, the plot is drawn on a new set of\n axes.\n\n figsize (2-tuple, optional): Tuple denoting figure size of the plot\n e.g. (6, 6). Defaults to ``None``.\n\n title_fontsize (string or int, optional): Matplotlib-style fontsizes.\n Use e.g. \"small\", \"medium\", \"large\" or integer-values. Defaults to\n \"large\".\n\n text_fontsize (string or int, optional): Matplotlib-style fontsizes.\n Use e.g. \"small\", \"medium\", \"large\" or integer-values. Defaults to\n \"medium\".\n\n pos_label (optional): Label for the positive class.\n\n Returns:\n ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was\n drawn.\n\n Example:\n >>> lr = LogisticRegression()\n >>> lr = lr.fit(X_train, y_train)\n >>> y_probas = lr.predict_proba(X_test)\n >>> plot_lift_curve(y_test, y_probas)\n \n >>> plt.show()\n\n .. image:: _static/examples/plot_lift_curve.png\n :align: center\n :alt: Lift Curve\n \"\"\"\n y_true = np.array(y_true)\n y_probas = np.array(y_probas)\n\n classes = np.unique(y_true)\n if len(classes) != 2:\n raise ValueError(f\"Cannot calculate Lift Curve for data with {len(classes)} category/ies\")\n\n # Compute Cumulative Gain Curves\n percentages, gains1 = _cumulative_gain_curve(y_true, y_probas[:, 0], classes[0])\n percentages, gains2 = _cumulative_gain_curve(y_true, y_probas[:, 1], classes[1])\n\n percentages = percentages[1:]\n gains1 = gains1[1:]\n gains2 = gains2[1:]\n\n gains1 = gains1 / percentages\n gains2 = gains2 / percentages\n\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=figsize)\n\n ax.set_title(title, fontsize=title_fontsize)\n\n label0 = f\"Class {classes[0]}\"\n label1 = f\"Class {classes[1]}\"\n # show (positive) next to the positive class in the legend\n if pos_label:\n if pos_label == classes[0]:\n label0 = f\"Class {classes[0]} (positive)\"\n elif pos_label == classes[1]:\n label1 = f\"Class {classes[1]} (positive)\"\n # do not mark positive class if pos_label is not in classes\n\n ax.plot(percentages, gains1, lw=3, label=label0)\n ax.plot(percentages, gains2, lw=3, label=label1)\n\n ax.plot([0, 1], [1, 1], \"k--\", lw=2, label=\"Baseline\")\n\n ax.set_xlabel(\"Percentage of sample\", fontsize=text_fontsize)\n ax.set_ylabel(\"Lift\", fontsize=text_fontsize)\n ax.tick_params(labelsize=text_fontsize)\n ax.grid(\"on\")\n ax.legend(loc=\"best\", fontsize=text_fontsize)\n\n return ax\n","repo_name":"mlflow/mlflow","sub_path":"mlflow/models/evaluation/lift_curve.py","file_name":"lift_curve.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","stars":15878,"dataset":"github-code","pt":"22"} +{"seq_id":"2067902852","text":"import base64\nimport datetime\nimport json\nimport os\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom functools import partial\nfrom json import JSONEncoder\nfrom typing import Any, Dict, Optional\n\nfrom google.protobuf.descriptor import FieldDescriptor\nfrom google.protobuf.json_format import MessageToJson, ParseDict\n\nfrom mlflow.exceptions import MlflowException\n\n_PROTOBUF_INT64_FIELDS = [\n FieldDescriptor.TYPE_INT64,\n FieldDescriptor.TYPE_UINT64,\n FieldDescriptor.TYPE_FIXED64,\n FieldDescriptor.TYPE_SFIXED64,\n FieldDescriptor.TYPE_SINT64,\n]\n\nfrom mlflow.protos.databricks_pb2 import BAD_REQUEST\n\n\ndef _mark_int64_fields_for_proto_maps(proto_map, value_field_type):\n \"\"\"Converts a proto map to JSON, preserving only int64-related fields.\"\"\"\n json_dict = {}\n for key, value in proto_map.items():\n # The value of a protobuf map can only be a scalar or a message (not a map or repeated\n # field).\n if value_field_type == FieldDescriptor.TYPE_MESSAGE:\n json_dict[key] = _mark_int64_fields(value)\n elif value_field_type in _PROTOBUF_INT64_FIELDS:\n json_dict[key] = int(value)\n elif isinstance(key, int):\n json_dict[key] = value\n return json_dict\n\n\ndef _mark_int64_fields(proto_message):\n \"\"\"Converts a proto message to JSON, preserving only int64-related fields.\"\"\"\n json_dict = {}\n for field, value in proto_message.ListFields():\n if (\n # These three conditions check if this field is a protobuf map.\n # See the official implementation: https://bit.ly/3EMx1rl\n field.type == FieldDescriptor.TYPE_MESSAGE\n and field.message_type.has_options\n and field.message_type.GetOptions().map_entry\n ):\n # Deal with proto map fields separately in another function.\n json_dict[field.name] = _mark_int64_fields_for_proto_maps(\n value, field.message_type.fields_by_name[\"value\"].type\n )\n continue\n\n if field.type == FieldDescriptor.TYPE_MESSAGE:\n ftype = partial(_mark_int64_fields)\n elif field.type in _PROTOBUF_INT64_FIELDS:\n ftype = int\n else:\n # Skip all non-int64 fields.\n continue\n\n json_dict[field.name] = (\n [ftype(v) for v in value]\n if field.label == FieldDescriptor.LABEL_REPEATED\n else ftype(value)\n )\n return json_dict\n\n\ndef _merge_json_dicts(from_dict, to_dict):\n \"\"\"Merges the json elements of from_dict into to_dict. Only works for json dicts\n converted from proto messages\n \"\"\"\n for key, value in from_dict.items():\n if isinstance(key, int) and str(key) in to_dict:\n # When the key (i.e. the proto field name) is an integer, it must be a proto map field\n # with integer as the key. For example:\n # from_dict is {'field_map': {1: '2', 3: '4'}}\n # to_dict is {'field_map': {'1': '2', '3': '4'}}\n # So we need to replace the str keys with int keys in to_dict.\n to_dict[key] = to_dict[str(key)]\n del to_dict[str(key)]\n\n if key not in to_dict:\n continue\n\n if isinstance(value, dict):\n _merge_json_dicts(from_dict[key], to_dict[key])\n elif isinstance(value, list):\n for i, v in enumerate(value):\n if isinstance(v, dict):\n _merge_json_dicts(v, to_dict[key][i])\n else:\n to_dict[key][i] = v\n else:\n to_dict[key] = from_dict[key]\n return to_dict\n\n\ndef message_to_json(message):\n \"\"\"Converts a message to JSON, using snake_case for field names.\"\"\"\n\n # Google's MessageToJson API converts int64 proto fields to JSON strings.\n # For more info, see https://github.com/protocolbuffers/protobuf/issues/2954\n json_dict_with_int64_as_str = json.loads(\n MessageToJson(message, preserving_proto_field_name=True)\n )\n # We convert this proto message into a JSON dict where only int64 proto fields\n # are preserved, and they are treated as JSON numbers, not strings.\n json_dict_with_int64_fields_only = _mark_int64_fields(message)\n # By merging these two JSON dicts, we end up with a JSON dict where int64 proto fields are not\n # converted to JSON strings. Int64 keys in proto maps will always be converted to JSON strings\n # because JSON doesn't support non-string keys.\n json_dict_with_int64_as_numbers = _merge_json_dicts(\n json_dict_with_int64_fields_only, json_dict_with_int64_as_str\n )\n return json.dumps(json_dict_with_int64_as_numbers, indent=2)\n\n\ndef _stringify_all_experiment_ids(x):\n \"\"\"Converts experiment_id fields which are defined as ints into strings in the given json.\n This is necessary for backwards- and forwards-compatibility with MLflow clients/servers\n running MLflow 0.9.0 and below, as experiment_id was changed from an int to a string.\n To note, the Python JSON serializer is happy to auto-convert strings into ints (so a\n server or client that sees the new format is fine), but is unwilling to convert ints\n to strings. Therefore, we need to manually perform this conversion.\n\n This code can be removed after MLflow 1.0, after users have given reasonable time to\n upgrade clients and servers to MLflow 0.9.1+.\n \"\"\"\n if isinstance(x, dict):\n items = x.items()\n for k, v in items:\n if k == \"experiment_id\":\n x[k] = str(v)\n elif k == \"experiment_ids\":\n x[k] = [str(w) for w in v]\n elif k == \"info\" and isinstance(v, dict) and \"experiment_id\" in v and \"run_uuid\" in v:\n # shortcut for run info\n v[\"experiment_id\"] = str(v[\"experiment_id\"])\n elif k not in (\"params\", \"tags\", \"metrics\"): # skip run data\n _stringify_all_experiment_ids(v)\n elif isinstance(x, list):\n for y in x:\n _stringify_all_experiment_ids(y)\n\n\ndef parse_dict(js_dict, message):\n \"\"\"Parses a JSON dictionary into a message proto, ignoring unknown fields in the JSON.\"\"\"\n _stringify_all_experiment_ids(js_dict)\n ParseDict(js_dict=js_dict, message=message, ignore_unknown_fields=True)\n\n\nclass NumpyEncoder(JSONEncoder):\n \"\"\"Special json encoder for numpy types.\n Note that some numpy types doesn't have native python equivalence,\n hence json.dumps will raise TypeError.\n In this case, you'll need to convert your numpy types into its closest python equivalence.\n \"\"\"\n\n def try_convert(self, o):\n import numpy as np\n import pandas as pd\n\n def encode_binary(x):\n return base64.encodebytes(x).decode(\"ascii\")\n\n if isinstance(o, np.ndarray):\n if o.dtype == object:\n return [self.try_convert(x)[0] for x in o.tolist()], True\n elif o.dtype == np.bytes_:\n return np.vectorize(encode_binary)(o), True\n else:\n return o.tolist(), True\n\n if isinstance(o, np.generic):\n return o.item(), True\n if isinstance(o, (bytes, bytearray)):\n return encode_binary(o), True\n if isinstance(o, np.datetime64):\n return np.datetime_as_string(o), True\n if isinstance(o, (pd.Timestamp, datetime.date, datetime.datetime, datetime.time)):\n return o.isoformat(), True\n return o, False\n\n def default(self, o):\n res, converted = self.try_convert(o)\n if converted:\n return res\n else:\n return super().default(o)\n\n\nclass MlflowFailedTypeConversion(MlflowException):\n def __init__(self, col_name, col_type, ex):\n super().__init__(\n message=f\"Data is not compatible with model signature. \"\n f\"Failed to convert column {col_name} to type '{col_type}'. Error: '{ex!r}'\",\n error_code=BAD_REQUEST,\n )\n\n\ndef cast_df_types_according_to_schema(pdf, schema):\n import numpy as np\n\n from mlflow.types.schema import DataType\n\n actual_cols = set(pdf.columns)\n if schema.has_input_names():\n dtype_list = zip(schema.input_names(), schema.input_types())\n elif schema.is_tensor_spec() and len(schema.input_types()) == 1:\n dtype_list = zip(actual_cols, [schema.input_types()[0] for _ in actual_cols])\n else:\n n = min(len(schema.input_types()), len(pdf.columns))\n dtype_list = zip(pdf.columns[:n], schema.input_types()[:n])\n\n for col_name, col_type_spec in dtype_list:\n if isinstance(col_type_spec, DataType):\n col_type = col_type_spec.to_pandas()\n else:\n col_type = col_type_spec\n if col_name in actual_cols:\n try:\n if isinstance(col_type_spec, DataType) and col_type_spec == DataType.binary:\n # NB: We expect binary data to be passed base64 encoded\n pdf[col_name] = pdf[col_name].map(\n lambda x: base64.decodebytes(bytes(x, \"utf8\"))\n )\n elif col_type == np.dtype(bytes):\n pdf[col_name] = pdf[col_name].map(lambda x: bytes(x, \"utf8\"))\n elif schema.is_tensor_spec() and isinstance(pdf[col_name].iloc[0], list):\n # For dataframe with multidimensional column, it contains\n # list type values, we cannot convert\n # its type by `astype`, skip conversion.\n # The conversion will be done in `_enforce_schema` while\n # `PyFuncModel.predict` being called.\n pass\n else:\n pdf[col_name] = pdf[col_name].astype(col_type, copy=False)\n except Exception as ex:\n raise MlflowFailedTypeConversion(col_name, col_type, ex)\n return pdf\n\n\nclass MlflowBadScoringInputException(MlflowException):\n def __init__(self, message):\n super().__init__(message, error_code=BAD_REQUEST)\n\n\ndef dataframe_from_parsed_json(decoded_input, pandas_orient, schema=None):\n \"\"\"\n Convert parsed json into pandas.DataFrame. If schema is provided this methods will attempt to\n cast data types according to the schema. This include base64 decoding for binary columns.\n\n :param decoded_input: Parsed json - either a list or a dictionary.\n :param schema: MLflow schema used when parsing the data.\n :param pandas_orient: pandas data frame convention used to store the data.\n :return: pandas.DataFrame.\n \"\"\"\n import pandas as pd\n\n if pandas_orient == \"records\":\n if not isinstance(decoded_input, list):\n if isinstance(decoded_input, dict):\n typemessage = \"dictionary\"\n else:\n typemessage = f\"type {type(decoded_input)}\"\n raise MlflowBadScoringInputException(\n f\"Dataframe records format must be a list of records. Got {typemessage}.\"\n )\n try:\n pdf = pd.DataFrame(data=decoded_input)\n except Exception as ex:\n raise MlflowBadScoringInputException(\n f\"Provided dataframe_records field is not a valid dataframe representation in \"\n f\"'records' format. Error: '{ex}'\"\n )\n elif pandas_orient == \"split\":\n if not isinstance(decoded_input, dict):\n if isinstance(decoded_input, list):\n typemessage = \"list\"\n else:\n typemessage = f\"type {type(decoded_input)}\"\n raise MlflowBadScoringInputException(\n f\"Dataframe split format must be a dictionary. Got {typemessage}.\"\n )\n keys = set(decoded_input.keys())\n missing_data = \"data\" not in keys\n extra_keys = keys.difference({\"columns\", \"data\", \"index\"})\n if missing_data or extra_keys:\n raise MlflowBadScoringInputException(\n f\"Dataframe split format must have 'data' field and optionally 'columns' \"\n f\"and 'index' fields. Got {keys}.'\"\n )\n try:\n pdf = pd.DataFrame(\n index=decoded_input.get(\"index\"),\n columns=decoded_input.get(\"columns\"),\n data=decoded_input[\"data\"],\n )\n except Exception as ex:\n raise MlflowBadScoringInputException(\n f\"Provided dataframe_split field is not a valid dataframe representation in \"\n f\"'split' format. Error: '{ex}'\"\n )\n if schema is not None:\n pdf = cast_df_types_according_to_schema(pdf, schema)\n return pdf\n\n\ndef dataframe_from_raw_json(path_or_str, schema=None, pandas_orient: str = \"split\"):\n \"\"\"\n Parse raw json into a pandas.Dataframe.\n\n If schema is provided this methods will attempt to cast data types according to the schema. This\n include base64 decoding for binary columns.\n\n :param path_or_str: Path to a json file or a json string.\n :param schema: MLflow schema used when parsing the data.\n :param pandas_orient: pandas data frame convention used to store the data.\n :return: pandas.DataFrame.\n \"\"\"\n if os.path.exists(path_or_str):\n with open(path_or_str) as f:\n parsed_json = json.load(f)\n else:\n parsed_json = json.loads(path_or_str)\n\n return dataframe_from_parsed_json(parsed_json, pandas_orient, schema)\n\n\ndef _get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n import numpy as np\n import pandas as pd\n\n if isinstance(data, np.ndarray):\n return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n\n\ndef parse_tf_serving_input(inp_dict, schema=None):\n \"\"\"\n :param inp_dict: A dict deserialized from a JSON string formatted as described in TF's\n serving API doc\n (https://www.tensorflow.org/tfx/serving/api_rest#request_format_2)\n :param schema: MLflow schema used when parsing the data.\n \"\"\"\n import numpy as np\n\n def cast_schema_type(input_data):\n input_data = deepcopy(input_data)\n if schema is not None:\n if schema.has_input_names():\n input_names = schema.input_names()\n if (\n len(input_names) == 1\n and isinstance(input_data, list)\n and not any(isinstance(x, dict) for x in input_data)\n ):\n # for schemas with a single column, match input with column\n input_data = {input_names[0]: input_data}\n if not isinstance(input_data, dict):\n raise MlflowException(\n \"Failed to parse input data. This model contains a tensor-based model\"\n \" signature with input names, which suggests a dictionary input mapping\"\n f\" input name to tensor, but an input of type {type(input_data)} was found.\"\n )\n type_dict = dict(zip(schema.input_names(), schema.numpy_types()))\n for col_name in input_data.keys():\n input_data[col_name] = np.array(\n input_data[col_name], dtype=type_dict.get(col_name)\n )\n else:\n if not isinstance(input_data, list):\n raise MlflowException(\n \"Failed to parse input data. This model contains an un-named tensor-based\"\n \" model signature which expects a single n-dimensional array as input,\"\n f\" however, an input of type {type(input_data)} was found.\"\n )\n input_data = np.array(input_data, dtype=schema.numpy_types()[0])\n else:\n if isinstance(input_data, dict):\n input_data = {k: np.array(v) for k, v in input_data.items()}\n else:\n input_data = np.array(input_data)\n return input_data\n\n # pylint: disable=broad-except\n if \"signature_name\" in inp_dict:\n raise MlflowException(\n 'Failed to parse data as TF serving input. \"signature_name\" is currently'\n \" not supported.\"\n )\n\n if not (list(inp_dict.keys()) == [\"instances\"] or list(inp_dict.keys()) == [\"inputs\"]):\n raise MlflowException(\n 'Failed to parse data as TF serving input. One of \"instances\" and'\n ' \"inputs\" must be specified (not both or any other keys).'\n )\n\n # Read the JSON\n try:\n if \"instances\" in inp_dict:\n items = inp_dict[\"instances\"]\n if len(items) > 0 and isinstance(items[0], dict):\n # convert items to column format (map column/input name to tensor)\n data = defaultdict(list)\n for item in items:\n for k, v in item.items():\n data[k].append(v)\n data = cast_schema_type(data)\n else:\n data = cast_schema_type(items)\n else:\n # items already in column format, convert values to tensor\n items = inp_dict[\"inputs\"]\n data = cast_schema_type(items)\n except Exception:\n raise MlflowException(\n \"Failed to parse data as TF serving input. Ensure that the input is\"\n \" a valid JSON-formatted string that conforms to the request body for\"\n \" TF serving's Predict API as documented at\"\n \" https://www.tensorflow.org/tfx/serving/api_rest#request_format_2\"\n )\n\n # Sanity check inputted data. This check will only be applied when the row-format `instances`\n # is used since it requires same 0-th dimension for all items.\n if isinstance(data, dict) and \"instances\" in inp_dict:\n # ensure all columns have the same number of items\n expected_len = len(list(data.values())[0])\n if not all(len(v) == expected_len for v in data.values()):\n raise MlflowException(\n \"Failed to parse data as TF serving input. The length of values for\"\n \" each input/column name are not the same\"\n )\n\n return data\n\n\n# Reference: https://stackoverflow.com/a/12126976\nclass _CustomJsonEncoder(json.JSONEncoder):\n def default(self, o):\n import numpy as np\n import pandas as pd\n\n if isinstance(o, (datetime.datetime, datetime.date, datetime.time, pd.Timestamp)):\n return o.isoformat()\n\n if isinstance(o, np.ndarray):\n return o.tolist()\n\n return super().default(o)\n\n\ndef get_jsonable_input(name, data):\n import numpy as np\n\n if isinstance(data, np.ndarray):\n return data.tolist()\n else:\n raise MlflowException(f\"Incompatible input type:{type(data)} for input {name}.\")\n\n\ndef dump_input_data(data, inputs_key=\"inputs\", params: Optional[Dict[str, Any]] = None):\n \"\"\"\n :param data: Input data.\n :param inputs_key: Key to represent data in the request payload.\n :param params: Additional parameters to pass to the model for inference.\n\n .. Note:: Experimental: This parameter may change or be removed in a future\n release without warning.\n \"\"\"\n import numpy as np\n import pandas as pd\n\n if isinstance(data, pd.DataFrame):\n post_data = {\"dataframe_split\": data.to_dict(orient=\"split\")}\n elif isinstance(data, dict):\n post_data = {inputs_key: {k: get_jsonable_input(k, v) for k, v in data}}\n elif isinstance(data, np.ndarray):\n post_data = {inputs_key: data.tolist()}\n elif isinstance(data, list):\n post_data = {inputs_key: data}\n else:\n post_data = data\n\n if params is not None:\n if not isinstance(params, dict):\n raise MlflowException(\n f\"Params must be a dictionary. Got type '{type(params).__name__}'.\"\n )\n # if post_data is not dictionary, params should be included in post_data directly\n if isinstance(post_data, dict):\n post_data[\"params\"] = params\n\n if not isinstance(post_data, str):\n post_data = json.dumps(post_data, cls=_CustomJsonEncoder)\n\n return post_data\n","repo_name":"mlflow/mlflow","sub_path":"mlflow/utils/proto_json_utils.py","file_name":"proto_json_utils.py","file_ext":"py","file_size_in_byte":20957,"program_lang":"python","lang":"en","doc_type":"code","stars":15878,"dataset":"github-code","pt":"22"} +{"seq_id":"367726006","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom horch.common import _tuple\nfrom horch.models.utils import get_loc_cls_preds\nfrom horch.models.detection.head import SSDHead\nfrom horch.models.modules import Conv2d, get_norm_layer, get_activation, SELayerM, SEModule\n\n\nclass Bottleneck(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride=1, expansion=4):\n super().__init__()\n self.stride = stride\n self.in_channels = in_channels\n self.out_channels = out_channels\n channels = out_channels // expansion\n\n self.conv1 = Conv2d(in_channels, channels, kernel_size=1,\n norm_layer='default', activation='default')\n self.conv2 = Conv2d(channels, channels, kernel_size=3, stride=stride,\n norm_layer='default', activation='default')\n\n self.conv3 = Conv2d(channels, out_channels, kernel_size=1,\n norm_layer='default')\n self.relu3 = get_activation('default')\n\n self.downsample = None\n if stride != 1 or in_channels != out_channels:\n self.downsample = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride,\n norm_layer='default')\n\n def forward(self, x):\n identity = x\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out += identity\n out = self.relu3(out)\n return out\n\n\nclass TransferConnection(nn.Module):\n def __init__(self, in_channels, out_channels, last=False):\n super().__init__()\n self.last = last\n self.conv1 = nn.Sequential(\n Conv2d(in_channels, out_channels, kernel_size=5,\n norm_layer='default', activation='relu', depthwise_separable=True),\n Conv2d(out_channels, out_channels, kernel_size=5,\n norm_layer='default', depthwise_separable=True),\n SEModule(out_channels, reduction=4),\n )\n if not last:\n self.deconv1 = Conv2d(out_channels, out_channels, kernel_size=4, stride=2,\n norm_layer='default', depthwise_separable=True, transposed=True)\n self.nl1 = get_activation('default')\n self.conv2 = Conv2d(\n out_channels, out_channels, kernel_size=5,\n norm_layer='default', activation='default', depthwise_separable=True)\n\n def forward(self, x, x_next=None):\n x = self.conv1(x)\n if not self.last:\n x = x + self.deconv1(x_next)\n x = self.nl1(x)\n x = self.conv2(x)\n return x\n\n\nclass RefineDet(nn.Module):\n def __init__(self, backbone, num_anchors, num_classes, f_channels, inference, extra_levels=(6,)):\n super().__init__()\n self.num_classes = num_classes\n self.backbone = backbone\n self._inference = inference\n\n stages = backbone.out_channels\n\n self.extra_levels = _tuple(extra_levels)\n self.extra_layers = nn.ModuleList([])\n for l in self.extra_levels:\n self.extra_layers.append(\n Bottleneck(stages[-1], f_channels, stride=2)\n )\n stages.append(f_channels)\n\n self.r_head = SSDHead(num_anchors, 1, stages,\n norm_layer='default', lite=True)\n\n self.tcbs = nn.ModuleList([\n TransferConnection(stages[-1], f_channels, last=True)])\n for c in reversed(stages[:-1]):\n self.tcbs.append(\n TransferConnection(c, f_channels, norm_layer='default')\n )\n\n self.d_head = SSDHead(num_anchors, num_classes, _tuple(f_channels, 3),\n norm_layer='default', lite=True)\n\n def forward(self, x):\n cs = self.backbone(x)\n cs = [cs] if torch.is_tensor(cs) else list(cs)\n for l in self.extra_layers:\n cs.append(l(cs[-1]))\n\n r_loc_p, r_cls_p = self.r_head(*cs)\n\n dcs = [self.tcbs[0](cs[-1])]\n for c, tcb in zip(reversed(cs[:-1]), self.tcbs[1:]):\n dcs.append(tcb(c, dcs[-1]))\n dcs.reverse()\n\n d_loc_p, d_cls_p = self.d_head(*dcs)\n\n return r_loc_p, r_cls_p, d_loc_p, d_cls_p\n\n def inference(self, x):\n self.eval()\n with torch.no_grad():\n preds = self.forward(x)\n dets = self._inference(*_tuple(preds))\n self.train()\n return dets\n","repo_name":"MenGuangwen-CN-0411/pytorch-hrvvi-ext","sub_path":"horch/models/detection/light/refinedet.py","file_name":"refinedet.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"40507034453","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 5 01:41:06 2021\n\n@author: victor\n\"\"\"\nclass Solution(object):\n def majorityElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n \n keys = []\n values = []\n \n for i in nums:\n if i not in keys:\n keys.append(i)\n values.append(1)\n \n else:\n index = keys.index(i)\n values[index] += 1\n\n majority = keys[values.index(max(values))]\n \n return majority\n \nif __name__ == '__main__':\n \n nums = [3,2,3]\n # nums = [2,2,1,1,1,2,2]\n \n ans = Solution().majorityElement(nums)\n \n print(ans)","repo_name":"vic4code/leetcode","sub_path":"python/169_Majority_Element.py","file_name":"169_Majority_Element.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"29413579441","text":"#$env:FLASK_ENV = \"development\"\r\nimport math\r\nimport os\r\nimport time\r\n\r\nfrom flask import Flask, render_template, request, json\r\nimport mimetypes\r\nmimetypes.init()\r\nmimetypes.add_type('application/javascript', '.mjs')\r\nmimetypes.add_type('application/javascript', '.js')\r\nmimetypes.add_type('text/css', '.css')\r\napp = Flask(__name__)\r\nnewestMessage = {}\r\n\r\n@app.route('/', methods=['GET'])\r\ndef main():\r\n return render_template('./index.html')\r\n\r\n\r\n@app.route('/poll', methods=['POST'])\r\ndef poll():\r\n requestData = json.loads(request.data)\r\n print(requestData)\r\n print(newestMessage)\r\n print(requestData == newestMessage)\r\n requestTime = math.ceil(time.time())*1000\r\n pollBreak = requestTime + 1000*10\r\n while requestData == newestMessage:\r\n time.sleep(0.01)\r\n if pollBreak < math.ceil(time.time())*1000:\r\n return {'msg': False}\r\n return {'msg': newestMessage}\r\n\r\n@app.route('/printMessage', methods=['POST'])\r\ndef printMessage():\r\n global newestMessage\r\n requestData = json.loads(request.data)\r\n print(requestData)\r\n counter = 0\r\n for x in time.ctime((time.time())):\r\n if x == \":\":\r\n requestData[\"time\"]=time.ctime((time.time()))[counter-2:counter+6]\r\n break\r\n counter = counter+1\r\n newestMessage = requestData\r\n return {}\r\n\r\n@app.route('/getLatest', methods=['POST'])\r\ndef getLatest():\r\n return {'huizong': newestMessage}\r\n\r\nif __name__ == '__main__':\r\n port = int(os.environ.get(\"PORT\", 5001))\r\n app.run(host='0.0.0.0', port=port, threaded=True)\r\n","repo_name":"tapala/IRC-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73631033337","text":"import copy\n\nimport matplotlib\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nimport rclpy\nfrom rclpy.node import Node\n\nfrom driverless_msgs.msg import DoubleMatrix\n\nfrom driverless_common.common import QOS_ALL\n\n\ndef mypause(interval):\n backend = plt.rcParams[\"backend\"]\n if backend in matplotlib.rcsetup.interactive_bk:\n figManager = matplotlib._pylab_helpers.Gcf.get_active()\n if figManager is not None:\n canvas = figManager.canvas\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return\n\n\nclass NodeMatrixVisualisation(Node):\n fig: Figure\n heatmap_ax: Axes\n cbar_ax: Axes\n\n def __init__(self):\n super().__init__(\"matrix_visualisation\")\n\n self.create_subscription(DoubleMatrix, \"matrix\", self.matrix_callback, QOS_ALL)\n\n self.fig, (self.heatmap_ax, self.cbar_ax) = plt.subplots(1, 2, gridspec_kw=dict(width_ratios=[0.9, 0.1]))\n plt.show(block=False)\n\n def matrix_callback(self, msg: DoubleMatrix):\n matrix = np.reshape(np.array(msg.values), (msg.rows, msg.columns))\n self.heatmap_ax.clear()\n self.cbar_ax.clear()\n sns.heatmap(\n matrix,\n ax=copy.copy(self.heatmap_ax),\n cbar_ax=copy.copy(self.cbar_ax),\n vmin=-0.5,\n vmax=0.5,\n cmap=\"seismic\",\n )\n mypause(0.001)\n\n\ndef main():\n rclpy.init()\n node = NodeMatrixVisualisation()\n rclpy.spin(node)\n node.destroy_node()\n rclpy.shutdown()\n","repo_name":"QUT-Motorsport/QUTMS_Driverless","sub_path":"src/common/custom_vis/custom_vis/node_matrix_vis.py","file_name":"node_matrix_vis.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"22"} +{"seq_id":"31963530064","text":"import tkinter as t\n\nwindow = t.Tk()\nwindow.title(\"First GUI Program\")\nwindow.minsize(width=500, height=500)\n\nwindow.config(padx=10, pady=10)\n\n#Labels\nlabel = t.Label(text=\"This is old text\")\nlabel.config(text=\"This is new text\")\n#label.pack()\n#label.place(x=100, y=50)\nlabel.grid(column=0, row=0)\n\n#Buttons\ndef action():\n print(\"Do something\")\n\n#calls action() when pressed\nbutton = t.Button(text=\"Click Me\", command=action)\n#button.pack()\nbutton.grid(column=1, row=1)\n\n#calls action() when pressed\nbutton2 = t.Button(text=\"Click Me 2\", command=action)\n#button.pack()\nbutton2.grid(column=2, row=0)\n\n#Entries\nentry = t.Entry(width=30)\n#Add some text to begin with\nentry.insert(t.END, string=\"Some text to begin with.\")\n#Gets text in entry\nprint(entry.get())\n#entry.pack()\nentry.grid(column=3, row=2)\n\n\nwindow.mainloop()","repo_name":"nurmatthias/100DaysOfCode","sub_path":"day27/tkinter_layout.py","file_name":"tkinter_layout.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3434094840","text":"# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# PoseMemorizer GUI (Maya2018-)\r\n# -----------------------------------------------------------------------------\r\n\r\nimport os\r\nimport traceback\r\nimport json\r\nimport functools\r\n\r\nfrom maya import cmds\r\nfrom maya import mel\r\n\r\nfrom maya.app.general.mayaMixin import MayaQWidgetDockableMixin\r\nfrom maya.OpenMayaUI import MQtUtil\r\n\r\nfrom PySide2 import QtCore\r\nfrom PySide2 import QtWidgets\r\n\r\nimport pose_memorizer as pomezer\r\nimport pose_memorizer.core as pomezer_core\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\nWINDOWS_NAME = \"PoseMemorizer\"\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# -----------------------------------------------------------------------------\r\n# Callback\r\nclass Callback(object):\r\n \"\"\"docstring for Callback.\"\"\"\r\n\r\n def __init__(self, func, *args, **kwargs):\r\n super(Callback, self).__init__(*args, **kwargs)\r\n self._func = func\r\n self._args = args\r\n self._kwargs = kwargs\r\n return\r\n\r\n def __call__(self):\r\n cmds.undoInfo(openChunk=True)\r\n try:\r\n return self._func(*self._args, **self._kwargs)\r\n except:\r\n traceback.print_exc()\r\n finally:\r\n cmds.undoInfo(closeChunk=True)\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# -----------------------------------------------------------------------------\r\nclass OptionFile(object):\r\n\r\n FILENAME = \"option.json\"\r\n\r\n def __init__(self):\r\n super(OptionFile, self).__init__()\r\n self.version = pomezer._version\r\n self.parameter = {}\r\n self._file_path = self._get_file_path()\r\n return\r\n\r\n def unify_sep(func):\r\n\r\n @functools.wraps(func)\r\n def _wrap(*args, **kwargs):\r\n\r\n def unify_path(path):\r\n sep = os.sep\r\n if sep == \"\\\\\":\r\n return path.replace(\"/\", sep)\r\n else:\r\n return path.replace(\"\\\\\", sep)\r\n\r\n path = func(*args, **kwargs)\r\n\r\n if hasattr(path, \"__iter__\") is True:\r\n return [unify_path(p) for p in path]\r\n else:\r\n return unify_path(path)\r\n return _wrap\r\n\r\n def _check_file_path(self):\r\n dir_path = os.path.dirname(self._file_path)\r\n if os.path.exists(dir_path) is False:\r\n os.makedirs(dir_path)\r\n return\r\n\r\n @unify_sep\r\n def _get_file_path(self):\r\n prefs_path = os.path.join(cmds.about(preferences=True), \"prefs\")\r\n ui_lang = cmds.about(uiLanguage=True)\r\n if ui_lang != \"en_US\":\r\n prefs_path = os.path.join(prefs_path, ui_lang, \"prefs\")\r\n\r\n return os.path.join(prefs_path, \"scripts\", pomezer._config_dir, self.FILENAME)\r\n\r\n def set_parameter(self, parameter):\r\n self.parameter = parameter\r\n return\r\n\r\n def load(self):\r\n data = {}\r\n with open(self._file_path, \"r\") as f:\r\n data = json.load(f)\r\n file_version = data.get(\"version\", None)\r\n if file_version != self.version:\r\n return None\r\n\r\n return data\r\n\r\n def save(self):\r\n data = {\"version\": self.version}\r\n data.update(self.parameter)\r\n self._check_file_path()\r\n with open(self._file_path, \"w\") as f:\r\n json.dump(data, f, indent=4)\r\n return\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# -----------------------------------------------------------------------------\r\n# ScrollWidget\r\nclass ScrollWidget(QtWidgets.QScrollArea):\r\n\r\n def __init__(self, parent=None):\r\n super(ScrollWidget, self).__init__(parent)\r\n self._parent = parent\r\n # scroll\r\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\r\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\r\n\r\n self.setWidgetResizable(True)\r\n self.setFrameShape(QtWidgets.QFrame.NoFrame)\r\n\r\n self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,\r\n QtWidgets.QSizePolicy.Expanding)\r\n self.setFocusPolicy(QtCore.Qt.NoFocus)\r\n return\r\n\r\n\r\n# HorizontalLine\r\nclass HorizontalLine(QtWidgets.QFrame):\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(HorizontalLine, self).__init__(*args, **kwargs)\r\n self.setFrameShape(QtWidgets.QFrame.HLine)\r\n return\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# PoseListWidget\r\nclass PoseListWidget(QtWidgets.QListWidget):\r\n\r\n itemRightClicked = QtCore.Signal(QtWidgets.QListWidgetItem)\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(PoseListWidget, self).__init__(*args, **kwargs)\r\n self.__start_index = None\r\n self.__drag_button = None\r\n\r\n self.setObjectName((\"pose_list\"))\r\n self.setUniformItemSizes(True)\r\n self.setFocusPolicy(QtCore.Qt.NoFocus)\r\n return\r\n\r\n def mousePressEvent(self, event):\r\n self.clearSelection()\r\n self.__start_index = self.indexAt(event.pos())\r\n self.__drag_button = event.button()\r\n super(self.__class__, self).mousePressEvent(event)\r\n return\r\n\r\n def mouseMoveEvent(self, event):\r\n if self.__drag_button == QtCore.Qt.RightButton:\r\n index = self.indexAt(event.pos())\r\n if index.row() >= 0:\r\n self.setSelection(self.rectForIndex(index),\r\n self.selectionCommand(index))\r\n super(self.__class__, self).mouseMoveEvent(event)\r\n return\r\n\r\n def mouseReleaseEvent(self, event):\r\n if event.button() == QtCore.Qt.RightButton:\r\n items = self.selectedItems()\r\n if len(items) > 0 and self.__start_index == self.indexAt(event.pos()):\r\n self.itemRightClicked.emit(items.pop())\r\n self.__start_index = None\r\n self.__drag_button = None\r\n super(self.__class__, self).mouseReleaseEvent(event)\r\n return\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# PoseMemorizerDockableWidget\r\nclass PoseMemorizerDockableWidget(MayaQWidgetDockableMixin, ScrollWidget):\r\n\r\n MIRRORNAME = [\"Left : Right\", \"left : right\", \"_L : _R\", \"_l : _r\"]\r\n MIRRORAXIS = [\"X\", \"Y\", \"Z\"]\r\n\r\n def __init__(self, parent=None):\r\n super(PoseMemorizerDockableWidget, self).__init__(parent=parent)\r\n\r\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\r\n\r\n self.pomezer = pomezer_core.PoseMemorizer()\r\n self.op_file = OptionFile()\r\n\r\n self.widget = QtWidgets.QWidget(self)\r\n widget = self.widget\r\n\r\n # layout\r\n self.layout = QtWidgets.QVBoxLayout(self)\r\n layout = self.layout\r\n layout.setSpacing(6)\r\n layout.setContentsMargins(8, 8, 8, 8)\r\n\r\n button_layout = QtWidgets.QHBoxLayout(self)\r\n button_layout.setSpacing(4)\r\n button_layout.setContentsMargins(0, 0, 0, 0)\r\n\r\n mirror_layout = QtWidgets.QHBoxLayout(self)\r\n mirror_layout.setSpacing(16)\r\n mirror_layout.setContentsMargins(0, 0, 0, 0)\r\n\r\n check_layout = QtWidgets.QHBoxLayout(self)\r\n check_layout.setSpacing(16)\r\n check_layout.setContentsMargins(0, 0, 0, 0)\r\n\r\n # Widget\r\n self.memorize_button = QtWidgets.QPushButton(\"Memorize\", self)\r\n memorize_button = self.memorize_button\r\n memorize_button.clicked.connect(Callback(self._click_memorize))\r\n\r\n self.update_button = QtWidgets.QPushButton(\"Update\", self)\r\n update_button = self.update_button\r\n update_button.clicked.connect(self._click_update)\r\n\r\n self.delete_button = QtWidgets.QPushButton(\"Delete\", self)\r\n delete_button = self.delete_button\r\n delete_button.clicked.connect(self._click_delete)\r\n\r\n self.pose_list = PoseListWidget(self)\r\n pose_list = self.pose_list\r\n pose_list.itemDoubleClicked.connect(self._edit_item_name)\r\n pose_list.itemRightClicked.connect(self._right_click_item)\r\n\r\n self.mirror_name_combo = QtWidgets.QComboBox(self)\r\n mirror_name_combo = self.mirror_name_combo\r\n mirror_name_combo.addItems(self.MIRRORNAME)\r\n\r\n self.mirror_axis_combo = QtWidgets.QComboBox(self)\r\n mirror_axis_combo = self.mirror_axis_combo\r\n mirror_axis_combo.addItems(self.MIRRORAXIS)\r\n\r\n self.mirror_check = QtWidgets.QCheckBox(\"Mirror\", self)\r\n mirror_check = self.mirror_check\r\n mirror_check.setChecked(True)\r\n\r\n self.setkey_check = QtWidgets.QCheckBox(\"Set Key\", self)\r\n setkey_check = self.setkey_check\r\n setkey_check.setChecked(False)\r\n # setkey_check.setFixedHeight(28)\r\n\r\n self.namespace_check = QtWidgets.QCheckBox(\"Namespace Match\", self)\r\n namespace_check = self.namespace_check\r\n namespace_check.setChecked(True)\r\n # namespace_check.setFixedHeight(28)\r\n\r\n self.apply_button = QtWidgets.QPushButton(\"Apply\", self)\r\n apply_button = self.apply_button\r\n apply_button.clicked.connect(Callback(self._click_apply))\r\n apply_button.setFixedHeight(28)\r\n\r\n button_layout.addWidget(memorize_button, 3)\r\n button_layout.addWidget(update_button, 2)\r\n button_layout.addWidget(delete_button, 1)\r\n\r\n mirror_layout.addWidget(mirror_axis_combo)\r\n mirror_layout.addWidget(mirror_check)\r\n\r\n check_layout.addWidget(setkey_check)\r\n check_layout.addWidget(namespace_check)\r\n\r\n layout.addLayout(button_layout)\r\n layout.addWidget(pose_list)\r\n layout.addWidget(mirror_name_combo)\r\n layout.addLayout(mirror_layout)\r\n layout.addWidget(HorizontalLine())\r\n layout.addLayout(check_layout)\r\n layout.addWidget(HorizontalLine())\r\n layout.addWidget(apply_button)\r\n\r\n widget.setLayout(layout)\r\n self.setWidget(widget)\r\n\r\n self._option_load()\r\n QtWidgets.qApp.aboutToQuit.connect(self._option_save, QtCore.Qt.UniqueConnection)\r\n return\r\n\r\n def dockCloseEventTriggered(self):\r\n self._option_save()\r\n return\r\n\r\n def _add_pose(self, pose_data):\r\n name = pose_data.keys()[0]\r\n item = QtWidgets.QListWidgetItem()\r\n item.setData(QtCore.Qt.DisplayRole, name)\r\n item.setData(QtCore.Qt.UserRole + 1, pose_data)\r\n item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)\r\n self.pose_list.addItem(item)\r\n self.pose_list.clearSelection()\r\n return\r\n\r\n def _get_ui_parameter(self):\r\n reslut = {}\r\n reslut[\"mirror_name\"] = self.mirror_name_combo.currentText()\r\n reslut[\"mirror_axis\"] = self.mirror_axis_combo.currentText()\r\n reslut[\"mirror\"] = self.mirror_check.isChecked()\r\n reslut[\"setkey\"] = self.setkey_check.isChecked()\r\n reslut[\"namespace\"] = self.namespace_check.isChecked()\r\n return reslut\r\n\r\n def _get_sel_item(self):\r\n items = self.pose_list.selectedItems()\r\n if len(items) == 0:\r\n return None\r\n return items[0]\r\n\r\n def _edit_item_name(self, item):\r\n self.pose_list.editItem(item)\r\n return\r\n\r\n def _right_click_item(self):\r\n item = self._get_sel_item()\r\n if item is None:\r\n return\r\n pose_data = item.data(QtCore.Qt.UserRole + 1)\r\n cmds.select(pose_data.keys(), replace=True)\r\n return\r\n\r\n def _click_memorize(self):\r\n pose_data = self.pomezer.get_pose()\r\n if len(pose_data) > 0:\r\n self._add_pose(pose_data)\r\n return\r\n\r\n def _click_update(self):\r\n item = self._get_sel_item()\r\n if item is None:\r\n return\r\n transform = item.data(QtCore.Qt.UserRole + 1).keys()\r\n pose_data = self.pomezer.get_pose(transform)\r\n item.setData(QtCore.Qt.UserRole + 1, pose_data)\r\n return\r\n\r\n def _click_delete(self):\r\n item = self._get_sel_item()\r\n if item is None:\r\n return\r\n self.pose_list.takeItem(self.pose_list.row(item))\r\n del(item)\r\n return\r\n\r\n def _click_apply(self):\r\n item = self._get_sel_item()\r\n if item is None:\r\n return\r\n pose_data = item.data(QtCore.Qt.UserRole + 1)\r\n ui_parameter = self._get_ui_parameter()\r\n mirror_name = ui_parameter[\"mirror_name\"]\r\n mirror_axis = ui_parameter[\"mirror_axis\"]\r\n mirror = ui_parameter[\"mirror\"]\r\n setkey = ui_parameter[\"setkey\"]\r\n namespace = ui_parameter[\"namespace\"]\r\n self.pomezer.apply_pose(pose=pose_data,\r\n mirror=mirror,\r\n mirror_name=mirror_name,\r\n mirror_axis=mirror_axis,\r\n setkey=setkey,\r\n namespace=namespace)\r\n return\r\n\r\n def _option_load(self):\r\n ui_parameter = self.op_file.load()\r\n if ui_parameter is None:\r\n return\r\n self.mirror_name_combo.setCurrentText(ui_parameter[\"mirror_name\"])\r\n self.mirror_axis_combo.setCurrentText(ui_parameter[\"mirror_axis\"])\r\n self.mirror_check.setChecked(ui_parameter[\"mirror\"])\r\n self.setkey_check.setChecked(ui_parameter[\"setkey\"])\r\n self.namespace_check.setChecked(ui_parameter[\"namespace\"])\r\n return\r\n\r\n def _option_save(self):\r\n ui_parameter = self._get_ui_parameter()\r\n self.op_file.set_parameter(ui_parameter)\r\n self.op_file.save()\r\n return\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# PoseMemorizerMainWindow\r\nclass PoseMemorizerMainWindow(object):\r\n\r\n HEIGHT = 360\r\n WIDTH = 280\r\n\r\n _windows_name = WINDOWS_NAME\r\n _windows_title = WINDOWS_NAME\r\n\r\n def __init__(self, restore=False):\r\n super(PoseMemorizerMainWindow, self).__init__()\r\n self.name = self._windows_name.replace(\" \", \"_\").lower()\r\n self.workspace_name = \"{}WorkspaceControl\".format(self.name)\r\n\r\n self.widget = None\r\n\r\n # Restore\r\n if restore is True:\r\n self._make_widget()\r\n # Restore parent\r\n mixinPtr = MQtUtil.findControl(self.name)\r\n wks = MQtUtil.findControl(self.workspace_name)\r\n MQtUtil.addWidgetToMayaLayout(long(mixinPtr), long(wks))\r\n\r\n # Create New Workspace\r\n else:\r\n self._check_workspase()\r\n self._make_widget()\r\n\r\n self._set_stylesheet()\r\n return\r\n\r\n def _check_workspase(self):\r\n wks = MQtUtil.findControl(self.workspace_name)\r\n if wks is not None:\r\n self.close()\r\n return\r\n\r\n def _set_stylesheet(self):\r\n try:\r\n styleFile = os.path.join(os.path.dirname(__file__), \"style.css\")\r\n with open(styleFile, \"r\") as f:\r\n style = f.read()\r\n except IOError:\r\n style = \"\"\r\n\r\n self.widget.setStyleSheet(style)\r\n return\r\n\r\n def _resize(self, height, width):\r\n workspace_name = self.workspace_name\r\n cmds.workspaceControl(workspace_name, edit=True, resizeHeight=height)\r\n cmds.workspaceControl(workspace_name, edit=True, resizeWidth=width)\r\n return\r\n\r\n def _make_uiscript(self):\r\n reslut = (\"from pose_memorizer import gui;\"\r\n \"pomezer_ui=gui.{classname}(restore=True)\")\r\n\r\n class_name = self.__class__.__name__\r\n return reslut.format(classname=class_name)\r\n\r\n def _make_close_command(self):\r\n return \"deleteUI {};\".format(self.workspace_name)\r\n\r\n def _make_widget(self):\r\n self.widget = PoseMemorizerDockableWidget()\r\n self.widget.setObjectName(self.name)\r\n return\r\n\r\n def close(self):\r\n # Mel Command\r\n cmd = self._make_close_command()\r\n mel.eval(cmd)\r\n return\r\n\r\n def show(self):\r\n widget = self.widget\r\n uiscript = self._make_uiscript()\r\n\r\n # Show Workspace & Set uiscript\r\n widget.show(dockable=True, uiScript=uiscript, retain=False)\r\n # Resize Workspace\r\n self._resize(self.HEIGHT, self.WIDTH)\r\n # Set Windows Title\r\n widget.setWindowTitle(self._windows_title)\r\n return\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\ndef main():\r\n # show gui\r\n pomezer_window = PoseMemorizerMainWindow()\r\n pomezer_window.show()\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n# EOF\r\n# -----------------------------------------------------------------------------\r\n","repo_name":"shita-parap/maya-pose_memorizer","sub_path":"pose_memorizer/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":16832,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"12134410262","text":"#!/usr/bin/env python\nimport os\nimport requests\nimport time\nfrom datetime import datetime\nfrom threading import Thread\nfrom kombu.mixins import ConsumerMixin\nfrom kombu.log import get_logger\nfrom kombu import Connection\nfrom kombu.utils.debug import setup_logging\nfrom kombu import Exchange, Queue, binding\nfrom requests import HTTPError\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nfrom optscale_client.auth_client.client_v2 import Client as AuthClient\nfrom optscale_client.config_client.client import Client as ConfigClient\nfrom optscale_client.rest_api_client.client_v2 import Client as RestClient\nfrom optscale_client.slacker_client.client import Client as SlackerClient\n\nLOG = get_logger(__name__)\nQUEUE_NAME = 'slacker-task'\nTASK_EXCHANGE = Exchange('activities-tasks', type='topic')\nTASK_QUEUE = Queue(QUEUE_NAME, TASK_EXCHANGE, bindings=[\n binding(TASK_EXCHANGE, routing_key='booking.action.#'),\n binding(TASK_EXCHANGE, routing_key='alert.#')])\nACTION_MSG_MAP = {\n 'booking_acquire': 'env_acquired',\n 'booking_release': 'env_released',\n 'constraint_violated': 'constraint_violated_alert',\n 'expenses_alert': 'alert',\n 'alert_added': 'alert_added',\n 'alert_removed': 'alert_removed',\n 'env_active_state_changed': 'env_active_state_changed',\n 'env_property_updated': 'env_property_updated'\n}\n\n\nclass InvalidExecutorTask(Exception):\n pass\n\n\nclass SlackerExecutorWorker(ConsumerMixin):\n def __init__(self, connection, config_cl):\n self.connection = connection\n self.config_cl = config_cl\n self._auth_cl = None\n self._rest_cl = None\n self._slacker_cl = None\n self.running = True\n self.thread = Thread(target=self.heartbeat)\n self.thread.start()\n\n @property\n def auth_cl(self):\n if self._auth_cl is None:\n self._auth_cl = AuthClient(\n url=self.config_cl.auth_url(),\n secret=self.config_cl.cluster_secret(),\n verify=False)\n return self._auth_cl\n\n @property\n def rest_cl(self):\n if self._rest_cl is None:\n self._rest_cl = RestClient(\n url=self.config_cl.restapi_url(),\n secret=self.config_cl.cluster_secret(),\n verify=False)\n return self._rest_cl\n\n @property\n def slacker_cl(self):\n if not self._slacker_cl:\n self._slacker_cl = SlackerClient(\n url=self.config_cl.slacker_url(),\n secret=self.config_cl.cluster_secret())\n return self._slacker_cl\n\n def send(self, type_, params, channel_id=None, team_id=None,\n auth_user_id=None, archived_channel_handle=False,\n organization_id=None, pool_id=None, warning_params=None):\n try:\n _, resp = self.slacker_cl.send_message(\n type_=type_, channel_id=channel_id, team_id=team_id,\n auth_user_id=auth_user_id, parameters=params)\n except HTTPError as exc:\n if 'archived' in exc.args[0] and archived_channel_handle:\n slack_managers = self._get_managers_connected_to_slack(\n organization_id, pool_id)\n for manager in slack_managers:\n params['warning'] = 'is_archived'\n if warning_params:\n params['warning_params'] = warning_params\n self.slacker_cl.send_message(\n type_=type_,\n auth_user_id=manager['auth_user_id'],\n parameters=params)\n else:\n raise\n\n def get_consumers(self, consumer, channel):\n return [consumer(queues=[TASK_QUEUE], accept=['json'],\n callbacks=[self.process_task])]\n\n @staticmethod\n def ts_to_slacker_time_format(timestamp):\n if timestamp:\n date = datetime.utcfromtimestamp(timestamp)\n return datetime.strftime(date, \"%m/%d/%Y %H:%M UTC\")\n else:\n return 'Not set'\n\n def get_pool_slack_channels_map(self, alerts):\n \"\"\"\n Gets a map of alerts ids and slack contacts\n :param alerts: list of dictionary alerts\n :return: dict: {pool_id: {'alert_id': [(slack_channel_id, slack_team_id)]}}\n \"\"\"\n pool_channels_map = {}\n for alert in alerts:\n contacts = [(x['slack_channel_id'], x['slack_team_id'])\n for x in alert['contacts'] if x.get('slack_channel_id')]\n if not pool_channels_map.get(alert['pool_id']):\n pool_channels_map[alert['pool_id']] = {}\n if not pool_channels_map[alert['pool_id']].get(alert['id']):\n pool_channels_map[alert['pool_id']][alert['id']] = set()\n pool_channels_map[alert['pool_id']][alert['id']].update(contacts)\n if alert['include_children']:\n _, pool = self.rest_cl.pool_get(alert['pool_id'],\n children=True)\n children_pools = [x['id'] for x in pool['children']]\n for child in children_pools:\n if not pool_channels_map.get(child):\n pool_channels_map[child] = {alert['id']: set()}\n if not pool_channels_map[child].get(alert['id']):\n pool_channels_map[child][alert['id']] = set()\n pool_channels_map[child][alert['id']].update(contacts)\n return pool_channels_map\n\n def resource_booking_status(self, current_booking):\n if not current_booking:\n booking_status = 'available'\n elif current_booking['released_at'] != 0:\n booking_status = 'occupied by {0} until {1}'.format(\n current_booking['acquired_by']['name'],\n self.ts_to_slacker_time_format(\n current_booking['released_at']))\n else:\n booking_status = 'occupied by {0}'.format(\n current_booking['acquired_by']['name'])\n return booking_status\n\n @staticmethod\n def get_current_booking(bookings):\n now_ts = int(datetime.utcnow().timestamp())\n for booking in bookings:\n if booking['acquired_since'] <= now_ts and (\n booking['released_at'] == 0 or\n booking['released_at'] > now_ts):\n return booking\n\n @staticmethod\n def get_upcoming_booking(bookings, current_booking=None):\n acquired_since = int(datetime.utcnow().timestamp())\n if current_booking and current_booking.get('released_at'):\n acquired_since = current_booking['released_at']\n future_bookings = [x for x in bookings\n if x['acquired_since'] > acquired_since]\n if future_bookings:\n return min(future_bookings, key=lambda x: x['acquired_since'])\n\n def get_resource_booking_info(self, resource_id):\n _, bookings = self.rest_cl.resource_bookings_get(resource_id)\n current_booking = self.get_current_booking(bookings['bookings'])\n booking_status = self.resource_booking_status(current_booking)\n upcoming_booking = self.get_upcoming_booking(bookings['bookings'],\n current_booking)\n if upcoming_booking:\n upcoming_booking[\n 'acquired_since'] = self.ts_to_slacker_time_format(\n upcoming_booking['acquired_since'])\n upcoming_booking['released_at'] = self.ts_to_slacker_time_format(\n upcoming_booking['released_at'])\n return current_booking, booking_status, upcoming_booking\n\n @staticmethod\n def get_resource_name(resource):\n return resource.get('name') or resource.get('cloud_resource_id')\n\n @staticmethod\n def check_action_object_type(action, object_type):\n action_objects_map = {\n 'expenses_alert': ['pool'],\n 'booking_acquire': ['booking'],\n 'booking_release': ['booking'],\n 'constraint_violated': ['pool', 'user'],\n 'env_property_updated': ['resource'],\n 'env_active_state_changed': ['resource'],\n 'alert_added': ['pool_alert'],\n 'alert_removed': ['pool_alert']\n }\n if object_type not in action_objects_map.get(action, []):\n raise InvalidExecutorTask('Invalid object type %s for task %s' % (\n object_type, action))\n\n def _get_managers_connected_to_slack(self, organization_id, pool_id):\n pool_permission = ['MANAGE_POOLS']\n _, pool_managers = self.rest_cl.authorized_employee_list(\n organization_id, 'pool', pool_id, pool_permission)\n _, users = self.auth_cl.user_list([x['auth_user_id']\n for x in pool_managers['employees']])\n slack_users = [x['id'] for x in users if x['slack_connected']]\n slack_managers = list(filter(lambda x: x['auth_user_id'] in slack_users,\n pool_managers['employees']))\n if not slack_managers:\n org_permission = ['EDIT_PARTNER']\n _, org_managers = self.rest_cl.authorized_employee_list(\n organization_id, 'organization', organization_id,\n org_permission)\n _, users = self.auth_cl.user_list(\n [x['auth_user_id'] for x in org_managers['employees']])\n slack_users = [x['id'] for x in users if x['slack_connected']]\n slack_managers = list(\n filter(lambda x: x['auth_user_id'] in slack_users,\n org_managers['employees']))\n return slack_managers\n\n def get_warning_params(self, alert, pool, organization, channel_id):\n return {\n 'organization_id': organization.get('id'),\n 'public_ip': self.config_cl.public_ip(),\n 'channel_id': channel_id,\n 'based': alert.get('based'),\n 'limit': pool.get('limit'),\n 'threshold': alert.get('threshold'),\n 'threshold_type': alert.get('threshold_type'),\n 'include_children': alert.get('include_children'),\n 'pool_id': pool.get('id'),\n 'pool_name': pool.get('name'),\n 'currency': organization.get('currency')\n }\n\n def execute_booking_acquire_release(self, organization_id, booking_id,\n action, object_type, meta=None):\n _, pool_alerts = self.rest_cl.alert_list(organization_id)\n slack_alerts = {x['id']: x for x in pool_alerts['alerts']\n if x['based'] == 'env_change' and\n any(contact.get('slack_channel_id')\n for contact in x['contacts'])}\n if not slack_alerts:\n return\n _, booking = self.rest_cl.shareable_book_get(booking_id)\n\n resource_id = booking['resource_id']\n _, organization = self.rest_cl.organization_get(organization_id)\n current_booking, booking_status, upcoming_booking = \\\n self.get_resource_booking_info(resource_id)\n _, resource = self.rest_cl.cloud_resource_get(resource_id)\n params = {'resource_id': resource_id,\n 'resource_name': self.get_resource_name(resource),\n 'public_ip': self.config_cl.public_ip(),\n 'org_id': organization_id,\n 'org_name': organization['name'],\n 'upcoming_booking': upcoming_booking,\n 'booking_status': booking_status}\n pool_channels_map = self.get_pool_slack_channels_map(\n slack_alerts.values())\n for alert_id, contacts in pool_channels_map.get(\n resource['pool_id']).items():\n _, alert = self.rest_cl.alert_get(alert_id)\n _, pool = self.rest_cl.pool_get(alert['pool_id'])\n for contact in contacts:\n warning_params = self.get_warning_params(\n slack_alerts.get(alert_id, {}), pool, organization,\n contact[0])\n self.send(\n ACTION_MSG_MAP.get(action), params, contact[0], contact[1],\n archived_channel_handle=True,\n organization_id=organization_id, pool_id=pool['id'],\n warning_params=warning_params)\n\n def execute_expense_alert(self, organization_id, pool_id, action,\n object_type, meta):\n alert_id = meta['alert_id']\n pool_limit = meta.get('limit', 0)\n cost = meta.get('cost', 0)\n _, alert = self.rest_cl.alert_get(alert_id)\n _, organization = self.rest_cl.organization_get(organization_id)\n _, pool = self.rest_cl.pool_get(pool_id)\n params = {\n 'pool_name': pool['name'],\n 'organization_name': organization['name'],\n 'organization_id': organization_id,\n 'public_ip': self.config_cl.public_ip(),\n 'pool_id': pool_id,\n 'limit': pool_limit,\n 'cost': cost,\n 'based': alert['based'],\n 'threshold': alert['threshold'],\n 'threshold_type': alert['threshold_type'],\n 'currency': organization['currency']\n }\n contacts = alert['contacts']\n alert_pool = pool\n if alert['pool_id'] != pool_id:\n _, alert_pool = self.rest_cl.pool_get(alert['pool_id'])\n for contact in contacts:\n if contact.get('slack_channel_id'):\n warning_params = self.get_warning_params(\n alert, alert_pool, organization,\n contact['slack_channel_id'])\n self.send(\n ACTION_MSG_MAP.get(action), params,\n contact['slack_channel_id'], contact['slack_team_id'],\n archived_channel_handle=True,\n organization_id=organization_id, pool_id=pool_id,\n warning_params=warning_params)\n\n def _env_active_state_changed_params(self, resource, meta):\n return {\n 'previous_state': meta.get('previous_state'),\n 'new_state': meta.get('new_state')\n }\n\n def _env_property_updated_params(self, resource, meta):\n _, booking_status, _ = self.get_resource_booking_info(resource['id'])\n _, bookings = self.rest_cl.resource_bookings_get(resource['id'])\n\n return {\n 'env_properties': meta.get('env_properties'),\n 'current_properties': resource.get('env_properties'),\n 'booking_status': booking_status\n }\n\n def execute_env_change_alert(self, organization_id, resource_id, action,\n object_type, meta):\n alert_id = meta['alert_id']\n _, alert = self.rest_cl.alert_get(alert_id)\n _, organization = self.rest_cl.organization_get(organization_id)\n _, resource = self.rest_cl.cloud_resource_get(resource_id)\n\n msg_type_func_map = {\n 'env_active_state_changed': self._env_active_state_changed_params,\n 'env_property_updated': self._env_property_updated_params\n }\n params = msg_type_func_map[action](resource, meta)\n params.update({\n 'resource_id': resource_id,\n 'resource_name': self.get_resource_name(resource),\n 'public_ip': self.config_cl.public_ip(),\n 'org_name': organization['name'],\n 'org_id': organization_id,\n })\n\n contacts = alert['contacts']\n pool_id = alert['pool_id']\n _, alert_pool = self.rest_cl.pool_get(alert['pool_id'])\n for contact in contacts:\n if contact.get('slack_channel_id'):\n warning_params = self.get_warning_params(\n alert, alert_pool, organization,\n contact['slack_channel_id'])\n self.send(\n ACTION_MSG_MAP.get(action), params,\n contact['slack_channel_id'], contact['slack_team_id'],\n archived_channel_handle=True,\n organization_id=organization_id, pool_id=pool_id,\n warning_params=warning_params)\n\n def execute_constraint_violated(self, organization_id, object_id,\n action, object_type, meta):\n _, organization = self.rest_cl.organization_get(organization_id)\n params = {\n 'violations': meta.get('violations'),\n 'public_ip': self.config_cl.public_ip(),\n 'org_name': organization['name'],\n 'org_id': organization_id,\n }\n if object_type == 'user':\n self.send(ACTION_MSG_MAP.get(action), params,\n auth_user_id=object_id)\n elif object_type == 'pool':\n alert_id = meta.get('alert_id')\n if not alert_id:\n pass\n _, alert = self.rest_cl.alert_get(alert_id)\n for contact in alert['contacts']:\n if contact.get('slack_channel_id'):\n _, pool = self.rest_cl.pool_get(alert['pool_id'])\n warning_params = self.get_warning_params(\n alert, pool, organization, contact['slack_channel_id'])\n self.send(\n ACTION_MSG_MAP.get(action), params,\n contact['slack_channel_id'], contact['slack_team_id'],\n archived_channel_handle=True,\n organization_id=organization_id, pool_id=pool['id'],\n warning_params=warning_params)\n else:\n raise InvalidExecutorTask(\n 'Unsupported object_type %s for task type %s' % (object_type,\n action))\n\n def execute_alert_added_removed(self, organization_id, alert_id, action,\n object_type, meta):\n _, organization = self.rest_cl.organization_get(organization_id)\n alert = meta.get('alert', {})\n pool_id = meta.get('alert', {}).get('pool_id')\n _, pool = self.rest_cl.pool_get(pool_id)\n params = {\n 'pool_name': pool['name'],\n 'pool_id': pool['id'],\n 'limit': pool['limit'],\n 'initiator_name': meta.get('initiator_name'),\n 'initiator_email': meta.get('initiator_email'),\n 'public_ip': self.config_cl.public_ip(),\n 'organization_id': organization_id,\n 'currency': organization['currency']\n }\n for p in ['based', 'threshold', 'threshold_type', 'include_children']:\n params[p] = alert.get(p)\n\n for contact in alert['contacts']:\n if contact.get('slack_channel_id'):\n warning_params = self.get_warning_params(\n alert, pool, organization, contact['slack_channel_id'])\n self.send(\n ACTION_MSG_MAP.get(action), params,\n contact['slack_channel_id'], contact['slack_team_id'],\n archived_channel_handle=True,\n organization_id=organization_id, pool_id=pool['id'],\n warning_params=warning_params)\n\n def execute(self, task):\n organization_id = task.get('organization_id')\n object_id = task.get('object_id')\n object_type = task.get('object_type')\n action = task.get('action')\n meta = task.get('meta')\n LOG.info('Started processing for object %s task type for %s '\n 'for organization %s' % (object_id, action, organization_id))\n task_params = {\n 'organization_id': organization_id,\n 'object_type': object_type,\n 'object_id': object_id,\n 'action': action,\n }\n if any(map(lambda x: x is None, task_params.values())):\n raise InvalidExecutorTask(\n 'Invalid task received: {}'.format(task))\n self.check_action_object_type(action, object_type)\n\n action_func_map = {\n 'expenses_alert': self.execute_expense_alert,\n 'booking_acquire': self.execute_booking_acquire_release,\n 'booking_release': self.execute_booking_acquire_release,\n 'constraint_violated': self.execute_constraint_violated,\n 'env_property_updated': self.execute_env_change_alert,\n 'env_active_state_changed': self.execute_env_change_alert,\n 'alert_added': self.execute_alert_added_removed,\n 'alert_removed': self.execute_alert_added_removed\n }\n try:\n func = action_func_map[action]\n except KeyError:\n raise InvalidExecutorTask(\n 'Unknown action type: %s' % action)\n func(organization_id, object_id, action, object_type, meta)\n\n def process_task(self, body, message):\n try:\n self.execute(body)\n except Exception as exc:\n LOG.exception('Processing task failed: %s', str(exc))\n message.ack()\n\n def heartbeat(self):\n while self.running:\n self.connection.heartbeat_check()\n time.sleep(1)\n\n\nif __name__ == '__main__':\n requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n debug = os.environ.get('DEBUG', False)\n log_level = 'DEBUG' if debug else 'INFO'\n setup_logging(loglevel=log_level, loggers=[''])\n\n config_cl = ConfigClient(\n host=os.environ.get('HX_ETCD_HOST'),\n port=int(os.environ.get('HX_ETCD_PORT')),\n )\n config_cl.wait_configured()\n conn_str = 'amqp://{user}:{pass}@{host}:{port}'.format(\n **config_cl.read_branch('/rabbit'))\n with Connection(conn_str) as conn:\n try:\n worker = SlackerExecutorWorker(conn, config_cl)\n worker.run()\n except KeyboardInterrupt:\n worker.running = False\n worker.thread.join()\n LOG.info('Shutdown received')\n","repo_name":"hystax/optscale","sub_path":"docker_images/slacker_executor/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":22024,"program_lang":"python","lang":"en","doc_type":"code","stars":646,"dataset":"github-code","pt":"22"} +{"seq_id":"44173160130","text":"\n# coding: utf-8\n\n# ## Evaluation of Model Performance With Quantiles\n# \n# One thing I had to learn quickly in my new job as a data scientist was to evaluate the performance of models in quantiles instead of with a confusion matrix. I hope this little tutorial helps those who are in the same boat!\n\n# In[16]:\n\n#Load packages\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom operator import itemgetter \nimport os\nfrom sklearn.metrics import confusion_matrix\nos.getcwd()\nget_ipython().magic('pylab inline')\nimport time\n\n# In[17]:\n\n#Start clock to get a runbook time at the end. Useful for optimiation of your code\nstart_time = time.time()\n\n\n# In[18]:\n\n# Set Seed to get consistent random data\nrandom.seed(101)\n\n# Test Set Seed. for random.seed(101), this number should equal 863\nnp.random.randint(1000)\n\n\n# In[19]:\n\n#randomely generate some 'probabilities'. This will be our model output.\npropensity_scores = pd.DataFrame(np.random.rand(10000))\n\n\n# In[20]:\n\n#randomly generate a binary target variable\nactual = pd.DataFrame(np.random.randint(2, size = (1,10000))).transpose()\n\n\n# In[21]:\n\n# Combine and rename our randomly generated data to get two columns and 10,000 rows\ndf = pd.concat([propensity_scores,actual],axis = 1)\ndf.columns = ['pos_prob','target']\n\ndec = df\n\n\n# In[22]:\n\n#You need the output of your models to look something like this. Pos_prob indicates the model score \n# (probability, log_likelyhood, etc.) for each instance, while target indicates the actual dependent variable\n# (which for this case will be binary).\nprint(dec.head())\nprint(dec.shape)\n\n\n# In[23]:\n\n# Change a1 for number of bins you want. Example: quartiles would be a1 = 4. Deciles would be a1 = 10\na1 = 4\n\n#function in pandas to put data in to equal bins\ndec['quantile'] = pd.qcut(dec['pos_prob'],a1,labels=False)\n\n#This will give us the ranges of the bins later\ndec['quantile_values'] = pd.qcut(dec['pos_prob'],a1)\n\n#Only take the positive dependent variable for this exercise\nquantiles = dec[dec['target'] == 1]\n\n#subset the Dataframe to only capture the variables that are needed\nquantiles = quantiles[['quantile','target','quantile_values']]\n\n#reset index \ndec1 = pd.DataFrame(quantiles.groupby(['quantile','target','quantile_values']).size().reset_index())\n\n#rename columns for new dataframe\ndec1.columns = ['quantile','target','quantile_values','freq']\n\n#calculate accuracy of each quantile\ndec1['accuracy'] = (dec1['freq'] / (len(dec)/a1))\n\n# The target column is no longer needed, so throw it out to keep results clean\nactuals = dec1['target']\ndel[[dec1['target']]]\n\n\n# In[24]:\n\n#sort probabilities in descending order to get a visualization of your results, then create a line plot. \n# This results in a very linear orientation, which is expected with random data.\nscore_values = pd.DataFrame(dec['pos_prob'].sort_values(ascending = False))\nx_ax = pd.Series(list(range(len(dec))))\nscore_values.plot(x = x_ax,color = 'red', xlim = [(len(dec)/(a1*-1)),len(dec)],yticks = np.arange(0,1,.1)\n ,xticks = np.arange(0,len(dec),len(dec)/a1)\n ,figsize = (9,7))\n\n\n# In[25]:\n\n#Create a table showing quantile values, frequency of hits, and accuracy of each quantile. A good model will have higher accuracy\n# in the higher bins with scores and lower accuracies in the lower bins:\n#Example:\n# quantile accuracy\n# 3 .75 \n# 2 .50\n# 1 .30\n# 0 .10\n\n#The results of this model indicate that the model is completely random, which in this case is good because I randomly genderated\n# this data to use as an example! \ndec1.sort_values(by = 'quantile',ascending = False)\n\n\n# ## Just for fun, let's also make a Confusion Matrix to compare.\n\n# In[26]:\n\n#First, grab the actuals and the propensity scores of the data.\ncmdf = pd.concat([actual,propensity_scores], axis = 1)\ncmdf.columns = ['actuals','propensity_score']\n\n\n# In[27]:\n\n# Create a little loop that says if our probability score is greater the .5, then predict yes (1), if not then predict no (0)\ndef predict_outcome(c):\n # a is the name of the output (or propensity score) of the model\n a = 'propensity_score'\n if c[a] >= .5:\n return 1\n else:\n return 0\n\n\n# In[28]:\n\n#run the loop to create our predicted_outcome variable\ncmdf['predicted_outcome'] = cmdf.apply(predict_outcome, axis=1)\n\n\n# In[29]:\n\ncmdf.head()\n\n\n# In[30]:\n\n# Display confusion matrix. This is also indicating That our 'model' is completely random. \ncm = confusion_matrix(cmdf['actuals'], cmdf['predicted_outcome'])\nplt.matshow(cm)\nplt.title('Confusion matrix')\nplt.colorbar()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n\ncm_mat = np.array(cm)\nprint(cm_mat)\n\n\n# In[31]:\n\n#Indicates total run time of notebook\nprint('Total Runtime =',time.time()-start_time,'seconds')\n\n","repo_name":"folza1992/quantiles","sub_path":"Evaluating+Models+in+Quantiles.py","file_name":"Evaluating+Models+in+Quantiles.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"821491437","text":"#!/usr/bin/env python\n\"\"\"This script gives insight on Titanic survivors data.\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nmpl.style.use('ggplot')\nplt.rcParams['figure.figsize'] = (16, 8)\nmpl.rcParams['figure.dpi'] = 300\n\n# \"Tableau 20\" colors as RGB.\ntableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n\n# Scale RGB values to [0, 1] range.\nfor i in range(len(tableau20)):\n r, g, b = tableau20[i]\n tableau20[i] = (r / 255., g / 255., b / 255.)\n\n# read data\ndf_titanic = sns.load_dataset('titanic')\n\n# some insight on data\n# survivalByGender\ngrp = df_titanic.groupby('sex')[['survived']].mean()*100\nax = grp.plot.bar(color=tableau20[0], legend=None)\nax.set_axis_bgcolor('white')\nplt.xticks(np.arange(0, 5), ['Female', 'Male'], rotation=0, ha ='center', fontsize=14)\nax.set_xlabel(\"\")\nplt.yticks([])\nplt.title(\"Survival rate by gender\\n\", fontsize=22, loc='left')\nrects = ax.patches\nlabels = [np.round(value, decimals=1) for value in np.concatenate(grp.values)]\nfor rect, label in zip(rects, labels):\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2, height-5., label, ha='center',\n va='bottom', color='white', fontsize=16)\nax.axis('tight')\nplt.savefig(\"survivalByGender.png\",\n bbox_inches='tight',\n dpi=300,\n format='png')\nplt.savefig(\"survivalByGender.pdf\",\n bbox_inches='tight',\n dpi=300,\n format='pdf')\n\n# survivalByGenderClass\ngrp = df_titanic.pivot_table('survived', index='sex', columns='class')*100\nax = grp.plot.bar(color=tableau20, legend=False, align='center')\n#ax.legend(loc=1)\nax.set_axis_bgcolor('white')\nplt.xticks(np.arange(0, 5), ['Female', 'Male'], rotation=0, ha ='center', fontsize=14)\nax.set_xlabel(\"\")\nplt.yticks([])\nplt.title(\"Survival rate by gender and class\\n\", fontsize=22, loc='left')\nrects = ax.patches\nlabels = [np.round(value, decimals=1) for value in np.concatenate(grp.T.values)]\nlegends = ['First class', 'First class',\n 'Second class', 'Second class',\n 'Third class', 'Third class']\nfor rect, label, legend in zip(rects, labels, legends):\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2, height-5., label, ha='center',\n va='bottom', color='white', fontsize=16)\n if float(label) >= 30:\n ax.text(rect.get_x() + rect.get_width()/2, height-7.5, legend,\n ha='center', va='top', color='white', fontsize=16, rotation=90)\n else:\n ax.text(rect.get_x() + rect.get_width()/2, height+7.5, legend,\n ha='center', va='bottom', color='black', fontsize=16, rotation=90)\nax.axis('tight')\nplt.savefig(\"survivalByGenderClass.png\",\n bbox_inches='tight',\n dpi=300,\n format='png')\nplt.savefig(\"survivalByGenderClass.pdf\",\n bbox_inches='tight',\n dpi=300,\n format='pdf')\n\n# survivalByGenderClassAgeSlice\nage = pd.cut(df_titanic['age'], [0, 18, 80])\ngrp = df_titanic.pivot_table('survived', ['sex', age], 'class').unstack()*100\nax = grp.plot.bar(align='center', color=tableau20, legend=False)\nax.set_axis_bgcolor('white')\nplt.xticks(np.arange(0, 5), ['Female', 'Male'], rotation=0, ha ='center', fontsize=14)\nax.set_xlabel(\"\")\nplt.yticks([])\nplt.title(\"Survival rate by gender and class, and age range\\n\", fontsize=22, loc='left')\nrects = ax.patches\nlabels = [np.round(value, decimals=1) for value in np.concatenate(grp.T.values)]\nlegends = ['First class, minors', 'First class, minors',\n 'First class, adults', 'First class, adults',\n 'Second class, minors', 'Second class, minors',\n 'Second class, adults', 'Second class, adults',\n 'Third class, minors', 'Third class, minors',\n 'Third class, adults', 'Third class, adults']\nfor rect, label, legend in zip(rects, labels, legends):\n height = rect.get_height()\n if float(label) >= 40:\n ax.text(rect.get_x() + rect.get_width()/2, height-5., label, ha='center',\n va='bottom', color='white', fontsize=16)\n ax.text(rect.get_x() + rect.get_width()/2, height-7.5, legend,\n ha='center', va='top', color='white', fontsize=16, rotation=90)\n else:\n ax.text(rect.get_x() + rect.get_width()/2,\n height-5.,\n label,\n ha='center',\n va='bottom',\n color='white',\n fontsize=16)\n ax.text(rect.get_x() + rect.get_width()/2,\n height+7.5,\n legend,\n ha='center',\n va='bottom',\n color='black',\n fontsize=16,\n rotation=90)\nax.axis('tight')\nplt.savefig(\"survivalByGenderClassAgeSlice.png\",\n bbox_inches='tight',\n dpi=300,\n format='png')\nplt.savefig(\"survivalByGenderClassAgeSlice.pdf\",\n bbox_inches='tight',\n dpi=300,\n format='pdf')\n\n\n# fare = pd.qcut(df_titanic['fare'], 2)\n# df_titanic.pivot_table('survived', ['sex', age], [fare, 'class'])\n# df_titanic.pivot_table(index='sex', columns='class',\n# aggfunc={'survived': sum, 'fare': 'mean'})\ndf_titanic.pivot_table('survived', index='sex', columns='class', margins=True)\n\n# ageByGenderClassSurvival\ngrp = df_titanic.pivot_table('age', ['sex', 'survived'], 'class')\nax = grp.plot.bar(color=tableau20, legend=False, align='center')\n#plt.legend(loc=2)\nax.set_axis_bgcolor('white')\nplt.xticks(np.arange(0, 5), ['Female victims',\n 'Female survivors',\n 'Male victims',\n 'Male survivors'],\n rotation=0,\n ha='center',\n fontsize=14)\nax.set_xlabel(\"\")\nplt.yticks([])\nplt.title(\"Average age of survivors and victims by gender and class\\n\",\n fontsize=22,\n loc='left')\nrects = ax.patches\nlabels = [int(np.round(value, decimals=0)) for value in np.concatenate(grp.T.values)]\nlegends = ['First class', 'First class', 'First class', 'First class',\n 'Second class', 'Second class', 'Second class', 'Second class',\n 'Third class', 'Third class', 'Third class', 'Third class']\nfor rect, label, legend in zip(rects, labels, legends):\n height = rect.get_height()\n if float(label) >= 19:\n ax.text(rect.get_x() + rect.get_width()/2, height-2., label, ha='center',\n va='bottom', color='white', fontsize=16)\n ax.text(rect.get_x() + rect.get_width()/2, height-3, legend,\n ha='center', va='top', color='white', fontsize=16, rotation=90)\n else:\n ax.text(rect.get_x() + rect.get_width()/2, height-2., label, ha='center',\n va='bottom', color='white', fontsize=16)\n ax.text(rect.get_x() + rect.get_width()/2, height+3, legend,\n ha='center', va='bottom', color='black', fontsize=16, rotation=90)\nax.axis('tight')\nplt.savefig(\"ageByGenderClassSurvival.png\",\n bbox_inches='tight',\n dpi=300,\n format='png')\nplt.savefig(\"ageByGenderClassSurvival.pdf\",\n bbox_inches='tight',\n dpi=300,\n format='pdf')\n","repo_name":"guillaumedavidphd/titanic-data-science","sub_path":"titanic_insight.py","file_name":"titanic_insight.py","file_ext":"py","file_size_in_byte":7570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17772397404","text":"import json\nimport logging\n\nfrom polyswarmclient.filters.filter import Filter, MetadataFilter\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfidenceModifier(MetadataFilter):\n def __init__(self, favor, penalize):\n \"\"\" Create a new BountyFilter object with an array of Filters and RejectFilters\n Args:\n favor (None|list[Filter]): List of Filters for accepted bounties\n penalize (None|list[Filter]): List of Filters for rejected bounties\n \"\"\"\n if favor is None:\n self.favor = []\n else:\n self.favor = favor\n\n if penalize is None:\n self.penalize = []\n else:\n self.penalize = penalize\n\n def modify(self, metadata, confidence):\n \"\"\"Check metadata against the penalty and favor filters.\n Matching both bonus and penalty results offset\n\n Args:\n metadata (any): metadata dict to test\n confidence (float): confidence as returned by the Av engine\n\n Returns:\n (float): confidence that is either more, same or less after comparing against bonus/penalty Filters\n \"\"\"\n if not self.favor and not self.penalize:\n return confidence\n\n favored = any([f.filter(metadata) for f in self.favor])\n\n penalized = any([f.filter(metadata) for f in self.penalize])\n\n if favored and not penalized:\n logger.debug('Increasing confidence for favored value %s', json.dumps(metadata),\n extra={'extra': self.favor})\n return confidence * 1.2\n elif penalized and not favored:\n logger.debug('Decreasing confidence for penalized value %s', json.dumps(metadata),\n extra={'extra': self.penalize})\n return confidence * .8\n else:\n return confidence\n","repo_name":"supernothing/polyswarm-client","sub_path":"src/polyswarmclient/filters/confidencefilter.py","file_name":"confidencefilter.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"25947014108","text":"#import tensorflow as tf\n\n# placeholder for inputs\nx = tf.placeholder(tf.float32)\n# linear_model is just a tensor but also represents the model used for learning.\nW = tf.Variable(0.3, dtype=tf.float32)\nb = tf.Variable(-0.3, dtype=tf.float32)\nlinear_model = W*x + b\n\n# Initialize the variables defined earlier\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# linear_model can be evaluated for several values of x by feeding the dictionary of values.\n# print(sess.run(linear_model, {x: [1.0, 2.0, 3.0, 4.0]}))\n\n# The model has been created but we still need to check if it is any good. For this we need to define a loss function.\n\n# Placeholder for training set outputs\ny = tf.placeholder(tf.float32)\n\n# Let's use the standard sum of squares error model since this is linear regression.\nsquared_deltas = tf.square(linear_model - y)\nloss = tf.reduce_sum(squared_deltas)\n\n# Check to see if the perfect values for W and b produce a loss value of 0.\n# Just assign the values to the variables\n# fixW = tf.assign(W, [-1])\n# fixb = tf.assign(b, [1])\n# sess.run([fixb, fixW])\nprint(\"loss value(before training model): \", sess.run(loss, {x: [1.0, 2.0, 3.0, 4.0], y: [0, -1, -2, -3]}))\nprint(\"initial model parameters: \", sess.run([W, b]))\n\n# Machine learning is not fun at all if you have to guess the model parameters, these\n# must be found automatically.\n# A gradient descent optimizer is used to train the model\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\nfor i in range(1000):\n sess.run(train, {x:[1, 2, 3, 4], y:[0, -1, -2, -3]})\n\nprint(\"loss value(after training model): \", sess.run(loss, {x: [1.0, 2.0, 3.0, 4.0], y: [0, -1, -2, -3]}))\nprint(\"trained model parameters: \", sess.run([W, b]))\n","repo_name":"vorzawk/gettingStarted_tensorflow","sub_path":"images_TFTutorials/getting_started.py","file_name":"getting_started.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37113338520","text":"import pygame\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((400, 300))\npygame.display.set_caption(\"Clicker Game\")\nwhite = (255, 255, 255)\nfont = pygame.font.Font(\"manaspc.ttf\", 20)\nfont.get_bold()\n\nclicks = 0\nupgradex = 0\ncps = 0\nupgrade1_price = 15\nupgrade2_price = 25\n\nlast_update = pygame.time.get_ticks()\n\nclick_interval = 1000\n\ncookie = pygame.image.load(\"cookie.png\")\ncookie_rect = cookie.get_rect()\ncookie_rect.center = (150, 150)\ncookie_scaled = pygame.transform.scale(cookie, (150, 150))\n\nshow_button = False\n\n\ndef loadstats():\n global clicks, upgradex, cps, upgrade1_price, upgrade2_price\n with open(\"save.txt\", \"r\") as r:\n lines = r.readlines()\n if not lines:\n return\n for line in lines:\n data = line.strip().split(\":\")\n if len(data) != 2:\n return\n if data[0] == \"Clicks\":\n clicks = int(data[1])\n elif data[0] == \"Upgradex\":\n upgradex = int(data[1])\n elif data[0] == \"CPS\":\n cps = int(data[1])\n elif data[0] == \"Upgrade 1 Price\":\n upgrade1_price = int(data[1])\n elif data[0] == \"Upgrade 2 Price\":\n upgrade2_price = int(data[1])\n else:\n print(\"Data Loaded Successfully.\")\n\n\ndef savestats():\n with open(\"save.txt\", \"w\") as f:\n f.write(f\"Clicks:{clicks}\\n\")\n f.write(f\"Upgradex:{upgradex}\\n\")\n f.write(f\"CPS:{cps}\\n\")\n f.write(f\"Upgrade 1 Price:{upgrade1_price}\\n\")\n f.write(f\"Upgrade 2 Price:{upgrade2_price}\\n\")\n print(\"Data Saved Successfully.\")\n\n\ndef texts():\r\n global up_text, up, up_text2, up_text2_rect, up_text_rect, upgrade_text, upgrade_text_rect, upgrade_text2, upgrade_text2_rect, show_text, show_text_rect, show_text\n font1 = pygame.font.Font(\"manaspc.ttf\", 10)\n up_text = font1.render(f\"{upgrade1_price} CLICKS\", True, (0, 50, 250))\n up_text_rect = up_text.get_rect()\n up_text_rect.center = (15 + 35, 265 + -5)\n upgrade_text = font.render(\"+1 click\", True, (255, 0, 0))\n upgrade_text_rect = upgrade_text.get_rect()\n upgrade_text_rect.center = (15 + 35, 265 + 25)\n\r\n if show_button == False:\n show_text = font.render(\"SHOW\", True, (0, 0, 0))\n show_text_rect = show_text.get_rect()\n show_text_rect.center = (35 + 2, 15 + 10)\n else:\n show_text = font.render(\"HIDE\", True, (0, 0, 0))\n\r\n up_text2 = font1.render(f\"{upgrade2_price} CLICKS\", True, (0, 50, 250))\n up_text2_rect = up_text.get_rect()\n up_text2_rect.center = (100 + 35, 265 + -5)\n upgrade_text2 = font.render(\"1 cps\", True, (255, 0, 0))\n upgrade_text2_rect = upgrade_text2.get_rect()\n upgrade_text2_rect.center = (100 + 35, 265 + 25)\n\n\ndef upgrades():\n global upgrade1, upgrade2, show\n show = pygame.draw.rect(screen, (0, 200, 250), (2, 2, 70, 40))\n\n if show_button == True:\n if clicks >= upgrade1_price:\n upgrade1 = pygame.draw.rect(screen, (0, 200, 0), (15, 265, 70, 50))\n else:\n upgrade1 = pygame.draw.rect(screen, (200, 0, 0), (15, 265, 70, 50))\n upgrade1_text = font.render(f\"Upgrade 1 ({upgrade1_price})\", True,\n (0, 0, 0))\n upgrade1_text_rect = upgrade1_text.get_rect()\n upgrade1_text_rect.center = (35, 70) # changed x-coordinate\n\n if clicks >= upgrade2_price:\n upgrade2 = pygame.draw.rect(screen, (0, 200, 0),\n (100, 265, 70, 50))\n else:\n upgrade2 = pygame.draw.rect(screen, (200, 0, 0),\n (100, 265, 70, 50))\n upgrade2_text = font.render(f\"Upgrade 2 ({upgrade2_price})\", True,\n (0, 0, 0))\n upgrade2_text_rect = upgrade2_text.get_rect()\n upgrade2_text_rect.center = (35, 150) # changed x-coordinate\n\n\nrunning = True\nloadstats()\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n savestats()\n\n running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n mouse_pos = pygame.mouse.get_pos()\n if show.collidepoint(mouse_pos) and show_button == False:\n show_button = True\n elif show.collidepoint(mouse_pos) and show_button == True:\n show_button = False\n elif cookie_rect.collidepoint(mouse_pos):\n cookie_scaled = pygame.transform.scale(cookie, (150, 150))\n clicks += 1 + upgradex\n if show_button == True:\n\n upgrades()\n if upgrade1.collidepoint(mouse_pos):\n if clicks >= upgrade1_price:\n clicks -= upgrade1_price\n upgradex += 1\n upgrade1_price = upgrade1_price * 1.25\n upgrade1_price = int(round(upgrade1_price))\n print(f\"New price : {upgrade1_price}\")\n else:\n pass\n elif upgrade2.collidepoint(mouse_pos):\n if clicks >= upgrade2_price:\n clicks -= upgrade2_price\n cps += 1\n upgrade2_price = upgrade2_price * 1.45\n upgrade2_price = int(round(upgrade2_price))\n print(f\"New price : {upgrade2_price}\")\n else:\n pass\n else:\n pass\n current_time = pygame.time.get_ticks()\n if current_time - last_update >= click_interval:\n clicks += cps\n last_update = current_time\n\n screen.fill(white)\n\n def text():\n texts()\n global text_rect, button_text_rect, text, button_text\n text = font.render(f\"Clicks: {clicks}\", True, (0, 0, 0))\n text_rect = text.get_rect()\n text_rect.center = (200, 20)\n button_text = font.render(\"UPGRADES\", True, (0, 0, 0))\n button_text_rect = button_text.get_rect()\n button_text_rect.center = (210, 240) # changed x-coordinate\n\r\n screen.blit(cookie_scaled, (130, 50))\r\n screen.blit(text, text_rect)\r\n screen.blit(show_text, show_text_rect)\r\n\r\n if show_button == True:\n screen.blit(up_text2, up_text2_rect)\r\n screen.blit(up_text, up_text_rect)\n screen.blit(upgrade_text, upgrade_text_rect)\n screen.blit(button_text, button_text_rect)\n screen.blit(upgrade_text2, upgrade_text2_rect)\r\n else:\r\n pass\r\n\r\n text()\r\n upgrades()\r\n pygame.display.update()\r\n\r\npygame.quit()\r\n","repo_name":"Sneezedip/cookie-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4763872045","text":"import sigrokdecode as srd\n\nWORD_ADDR_RESET = 0x00\nWORD_ADDR_SLEEP = 0x01\nWORD_ADDR_IDLE = 0x02\nWORD_ADDR_COMMAND = 0x03\n\nWORD_ADDR = {0x00: 'RESET', 0x01: 'SLEEP', 0x02: 'IDLE', 0x03: 'COMMAND'}\n\nOPCODE_COUNTER = 0x24\nOPCODE_DERIVE_KEY = 0x1c\nOPCODE_DEV_REV = 0x30\nOPCODE_ECDH = 0x43\nOPCODE_GEN_DIG = 0x15\nOPCODE_GEN_KEY = 0x40\nOPCODE_HMAC = 0x11\nOPCODE_CHECK_MAC = 0x28\nOPCODE_LOCK = 0x17\nOPCODE_MAC = 0x08\nOPCODE_NONCE = 0x16\nOPCODE_PAUSE = 0x01\nOPCODE_PRIVWRITE = 0x46\nOPCODE_RANDOM = 0x1b\nOPCODE_READ = 0x02\nOPCODE_SHA = 0x47\nOPCODE_SIGN = 0x41\nOPCODE_UPDATE_EXTRA = 0x20\nOPCODE_VERIFY = 0x45\nOPCODE_WRITE = 0x12\n\nOPCODES = {\n 0x01: 'Pause',\n 0x02: 'Read',\n 0x08: 'MAC',\n 0x11: 'HMAC',\n 0x12: 'Write',\n 0x15: 'GenDig',\n 0x16: 'Nonce',\n 0x17: 'Lock',\n 0x1b: 'Random',\n 0x1c: 'DeriveKey',\n 0x20: 'UpdateExtra',\n 0x24: 'Counter',\n 0x28: 'CheckMac',\n 0x30: 'DevRev',\n 0x40: 'GenKey',\n 0x41: 'Sign',\n 0x43: 'ECDH',\n 0x45: 'Verify',\n 0x46: 'PrivWrite',\n 0x47: 'SHA',\n}\n\nZONE_CONFIG = 0x00\nZONE_OTP = 0x01\nZONE_DATA = 0x02\n\nZONES = {0x00: 'CONFIG', 0x01: 'OTP', 0x02: 'DATA'}\n\nSTATUS_SUCCESS = 0x00\nSTATUS_CHECKMAC_FAIL = 0x01\nSTATUS_PARSE_ERROR = 0x03\nSTATUS_EXECUTION_ERROR = 0x0f\nSTATUS_READY = 0x11\nSTATUS_CRC_COMM_ERROR = 0xff\n\nSTATUS = {\n 0x00: 'Command success',\n 0x01: 'Checkmac failure',\n 0x03: 'Parse error',\n 0x0f: 'Execution error',\n 0x11: 'Ready',\n 0xff: 'CRC / communications error',\n}\n\nclass Decoder(srd.Decoder):\n api_version = 3\n id = 'atsha204a'\n name = 'ATSHA204A'\n longname = 'Microchip ATSHA204A'\n desc = 'Microchip ATSHA204A family crypto authentication protocol.'\n license = 'gplv2+'\n inputs = ['i2c']\n outputs = []\n tags = ['Security/crypto', 'IC', 'Memory']\n annotations = (\n ('waddr', 'Word address'),\n ('count', 'Count'),\n ('opcode', 'Opcode'),\n ('param1', 'Param1'),\n ('param2', 'Param2'),\n ('data', 'Data'),\n ('crc', 'CRC'),\n ('status', 'Status'),\n ('warning', 'Warning'),\n )\n annotation_rows = (\n ('frame', 'Frame', (0, 1, 2, 3, 4, 5, 6)),\n ('status', 'Status', (7,)),\n ('warnings', 'Warnings', (8,)),\n )\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.state = 'IDLE'\n self.waddr = self.opcode = -1\n self.ss_block = self.es_block = 0\n self.bytes = []\n\n def start(self):\n self.out_ann = self.register(srd.OUTPUT_ANN)\n\n def output_tx_bytes(self):\n b = self.bytes\n if len(b) < 1: # Ignore wakeup.\n return\n self.waddr = b[0][2]\n self.put_waddr(b[0])\n if self.waddr == WORD_ADDR_COMMAND:\n count = b[1][2]\n self.put_count(b[1])\n if len(b) - 1 != count:\n self.put_warning(b[0][0], b[-1][1],\n 'Invalid frame length: Got {}, expecting {} '.format(\n len(b) - 1, count))\n return\n self.opcode = b[2][2]\n self.put_opcode(b[2])\n self.put_param1(b[3])\n self.put_param2([b[4], b[5]])\n self.put_data(b[6:-2])\n self.put_crc([b[-2], b[-1]])\n\n def output_rx_bytes(self):\n b = self.bytes\n count = b[0][2]\n self.put_count(b[0])\n if self.waddr == WORD_ADDR_RESET:\n self.put_data([b[1]])\n self.put_crc([b[2], b[3]])\n self.put_status(b[0][0], b[-1][1], b[1][2])\n elif self.waddr == WORD_ADDR_COMMAND:\n if count == 4: # Status / Error.\n self.put_data([b[1]])\n self.put_crc([b[2], b[3]])\n self.put_status(b[0][0], b[-1][1], b[1][2])\n else:\n self.put_data(b[1:-2])\n self.put_crc([b[-2], b[-1]])\n\n def putx(self, s, data):\n self.put(s[0], s[1], self.out_ann, data)\n\n def puty(self, s, data):\n self.put(s[0][0], s[1][1], self.out_ann, data)\n\n def putz(self, ss, es, data):\n self.put(ss, es, self.out_ann, data)\n\n def put_waddr(self, s):\n self.putx(s, [0, ['Word addr: %s' % WORD_ADDR[s[2]]]])\n\n def put_count(self, s):\n self.putx(s, [1, ['Count: %s' % s[2]]])\n\n def put_opcode(self, s):\n self.putx(s, [2, ['Opcode: %s' % OPCODES[s[2]]]])\n\n def put_param1(self, s):\n op = self.opcode\n if op in (OPCODE_CHECK_MAC, OPCODE_COUNTER, OPCODE_DEV_REV, \\\n OPCODE_ECDH, OPCODE_GEN_KEY, OPCODE_HMAC, OPCODE_MAC, \\\n OPCODE_NONCE, OPCODE_RANDOM, OPCODE_SHA, OPCODE_SIGN, \\\n OPCODE_VERIFY):\n self.putx(s, [3, ['Mode: %02X' % s[2]]])\n elif op == OPCODE_DERIVE_KEY:\n self.putx(s, [3, ['Random: %s' % s[2]]])\n elif op == OPCODE_PRIVWRITE:\n self.putx(s, [3, ['Encrypted: {}'.format('Yes' if s[2] & 0x40 else 'No')]])\n elif op == OPCODE_GEN_DIG:\n self.putx(s, [3, ['Zone: %s' % ZONES[s[2]]]])\n elif op == OPCODE_LOCK:\n self.putx(s, [3, ['Zone: {}, Summary: {}'.format(\n 'DATA/OTP' if s[2] else 'CONFIG',\n 'Ignored' if s[2] & 0x80 else 'Used')]])\n elif op == OPCODE_PAUSE:\n self.putx(s, [3, ['Selector: %02X' % s[2]]])\n elif op == OPCODE_READ:\n self.putx(s, [3, ['Zone: {}, Length: {}'.format(ZONES[s[2] & 0x03],\n '32 bytes' if s[2] & 0x90 else '4 bytes')]])\n elif op == OPCODE_WRITE:\n self.putx(s, [3, ['Zone: {}, Encrypted: {}, Length: {}'.format(ZONES[s[2] & 0x03],\n 'Yes' if s[2] & 0x40 else 'No', '32 bytes' if s[2] & 0x90 else '4 bytes')]])\n else:\n self.putx(s, [3, ['Param1: %02X' % s[2]]])\n\n def put_param2(self, s):\n op = self.opcode\n if op == OPCODE_DERIVE_KEY:\n self.puty(s, [4, ['TargetKey: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op in (OPCODE_COUNTER, OPCODE_ECDH, OPCODE_GEN_KEY, OPCODE_PRIVWRITE, \\\n OPCODE_SIGN, OPCODE_VERIFY):\n self.puty(s, [4, ['KeyID: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op in (OPCODE_NONCE, OPCODE_PAUSE, OPCODE_RANDOM):\n self.puty(s, [4, ['Zero: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op in (OPCODE_HMAC, OPCODE_MAC, OPCODE_CHECK_MAC, OPCODE_GEN_DIG):\n self.puty(s, [4, ['SlotID: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op == OPCODE_LOCK:\n self.puty(s, [4, ['Summary: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op in (OPCODE_READ, OPCODE_WRITE):\n self.puty(s, [4, ['Address: {:02x} {:02x}'.format(s[1][2], s[0][2])]])\n elif op == OPCODE_UPDATE_EXTRA:\n self.puty(s, [4, ['NewValue: {:02x}'.format(s[0][2])]])\n else:\n self.puty(s, [4, ['-']])\n\n def put_data(self, s):\n if len(s) == 0:\n return\n op = self.opcode\n if op == OPCODE_CHECK_MAC:\n self.putz(s[0][0], s[31][1], [5, ['ClientChal: %s' % ' '.join(format(i[2], '02x') for i in s[0:32])]])\n self.putz(s[32][0], s[63][1], [5, ['ClientResp: %s' % ' '.join(format(i[2], '02x') for i in s[32:64])]])\n self.putz(s[64][0], s[76][1], [5, ['OtherData: %s' % ' '.join(format(i[2], '02x') for i in s[64:77])]])\n elif op == OPCODE_DERIVE_KEY:\n self.putz(s[0][0], s[31][1], [5, ['MAC: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n elif op == OPCODE_ECDH:\n self.putz(s[0][0], s[31][1], [5, ['Pub X: %s' % ' '.join(format(i[2], '02x') for i in s[0:32])]])\n self.putz(s[32][0], s[63][1], [5, ['Pub Y: %s' % ' '.join(format(i[2], '02x') for i in s[32:64])]])\n elif op in (OPCODE_GEN_DIG, OPCODE_GEN_KEY):\n self.putz(s[0][0], s[3][1], [5, ['OtherData: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n elif op == OPCODE_MAC:\n self.putz(s[0][0], s[31][1], [5, ['Challenge: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n elif op == OPCODE_PRIVWRITE:\n if len(s) > 36: # Key + MAC.\n self.putz(s[0][0], s[-35][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n self.putz(s[-32][0], s[-1][1], [5, ['MAC: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n else: # Just value.\n self.putz(s[0][0], s[-1][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n elif op == OPCODE_VERIFY:\n if len(s) >= 64: # ECDSA components (always present)\n self.putz(s[0][0], s[31][1], [5, ['ECDSA R: %s' % ' '.join(format(i[2], '02x') for i in s[0:32])]])\n self.putz(s[32][0], s[63][1], [5, ['ECDSA S: %s' % ' '.join(format(i[2], '02x') for i in s[32:64])]])\n if len(s) == 83: # OtherData (follow ECDSA components in validate / invalidate mode)\n self.putz(s[64][0], s[82][1], [5, ['OtherData: %s' % ' '.join(format(i[2], '02x') for i in s[64:83])]])\n if len(s) == 128: # Public key components (follow ECDSA components in external mode)\n self.putz(s[64][0], s[95][1], [5, ['Pub X: %s' % ' '.join(format(i[2], '02x') for i in s[64:96])]])\n self.putz(s[96][0], s[127][1], [5, ['Pub Y: %s' % ' '.join(format(i[2], '02x') for i in s[96:128])]])\n elif op == OPCODE_WRITE:\n if len(s) > 32: # Value + MAC.\n self.putz(s[0][0], s[-31][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n self.putz(s[-32][0], s[-1][1], [5, ['MAC: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n else: # Just value.\n self.putz(s[0][0], s[-1][1], [5, ['Value: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n else:\n self.putz(s[0][0], s[-1][1], [5, ['Data: %s' % ' '.join(format(i[2], '02x') for i in s)]])\n\n def put_crc(self, s):\n self.puty(s, [6, ['CRC: {:02X} {:02X}'.format(s[0][2], s[1][2])]])\n\n def put_status(self, ss, es, status):\n self.putz(ss, es, [7, ['Status: %s' % STATUS[status]]])\n\n def put_warning(self, ss, es, msg):\n self.putz(ss, es, [8, ['Warning: %s' % msg]])\n\n def decode(self, ss, es, data):\n cmd, databyte = data\n # State machine.\n if self.state == 'IDLE':\n # Wait for an I²C START condition.\n if cmd != 'START':\n return\n self.state = 'GET SLAVE ADDR'\n self.ss_block = ss\n elif self.state == 'GET SLAVE ADDR':\n # Wait for an address read/write operation.\n if cmd == 'ADDRESS READ':\n self.state = 'READ REGS'\n elif cmd == 'ADDRESS WRITE':\n self.state = 'WRITE REGS'\n elif self.state == 'READ REGS':\n if cmd == 'DATA READ':\n self.bytes.append([ss, es, databyte])\n elif cmd == 'STOP':\n self.es_block = es\n # Reset the opcode before received data, as this causes\n # responses to be displayed incorrectly.\n self.opcode = -1\n if len(self.bytes) > 0:\n self.output_rx_bytes()\n self.waddr = -1\n self.bytes = []\n self.state = 'IDLE'\n elif self.state == 'WRITE REGS':\n if cmd == 'DATA WRITE':\n self.bytes.append([ss, es, databyte])\n elif cmd == 'STOP':\n self.es_block = es\n self.output_tx_bytes()\n self.bytes = []\n self.state = 'IDLE'\n","repo_name":"DreamSourceLab/DSView","sub_path":"libsigrokdecode4DSL/decoders/atsha204a/pd.py","file_name":"pd.py","file_ext":"py","file_size_in_byte":11926,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"22"} +{"seq_id":"32390438267","text":"def average_rating(text):\n file = open('C:\\\\Users\\\\User\\\\Desktop\\\\test.txt', 'w')\n lst = []\n lst2 = []\n lst3 = []\n d = {}\n text = text.read().strip().splitlines()\n for i in range(len(text)):\n lst2.append(text[i].split(';'))\n for i in lst2:\n lst3.append(i[0])\n x = (int(i[1]) + int(i[2]) + int(i[3])) / 3\n lst3.append(x)\n d[lst3[0]] = lst3[1]\n lst3.clear()\n for i in d:\n file.write(str(d[i]) + '\\n')\n\n\ndef average_1(text):\n file = open('C:\\\\Users\\\\User\\\\Desktop\\\\test.txt', 'a')\n lst = []\n lst2 = []\n lst3 = []\n lst4 = []\n text = text.read().strip().splitlines()\n for i in range(len(text)):\n lst.append(text[i].split(';'))\n for i in lst:\n lst2.append(int(i[1]))\n lst3.append(int(i[2]))\n lst4.append(int(i[3]))\n x = len(lst2)\n x1 = len(lst3)\n x2 = len(lst4)\n a = sum(lst2) / x\n b = sum(lst3) / x1\n c = sum(lst4) / x2\n final_list = []\n final_list.append(a)\n final_list.append(b)\n final_list.append(c)\n for i in final_list:\n file.write(str(i) + ' ')\n\n\n\ntext = open('C:\\\\Users\\\\User\\\\Desktop\\\\dataset_3363_4 (1).txt', 'r')\naverage_rating(text) \ntext.close()\ntext = open('C:\\\\Users\\\\User\\\\Desktop\\\\dataset_3363_4 (1).txt', 'r')\naverage_1(text)\n","repo_name":"Den4ik-Bro/projects","sub_path":"job_for_file.py","file_name":"job_for_file.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2454907772","text":"import altair as alt\nfrom data_load_functions import *\n# import folium\nimport geopandas as gpd\n# from folium.plugins import MarkerCluster\nimport numpy as np\nfrom ipyleaflet import Polygon, Map, basemaps, basemap_to_tiles, GeoJSON, MarkerCluster, LayersControl, FullScreenControl\nfrom ipywidgets import RadioButtons\nimport json\nfrom viz_funcs import *\nimport pickle\n\n\nif __name__ == '__main__':\n file_path = \"joined_dfs.pkl\"\n with open(file_path, \"rb\") as f:\n final_dataframe = pickle.load(f)\n check_cols = list(final_dataframe.columns[-9:])\n map_dict = get_prepped_dfs()\n structures_map = map_dict['structures']\n fields_map = map_dict['fields']\n blocks_map = map_dict['blocks']\n subareas_map = map_dict['sub_areas']\n discoveries_map = map_dict['discoveries']\n facilities_map = map_dict['facilities']\n m = Map(\n layers=(basemap_to_tiles(basemaps.Esri.WorldTopoMap),),\n center=(60.5, 5),\n zoom=4,\n figsize=(10, 15)\n )\n structure_layer = create_layer(structures_map, 'structures',\n label_col='steNameEN', secondary_label_col='document',\n layer_type='polygon', filter_on='document', inverse=True, color='lightGray')\n structure_layer_docs = create_layer(structures_map, 'structures_docs',\n label_col='steNameEN', secondary_label_col='document',\n layer_type='polygon', filter_on='document', color='orange')\n fields_layer = create_layer(fields_map, 'fields',\n label_col='FIELDNAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', inverse=True, color='lightGray')\n fields_layer_docs = create_layer(fields_map, 'fields_docs',\n label_col='FIELDNAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', color='red')\n subareas_layer = create_layer(subareas_map, 'subareas',\n label_col='NAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', inverse=True, color='lightGray')\n subareas_layer_docs = create_layer(subareas_map, 'subareas_docs',\n label_col='NAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', color='blue')\n discoveries_layer = create_layer(discoveries_map, 'discoveries',\n label_col='DISCNAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', inverse=True, color='lightGray')\n discoveries_layer_docs = create_layer(discoveries_map, 'discoveries_docs',\n label_col='DISCNAME', secondary_label_col='document',\n layer_type='polygon', filter_on='document', color='green')\n facilities_layer = create_layer(facilities_map, 'facilities',\n label_col='FACNAME', secondary_label_col='document',\n layer_type='marker', filter_on='document', inverse=True, color='lightGray')\n facilities_layer_docs = create_layer(facilities_map, 'facilities_docs',\n label_col='FACNAME', secondary_label_col='document',\n layer_type='marker', filter_on='document', color='black')\n\n wells = map_dict['wells']\n well_layer = create_layer(wells, 'wells with docs',\n label_col='wlbWell', secondary_label_col='document',\n layer_type='marker', filter_on='document', color='red')\n well_layer_no_docs = create_layer(wells, 'wells with docs',\n label_col='wlbWell', secondary_label_col='document',\n layer_type='marker', filter_on='document', inverse=True, color='lightGray')\n\n marker_cluster = MarkerCluster(markers=well_layer.layers, name='Wells with Docs')\n marker_cluster2 = MarkerCluster(markers=well_layer_no_docs.layers, name='Wells without Docs')\n # marker_cluster.add_layer(well_layer)\n m.add_layer(structure_layer_docs)\n m.add_layer(structure_layer)\n m.add_layer(fields_layer_docs)\n m.add_layer(fields_layer)\n m.add_layer(subareas_layer)\n m.add_layer(subareas_layer_docs)\n m.add_layer(facilities_layer_docs)\n m.add_layer(facilities_layer)\n m.add_layer(discoveries_layer_docs)\n m.add_layer(discoveries_layer)\n m.add_layer(marker_cluster)\n m.add_layer(marker_cluster2)\n m.add_control(LayersControl())\n m.add_control(FullScreenControl())\n comments = []\n\n for i in check_cols:\n comments.append(create_layer(final_dataframe[final_dataframe[i] != \"Empty\"],\n i, label_col='wlbWellbor', secondary_label_col=i, layer_type='marker',\n color='green'))\n\n m.add_layer(comments[-1])\n m","repo_name":"nathangeology/FORCE_Geolocation_Docs","sub_path":"debug_and_testing/ipyleaflet_test.py","file_name":"ipyleaflet_test.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"74883073656","text":"\"\"\"Code for HW2 Problem 4: Neural Networks on MNIST.\n\nWe will use this notation:\n - B: size of batch\n - C: number of classes, i.e. NUM_CLASSES\n - D: size of inputs, i.e. INPUT_DIM\n - N: number of training examples\n - N_dev: number of dev examples\n\"\"\"\nimport argparse\nimport copy\n# import sys\nimport time\n# from tqdm import tqdm\n\n# import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, Dataset\nfrom PIL import Image\n\nOPTS = None\n\nIMAGE_SHAPE = (218, 336) # Size of MNIST images\nNUM_CLASSES = 10 # Number of classes we are classifying over\n#Note that NUMCOLS_PER_SECOND * HIDDEN_INPUT_DIM = 336\nNUMCOLS_PER_SECOND = 3\nHIDDEN_DIM = 112\n\n#batch size = 32\n\nclass MusicDabber(nn.Module):\n def __init__(self):\n super(MusicDabber, self).__init__()\n self.RGBTransform = nn.Linear(3, 1)\n # self.RGBTransform = torch.nn.Parameter(torch.rand(3))\n # self.RGBTransformBias = torch.nn.Parameter(torch.rand(1))\n self.linear = nn.Linear(HIDDEN_DIM, NUM_CLASSES)\n \n #Input x is of dimension (200, 288, 432, 4)\n def forward(self, x):\n\n \n # print(f\"x's shape is : {x.shape}\")\n\n # print(torch.sum(x < 0))\n\n #Debug code\n # print(f\"before transformation = {torch.sum(x[0] != x[1])}\")\n # print(f\"random number is {self.RGBTransform} and bias is {self.RGBTransformBias}\")\n\n #End Debug code\n\n # print(f\"float conversion = {torch.sum(x.float()[0] != x.float()[1])}\")\n\n #Brightness is of dimension (200, 288, 432)\n brightness = self.RGBTransform(x)\n # print(brightness.shape)\n\n # print(torch.sum(brightness < 0))\n\n # print(f\"Before relu but after linear transformation = {torch.sum(brightness[0] != brightness[1])}\")\n\n #Brightness is now of dimension (200, 288, 432)\n\n # brightness = F.normalize(brightness, p = 1.0, dim = 0)\n # print(brightness.shape)\n # print(torch.mean(brightness[0]))\n # print(torch.mean(brightness[15]))\n # print(torch.sum(brightness < 0))\n\n brightness = F.tanh(brightness)\n\n # test = brightness.reshape(brightness.shape[0], brightness.shape[1], HIDDEN_DIM, NUMCOLS_PER_SECOND)\n # # print(f\"after reshape = {brightness.shape}\")\n # test = test.sum(dim = -1)\n # test = test.sum(dim = -2)\n # #The following is debug stuff\n # prist(brightness.shape)\n # for example in range(1):\n # for col in range(0, 24):\n # tempSum = 0\n # for row in range(0, 288):\n # for j in range(18):\n # tempSum += brightness[example][row][col * 18 + j]\n # print(f\"True sum for col {col} in row {example} is {tempSum}\")\n # print(f\"tensor operations got sum {test[example][col]}\")\n \n # firstExample = brightness[0]\n # tempSum = 0\n # for i in range(288):\n # for j in range(18):\n # tempSum += firstExample[i][j]\n\n # print(f\"After brightness + relu = {torch.sum(brightness[0] != brightness[1])}\")\n\n #Brightness is of dimension (200, 288, 24, 18)\n brightness = brightness.reshape(brightness.shape[0], brightness.shape[1], HIDDEN_DIM, NUMCOLS_PER_SECOND)\n\n # print(torch.sum(brightness < 0))\n \n # print(f\"After reshaping = {torch.sum(x[0] != x[1])}\")\n \n # print(f\"after reshape = {brightness.shape}\")\n brightness = brightness.mean(dim = -1)\n brightness = brightness.mean(dim = -2)\n #Brightness now has dimension (200, 24)\n\n\n # print(torch.sum(brightness < 0))\n # print(f\"after sum = {brightness.shape}\")\n output = self.linear(brightness)\n # print(f\"output dim = {output.shape}\")\n return output\n \n\n\n\ndef train(model, X_train, y_train, X_dev, y_dev, lr=1e-1, batch_size=32, num_epochs=30):\n \"\"\"Run the training loop for the model.\n\n All of this code is highly generic and works for any model that does multi-class classification.\n\n Args:\n model: A nn.Module model, must take in inputs of size (B, D)\n and output predictions of size (B, C)\n X_train: Tensor of size (N, D)\n y_train: Tensor of size (N,)\n X_dev: Tensor of size (N_dev, D). Used for early stopping.\n y_dev: Tensor of size (N_dev,). Used for early stopping.\n lr: Learning rate for SGD\n batch_size: Desired batch size.\n num_epochs: Number of epochs of SGD to run\n \"\"\"\n start_time = time.time()\n loss_func = nn.CrossEntropyLoss() # Cross-entropy loss is just softmax regression loss\n optimizer = optim.Adam(model.parameters(), lr=lr) # Stochastic gradient descent optimizer\n\n # Prepare the training dataset\n # Pytorch DataLoader expects a dataset to be a list of (x, y) pairs\n train_dataset = [(X_train[i,:], y_train[i]) for i in range(len(y_train))]\n\n # Simple version of early stopping: save the best model checkpoint based on dev accuracy\n best_dev_acc = -1\n best_checkpoint = None\n best_epoch = -1\n\n for t in range(num_epochs):\n\n\n train_num_correct = 0\n\n lossSum = 0\n\n # Training loop\n model.train() # Set model to \"training mode\", e.g. turns dropout on if you have dropout layers\n for batch in DataLoader(train_dataset, batch_size=batch_size, shuffle=True):\n # DataLoader automatically groups the data into batchse of roughly batch_size\n # shuffle=True makes it so that the batches are randomly chosen in each epoch\n x_batch, y_batch = batch # unpack batch, which is a tuple (x_batch, y_batch)\n # x_batch is tensor of size (B, D)\n # y_batch is tensor of size (B,)\n optimizer.zero_grad() # Reset the gradients to zero\n # Recall how backpropagation works---gradients are initialized to zero and then accumulated\n # So we need to reset to zero before running on a new batch!\n logits = model(x_batch) # tensor of size (B, C), each row is the logits (pre-softmax scores) for the C classes\n # For MNIST, C=10\n # print(logits)\n # print(y_batch)\n # return\n\n loss = loss_func(logits, y_batch) # Compute the loss of the model output compared to true labels\n lossSum += loss\n loss.backward() # Run backpropagation to compute gradients\n optimizer.step() # Take a SGD step\n # Note that when we created the optimizer, we passed in model.parameters()\n # This is a list of all parameters of all layers of the model\n # optimizer.step() iterates over this list and does an SGD update to each parameter\n\n # Compute running count of number of training examples correct\n preds = torch.argmax(logits, dim=1) # Choose argmax for each row (i.e., collapse dimension 1, hence dim=1)\n train_num_correct += torch.sum(preds == y_batch).item()\n\n print(f\"Loss for epoch {t} is {lossSum / (len(train_dataset) / batch_size)}\")\n # Evaluate train and dev accuracy at the end of each epoch\n train_acc = train_num_correct / len(y_train)\n \n model.eval() # Set model to \"eval mode\", e.g. turns dropout off if you have dropout layers.\n with torch.no_grad(): # Don't allocate memory for storing gradients, more efficient when not training\n # print(X_dev.shape)\n # print(X_dev[0])\n # print(X_dev[1])\n # print(torch.sum(X_dev[0] != X_dev[1]))\n # print(torch.sum(X_dev[0] != X_dev[2]))\n dev_logits = model(X_dev)\n # print(f\"dev_logits shape = {dev_logits.shape}\")\n # print(dev_logits)\n dev_preds = torch.argmax(dev_logits, dim=1)\n # print(dev_preds)\n dev_acc = torch.mean((dev_preds == y_dev).float()).item()\n if dev_acc > best_dev_acc:\n # Save this checkpoint if it has best dev accuracy so far\n best_dev_acc = dev_acc\n best_checkpoint = copy.deepcopy(model.state_dict())\n best_epoch = t\n # print(f'Epoch {t: <2}: dev_acc={dev_acc:.5f}')\n print(f'Epoch {t: <2}: train_acc={train_acc:.5f}, dev_acc={dev_acc:.5f}')\n\n # Set the model parameters to the best checkpoint across all epochs\n model.load_state_dict(best_checkpoint)\n end_time = time.time()\n print(f'Training took {end_time - start_time:.2f} seconds')\n print(f'\\nBest epoch was {best_epoch}, dev_acc={best_dev_acc:.5f}')\n\n\n\ndef evaluate(model, X, y, name):\n \"\"\"Measure and print accuracy of a predictor on a dataset.\"\"\"\n model.eval() # Set model to \"eval mode\", e.g. turns dropout off if you have dropout layers.\n with torch.no_grad(): # Don't allocate memory for storing gradients, more efficient when not training\n logits = model(X) # tensor of size (N, 10)\n y_preds = torch.argmax(logits, dim=1) # Choose argmax for each row (i.e., collapse dimension 1, hence dim=1)\n acc = torch.mean((y_preds == y).float()).item()\n print(f' {name} Accuracy: {acc:.5f}')\n return acc\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--learning-rate', '-r', type=float, default=1e-1)\n parser.add_argument('--batch-size', '-b', type=int, default=32)\n parser.add_argument('--num-epochs', '-T', type=int, default=30)\n parser.add_argument('--test', action='store_true')\n return parser.parse_args()\n\n#Tasks for tomorrow: \n'''\nStart: Understand dataloader input types, etc\n1. To extract the list of filenames from data_split.txt and store it into a tensor\n2. To use dataloader to loop thru each of the files and train, etc\n3. To then modify the neural.py to train BEFORE NOON\n'''\n\n#To read the train dev and test sets from the data split file\ndef read_from_data():\n f = open(\"./../data_split.txt\")\n f.readline() # Burn train\n\n trainNames = list(map(lambda x: x[1:-1], f.readline()[1:-2].split(\", \")))\n\n f.readline() # Burn dev\n\n devNames = list(map(lambda x: x[1:-1], f.readline()[1:-2].split(\", \")))\n\n f.readline() # Burn test\n\n testNames = list(map(lambda x: x[1:-1], f.readline()[1:-2].split(\", \")))\n\n return trainNames, devNames, testNames\n\n\n# #For the CSV prediction\n# class MyDataset(Dataset):\n \n# def __init__(self, file_name):\n# price_df=pd.read_csv(file_name)\n# labels = [\"blues\", \"classical\", \"country\", \"disco\", \"hiphop\", \"jazz\", \"metal\", \"pop\", \"reggae\", \"rock\"]\n \n# x=price_df.iloc[:,1:-1].values\n# # print(x)\n \n# # labelExtract = lambda x: i = i[:5] for i in x\n# y=np.array(list(map(lambda x: labels.index(x), price_df.iloc[:,-1].values)))\n \n# self.x_train=torch.tensor(x,dtype=torch.float32)\n# self.y_train=torch.tensor(y,dtype=torch.float32)\n \n# def __len__(self):\n# return len(self.y_train)\n \n# def __getitem__(self,idx):\n# return self.x_train[idx],self.y_train[idx]\n\n# myDs = MyDataset(\"./../data/features_3_sec.csv\")\n\n# for batch in DataLoader(myDs, batch_size = 32, shuffle = True):\n# x_batch, y_batch = batch\n# print(x_batch.shape)\n# break\n\ndef main():\n # Set random seed, for reproducibility\n torch.manual_seed(42)\n\n # Read the data\n\n trainNames, devNames, testNames = read_from_data()\n\n data_address = \"./../data/images_original/\"\n\n labels = [\"blues\", \"classical\", \"country\", \"disco\", \"hiphop\", \"jazz\", \"metal\", \"pop\", \"reggae\", \"rock\"]\n\n trainArrX = []\n trainArrY = []\n devArrX = []\n devArrY = []\n testArrX = []\n testArrY = []\n\n for trainExample in trainNames:\n im = Image.open(f\"{data_address}{trainExample}\")\n trainArrX += [np.asarray(im)]\n trainArrY += [labels.index(trainExample[:trainExample.find(\"/\")])]\n \n for devExample in devNames:\n im = Image.open(f\"{data_address}{devExample}\")\n devArrX += [np.asarray(im)]\n devArrY += [labels.index(devExample[:devExample.find(\"/\")])]\n\n for testExample in testNames:\n im = Image.open(f\"{data_address}{testExample}\")\n testArrX += [np.asarray(im)]\n testArrY += [labels.index(testExample[:testExample.find(\"/\")])]\n \n #(700, 288, 432, 4)\n trainArrX = torch.tensor(np.stack(trainArrX))[:, 35:253, 54:390, :3].float()\n # print(trainArrX[0])\n meanArr = torch.tensor([[[trainArrX[:, i, j, k].mean() for k in range(trainArrX.shape[3])] for j in range(trainArrX.shape[2])] for i in range(trainArrX.shape[1])])\n print(\"finished converting mean for train\")\n stdArr = torch.tensor([[[trainArrX[:, i, j, k].std() for k in range(trainArrX.shape[3])] for j in range(trainArrX.shape[2])] for i in range(trainArrX.shape[1])])\n print(\"finished converting std and mean for train\")\n meanArr = torch.stack([meanArr for i in range(trainArrX.shape[0])])\n stdArr = torch.stack([stdArr for i in range(trainArrX.shape[0])])\n\n # print(meanArr.shape)\n # print(stdArr.shape)\n # print(trainArrX.shape)\n\n trainArrX = torch.nan_to_num((trainArrX - meanArr)/stdArr)\n\n # print(trainArrX.shape)\n # print(trainArrX[0])\n # print(meanArr.shape)\n\n # testMean = trainArrX.flatten(start_dim = 1).mean(dim = 1)\n # testVar = trainArrX.flatten(start_dim = 1).std(dim = 1)\n\n # meanArr = torch.stack([testMean for i in range(trainArrX.shape[0])])\n # varArr = torch.stack([testVar for i in range(trainArrX.shape[0])])\n # print(meanArr.shape)\n # print(testVar)\n # print(trainArrX + testMean)\n\n #(700)\n trainArrY = torch.tensor(np.array(trainArrY))\n\n #(200, 288, 432, 4)\n devArrX = torch.tensor(np.stack(devArrX))[:, 35:253, 54:390, :3].float()\n # print(trainArrX[0])\n meanArr = torch.tensor([[[devArrX[:, i, j, k].mean() for k in range(devArrX.shape[3])] for j in range(devArrX.shape[2])] for i in range(devArrX.shape[1])])\n print(\"finished converting mean for dev\")\n stdArr = torch.tensor([[[devArrX[:, i, j, k].std() for k in range(devArrX.shape[3])] for j in range(devArrX.shape[2])] for i in range(devArrX.shape[1])])\n print(\"finished converting std and mean for dev\")\n meanArr = torch.stack([meanArr for i in range(devArrX.shape[0])])\n stdArr = torch.stack([stdArr for i in range(devArrX.shape[0])])\n\n # print(meanArr.shape)\n # print(stdArr.shape)\n # print(trainArrX.shape)\n\n devArrX = torch.nan_to_num((devArrX - meanArr)/stdArr)\n\n\n #(200)\n devArrY = torch.tensor(np.array(devArrY))\n\n #(99, 288, 432, 4)\n testArrX = torch.tensor(np.stack(testArrX))[:, 35:253, 54:390, :3].float()\n # print(trainArrX[0])\n meanArr = torch.tensor([[[testArrX[:, i, j, k].mean() for k in range(testArrX.shape[3])] for j in range(testArrX.shape[2])] for i in range(testArrX.shape[1])])\n print(\"finished converting mean for test\")\n stdArr = torch.tensor([[[testArrX[:, i, j, k].std() for k in range(testArrX.shape[3])] for j in range(testArrX.shape[2])] for i in range(testArrX.shape[1])])\n print(\"finished converting std and mean for test\")\n meanArr = torch.stack([meanArr for i in range(testArrX.shape[0])])\n stdArr = torch.stack([stdArr for i in range(testArrX.shape[0])])\n\n # print(meanArr.shape)\n # print(stdArr.shape)\n # print(trainArrX.shape)\n\n testArrX = torch.nan_to_num((testArrX - meanArr)/stdArr)\n\n\n #(99)\n testArrY = torch.tensor(np.array(testArrY))\n\n\n print(\"Finish Data Collection\")\n\n # print(len(devNames))\n # print(devArrY)\n\n \n\n\n \n # print(trainArrX.shape)\n # print(trainArrY.shape)\n\n # print(trainArrX[0])\n\n # return\n\n model = MusicDabber()\n train(model, trainArrX, trainArrY, devArrX, devArrY, lr=OPTS.learning_rate,\n batch_size=OPTS.batch_size, num_epochs=OPTS.num_epochs)\n\n \n # Evaluate the model\n print('\\nEvaluating final model:')\n train_acc = evaluate(model, trainArrX, trainArrY, 'Train')\n dev_acc = evaluate(model, devArrX, devArrY, 'Dev')\n train_acc = evaluate(model, testArrX, testArrY, 'Test')\n\n for i in range(10):\n tempDevX = devArrX[20*i:20*(i+1)]\n tempDevY = devArrY[20*i:20*(i+1)]\n evaluate(model, tempDevX, tempDevY, labels[tempDevY[0]])\n\n # PATH = \"./models/\"\n # if OPTS.test:\n # test_acc = evaluate(model, testArrX, testArrY, 'Test')\n # torch.save(model.state_dict(), PATH + str(dev_acc)[2:6] + \".pt\")\n \n\n\nif __name__ == '__main__':\n OPTS = parse_args()\n main()\n\n","repo_name":"Lorenayannnnn/csci467_music_genre_classification","sub_path":"Baseline_Logistic_Regression/ImageSoftmax.py","file_name":"ImageSoftmax.py","file_ext":"py","file_size_in_byte":16727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72055021497","text":"from django.conf import settings\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.renderers import JSONRenderer\n\n# from rest_framework_jsonp.renderers import JSONPRenderer\nfrom cantusdata.serializers.search import SearchSerializer\nimport solr\n\n\nCHANT_FIELDS = [\n \"type\",\n \"item_id\",\n \"marginalia\",\n \"manuscript\",\n \"manuscript_id\",\n \"manuscript_name_hidden\",\n \"folio\",\n \"folio_id\",\n \"sequence\",\n \"cantus_id\",\n \"feast\",\n \"office\",\n \"genre\",\n \"position\",\n \"mode\",\n \"differentia\",\n \"finalis\",\n \"incipit\",\n \"full_text\",\n \"full_text_ms\",\n \"volpiano\",\n \"concordances\",\n \"cdb_uri\",\n]\n\n\nclass FolioChantSetView(APIView):\n serializer_class = SearchSerializer\n renderer_classes = (JSONRenderer,)\n\n def get(self, request, *args, **kwargs):\n folio_ids_list = [f\"folio_id:{id}\" for id in kwargs[\"pk\"].split(\",\")]\n folio_ids_str = \" OR \".join(folio_ids_list)\n # We want to get all chants of a particular folio of a particular\n # manuscript. It is fastest to pull these from Solr!\n solrconn = solr.SolrConnection(settings.SOLR_SERVER)\n composed_request = f'type:\"cantusdata_chant\" AND ({folio_ids_str})'\n results = solrconn.query(\n composed_request,\n sort=\"folio asc, sequence asc\",\n rows=100,\n fields=CHANT_FIELDS,\n score=False,\n )\n\n return Response(results)\n\n\nclass ManuscriptChantSetView(APIView):\n serializer_class = SearchSerializer\n renderer_classes = (JSONRenderer,)\n\n def get(self, request, *args, **kwargs):\n manuscript_id = kwargs[\"pk\"]\n\n if \"start\" in kwargs:\n start = kwargs[\"start\"]\n else:\n start = 0\n\n solrconn = solr.SolrConnection(settings.SOLR_SERVER)\n\n composed_request = 'type:\"cantusdata_chant\" AND manuscript_id:{0}'.format(\n manuscript_id\n )\n results = solrconn.query(\n composed_request,\n sort=\"sequence asc\",\n start=start,\n rows=100,\n fields=CHANT_FIELDS,\n score=False,\n )\n\n return Response(results)\n","repo_name":"DDMAL/cantus","sub_path":"app/public/cantusdata/views/chant_set.py","file_name":"chant_set.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"38743029290","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\n\n\ndef plot_roc(model, parameters, y_true):\n \"\"\"\n Arguments:\n model - trained model such as DecisionTreeClassifier, etc.\n parameters - array-like or sparse matrix of shape [n_samples, n_features]. The input samples.\n y_true - True binary labels in range {0, 1} or {-1, 1}. If labels are not binary, pos_label should be explicitly given.\n \"\"\"\n if model is None:\n return 0., 0., np.array([])\n\n predicted = model.predict_proba(parameters)[: ,1]\n threshold = 0.5\n predicted_binary = (predicted > threshold).astype(int)\n\n fpr, tpr, threshold = metrics.roc_curve(y_true, predicted, pos_label=1)\n\n roc_auc = metrics.auc(fpr, tpr)\n ks = np.max(tpr - fpr) # Kolmogorov-Smirnov test\n\n print('ROC_auc = ', roc_auc)\n print('KS_test = ', ks)\n print('AUC score: %f ' % metrics.roc_auc_score(y_true, predicted))\n\n try:\n plt.title('%s ROC curve ' % model.__class__.__name__)\n plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n plt.legend(loc='lower right')\n plt.plot([0 ,1], [0 ,1], 'r--')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n\n # plt.savefig('ROC_curve.png')\n plt.show()\n except: pass\n return roc_auc, ks, threshold\n\n","repo_name":"AlphaTac-AI/AlphaCore","sub_path":"lib/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"21130598606","text":"import numpy as np\nimport yaml, pickle, tqdm\nfrom os import path\n\nclass Dataset:\n def __init__(self, project_path, config_path):\n self.project_path = project_path\n self.config_path = config_path\n\n self.data_name = ['bodypoints','scaled_bodypoints', 'rotated_bodypoints', 'angles', 'limbs', 'angle_power', 'limb_power', \n 'all_embeddings', 'all_postural_embeddings', 'maker_postural_embeddings', 'angle_postural_embeddings', 'limb_postural_embeddings',\n 'all_kinematic_embeddings', 'marker_kinematic_embeddings', 'limb_kinematic_embeddings', 'angle_kinematic_embeddings', 'cluster', 'kinematic_cluster']\n self.data_obj = {}\n \n self.config = self.load_config()\n self.info, self.info_values = self.load_info()\n\n def load_info(self):\n print(\"Loading INFO.yaml ...\")\n with open(f\"{self.project_path}/{self.config['result_path']}/INFO.yaml\") as f:\n INFO = yaml.load(f, Loader=yaml.FullLoader)\n INFO_values = list(INFO.values())\n INFO_values.sort(key=lambda x: x['order'])\n print(\"Finished loading INFO\")\n return INFO, INFO_values\n \n def load_config(self):\n print(\"Loading config.yaml ...\")\n with open(f\"{self.config_path}\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n print(\"Finished loading config\")\n return config\n\n def load_data(self):\n for file_name in self.data_name:\n self.data_obj[file_name] = []\n for file in tqdm.tqdm(self.info_values):\n # print(file)\n for file_name in self.data_name:\n # print(file_name)\n abs_data_path = f\"{self.project_path}/{file['directory']}/{file_name}.npy\"\n if path.exists(abs_data_path):\n # print(np.load(abs_data_path).shape)\n self.data_obj[file_name].append( np.load(abs_data_path) )\n for file_name in self.data_name:\n if self.data_obj[file_name]:\n self.data_obj[file_name] = np.concatenate(self.data_obj[file_name])\n","repo_name":"Souvik-Mandal-Harvard/ES_GPU","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"29798235996","text":"#a quick script I made to read an email my now fiance sent me that was botched by gmail\ncounter = 0\ndone = False\nnewText = \"\"\n#this is just an example of how the email was formatted, the real email was thousands of characters long\ngo = str(\" H e y ? H o w a r e y o u ? \")\nfor x in go:\n if counter == 1:\n counter = 0\n done = True\n if done == False and counter == 0:\n counter = 1\n newText = newText + x\n elif x != \" \":\n counter = 1\n newText = newText + x\n done = False\nprint(newText)\n \n","repo_name":"JustB544/Resume-Programs","sub_path":"Python/Random Project.py","file_name":"Random Project.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17887236105","text":"import pytest\nimport pkg_resources\nimport networkx\nimport numpy as np\nimport pandas as pd\n\nfrom seekr import graph\n\n\nclass TestMaker:\n def _build_adj(self):\n kmers = \"tests/data/example_2mers.npy\"\n kmers = pkg_resources.resource_filename(\"seekr\", kmers)\n kmers = np.load(kmers)\n adj = np.corrcoef(kmers) * -1 # Flip signs for fewer negatives\n names = list(range(5))\n adj = pd.DataFrame(adj, names, names)\n return adj\n\n def _build_disconnected_graph(self):\n g = networkx.Graph()\n edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 4), (2, 5), (2, 6), (7, 8), (8, 9)]\n g.add_edges_from(edges)\n networkx.set_edge_attributes(g, 1, \"weight\")\n return g\n\n def _save_adj(self, out_dir, binary=True):\n adj = self._build_adj()\n if binary:\n out_path = str(out_dir.join(\"adj.npy\"))\n np.save(out_path, adj.values)\n else:\n out_path = str(out_dir.join(\"adj.csv\"))\n adj.to_csv(out_path)\n return out_path\n\n def _get_maker_with_partition(self, out_path):\n gml_path = str(out_path.join(\"out_main_sub.gml\"))\n adj = self._build_adj()\n maker = graph.Maker(adj, gml_path=gml_path, seed=0)\n maker.build()\n maker.save(True)\n maker.get_partition()\n return maker\n\n def test_apply_threshold(self):\n adj = self._build_adj()\n maker = graph.Maker(adj)\n maker.apply_threshold()\n assert np.alltrue(maker.adj.values.diagonal() == np.zeros(5))\n assert adj.values[1, 0] == 0\n assert adj.values[1, 2] != 0\n\n def test_apply_threshold_t1(self):\n adj = self._build_adj()\n maker = graph.Maker(adj, threshold=1)\n maker.apply_threshold()\n assert maker.adj.values.sum() == 0\n\n def test_apply_threshold_ndarray(self):\n adj = self._build_adj().values\n maker = graph.Maker(adj, threshold=1)\n maker.apply_threshold()\n assert maker.adj.sum() == 0\n\n def test_build(self):\n adj = self._build_adj()\n maker = graph.Maker(adj)\n maker.build()\n assert type(maker.graph) == networkx.Graph\n assert len(maker.graph) == 5\n assert len(maker.graph.edges()) == 9\n assert len(networkx.get_edge_attributes(maker.graph, \"weight\")) == 9\n assert maker.adj is None\n assert maker.main_sub is not None\n\n def test_build_ndarray(self):\n adj = self._build_adj().values\n maker = graph.Maker(adj)\n maker.build()\n assert type(maker.graph) == networkx.Graph\n assert len(maker.graph) == 5\n assert len(maker.graph.edges()) == 9\n assert len(networkx.get_edge_attributes(maker.graph, \"weight\")) == 9\n\n def test_build_no_clear_adj(self):\n adj = self._build_adj()\n maker = graph.Maker(adj)\n maker.build(clear_adj=False)\n assert maker.adj.equals(adj)\n\n def test_build_no_main_sub(self):\n adj = self._build_adj()\n maker = graph.Maker(adj)\n maker.build(main_sub=False)\n assert maker.main_sub is None\n\n def test_find_main_sub(self):\n g = self._build_disconnected_graph()\n maker = graph.Maker()\n maker.graph = g\n maker.find_main_sub()\n assert list(maker.main_sub) == list(range(7))\n\n def test_save(self, tmpdir):\n gml_path = str(tmpdir.join(\"out.gml\"))\n adj = self._build_adj()\n maker = graph.Maker(adj, gml_path=gml_path)\n maker.build()\n maker.save()\n saved = networkx.read_gml(gml_path)\n expected = [str(n) for n in maker.graph.nodes()]\n assert list(saved.nodes()) == expected\n expected = [(str(n1), str(n2)) for n1, n2 in maker.graph.edges()]\n assert list(saved.edges()) == expected\n\n def test_save_main_sub(self, tmpdir):\n gml_path = str(tmpdir.join(\"out_main_sub.gml\"))\n adj = self._build_adj()\n maker = graph.Maker(adj, gml_path=gml_path)\n maker.build()\n maker.save(main_sub=True)\n saved = networkx.read_gml(gml_path)\n expected = [str(n) for n in maker.main_sub.nodes()]\n assert list(saved.nodes()) == expected\n expected = [(str(n1), str(n2)) for n1, n2 in maker.main_sub.edges()]\n assert list(saved.edges()) == expected\n\n def test_get_partition(self, tmpdir):\n maker = self._get_maker_with_partition(tmpdir)\n assert np.isclose(maker.partition.modularity, -0.08024691358024699)\n assert maker.partition.membership == [1, 0, 1, 0, 0]\n\n def test_membership2attribute(self, tmpdir):\n maker = self._get_maker_with_partition(tmpdir)\n name2group = maker.membership2attribute()\n assert name2group == {\"0\": 1, \"1\": 0, \"2\": 1, \"3\": 0, \"4\": 0}\n assert name2group == networkx.get_node_attributes(maker.graph, \"Group\")\n\n def test_membership2attribute_disconnected(self, tmpdir):\n gml_path = str(tmpdir.join(\"out_main_sub.gml\"))\n g = self._build_disconnected_graph()\n maker = graph.Maker(gml_path=gml_path)\n maker.graph = g\n maker.find_main_sub()\n maker.save(True)\n maker.get_partition()\n name2group = maker.membership2attribute()\n expected = {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0, 6: 0, 7: 2, 8: 2, 9: 2}\n assert name2group == expected\n\n def test_membership2attribute_disconnected_ncomms1(self, tmpdir):\n gml_path = str(tmpdir.join(\"out_main_sub.gml\"))\n g = self._build_disconnected_graph()\n maker = graph.Maker(gml_path=gml_path, n_comms=1)\n maker.graph = g\n maker.find_main_sub()\n maker.save(True)\n maker.get_partition()\n name2group = maker.membership2attribute()\n expected = {0: 1, 1: 1, 2: 0, 3: 1, 4: 0, 5: 0, 6: 0, 7: 1, 8: 1, 9: 1}\n assert name2group == expected\n\n def test_membership2attribute_disconnected_ncomms3(self, tmpdir):\n gml_path = str(tmpdir.join(\"out_main_sub.gml\"))\n g = self._build_disconnected_graph()\n maker = graph.Maker(gml_path=gml_path, n_comms=3, gamma=10)\n maker.graph = g\n maker.find_main_sub()\n maker.save(True)\n maker.get_partition()\n name2group = maker.membership2attribute()\n expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3}\n assert name2group == expected\n\n def test_make_gml_file(self, tmpdir):\n gml_path = str(tmpdir.join(\"out.gml\"))\n csv_path = str(tmpdir.join(\"out.csv\"))\n adj = self._build_adj()\n maker = graph.Maker(adj, gml_path=gml_path, csv_path=csv_path)\n maker.make_gml_csv_files()\n in_graph = networkx.read_gml(gml_path)\n assert list(in_graph.nodes()) == [str(i) for i in range(5)]\n assert len(networkx.get_node_attributes(in_graph, \"Group\")) == 5\n df = pd.read_csv(csv_path, index_col=0)\n assert np.alltrue(df.index.values == np.arange(5))\n assert np.alltrue(df[\"Group\"].values == np.array([1, 0, 1, 0, 0]))\n","repo_name":"CalabreseLab/seekr","sub_path":"seekr/tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"22"} +{"seq_id":"35986354951","text":"def numero_x_na_matriz (m, x):\n \n for linha in range(len(m)):\n for coluna in range(len(m)):\n if m[linha][coluna] == x:\n return (linha+1),(coluna+1)\n else:\n return -1\n\npergunta = \"S\"\nwhile pergunta == \"S\":\n matriz = []\n for linha in range(5):\n l = []\n matriz.append(l)\n for coluna in range(5):\n l.append(int(input(f\"Digite um valor para linha {linha+1}, coluna {coluna+1} para preencher a matriz: \")))\n\n print(\"A matriz gerada foi: \")\n for l in range(len(matriz)):\n for c in range(len(matriz)):\n print(f\"{matriz[l][c]}\\t\", end = \"\")\n print()\n \n print(f\"/\" * 60, end = \"\")\n N_encontrar = int(input(\"\\nDigite um número que deseja encontrar: \"))\n \n resultado = numero_x_na_matriz(matriz, N_encontrar)\n if resultado == -1 :\n print(\"Não há este valor na matriz\")\n else:\n print(f\"O valor está na linha {resultado[0]}, coluna {resultado[1]}\")\n\n pergunta = input(\"Gostaria de continuar? [S/N]\\n\").upper() \nprint(\"Fim do programa...\")\n\n ","repo_name":"lucenac/fup-ufc","sub_path":"fup_8/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70547191737","text":"import torch.nn as nn\nfrom .shufflenet import *\n\n\nclass ShuffleNet(nn.Module):\n\n __factory = {\n 0.5: shufflenet_v2_x0_5,\n 1.0: shufflenet_v2_x1_0,\n 1.5: shufflenet_v2_x1_5,\n 2.0: shufflenet_v2_x2_0,\n }\n\n def __init__(self, ratio, pretrained=True, norm_layer=nn.BatchNorm2d):\n super(ShuffleNet, self).__init__()\n if ratio not in self.__factory:\n raise KeyError(\"Unsupported ratio:\", ratio)\n self.model = self.__factory[ratio](\n pretrained=pretrained,\n norm_layer=norm_layer\n )\n self.out_channels = self.model._stage_out_channels[-1]\n\n def forward(self, x):\n outputs = []\n x = self.model.conv1(x)\n x = self.model.maxpool(x)\n x = self.model.stage2(x)\n x = self.model.stage3(x)\n x = self.model.stage4(x)\n x = self.model.conv5(x)\n outputs.append(x)\n return outputs\n","repo_name":"czyczyyzc/CondLSTR","sub_path":"modeling/models/backbones/shufflenet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"19463403945","text":"#! /usr/bin/env python\n# coding: UTF-8\n#ラベルの表示確認\n#2019.01.16 by SGR\n#---- import ------\nimport smbus\nimport time\n\n#------- class\nclass I2cWR(object):\n \"\"\"I2Cの読み書きを行うclass\"\"\"\n def __init__(self, dvice_adress):\n \"\"\"デバイスアドレスを設定してオブジェクト生成device_adress = スレイブアドレス\"\"\"\n self._dvice_adress = dvice_adress\n self.i2c = smbus.SMBus(1)\n self._r_data = 0x00\n #------ Data Write\n def i2c_data_w(self, write_adress, w_data):\n \"\"\"write_adress = hex:書き込みアドレス w_data = hex:書き込みデータ\"\"\"\n self.i2c.write_i2c_block_data(self._dvice_adress, write_adress, w_data)\n #------ I2c Data Read\n def i2c_data_r(self, read_start, read_end):\n \"\"\"yomidasi\"\"\"\n self.read_data = self.i2c.read_i2c_block_data(self._dvice_adress, read_start, read_end)\n return self.read_data\n\n###############################\nif __name__ == '__main__':\n #am2320オブジェクト生成\n am2320_tmp = I2cWR(0x5c)\n\n # センサsleep解除\n try:\n am2320_tmp.i2c_data_w(0x00,[])\n #i2cAm2320.write_i2c_block_data(address,0x00,[])\n except:\n pass\n\n # 読み取り命令\n time.sleep(0.003)\n am2320_tmp.i2c_data_w(0x03,[0x00,0x04])\n # データ受取\n time.sleep(0.015)\n block = am2320_tmp.i2c_data_r(0,6)\n hum = float(block[2] << 8 | block[3])/10\n tmp = float(block[4] << 8 | block[5])/10\n\n print('hum=%.2f' %hum) # 湿度表示\n print('Temp=%.2f' %tmp) # 温度表示\n","repo_name":"Suguru36/raspi_sensor_test","sub_path":"sub/I2cWR.py","file_name":"I2cWR.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18325948205","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nwindow = Tk()\r\n\r\ndef save():\r\n file = filedialog.asksaveasfile(initialdir=\"C:/Users/USER/OneDrive/Desktop/programming/python GUI\",defaultextension='.txt',\r\n filetypes = [\r\n (\"Text file\",\".txt\"),\r\n (\"HTML file\",\".html\"),\r\n (\"All files\",\".*\"),\r\n ])\r\n if file is None:\r\n return\r\n filetext = str(text.get(1.0,END))\r\n #filetext = input(\"Enter some text : \")\r\n file.write(filetext)\r\n file.close()\r\n \r\n \r\nbutton = Button(text = 'SAVE',command = save)\r\nbutton.pack()\r\n\r\ntext = Text(window)\r\ntext.pack()\r\n\r\n\r\n\r\n\r\nwindow.mainloop()","repo_name":"PADDA-YOGESHWAR/Tkinter","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71900428217","text":"import os\nimport random\nfrom loader.exceptions import PicturesFormatNotSupportedError, PicturesNotUploadedError, OutOfFreeNamesError\n\n\nclass UploadManager:\n\n def get_free_filename(self, folder, file_type):\n \"\"\"Получения нового имени для загружаемого файла\"\"\"\n attemps = 0\n LIMIT_OF_ATTEMPS = 1000000\n\n while True:\n pic_name = random.randint(0, 1000000)\n filename_to_save = f\"{pic_name}.{file_type}\"\n os_path = os.path.join(folder, filename_to_save)\n is_filename_occupied = os.path.exists(os_path)\n\n if not is_filename_occupied:\n return filename_to_save\n\n attemps += 1\n\n if attemps > LIMIT_OF_ATTEMPS:\n raise OutOfFreeNamesError(\"No free names to save images\")\n\n def if_file_type_valid(self, file_type):\n \"\"\"Проверка расширения файла\"\"\"\n if file_type.lower() in [\"jpeg\", \"jpg\", \"gif\", \"png\", \"webp\"]:\n return True\n return False\n\n def save_with_random_name(self, picture):\n\n # Получаем данные с картинки\n filename = picture.filename\n file_type = filename.split(\".\")[-1]\n\n # Проверяем валидность картинки\n if not self.if_file_type_valid(file_type):\n raise PicturesFormatNotSupportedError(f\"Формат {file_type} не поддерживается\")\n\n # Получаем свободное имя\n folder = os.path.join(\".\", \"uploads\", \"images\")\n filename_to_save = self.get_free_filename(folder, file_type)\n\n # Сохраняем под новым именем\n try:\n picture.save(os.path.join(folder, filename_to_save))\n except FileNotFoundError:\n raise PicturesNotUploadedError(f\"{folder, filename_to_save}\")\n\n return filename_to_save\n","repo_name":"frameboo/hw12","sub_path":"loader/upload_manager.py","file_name":"upload_manager.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28284616124","text":"'''from flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/upload')\ndef imagetopdf():\n return render_template('upload_image.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)'''\n\n\nfrom flask import Flask, render_template,Response, request, redirect, url_for\nfrom flask_wtf import FlaskForm\nfrom wtforms import FileField, SubmitField\nfrom main import img2pdf\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'your_secret_key_here'\n\nclass UploadForm(FlaskForm):\n images = FileField('Select image(s)', render_kw={'multiple': True})\n submit = SubmitField('Convert to PDF')\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload():\n form = UploadForm()\n\n if form.validate_on_submit():\n images = request.files.getlist('images')\n # process images into a single PDF\n instance = img2pdf([image.filename for image in images])\n pdf_data = instance.save_pdf(instance.create_pdf())\n\n # send the PDF file to the user\n return redirect(url_for('send_pdf', pdf_data=pdf_data))\n\n return render_template('forms.html', form=form)\n\n@app.route('/send-pdf')\ndef send_pdf():\n pdf_data = request.args.get('pdf_data')\n return Response(pdf_data, mimetype='application/pdf')\n","repo_name":"farazmashruwala/farazmashruwala","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9236817408","text":"from typing import List\nfrom enum import Enum\nfrom FslFontHelper.BasicFont import BasicFont\nfrom FslFontHelper.BasicFont import BasicFontHeader\nfrom FslFontHelper.BasicFont import BasicFontPoint2\nfrom FslFontHelper.BasicFont import BasicFontGlyphKerning\nfrom FslFontHelper.BasicFont import BasicFontGlyphRange\nfrom FslFontHelper.BitmapFont import BitmapFont\nfrom FslFontHelper.BitmapFont import BitmapFontChar\nfrom FslFontHelper.BitmapFont import BitmapFontKerning\nfrom FslFontHelper.BitmapFont import BitmapFontType\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------\n\nclass ExtractRangeState(Enum):\n FindBegin = 1\n FindEnd = 2\n\n\ndef _ExtractHeader(font: BitmapFont) -> BasicFontHeader:\n return BasicFontHeader(font.TextureName, font.LineSpacingPx, font.BaseLinePx, BasicFontPoint2(0, 0))\n\ndef _AddRange(dstList: List[BasicFontGlyphRange], newEntry: BasicFontGlyphRange) -> None:\n if newEntry.From < 32 and (newEntry.From + newEntry.Length) <= 32:\n return\n dstList.append(newEntry)\n\ndef _ExtractRanges(font: BitmapFont) -> List[BasicFontGlyphRange]:\n result = [] # type: List[BasicFontGlyphRange]\n state = ExtractRangeState.FindBegin\n rangeStartId = 0\n expectedRangeId = 0\n\n index = 0\n while index < len(font.Chars):\n fontChar = font.Chars[index]\n if state == ExtractRangeState.FindBegin:\n state = ExtractRangeState.FindEnd\n rangeStartId = fontChar.Id\n expectedRangeId = rangeStartId + 1\n elif state == ExtractRangeState.FindEnd:\n if fontChar.Id != expectedRangeId:\n state = ExtractRangeState.FindBegin\n _AddRange(result, BasicFontGlyphRange(rangeStartId, expectedRangeId - rangeStartId, rangeStartId))\n index -= 1\n else:\n expectedRangeId += 1\n index += 1\n\n if state == ExtractRangeState.FindEnd:\n _AddRange(result, BasicFontGlyphRange(rangeStartId, expectedRangeId - rangeStartId, rangeStartId))\n return result\n\ndef _IsInRange(validRanges: List[BasicFontGlyphRange], charId: int) -> bool:\n for range in validRanges:\n if charId >= range.From and charId < (range.From + range.Length):\n return True;\n return False\n\ndef _ExtractKernings(font: BitmapFont, validRanges: List[BasicFontGlyphRange]) -> List[BasicFontGlyphKerning]:\n result = [] # type: List[BasicFontGlyphKerning]\n for fontChar in font.Chars:\n if _IsInRange(validRanges, fontChar.Id):\n result.append(BasicFontGlyphKerning(fontChar.Id, fontChar.XOffsetPx, fontChar.YOffsetPx, fontChar.XAdvancePx))\n return result\n\ndef ToBasicFont(font: BitmapFont) -> BasicFont:\n name = font.Name\n header = _ExtractHeader(font)\n fontGlyphRanges = _ExtractRanges(font)\n fontGlyphKernings = _ExtractKernings(font, fontGlyphRanges)\n return BasicFont(name, header, fontGlyphRanges, fontGlyphKernings)\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------\n\ndef _FindBasicGlyphKerning(basicKernings: List[BasicFontGlyphKerning], id: int) -> BasicFontGlyphKerning:\n for entry in basicKernings:\n if entry.Id == id:\n return entry\n raise Exception(\"Could not find information for {}\".format(id))\n\ndef _ToBitmapFontChars(ranges: List[BasicFontGlyphRange], basicKernings: List[BasicFontGlyphKerning]) -> List[BitmapFontChar]:\n # To get the full information we will need information from the texture atlas\n print(\"FIX: implement support for the texture rectangle\")\n result = [] # type: List[BitmapFontChar]\n for basicGlyphRange in ranges:\n for id in range(basicGlyphRange.From, basicGlyphRange.From + basicGlyphRange.Length):\n basicGlyphKerning = _FindBasicGlyphKerning(basicKernings, id)\n rectTexX = 0\n rectTexY = 0\n rectTexWidth = 0\n rectTexHeight = 0\n xOffsetPx = basicGlyphKerning.OffsetX\n yOffsetPx = basicGlyphKerning.OffsetY\n xAdvancePx = basicGlyphKerning.LayoutWidth\n result.append(BitmapFontChar(id, rectTexX, rectTexY, rectTexWidth, rectTexHeight, xOffsetPx, yOffsetPx, xAdvancePx))\n return result\n\ndef ToBitmapFont(font: BasicFont) -> BitmapFont:\n chars = _ToBitmapFontChars(font.Ranges, font.BasicGlyphKerning)\n kernings = [] # type: List[BitmapFontKerning]\n return BitmapFont(font.Name, 1, font.Header.LineSpacing, font.Header.BaseLine, font.Header.PathName, BitmapFontType.Bitmap, chars, kernings)\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------\n\n","repo_name":"nxp-imx/gtec-demo-framework","sub_path":".Config/FslFontHelper/TypeConverter.py","file_name":"TypeConverter.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"22"} +{"seq_id":"38266684172","text":"import os\n\nimport cv2\nimport h5py\nimport numpy as np\nfrom torchvision.datasets import MNIST\n\ntrain_iter = MNIST('data/temp', train=True, download=True)\ntest_iter = MNIST('data/temp', train=False, download=True)\n\nif not os.path.exists('data/train'):\n os.mkdir('data/train')\nif not os.path.exists('data/test'):\n os.mkdir('data/test')\n\ntrain_data, test_data = [], []\ntrain_edge_data, test_edge_data = [], []\ntrain_label, test_label = [], []\n\n\nif not os.path.exists('data/mnist.h5'):\n for X, y in train_iter:\n X = np.array(X)\n Y = cv2.Canny(X, 50, 100)\n train_data.append(X)\n train_edge_data.append(Y)\n train_label.append(y)\n\n for X, y in test_iter:\n X = np.array(X)\n Y = cv2.Canny(X, 50, 100)\n test_data.append(X)\n test_edge_data.append(Y)\n test_label.append(y)\n\n train_data = np.array(train_data, dtype=np.uint8)\n train_edge_data = np.array(train_edge_data, dtype=np.uint8)\n test_data = np.array(test_data, dtype=np.uint8)\n test_edge_data = np.array(test_edge_data, dtype=np.uint8)\n train_label = np.array(train_label, dtype=int)\n test_label = np.array(test_label, dtype=int)\n\n with h5py.File('data/mnist.h5', 'w') as f:\n f['train_data'] = train_data\n f['train_edge_data'] = train_edge_data\n f['test_data'] = test_data\n f['test_edge_data'] = test_edge_data\n f['train_label'] = train_label\n f['test_label'] = test_label\n","repo_name":"mgisr/Edge-TripleNet","sub_path":"data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18025225125","text":"import typing\n\nimport pytest\nimport pytest_asyncio\nfrom asyncpg import Connection\n\nfrom asyncpg_engine import Engine\n\n\ndef pytest_configure(config: typing.Any) -> None:\n config.addinivalue_line(\"markers\", \"asyncpg_engine: configure asyncpg-engine plugin behaviour.\")\n\n\n@pytest.fixture()\ndef asyncpg_engine_cls() -> typing.Type[Engine]:\n return Engine\n\n\n@pytest_asyncio.fixture()\nasync def db(\n request: pytest.FixtureRequest, asyncpg_engine_cls: typing.Type[Engine], postgres_url: str\n) -> typing.AsyncGenerator[Engine, None]:\n plugin_config = request.node.get_closest_marker(\"asyncpg_engine\")\n\n transactional = getattr(plugin_config, \"kwargs\", {}).get(\"transactional\", True)\n\n _db = await asyncpg_engine_cls.create(url=postgres_url, use_single_connection=transactional)\n\n con: Connection = await _db.acquire() # type: ignore\n tr = con.transaction()\n await tr.start()\n\n yield _db\n\n await tr.rollback()\n await _db.release(con, force=transactional)\n await _db.close()\n\n\n@pytest_asyncio.fixture()\nasync def con(db: Engine) -> typing.AsyncGenerator[Connection, None]:\n async with db.acquire() as _con:\n yield _con\n","repo_name":"sivakov512/asyncpg-engine","sub_path":"asyncpg_engine/pytest_plugin.py","file_name":"pytest_plugin.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"8302325028","text":"# Lily has a chocolate bar that she wants to share it with Ron for his birthday. Each of the squares has an integer on it. She decides to share a contiguous segment of the bar selected such that the length of the segment matches Ron's birth month and the sum of the integers on the squares is equal to his birth day. You must determine how many ways she can divide the chocolate.\n# birthday has the following parameter(s):\n\n#s: an array of integers, the numbers on each of the squares of chocolate\n#d: an integer, Ron's birth day\n#m: an integer, Ron's birth month\n#s = [1, 2, 1, 3, 2]\n#d = 3\n#m = 2\nsum1 = 0\ncount = 0\nfor i in range(len(s)):\n sum1 = 0\n for j in range(0, m):\n print(j)\n if i+j < len(s):\n sum1 = sum1 + s[i+j]\n if d == sum1:\n count = count + 1\n\nreturn count","repo_name":"sanjar/hackerrank","sub_path":"HackerRank/src/com/hack/python/the_birthday_bar.py","file_name":"the_birthday_bar.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5845736246","text":"# This file is part of Gajim.\n#\n# Gajim is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published\n# by the Free Software Foundation; version 3 only.\n#\n# Gajim is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Gajim. If not, see .\n\nfrom typing import Any\nfrom typing import cast\n\nimport logging\n\nfrom gi.repository import Gio\nfrom gi.repository import GLib\n\nfrom gajim.common import app\nfrom gajim.common import events\nfrom gajim.common import ged\nfrom gajim.common import helpers\nfrom gajim.common.modules.contacts import BareContact\nfrom gajim.common.modules.contacts import GroupchatContact\nfrom gajim.common.modules.contacts import GroupchatParticipant\nfrom gajim.common.structs import OutgoingMessage\n\nlog = logging.getLogger('gajim.c.dbus.remote_control')\n\nINTERFACE_DESC = '''\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n'''\n\n\ndef get_dbus_struct(obj: Any) -> GLib.Variant:\n '''\n Recursively go through all the items and replace them with their casted dbus\n equivalents\n '''\n if isinstance(obj, str):\n return GLib.Variant('s', obj)\n if isinstance(obj, int):\n return GLib.Variant('i', obj)\n if isinstance(obj, float):\n return GLib.Variant('d', obj)\n if isinstance(obj, bool):\n return GLib.Variant('b', obj)\n if isinstance(obj, list | tuple):\n lst = [get_dbus_struct(i) for i in obj # pyright: ignore\n if i is not None]\n result = GLib.Variant('av', lst)\n return result\n if isinstance(obj, dict):\n obj = cast(dict[str, Any], obj)\n result = GLib.VariantDict()\n for key, value in obj.items():\n result.insert_value(key, get_dbus_struct(value))\n return result.end()\n # unknown type\n return GLib.Variant('s', str(obj))\n\n\nclass Server:\n def __init__(self, con: Gio.DBusConnection, path: str) -> None:\n self._method_outargs: dict[str, str] = {}\n self._method_inargs: dict[str, tuple[str, ...]] = {}\n node_info = Gio.DBusNodeInfo.new_for_xml(INTERFACE_DESC)\n for interface in node_info.interfaces:\n for method in interface.methods:\n self._method_outargs[method.name] = '(' + ''.join(\n [arg.signature for arg in method.out_args]) + ')'\n self._method_inargs[method.name] = tuple(\n arg.signature for arg in method.in_args)\n\n con.register_object(\n object_path=path,\n interface_info=interface,\n method_call_closure=self._on_method_call)\n\n def _on_method_call(self,\n _connection: Gio.DBusConnection,\n _sender: str,\n _object_path: str,\n _interface_name: str,\n method_name: str,\n parameters: GLib.Variant,\n invocation: Gio.DBusMethodInvocation) -> None:\n\n args = list(parameters.unpack())\n for i, sig in enumerate(self._method_inargs[method_name]):\n if sig == 'h':\n msg = invocation.get_message()\n fd_list = msg.get_unix_fd_list()\n assert fd_list is not None\n args[i] = fd_list.get(args[i])\n\n result = getattr(self, method_name)(*args)\n\n # out_args is at least (signature1). We therefore always wrap the result\n # as a tuple. Refer to https://bugzilla.gnome.org/show_bug.cgi?id=765603\n result = (result, )\n\n out_args = self._method_outargs[method_name]\n if out_args != '()':\n variant = GLib.Variant(out_args, result)\n invocation.return_value(variant)\n else:\n invocation.return_value(None)\n\n\nclass GajimRemote(Server):\n def __init__(self) -> None:\n self._con = Gio.bus_get_sync(Gio.BusType.SESSION, None)\n Gio.bus_own_name_on_connection(self._con, 'org.gajim.Gajim',\n Gio.BusNameOwnerFlags.NONE, None, None)\n super().__init__(self._con, '/org/gajim/dbus/RemoteObject')\n\n app.ged.register_event_handler('presence-received',\n ged.POSTGUI,\n self._on_presence_received)\n app.ged.register_event_handler('gc-message-received',\n ged.POSTGUI,\n self._on_gc_message_received)\n app.ged.register_event_handler('message-received',\n ged.POSTGUI,\n self._on_message_received)\n app.ged.register_event_handler('our-show',\n ged.POSTGUI,\n self._on_our_status)\n app.ged.register_event_handler('message-sent',\n ged.POSTGUI,\n self._on_message_sent)\n\n def _on_message_sent(self, event: events.MessageSent) -> None:\n self.raise_signal('MessageSent', (\n event.account, [event.jid,\n event.message]))\n\n def _on_presence_received(self, event: events.PresenceReceived) -> None:\n self.raise_signal('ContactPresence', (event.account, [\n event.jid,\n event.resource,\n event.show,\n event.status]))\n\n def _on_gc_message_received(self, event: events.GcMessageReceived) -> None:\n self.raise_signal('GCMessage', (\n event.conn.name, [event.fjid,\n event.msgtxt,\n event.properties.timestamp,\n event.delayed,\n event.displaymarking]))\n\n def _on_message_received(self,\n event: events.MessageReceived) -> None:\n\n event_type = event.properties.type.value\n if event.properties.is_muc_pm:\n event_type = 'pm'\n self.raise_signal('NewMessage', (\n event.conn.name, [event.fjid,\n event.msgtxt,\n event.properties.timestamp,\n event_type,\n event.properties.subject,\n event.msg_log_id,\n event.properties.nickname]))\n\n def _on_our_status(self, event: events.ShowChanged) -> None:\n self.raise_signal('AccountPresence', (event.show, event.account))\n\n def raise_signal(self, event_name: str, data: Any) -> None:\n log.info('Send event %s', event_name)\n self._con.emit_signal(None,\n '/org/gajim/dbus/RemoteObject',\n 'org.gajim.dbus.RemoteInterface',\n event_name,\n GLib.Variant.new_tuple(get_dbus_struct(data)))\n\n @staticmethod\n def get_status(account: str) -> str:\n if not account:\n return helpers.get_global_show()\n return helpers.get_client_status(account)\n\n @staticmethod\n def get_status_message(account: str) -> str:\n if not account:\n return str(helpers.get_global_status_message())\n return app.get_client(account).status_message\n\n @staticmethod\n def _send_message(jid: str,\n message: str,\n account: str,\n type_: str) -> bool:\n\n if not app.account_is_available(account):\n return False\n\n client = app.get_client(account)\n contact = client.get_module('Contacts').get_contact(\n jid, groupchat=type_ == 'groupchat')\n\n if type_ == 'groupchat':\n assert isinstance(contact, GroupchatContact)\n if not contact.is_joined:\n return False\n\n assert isinstance(\n contact, BareContact | GroupchatContact | GroupchatParticipant)\n message_ = OutgoingMessage(account=account,\n contact=contact,\n message=message,\n type_=type_)\n\n app.get_client(account).send_message(message_)\n return True\n\n def send_chat_message(self, jid: str, message: str, account: str) -> bool:\n if not jid or not message or not account:\n return False\n\n return self._send_message(jid, message, account, 'chat')\n\n def send_groupchat_message(self,\n jid: str,\n message: str,\n account: str) -> bool:\n\n if not jid or not message or not account:\n return False\n\n return self._send_message(jid, message, account, 'groupchat')\n\n @staticmethod\n def change_status(status: str, message: str, account: str) -> bool:\n '''\n change_status(status, message, account). Account is optional - if not\n specified status is changed for all accounts\n '''\n if status not in ('offline', 'online', 'chat', 'away', 'xa', 'dnd'):\n status = ''\n\n if account:\n if not status:\n if account not in app.settings.get_active_accounts():\n return False\n status = app.get_client(account).status\n\n GLib.idle_add(app.get_client(account).change_status,\n status,\n message)\n else:\n # account not specified, so change the status of all accounts\n for acc in app.settings.get_active_accounts():\n if not app.settings.get_account_setting(\n acc, 'sync_with_global_status'):\n continue\n\n if not status:\n status = app.get_client(acc).status\n\n GLib.idle_add(app.get_client(acc).change_status,\n status,\n message)\n return True\n\n @staticmethod\n def list_accounts() -> list[str]:\n '''\n List register accounts\n '''\n result = app.settings.get_active_accounts()\n result_array: list[str] = []\n if result:\n for account in result:\n result_array.append(account)\n return result_array\n\n @staticmethod\n def account_info(account: str) -> dict[str, str]:\n '''\n Show info on account: resource, jid, nick, prio, message\n '''\n result: dict[str, str] = {}\n if account in app.settings.get_active_accounts():\n # account is valid\n client = app.get_client(account)\n result['status'] = client.status\n result['name'] = client.name\n result['jid'] = app.get_jid_from_account(client.name)\n result['message'] = client.status_message\n result['priority'] = str(client.priority)\n result['resource'] = app.settings.get_account_setting(client.name,\n 'resource')\n return result\n\n def list_contacts(self, account: str) -> list[dict[str, GLib.Variant]]:\n result: list[dict[str, GLib.Variant]] = []\n accounts = app.settings.get_active_accounts()\n if not accounts:\n return result\n if account:\n accounts_to_search = [account]\n else:\n accounts_to_search = accounts\n for acct in accounts_to_search:\n if acct in accounts:\n client = app.get_client(acct)\n for contact in client.get_module('Roster').iter_contacts():\n item = self._contacts_as_dbus_structure(contact)\n if item:\n result.append(item)\n return result\n\n @staticmethod\n def _contacts_as_dbus_structure(contact: BareContact\n ) -> dict[str, GLib.Variant]:\n '''\n Get info from list of Contact objects and create dbus dict\n '''\n\n contact_dict: dict[str, GLib.Variant] = {}\n\n contact_dict['name'] = GLib.Variant('s', contact.name)\n contact_dict['show'] = GLib.Variant('s', contact.show.value)\n contact_dict['jid'] = GLib.Variant('s', str(contact.jid))\n\n resources = GLib.VariantBuilder(GLib.VariantType('a(ss)'))\n for res_contact in contact.iter_resources():\n resource_props = (res_contact.resource,\n res_contact.status)\n resources.add_value(GLib.Variant('(ss)', resource_props))\n contact_dict['resources'] = resources.end()\n\n groups = GLib.VariantBuilder(GLib.VariantType('as'))\n for group in contact.groups:\n groups.add_value(GLib.Variant('s', group))\n contact_dict['groups'] = groups.end()\n return contact_dict\n\n @staticmethod\n def get_unread_msgs_number() -> str:\n return str(app.window.get_total_unread_count())\n\n @staticmethod\n def Introspect() -> str: # pylint: disable=invalid-name\n return INTERFACE_DESC\n","repo_name":"gajim/gajim","sub_path":"gajim/common/dbus/remote_control.py","file_name":"remote_control.py","file_ext":"py","file_size_in_byte":15724,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"22"} +{"seq_id":"43782550697","text":"__author__ = 'Rolando'\n#############################################\n### Perkovic Intro to Python ###\n#### CH 6: Containers and Randomness ####\n##### PG 171 CH 6 #####\n#############################################\n\n# Dictionaries\ndays = {'Mo': 'Monday', # A dictionary is a container (a class, like a list) that stores items\n 'Tu': 'Tuesday', # that are accessible using \"user-specified\" indexes.\n 'We': 'Wednesday',\n 'Th': 'Thursday',\n 'Fr': 'Friday'}\n\nprint(days) # Dictionaries are not ordered the same way they are listed\nprint(days['We']) # Values are accessed by keys, not by index (but still use same operator d[k]\n\ndays['Sa'] = 'Saturday' # Dictionaries are mutable\ndays['Su'] = 'Sunday'\nprint(days['Sa'], days['Su'])\nprint(days)\n\nprint(len(days)) # Operator len() can still be used on dictionaries\n\nprint('Fr' in days) # so can k in d (key in dictionary)\nprint('Ja' in days)\nprint('Feb' not in days) # and k not in d\n\nfavorites = {'Th': 'Thrusday', # other operators are not applicable to dictionaries\n 'Fr': 'Friday', # such as +, *, max(), min(), and sum(), among others\n 'Sa': 'Saturday'}\ndays.pop('Su') # and dictionaries share very few operators with lists as well\ndays.pop('Sa') # such as d.pop(k)\nprint(days)\n\ndays['Sa'] = 'Sat' # another method is d1.update(d2)\nprint(days) # Which updates d1 with d2\ndays.update(favorites) # New entries from d2 are added if not present in d1\nprint(days) # Old keys are kept unless they are present in both\n# values in d1 that have the same key as those on d2 are replaced by d2 values\nprint(days.keys()) # other operators are useful in representing dictionaries as well\nprint(days.values())\nprint(days.items())\n\nfor key in days.keys():\n print(key, end=' ')\nprint()\nfor value in days.values():\n print(value, end=', ')\nprint()\nfor item in days.items():\n print(item, end='; ')\nprint()\n\"\"\" REFERENCE\n d.items() : Returns a view of (key, value) pairs in d as tuples\n d.get(k) : Returns the value of key k, equivalent to d[k]\n d.keys() : Returns a view of keys in d\n d.pop(k) : Removes the (key, value) pair with key k form d and returns the value\n d.update(d2) : Adds the (key, value) pairs of dictionary d2 to d\n d.values() Returns a view of the values of d\n [] = lst\n {:} = dict\n (,) = tuple\n {,} = set\n\"\"\"\n\n###########\n### 6.1 ###\n###########\nprint('\\nPP 6.1')\n\n\ndef birthstate(s):\n \"\"\"\n :param s: Name of president (Key)\n :return: State of birth for s president (Value)\n \"\"\"\n presidents = {'Barrack Hussein Obama II': 'Hawaii',\n 'George Walker Bush': 'Connecticut',\n 'William Jefferson Clinton': 'Arkansas',\n 'George Herbert Walker Bush': 'Massachusetts',\n 'Ronald Wilson Reagan': 'Illinois',\n 'James Earl Carter, Jr': 'Georgia'}\n return presidents[s]\n\n\nprint(birthstate('Ronald Wilson Reagan'))\n\n\n###########\n### 6.2 ###\n###########\nprint('\\nPP 6.2')\n\nrphonebook = {'(123)456-78-90': ['Anna', 'Karenina'],\n '(901)234-56-78': ['Yu', 'Tsun'],\n '(321)908-76-54': ['Hans', 'Castorp']}\n\n\ndef rlookup(d):\n \"\"\"\n :param d: Dictionary (Phone-book) of Phone-numbers and Individuals\n :input number: Phone number (Key)\n :return: Name of individual (Value)\n \"\"\"\n number = input('Enter a number in the format (xxx)xxx-xx-xx: ')\n if number not in d:\n return 'Incorrect number format, or the number you entered is not in use.'\n return d[number]\n\n\n# print(rlookup(rphonebook))\n\n\ndef complete(abbreviation):\n \"\"\"\n :param abbreviation: abbreviation which actually refers to key in day dictionary (Key)\n :return: day of the week corresponding to abbreviation (Value)\n \"\"\"\n days2 = {'Mo': 'Monday',\n 'Tu': 'Tuesday',\n 'We': 'Wednesday',\n 'Th': 'Thursday',\n 'Fr': 'Friday',\n 'Sa': 'Saturday',\n 'Su': 'Sunday'}\n if abbreviation not in days2:\n return False\n return days2[abbreviation]\n\n\nprint(complete('Mo'))\nprint(complete('Su'))\nprint(complete('Ja'))\n\n\ndef frequency(itemlist):\n \"\"\"\n :param itemlist: Takes list of items (list)\n :return: frequency of items on list (dict)\n \"\"\"\n counters = {} # Initialize dictionary of counters\n for item1 in itemlist:\n if item1 in counters: # Counter for item already exits\n counters[item1] += 1 # so increment it\n else: # If not, Counter for item is created\n counters[item1] = 1 # and initialized to 1\n return counters\n\n\nstudents = ['Cindy', 'John', 'Cindy', 'Adam', 'Adam', 'Jimmy', 'Joan', 'Cindy', 'Joan']\nprint(frequency(students))\n\n#################################\n### 6.3 does not ask for code ###\n#################################\nprint('\\nPP 6.3')\nprint('No code needed')\n\n###########\n### 6.4 ###\n###########\nprint('\\nPP 6.4')\n\n\ndef wordcount(s):\n \"\"\"\n :param s: (string)\n :return: frequency of each word on the text (dict)\n \"\"\"\n counters = {}\n wordlist = s.split()\n for word in wordlist:\n if word in counters:\n counters[word] += 1\n else:\n counters[word] = 1\n for word in counters:\n if counters[word] == 1:\n print('{:8} appears {} time.'.format(word, counters[word]))\n else:\n print('{:8} appears {} times.'.format(word, counters[word]))\n return ''\n\n\ntext = 'all animals are equal but some animals are more equal than others'\nprint(wordcount(text))\n\n###########\n### 6.5 ###\n###########\nprint('PP 6.5')\n\n# Tuples\nphonebook = {('Anna', 'Karenina'): '(123)456-78-90', # In the function rlookup(), if we switch up keys and values\n ('Yu', 'Tsun'): '(901)234-56-78', # an error occurs because the names are list objects\n ('Hans', 'Castorp'): '(321)908-76-54'} # therefore a new class must b used called the tuple\nprint(phonebook) # tuples behave as lists in almost every way except they are\n# immutable, and use () instead of []\n\n\ndef lookup(d):\n \"\"\"\n :param d: Dictionary (Phone-book) of Phone-numbers and Individuals\n :return: Phone Number (Value)\n \"\"\"\n first = input('Enter the first name: ')\n last = input('Enter the last name: ')\n person = (first, last)\n if person not in d:\n return 'Name could not be found'\n return d[person]\n\n#print(lookup(phonebook))\n\n# Sets\nphonebook1 = {'123-45-67', '234-56-78', '345-67-89'}\nprint(phonebook1) # Sets have the same properties as mathematical sets\nphonebook1 = {'123-45-67', '234-56-78', '345-67-89', # Duplicates are ignored\n '123-45-67', '234-56-78'} # Which make sets useful for removing duplicates\nprint(phonebook1) # But as with dictionaries, they are out of order as well\n\nphonebook2 = set() # Empty sets cannot be represented with braces since those\nprint(type(phonebook2)) # are use by dictionaries, so it must be called implicitly\n\nprint('123-45-67' in phonebook1) # set class supports operators that correspond to ususal\nprint('456-78-90' in phonebook1) # mathematical types and operations, as well as a few that\nprint('456-78-90' not in phonebook1) # can be used in lst, str, and dict\n\nprint(len(phonebook1))\n\nphonebook3 = {'345-67-89', '456-78-90'} # Comparison operators are supported for sets\nprint(phonebook1 == phonebook3)\nprint(phonebook1 != phonebook3)\n\nprint({'123-45-67', '345-67-89'} <= phonebook1)\nprint(phonebook1 < phonebook1)\n# Mathematical set operators can be used too such as\nprint(phonebook1 | phonebook3) # Union\nprint(phonebook1 & phonebook3) # Intersection\nprint(phonebook1 - phonebook3) # difference between sets\nprint(phonebook1 ^ phonebook3) # symmetrical difference\n\nphonebook3.add('123-45-67') # sets support their own methods as well\nprint(phonebook3)\nphonebook3.remove('123-45-67')\nprint(phonebook3)\nphonebook3.clear()\nprint(phonebook3)\n\n''' REFERENCE\n x in s : True if x in ser s, else False\n x not in s : False if x is in set s, else True\n len(s) : Returns size of set x\n s == t : True if set s and t contain same elements, else False\n s != t : True if set s and t do not contain the same elements, else False\n s <= t : True if ever element of set s is in set t, else False\n s < t : True if s <= t and s != t\n s | t : Returns the union of sets s and t (both sets combined into a single set)\n s & t : Returns the intersection of sets s and t (all elements that are in both sets)\n s - t : Returns the difference between sets s and t (set s with elements from set t removed)\n s ^ t : Returns the symmetric difference between sets s and t (Elements not shared by both sets)\n\n s.add(v) : adds v to set s\n s.remove(v) : removes v from set s\n s.clear() : removes all elements from set s and makes it an empty set\n'''\n###########\n### 6.6 ###\n###########\nprint('\\nPP 6.6')\n\n\ndef sync(lst):\n \"\"\"\n :param lst: list of phone-books (lst)\n :return: phone-book containing the union of all the phone-books (set)\n \"\"\"\n pbl = set()\n for pb in lst:\n pbl = pbl | pb\n return pbl\n\n\nphonebook4 = {'234-56-78', '456-78-90'}\nphonebooks = [phonebook1, phonebook2, phonebook3, phonebook4]\nprint(sync(phonebooks))\n\n\n###########\n### 6.7 ###\n###########\nprint('\\nPP 6.7')\n\n\ndef encoding(s):\n \"\"\"\n :param s: (str)\n :return: prints ASCII C code in decimal, hex, and binary notation for every character\n \"\"\"\n print('Char Decimal Hex Binary') # Column headings\n for c in s:\n code = ord(c) # Compute ASCII code\n print(' {} {:7} {:4x} {:9b}'.format(c, code, code, code))\n\n\nprint(encoding('dad'))\n\n###########\n### 6.8 ###\n###########\nprint('\\nPP 6.8')\n\n\ndef char(low, high):\n \"\"\"\n :param low: lowest ASCII value\n :param high: highest ASCII value\n :return: prints characters corresponding to ASCII decimal codes i for all values of i from low up to and including\n high\n \"\"\"\n for i in range(low, high + 1):\n print('{} : {}'.format(i, chr(i)))\n\n\nprint(char(62, 67))\n\nimport random # Python has an RNG (random number generator)\n\nprint(random.randrange(1, 7)) # randrange() allows you to RNG within a range of integers\nprint(random.randrange(1, 7))\nprint(random.randrange(1, 7))\nprint(random.randrange(1, 7))\nprint(random.randrange(1, 7))\n\nprint(random.uniform(0, 1)) # uniform() allows you to RNG within a range of floats\nprint(random.uniform(0, 1))\n\nrlst = [1, 2, 3, 4, 5]\nprint(random.shuffle(rlst)) # shuffle() lets you shuffle the order of a sequence\nprint(random.shuffle(rlst))\n\nprint(random.choice(rlst)) # choice() allows you to choose an item at random from a sequence\nprint(random.choice(rlst))\n\nprint(random.sample(rlst, 2)) # sample() allows you to choose multiple items from a container at random\nprint(random.sample(rlst, 3))\nprint(random.sample(rlst, 3))\n\n###########\n### 6.9 ###\n###########\nprint('\\nPP 6.9')\n\n\ndef guess(n):\n answer = random.randrange(1, n)\n while True:\n input1 = eval(input('Enter your guess: '))\n if input1 == answer:\n return 'You got it!'\n elif input1 == 'I quit':\n return 'Better luck next time!'\n elif input1 < answer:\n print('Too low.')\n else:\n print('Too high.')\n\n#print(guess(100))\n\n############\n### 6.10 ###\n############\nprint('\\nPP 6.10')\n\n\ndef approxpi(n):\n count = 0\n for i in range(n):\n x = random.uniform(-1, 1)\n y = random.uniform(-1, 1)\n if x ** 2 + y ** 2 <= 1:\n count += 1\n return 4 * count / n\n\n\nprint(approxpi(1000))\nprint(approxpi(10000))\n\n#####################################################################\n### There is a case study file for chapter 6 - \"Percovic Ch 06 CS ###\n#####################################################################\n\n############\n### 6.11 ###\n############\nprint('\\nPP 6.11')\n\n\ndef easycrypto(s):\n \"\"\"\n :param s: (str)\n :return: Every character at an odd position i in the alphabet will be encrypted with the character at position\n i + 1, and every character at an even position i will be encrypted with the character at position i - 1.\n \"\"\"\n alpha = 'abcdefghijklmnopqrstuvwxyz'\n alpha2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n alpha3 = alpha + alpha2\n al3 = list(alpha3)\n\n ad = {}\n nd = {}\n count = 0\n for letter in al3:\n count += 1\n ad[letter] = count\n count = 0\n for letter in al3:\n count += 1\n nd[count] = letter\n sl = list(s)\n for i in range(len(sl)):\n if sl[i] in al3:\n ch = sl[i]\n num = ad[ch]\n if num % 2 == 0:\n sl[i] = nd[num - 1]\n else:\n sl[i] = nd[num + 1]\n return ''.join(sl)\n\n\nprint(easycrypto('abc'))\nprint(easycrypto('ZOO'))\n\n############\n### 6.12 ###\n############\nprint('\\nPP 6.12')\n\n\ndef letter2number(s):\n \"\"\" Takes a letter grade (A, B, C, D, F, with + or -) and returns corresponding number grade. The numeric values\n for A, B, C, D, and F are 4, 3, 2, 1, 0, a + increases number grade value by 0.3 and a - decreases it by 0.3\n \"\"\"\n grades = {'A': 4,\n 'A-': 3.7,\n 'B+': 3.3,\n 'B': 3.0,\n 'B-': 2.7,\n 'C+': 2.3,\n 'C': 2.0,\n 'C-': 1.7,\n 'D': 1.0,\n 'F': 0}\n if s in grades:\n return grades[s]\n else:\n return 0\n\n\nprint(letter2number('A'))\nprint(letter2number('B+'))\nprint(letter2number('F'))\n\n#####################\n### 6.13 and 6.14 ###\n#####################\nprint('\\nPP 6.13')\n\nagencies = {'CCC': 'Civilian Conservation Corps',\n 'FCC': 'Federal Communications Board',\n 'FDIC': 'Federal Deposit Insurance Corporation',\n 'SSB': 'Social Security Board',\n 'WPA': 'Works Progress Administration'}\nprint(agencies)\nacronyms = agencies.keys()\nprint(acronyms)\n# A\nagencies['SEC'] = 'Securities and Exchange Commission'\nprint(agencies)\n# B\nagencies['SSB'] = 'Social Security Administration'\nprint(agencies)\n# C\nagencies.pop('CCC')\nagencies.pop('WPA')\nprint(agencies)\n\n############\n### 6.14 ###\n############\nprint('\\nPP 6.14')\n\nprint(acronyms)\n\n############\n### 6.15 ###\n############\nprint('\\nPP 6.15')\nphonebook5 = {('Anna', 'Karenina'): ['(123)456-78-90', '(777)777-77-77'],\n ('Yu', 'Tsun'): '(901)234-56-78',\n ('Hans', 'Castorp'): '(321)908-76-54'}\nprint(phonebook5)\n\n\n#print(lookup(phonebook5))\n\n############\n### 6.16 ###\n############\nprint('\\nPP 6.16')\n\nl3 = []\nm3 = 3\ncount3 = 1\nwhile m3 < 100:\n count3 += 1\n l3.append(m3)\n m3 = 3 * count3\nmult3 = set(l3)\nprint(mult3)\n\nl5 = []\nm5 = 5\ncount5 = 1\nwhile m5 < 100:\n count5 += 1\n l5.append(m5)\n m5 = 5 * count5\nmult5 = set(l5)\nprint(mult5)\n\nl7 = []\nm7 = 7\ncount7 = 1\nwhile m7 < 100:\n count7 += 1\n l7.append(m7)\n m7 = 7 * count7\nmult7 = set(l7)\nprint(mult7)\n\nmulta = mult3 | mult5 | mult7\nprint(multa)\n\n# A\n\n\ndef m35():\n mult35 = []\n for i in multa:\n if i % 35 == 0:\n mult35.append(i)\n return set(mult35)\n\n\nprint(m35())\n# B\n\n\ndef m105():\n mult105 = []\n for i in multa:\n if i % 105 == 0:\n mult105.append(i)\n return set(mult105)\n\n\nprint(m105())\n# C\n\n\ndef m3or7():\n mult3or7 = mult3 | mult7\n return mult3or7\n\n\nprint(m3or7())\n# D\n\n\ndef m3or7nb():\n mult3or7nb = mult3 ^ mult7\n return mult3or7nb\n\n\nprint(m3or7nb())\n# E\n\n\ndef m7not3():\n mult7not3 = mult7 - mult3\n return mult7not3\n\n\nprint(m7not3())\n\n############\n### 6.17 ###\n############\nprint('\\nPP 6.17')\n\n\ndef hexascii():\n \"\"\"\n :return: prints the correspondence between the lowercase characters in the alphabet with their hexdecimal\n representation of their ASCII code. (using format str method)\n \"\"\"\n alpha = 'abcdefghijklmnopqrstuvwxyz'\n al = list(alpha)\n for letter in al:\n code = ord(letter)\n print('{}:{:2x}'.format(letter, code), end=' ')\n\n\nprint(hexascii())\n\n############\n### 6.18 ###\n############\nprint('\\nPP.6.18')\n\n\ndef coin():\n \"\"\"\n :return: Flips a coin. Returns either heads or tails, with equal probability\n \"\"\"\n answer = random.randrange(0, 2)\n if answer == 0:\n return 'Heads'\n else:\n return 'Tails'\n\n\nprint(coin())\n\n############\n### 6.19 ###\n############\nprint('\\nPP 6,19')\n\narabic = 'اسمي ادا'\njapanese = '私の名前はエイダです'\nserbian = 'Моје име је Ада'\n\n\ndef ut(s):\n \"\"\"\n :param s: untranslated string\n :return: unicode code point for each char in str\n \"\"\"\n for i in s:\n code = ord(i)\n print('{}{}'.format(i, code), end=' ')\n\n\nprint(ut(arabic))\nprint(ut(japanese))\nprint(ut(serbian))\n\n############\n### 6.20 ###\n############\nprint('\\nPP 6.20')\n\nphonebook7 = {'Smith, Jane': '123-45-67',\n 'Doe, John': '987-65-43',\n 'Baker, David': '567-89-01'}\n\n\ndef reverse(d):\n \"\"\"\n :param d: dictionary\n :return: reversed keys - values mapping of dict to values - keys\n \"\"\"\n keys = []\n values = []\n d1 = {}\n for key1 in d.keys():\n keys.append(key1)\n for value1 in d.values():\n values.append(value1)\n for i in range(len(keys)):\n d2 = {values[i]: keys[i]}\n d1.update(d2)\n return d1\n\n\nprint(reverse(phonebook7))\n\n############\n### 6.21 ###\n############\nprint('\\nPP 6.21')\n\n\ndef ticker(file):\n \"\"\"\n :param file: file\n :input: Company Name\n :return: Company Abbreviation\n \"\"\"\n infile = open(file)\n content = infile.read()\n infile.close()\n content = content.replace('\\t', '')\n content = content.replace(' ', '')\n content = content.strip()\n content = content.split('\\n')\n company = []\n abrv = []\n d1 = {}\n for i in range(0, len(content), 2):\n company.append(content[i])\n for i in range(1, len(content), 2):\n abrv.append(content[i])\n for i in range(len(abrv)):\n d2 = {company[i]: abrv[i]}\n d1.update(d2)\n name = input('Enter Company name: ')\n if name not in company:\n return \"Company not in file.\"\n return d1[name]\n\n# print(ticker('nasdaq.txt'))\n\n############\n### 6.22 ###\n############\nprint('\\nPP 6.22')\n\n\ndef mirror(s):\n \"\"\"\n :param s: string that has mirror image possible in alphabet\n :return: returns mirror image, else 'INVALID'\n \"\"\"\n nomirror = 'acefghjklmnrstuyzBCDEFGJKLNPQRSZ'\n for i in s:\n if i in nomirror:\n return 'INVALID'\n table = str.maketrans('bdpq', 'dbqp')\n txt = s.translate(table)\n reversetxt = txt[:: -1]\n return reversetxt\n\n\nprint(mirror('vow'))\nprint(mirror('wood'))\nprint(mirror('bed'))\n\n############\n### 6.23 ###\n############\nprint('\\nPP 6.23')\n\n\ndef scarydict(file):\n \"\"\"\n :param file: txt file\n :return: prints each word, line by line, removing duplicates, punctuation, and any word with 2 or less characters\n \"\"\"\n infile = open(file)\n content = infile.read()\n infile.close()\n table = str.maketrans('''1234567890.,,, ;,`()-:!?\"\\n'[]_''', ' ')\n content = content.translate(table)\n content = content.lower()\n contentlist = content.split()\n contentlist = list(set(contentlist))\n contentlist.sort()\n for i in contentlist:\n if len(i) < 3:\n contentlist.remove(i)\n else:\n print(i)\n return 'DONE'\n\n\nprint(scarydict('frankenstein.txt'))\n\n############\n### 6.24 ###\n############\nprint('\\nPP 6.24')\n\n\ndef names():\n \"\"\"\n :input: Student name\n :return: while input isn't empty str, continue to ask for name of student; else print for every name the number of\n students with that name\n \"\"\"\n studentlist = []\n input1 = input('Enter first student name: ')\n while input1 != '':\n studentlist.append(input1)\n input1 = input('Enter next name: ')\n counters = {} # Initialize dictionary of counters\n for item1 in studentlist:\n if item1 in counters: # Counter for item already exits\n counters[item1] += 1 # so increment it\n else: # If not, Counter for item is created\n counters[item1] = 1 # and initialized to 1\n studentlist = list(set(studentlist))\n studentlist.sort()\n for item1 in studentlist:\n if counters[item1] == 1:\n print('There is {} student named {}'.format(counters[item1], item1))\n else:\n print('There are {} students named {}'.format(counters[item1], item1))\n\n#print(names())\n\n############\n### 6.25 ###\n############\nprint('\\nPP 6.25')\n\n\ndef different(t):\n \"\"\"\n :param t: table (2D list)\n :return: the number of unique entries in each\n \"\"\"\n counters = {}\n for i in t:\n for j in i:\n if j in counters:\n counters[j] += 1\n else:\n counters[j] = 1\n return len(counters)\n\n\nt1 = [[1, 0, 1], [0, 1, 0]]\nt2 = [[32, 12, 52, 63], [32, 64, 67, 52], [64, 64, 17, 34], [34, 17, 76, 98]]\n\nprint(different(t1))\nprint(different(t2))\n\n############\n### 6.26 ###\n############\nprint('\\nPP 6.26')\n\n\ndef week():\n \"\"\"\n :input: 2 letter abbreviation of day of the week\n :return: Name of day represented by input abbreviation\n \"\"\"\n days1 = {'Mo': 'Monday',\n 'Tu': 'Tuesday',\n 'We': 'Wednesday',\n 'Th': 'Thursday',\n 'Fr': 'Friday',\n 'Sa': 'Saturday',\n 'Su': 'Sunday'}\n while True:\n input1 = input('Enter day abbreviation: ')\n if input1 in days1:\n print(days1[input1])\n else:\n break\n return 'DONE'\n\n#print(week())\n\n############\n### 6.27 ###\n############\nprint('\\nPP 6.27')\n\n\ndef index(file, lst):\n \"\"\"\n :param file: txt file\n :param lst: list of str\n :return: the lines in 'file' where each item in 'lst' appears\n \"\"\"\n infile = open(file)\n content = infile.read()\n infile.close()\n table = str.maketrans('''.,,, ;,`()-:!?\"'[]_''', ' ')\n content = content.translate(table)\n content = content.lower()\n contentlist = content.split('\\n')\n words = sorted(lst)\n wdic = {}\n for word in words:\n counter = 0\n for line in contentlist:\n counter += 1\n if word in line:\n if word in wdic:\n wdic[word].append(counter)\n else:\n wdic[word] = [counter]\n for word in words:\n print('{:10}{}'.format(word, ', '.join(str(x) for x in wdic[word])))\n\n\nprint(index('raven.txt', ['raven', 'mortal', 'dying', 'ghost', 'ghastly', 'evil', 'demon']))\n\n############\n### 6.28 ###\n############\nprint('\\nPP 6.28')\n\n\ndef translate(d):\n \"\"\"\n :param d: translation dictionary (e.g. English - Spanish)\n :return: translation from one language (keys) to the other (values)\n \"\"\"\n phrase = input('Please enter your phrase: ')\n wlist = phrase.split()\n alist = []\n for word in wlist:\n if word in d:\n alist.append(d[word])\n else:\n alist.append('____')\n return ' '.join(alist)\n\n\ndt = {'I': 'Me',\n 'like': 'gusta',\n 'boobs': 'chichis'}\n\n#print(translate(dt))\n\n############\n### 6.29 ###\n############\nprint('\\nPP 6.29')\n\n\ndef networks(n, lst):\n \"\"\"\n :param n: number of networks\n :param lst: list of friend tuples (who is friends with who)\n :return:\n \"\"\"\n groups = [] # Empty group list\n for i in range(n): # Make a group for every person (in range of n)\n groups.append({i})\n print(groups)\n\n for pair in lst: # For each tuple pair\n union = groups[pair[0]] | groups[pair[1]] # Make union (variable) the union of the pair (tuple)\n for p in union: # for each person (value) in the union (set), the groups correspond\n groups[p] = union # to each person are\n print(groups)\n\n sets = set()\n for g in groups:\n sets.add(tuple(g))\n print(sets)\n\n i = 0\n for s in sets:\n print('Network {} is {}'.format(i, set(s)))\n i += 1\n\n\nprint(networks(5, [(0, 1), (1, 2), (3, 4)]))\n\n############\n### 6.30 ###\n############\nprint('\\nPP 6.30')\n\n\ndef simul(n):\n \"\"\"\n :param n: number of times game of RPS is played\n :return: simulated result of total games\n Rock = 0, Paper = 1, Scissors = 2\n \"\"\"\n rounds = 0\n winner = 0\n for i in range(n):\n rounds += 1\n p1 = random.randrange(0, 3)\n p2 = random.randrange(0, 3)\n if p1 == p2:\n print('Round {}: Tie'.format(rounds))\n elif p1 == 0 and p2 == 1 or p1 == 1 and p2 == 2 or p1 == 2 and p2 == 0:\n winner += 1\n print('Round {}: Player 2 Wins'.format(rounds))\n else:\n winner -= 1\n print('Round {}: Player 1 Wins'.format(rounds))\n if winner == 0:\n return 'Overall result: Tie'\n elif winner > 0:\n return 'Overall result: Player 2'\n else:\n return 'Overall result: Player 1'\n\n\nprint(simul(1))\nprint(simul(1))\nprint(simul(10))\n\n############\n### 6.31 ###\n############\nprint('\\nPP 6.31')\n\n\ndef craps():\n \"\"\"\n :return: Simulates a game of craps and returns 1 if player won and 0 if they lost\n \"\"\"\n # first roll\n rounds = 1\n d1 = random.randrange(1, 7)\n d2 = random.randrange(1, 7)\n result1 = d1 + d2\n # print('Round {} roll: {}'.format(rounds, result1))\n if result1 == 7 or result1 == 11:\n return 1\n if result1 == 2 or result1 == 3 or result1 == 12:\n return 0\n\n # consecutive rolls\n result2 = 0\n while result2 != 7:\n d1 = random.randrange(1, 7)\n d2 = random.randrange(1, 7)\n result2 = d1 + d2\n rounds += 1\n # print('Round {} roll: {}'.format(rounds, result2))\n if result2 == result1:\n return 1\n return 0\n\n\nprint(craps())\n\n\ndef testcraps(n):\n counter = 0\n for i in range(n):\n counter += craps()\n return counter / n\n\n\nprint(testcraps(100))\nprint(testcraps(10000))\n\n############\n### 6.32 ###\n############\nprint('\\n6.32')\n\n\ndef manhattan(x, y):\n res = []\n for i in range(x):\n res.append([])\n for i in res:\n for j in range(y):\n i.append(0)\n position = (x // 2 + 1, y // 2 + 1)\n z = position[0]\n v = position[1]\n\n while z != -1 and z != x and v != -1 and v != y:\n res[z][v] += 1\n direction = random.randrange(1, 5)\n if direction == 1:\n v += 1\n elif direction == 2:\n z += 1\n elif direction == 3:\n v -= 1\n else:\n z -= 1\n for i in res:\n print(i)\n\n\nprint(manhattan(5, 11))\n\n############\n### 6.33 ###\n############\nprint('\\n6.33')\n\n# import random\n\n\ndef shuffledeck():\n \"\"\"\n :return: shuffled deck\n \"\"\"\n suits = {'\\u2660', '\\u2661', '\\u2662', '\\u2663'} # suits is a set od 4 unicode symbols: black spade and club,\n ranks = {'2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A'} # and white diamond and heart\n deck = []\n\n for suit in suits: # Create a deck out of 52 cards\n for rank in ranks: # card is the concatenation\n deck.append(rank + ' ' + suit) # of suit and rank\n\n random.shuffle(deck) # Shuffle the deck and return it\n return deck\n\n\ndef dealcard(deck, participant):\n \"\"\"\n :param deck: deck from chuffledeck() (lst)\n :param participant: the hand of the participant (lst)\n :return: card dealt (added) to participant (a single card is dealt from the deck to the participant\n \"\"\"\n card = deck.pop()\n participant.append(card)\n return card\n\n\ndef total(hand):\n \"\"\"\n :param hand: the hand of participant (lst)\n :return: value of hand\n \"\"\"\n values = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,\n '9': 9, '1': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}\n result = values[hand[0]]\n return result\n\n\ndef war():\n \"\"\"\n :return: simulates a game of war and returns\n \"\"\"\n # Step 1: Shuffle deck\n deck = shuffledeck()\n\n # Step 2: Split decks\n deck1 = deck[:int(len(deck) / 2)]\n deck2 = deck[int(len(deck) / 2):]\n hand1 = []\n hand2 = []\n\n # Step 3: Each player reveals top card of deck, player with higher card takes both cards and adds to his deck\n rounds = 0\n wars = 0\n while deck1 != [] and deck2 != []:\n rounds += 1\n winner = 0\n # Shuffle decks at start of each round\n random.shuffle(deck1)\n random.shuffle(deck2)\n\n play1 = deck1.pop()\n play2 = deck2.pop()\n\n # if cards have same value: war occurs\n if total(play1) == total(play2):\n wars += 1\n hand1.extend([play1] + deck1[-3:])\n deck1 = deck1[:-3]\n deck1.append(hand1.pop())\n\n hand2.extend([play2] + deck2[-3:])\n deck2 = deck2[:-3]\n deck.append(hand2.pop())\n winner = '\\tWar'\n\n elif total(play1) > total(play2):\n deck1 = [play1, play2] + hand1 + hand2 + deck1\n hand1 = []\n hand2 = []\n winner = '\\t1'\n\n elif total(play2) > total(play1):\n deck2 = [play2, play1] + hand2 + hand1 + deck2\n hand1 = []\n hand2 = []\n winner = '\\t2'\n\n print('Round: {}, Player 1 card: {}, Player 2 card: {}'.format(rounds,\n play1, play2))\n print(winner)\n if deck2 is []:\n print('PLAYER 1 WINS WAR')\n else:\n print('PLAYER 2 WINS WAR')\n\n result = 'Total rounds: ' + str(rounds), 'Total wars: ' + str(wars)\n print(result)\n return rounds, wars\n\n\nprint(war())\n\n\ndef warstats(n):\n \"\"\"\n :param n: The amount of times War is played\n :return: The average\n \"\"\"\n trounds = 0\n twars = 0\n for i in range(n):\n wart = war()\n trounds += wart[0]\n twars += wart[1]\n arounds = trounds / n\n awars = twars / n\n return 'Average rounds: ' + str(arounds), 'Average wars: ' + str(awars)\n\n\nprint(warstats(10))\n\n############\n### 6.34 ###\n############\nprint('\\nPP 6.34')\n\n\ndef game(n):\n \"\"\"\n :param n: Number of addition games\n :return: the amount of correct answers out of total\n \"\"\"\n correct_answers = 0\n for i in range(n):\n a = random.randrange(0, 10)\n b = random.randrange(0, 10)\n answer = a + b\n print('{} + {} ='.format(a, b))\n kid = eval(input('Enter answer: '))\n if kid == answer:\n correct_answers += 1\n print('Correct.')\n else:\n print('Incorrect.')\n return 'You got {} correct answers out of {}'.format(correct_answers, n)\n\n# print(game(3))\n\n############\n### 6.35 ###\n############\nprint('\\nPP 6.35')\n\n\ndef caesar(n, file):\n \"\"\"\n :param n: key for caesar cipher\n :param file: text to be encrypted\n :return: encrypted text\n \"\"\"\n infile = open(file)\n content = infile.read()\n infile.close()\n cipher = ''\n\n for i in content:\n c = ord(i)\n if 64 < c < 91 - n or 96 < c < 123 - n:\n cn = ord(i) + n\n cipher += chr(cn)\n elif 90 - n < c < 91 + n or 122 - n < c < 123 + n:\n cn = ord(i) + n - 26\n cipher += chr(cn)\n else:\n cipher += chr(c)\n\n return cipher\n\n\nprint(caesar(3, 'clear.txt'))\n\n############\n### 6.36 ###\n############\nprint('\\nPP 6.36')","repo_name":"BionStt/L_Python_Perkovic_Introduction-to-Computing-Using-Python_Edition-1","sub_path":"ch06/Perkovic_Ch_06.py","file_name":"Perkovic_Ch_06.py","file_ext":"py","file_size_in_byte":31402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12541923744","text":"import sys \n\ndef solve():\n tests = int(input())\n for _ in range(tests):\n num_vertices = int(input())\n # the following list might be too long, use sys.stdin.readline() instead\n parents = list(map(int, sys.stdin.readline().split()))\n\n # build the tree and find the root\n tree = {}\n root = None\n non_leafs = set()\n for i in range(1, num_vertices + 1):\n if parents[i - 1] == i:\n root = i\n else:\n non_leafs.add(parents[i - 1])\n if parents[i - 1] not in tree:\n tree[parents[i - 1]] = []\n tree[parents[i - 1]].append(i)\n\n # count all leafs\n print(num_vertices - len(non_leafs))\n\n # destroy parents list\n del parents\n\n # enumerate the paths\n stack = [(root, [root])]\n\n while stack:\n node, path = stack.pop()\n\n if node not in tree:\n print(len(path))\n print(*path)\n\n else:\n for i, child in enumerate(tree[node]):\n if i == 0:\n stack.append((child, path + [child]))\n else:\n stack.append((child, [child]))\n\n print()\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"ffekirnew/a2sv-contests","sub_path":"contest-27/c-vertical_paths.py","file_name":"c-vertical_paths.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20204310645","text":"# coding: utf-8\nimport select, threading\nfrom pylibcurl import Multi, Curl, const\nfrom Queue import Queue, Empty\n\ntry:\n event_epoll2curl = {\n select.EPOLLIN: const.CURL_CSELECT_IN,\n select.EPOLLOUT: const.CURL_CSELECT_OUT,\n select.EPOLLERR: const.CURL_CSELECT_ERR,\n }\n\n event_curl2epoll = {\n const.CURL_POLL_IN: select.EPOLLIN,\n const.CURL_POLL_OUT: select.EPOLLOUT,\n const.CURL_POLL_INOUT: select.EPOLLIN | select.EPOLLOUT,\n }\n\n event_epoll_verbose = {\n select.EPOLLIN: 'READ (Available for read)',\n select.EPOLLOUT: 'WRITE (Available for write)',\n select.EPOLLPRI: 'Urgent data for read',\n select.EPOLLERR: 'ERROR (Error condition happened on the assoc. fd)',\n select.EPOLLHUP: 'Hang up happened on the assoc. fd',\n select.EPOLLET: 'Set Edge Trigger behavior, the default is Level Trigger behavior',\n }\n\n event_curl_verbose = {\n const.CURL_POLL_NONE: 'register, not interested in readiness (yet)',\n const.CURL_POLL_IN: 'READ (register, interested in read readiness)',\n const.CURL_POLL_OUT: 'WRITE (register, interested in write readiness)',\n const.CURL_POLL_INOUT: 'READ/WRITE (register, interested in both read and write readiness)',\n const.CURL_POLL_REMOVE: 'unregister',\n }\n\n event_poll2curl = {\n select.POLLIN: const.CURL_CSELECT_IN,\n select.POLLOUT: const.CURL_CSELECT_OUT,\n select.POLLERR: const.CURL_CSELECT_ERR,\n }\n\n event_curl2poll = {\n const.CURL_POLL_IN: select.POLLIN,\n const.CURL_POLL_OUT: select.POLLOUT,\n const.CURL_POLL_INOUT: select.POLLIN | select.POLLOUT,\n }\nexcept AttributeError:\n pass\n\n\nclass Pool(threading.Thread):\n \"\"\"\n Pool work as like Thread and Queue\n \"\"\"\n default_curl_settings = dict(\n useragent='Mozilla 6', \n autoreferer=1, \n followlocation=1,\n maxredirs=20, \n encoding='',\n )\n\n def __init__(self, maxconnects, qsize=0, **kwargs):\n self._multi = Multi(**kwargs)\n self._multi.maxconnects = maxconnects\n # TODO: fix bug with pipelining Pool not work\n self._multi.pipelining = 0\n\n\n self._queue = Queue(qsize)\n self._size = maxconnects\n\n self._data = {}\n \n # registry curl, sockets, event bitmask\n self._registry = {} # key is curl, value is (socket, event bitmask)\n\n\n def socket_cb(curl, socket, event):\n self._registry[curl] = (socket, event)\n \n # TODO: timer\n def timer_cb(m, timeout):\n if timeout == 0:\n code, running = m.socket_action(const.CURL_SOCKET_TIMEOUT, 0)\n \n self._multi.socketfunction = socket_cb\n self._multi.timerfunction = timer_cb\n\n super(Pool, self).__init__()\n\n def _do_socket_action(self, socket, event=0):\n code = const.CURLM_CALL_MULTI_SOCKET\n while code == const.CURLM_CALL_MULTI_SOCKET:\n code, running = self._multi.socket_action(socket, event)\n \n return running\n\n def _do_remove(self):\n # remove complete curls and sockets\n complete = [(curl, item[0]) for curl, item in self._registry.items() if item[1] == const.CURL_POLL_REMOVE]\n for curl, sock in complete:\n self._multi.remove_handle(curl)\n \n # call callback\n url, header, body, callback = self._data[curl]\n body = ''.join(body)\n\n if self._Thread__started.is_set():\n del self._data[curl]\n else:\n self._data[curl][2] = body\n\n if callback:\n callback(curl, url, header, body)\n \n # task done\n self._queue.task_done()\n\n removed = len(complete) > 0\n if removed:\n for curl, socket in complete:\n del self._registry[curl]\n\n return removed\n\n \n def _do_add(self):\n start_event = self._Thread__started\n added = False\n\n while len(self._multi._handles) < self._size:\n block = False\n \n # thread mode and daemon and empty\n if self.isDaemon() and start_event.is_set() and not self._multi._handles: \n block = True\n \n try:\n data = self._queue.get(block=block)\n except Empty:\n break\n else:\n added = True\n url, kwargs, callback = data\n c = self._create_curl(url, callback, **kwargs)\n self._multi.add_handle(c)\n\n\n if added:\n code = const.CURLM_CALL_MULTI_SOCKET\n while code:\n code, running = self._multi.socket_action(const.CURL_SOCKET_TIMEOUT, 0)\n\n return added\n\n def _create_curl(self, url, callback, **kwargs):\n settings = self.default_curl_settings.copy()\n settings.update(kwargs)\n\n header = []\n body = []\n\n def headerfunction(v):\n v = v.strip()\n if v:\n header.append(v)\n\n def writefunction(v):\n body.append(v)\n\n settings['headerfunction'] = headerfunction\n settings['writefunction'] = writefunction\n\n \n c = Curl(url, **settings)\n self._data[c] = [url, header, body, callback]\n return c\n\n\n\n def run(self):\n \"\"\"\n for threading mode use \"start\" method\n for non-threading mode use \"run\" method\n \"\"\"\n start_event = self._Thread__started\n self._do_add()\n running = 1\n\n \n\n do_action = self._do_socket_action\n\n while True:\n \n while running:\n r = []\n w = []\n e = []\n\n for socket, event in self._registry.values():\n if event == const.CURL_POLL_IN:\n r.append(socket)\n elif event == const.CURL_POLL_OUT:\n w.append(socket)\n elif event == const.CURL_POLL_INOUT:\n r.append(socket)\n w.append(socket)\n\n rr, ww, ee = select.select(r, w, e, 1.0)\n \n #print self._registry\n #print r, w, e\n\n for socket in rr:\n running = do_action(socket, const.CURL_CSELECT_IN)\n \n for socket in ww:\n running = do_action(socket, const.CURL_CSELECT_OUT)\n \n for socket in ee:\n running = do_action(socket, const.CURL_CSELECT_ERR)\n \n self._do_remove()\n self._do_add()\n\n # check loop\n started = start_event.is_set()\n if not started and not self.isDaemon():\n break\n\n # for non-threading mode\n if not start_event.is_set():\n data = [(curl, v[0], v[1], v[2]) \n for curl, v in self._data.items()]\n self._data.clear()\n return data\n\n def add(self, url, callback=None, **kwargs):\n \"\"\"\n put values to queue\n\n callback prototype:\n def callback(curl, start_url, header, body):\n [code]\n \"\"\"\n self._queue.put((url, kwargs, callback))\n\n\n def join(self, timeout=None):\n \"\"\"\n if thread is daemon then run loop forever\n \"\"\"\n if self.isDaemon():\n self._queue.join()\n else:\n super(Pool, self).join(timeout)\n\n\n\n\n\n","repo_name":"bharismendy/alternance_m2","sub_path":"projet_ovirt/venv/lib/python3.6/site-packages/pylibcurl/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":7667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"72104166456","text":"import os, pickle\r\nfrom gensim.models import Word2Vec\r\n\r\nfrom testing import get_test_all\r\nfrom __config__.filepath import AUX_DIR\r\n\r\ntest_fn = get_test_all()\r\n\r\nwith open(os.path.join(AUX_DIR, 'multicore-5-vocab.pkl'), 'rb') as f:\r\n pred_name = pickle.load(f)\r\n\r\nmodel2 = Word2Vec.load(os.path.join(AUX_DIR, 'word2vec', 'model-plain'))\r\n\r\ndef sim2(a, b):\r\n x = pred_name[a].split('_')[1]\r\n y = pred_name[b].split('_')[1]\r\n return model2.similarity(x,y)\r\n\r\ntest_fn(sim2)\r\n\r\n\r\nmodel = Word2Vec.load(os.path.join(AUX_DIR, 'word2vec', 'model'))\r\n\r\ndef sim(a, b):\r\n x = pred_name[a]\r\n y = pred_name[b]\r\n return model.similarity(x,y)\r\n\r\ntest_fn(sim)\r\n\r\n\r\n","repo_name":"guyemerson/sem-func","sub_path":"src/testing_baseline.py","file_name":"testing_baseline.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"36230521045","text":"import logging\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport io\nfrom sklearn.externals import joblib\nimport os\nimport lightgbm as lgb\n\nLOG = logging.getLogger(__name__)\n\nPARAMS = {\n 'deps': 15,\n 'model': './model',\n}\nmodel_path = None\nxsc = None\nysc = None\ngbm = None\n\n\ndef init_hook(**kwargs):\n global PARAMS, xsc, ysc, gbm, model_path\n PARAMS.update(kwargs)\n PARAMS['deps'] = int(PARAMS['deps'])\n model_path = PARAMS['model']\n xsc = joblib.load(os.path.join(model_path, 'xscaler.pkl'))\n ysc = joblib.load(os.path.join(model_path, 'yscaler.pkl'))\n gbm = lgb.Booster(model_file=os.path.join(model_path, 'model.data'))\n LOG.info('init: {}'.format(kwargs))\n\n\ndef process(inputs, ctx, **kwargs):\n doc = inputs['doc'][0]\n operational_settings = ['o_{}'.format(i + 1) for i in range(3)]\n sensor_columns = ['m_{}'.format(i + 1) for i in range(24)]\n cols = ['no', 'cycle'] + operational_settings + sensor_columns\n metrics = cols[2:-3]\n data = pd.read_csv(io.BytesIO(doc), sep=' ', index_col=False, header=None, names=cols)\n data = data.drop(cols[-3:], axis=1)\n data = data.fillna(0)\n results = []\n for k, v in data.groupby(['no']):\n v = v.reset_index(drop=True)\n x = v.loc[v.shape[0] - PARAMS['deps'] - 1:, metrics].values\n x = np.expand_dims(np.reshape(x, (-1)), axis=0)\n x = xsc.transform(x)\n y = gbm.predict(x)\n y = np.reshape(ysc.inverse_transform(np.reshape(y, (-1, 1))), -1)\n results.append(int(y[0]))\n\n return {'results': np.array(results, np.int32)}\n","repo_name":"kibernetika-ai/demo-zoo","sub_path":"demo_sp/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18424628164","text":"from getpass import getpass\nfrom modules.utils import Utils\nfrom modules.crypt import RSA, PrimeGen\n\nimport requests, json\n\nip, port = None, None\nlogged_in = False\napi_token, private_key = None, None\nuser_id, user_name = None, None\n\nclass Authentication:\n def __init__(self, ip = '127.0.0.1', port = '5000'):\n self.__ip = ip\n self.__port = port\n self.__default_url = 'http://{0}:{1}'.format(self.__ip, self.__port)\n \n def register(self, name, public_key, key_length):\n '''Register a new user.\n\n Input:\n - name: user name,\n - public_key: user public_key (generated)\n - key_length: user key length (n)\n\n Output:\n - user id\n \n Exception:\n - thrown if any errors occured.\n '''\n url = self.__default_url + '/register'\n\n res = requests.post(url, params = {\n 'name' : name,\n 'public_key' : public_key,\n 'key_length' : key_length\n })\n\n res = json.loads(res.text)\n\n if res[\"error\"]:\n raise Exception(res[\"message\"])\n else:\n return res[\"user_id\"]\n\n def login(self, user_id, private_key):\n '''Log user in.\n\n Input:\n - user_id : id of the user.\n - private_key: private key of that user.\n \n Output:\n - name: user's name,\n - api_token: user's api token for further actions.\n \n Exception:\n '''\n url = self.__default_url + '/login'\n\n fire_res = requests.get(url, params = {\n 'user_id' : user_id\n })\n\n fire_res = json.loads(fire_res.text)\n\n if fire_res[\"error\"]:\n raise Exception(fire_res[\"message\"])\n else:\n verify_token = fire_res[\"verify_token\"]\n key_length = fire_res[\"key_length\"]\n\n verify_token = RSA.decrypt(verify_token, private_key, key_length)\n\n confirm_res = requests.post(url, params = {\n 'user_id' : user_id,\n 'verify_token' : verify_token\n })\n\n confirm_res = json.loads(confirm_res.text)\n\n if confirm_res[\"error\"]:\n raise Exception('Wrong credentials')\n else:\n return confirm_res[\"api_token\"], confirm_res[\"name\"]\n \n def logout(self, user_id, api_token):\n '''Log user out.\n\n Input:\n - user_id : user's id\n - api_token: user's api_token\n \n Output:\n - message (str)\n \n Exception:\n - thrown if any errors occurred.\n '''\n url = self.__default_url + '/logout'\n\n res = requests.post(url, params = {\n 'user_id' : user_id,\n 'api_token' : api_token\n })\n\n res = json.loads(res.text)\n\n if res[\"error\"]:\n raise Exception(res[\"message\"])\n else:\n return res[\"message\"]\n\nclass AuthenticationUI:\n @staticmethod\n def Menu_UI():\n Utils.clrscr()\n print('Hi, welcome aboard! It seems like you\\'re not logged in!')\n print('Now you can:')\n print('\\t1. Log in')\n print('\\t2. Create a new account')\n print('\\t3. Exit')\n\n try:\n ans = int(input('What\\'s your choice then? '))\n\n if ans == 1:\n AuthenticationUI().Login_UI()\n elif ans == 2:\n AuthenticationUI().Register_UI()\n elif ans == 3:\n print('Goodbye!')\n exit(0)\n else:\n raise Exception('Invalid choice!')\n except Exception as e:\n print('Error: ' + str(e))\n Utils.pause()\n\n @staticmethod\n def Register_UI():\n pg = PrimeGen() \n Utils.clrscr()\n\n print('Now we\\'ll try to create a new account for you!')\n try:\n name = input('Your name: ')\n\n print('Choose your public & private pair:')\n\n while 1:\n p, q = pg.primeGen(16), pg.primeGen(16)\n\n key_length, public_key, private_key = RSA.generate(p, q)\n\n print('Key pair: ({0}, {1}), n = {2}'.format(public_key, private_key, key_length))\n ans = input('Accept (y) or generate a new pair (n): ')\n\n if ans == 'y':\n break\n\n user_id = Authentication(ip, port).register(name, public_key, key_length)\n\n except Exception as e:\n print('Error: ' + str(e))\n Utils.pause()\n\n else:\n print('Register success, your new id is: {0}'.format(user_id))\n print('\\nDo you want to export credentials to {0}/txt? (y/n) '.format(user_id))\n \n ans = input()\n\n if ans == 'y':\n with open('{0}.txt'.format(user_id), 'w+') as f:\n print('ID: {0}'.format(user_id), file = f)\n print('Public_key: {0}'.format(public_key), file = f)\n print('Private_key: {0}'.format(private_key), file = f)\n print('Successfully exported to {0}.txt'.format(user_id))\n else:\n print('Remember your credentials:')\n print('ID: {0}'.format(user_id))\n print('Public_key: {0}'.format(public_key))\n print('Private_key: {0}'.format(private_key))\n\n Utils.pause()\n\n @staticmethod\n def Login_UI():\n Utils.clrscr()\n\n global logged_in\n global user_id\n global user_name\n global api_token\n global private_key\n\n print('Log in with your ID and private_key!')\n\n try:\n user_id_ = input('Your ID: ')\n private_key_ = getpass('Your private_key: ')\n\n api_token_, user_name_ = Authentication(ip, port).login(user_id_, private_key_)\n\n except Exception as e:\n print('Error: Please check your credentials again')\n Utils.pause()\n\n else:\n api_token = api_token_\n private_key = private_key_\n user_id, user_name = user_id_, user_name_\n logged_in = True\n\n print('Logged in success.')\n\n Utils.pause()\n \n @staticmethod\n def Logout_UI():\n Utils.clrscr()\n \n global user_id\n global api_token\n global logged_in\n global private_key\n \n try:\n print('Are you sure you want to log out?')\n print('\\t1. Yes')\n print('\\t2. No')\n\n ans = int(input('What\\'s your choice then? '))\n if ans == 1:\n Authentication(ip, port).logout(user_id, api_token)\n else:\n return False\n\n except Exception as e:\n print('Error: ' + str(e))\n\n else:\n private_key, user_id = None, None\n logged_in = False\n\n print('Logged out success.')\n \n Utils.pause()\n","repo_name":"trhgquan/image-sharing","sub_path":"client/modules/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"22980613915","text":"import os\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nimport torch\nimport torch.nn as nn\n\nfrom einops import rearrange\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler\n\n# concat reconstruction data if needed\n# CAUTION : Windows should not collapse\ndef concat_recon(recon_output):\n \n w,b,f = recon_output.shape\n tmp = rearrange(recon_output, 'w b f -> b w f')\n output = tmp.reshape(w*b,f)\n\n return output\n\n# reconstruction evaluation\ndef eval_recon(recon, real, scaler = None, undo = False):\n criterion = nn.MSELoss()\n \n if undo == True:\n assert scaler != None, 'Scaler should be defined!!'\n \n # reverse scaling\n recon = scaler.inverse_transform(recon)\n \n r = recon.shape[0]\n real = real[:r,:]\n\n # compute loss\n eval_loss = criterion(torch.tensor(recon), torch.tensor(real))\n \n return eval_loss\n\n# get difference between reconstruction data and real data\ndef get_diff(recon, real, scaler = None, undo = False):\n \n if undo == True:\n assert scaler != None, 'Scaler should be defined!!'\n \n # reverse scaling\n recon = scaler.inverse_transform(recon)\n \n r = recon.shape[0]\n real = real[:r,:]\n \n return np.abs(recon-real)","repo_name":"euisuk-chung/timeseries-generation","sub_path":"utils/utils_vrae.py","file_name":"utils_vrae.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"33665205145","text":"#GUI_project_trial3\r\n\r\nfrom tkinter import*\r\n\r\nimport tkinter.font\r\n\r\nfrom pygame import mixer\r\nmixer.init() \r\n#Adding background music\r\nmixer.music.load(\"Sounds/Courage.wav\")\r\nmixer.music.play(0)\r\n\r\n\r\n \r\n\r\ndef part2():\r\n import origin_project\r\n Tk.destroy\r\n \r\ndef part3(): \r\n import info_window_01\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\ndef play_music():\r\n import pygame\r\n pygame.mixer.init()\r\n pygame.mixer.music.load(\"Sounds/Button-SoundBible.com-1420500901.wav\")\r\n pygame.mixer.music.play()\r\n\r\ndef play_music1():\r\n import pygame\r\n pygame.mixer.init()\r\n pygame.mixer.music.load(\"Sounds/Button-SoundBible.com-1420500901.wav\")\r\n pygame.mixer.music.play() \r\n\r\n\r\n\r\n\r\n\r\np=Tk()\r\np.title(\"Project Part 1\")\r\np.geometry(\"700x500\") \r\nframe = Frame(p, relief=GROOVE, borderwidth=20,bg=\"Aquamarine\").pack(fill=BOTH,expand=1)\r\n\r\n#inserting an Image in background\r\n\r\nimage=\"Images/forest_01.png\"\r\nbg= PhotoImage(file=image,width=650,height=450)\r\nLabel0=Label(master=frame,image=bg)\r\nLabel0.place(x=19,y=19) \r\n\r\nText1=Label(master=frame,text=\"Welcome to\",fg=\"blue\",bg=\"pink\",).place(x=320,y=50)\r\nText2=Label(master=frame,text=\"MAGIC.KNIGHTS\",fg=\"tomato\",bg=\"lightblue\",font=(\"arial\",40)).place(x=130,y=100)\r\n\r\n\r\n\r\n\r\n\r\nEnter=Button(master=frame,relief=RAISED,borderwidth=5,text= \"ENTER\",command=lambda:[play_music(),part2()] ,bg=\"pink\",fg=\"blue\",height=3,width=70).place(x=95,y=400)\r\n\r\ninfo=Button(master=frame,relief=RAISED,borderwidth=4,text= \"i\",command=lambda:[play_music(),part3()] ,bg=\"pink\",activebackground = \"blue\",activeforeground = \"Turquoise\",fg=\"blue\",height=1,width=1).place(x=655,y=445)\r\n\r\np.mainloop()","repo_name":"AzureSky0/First-Python-Pygame-Project","sub_path":"pygame-project/Game_MainWindow.py","file_name":"Game_MainWindow.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"72384761976","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.encoding import python_2_unicode_compatible\n\nTARGET_BLANK = '_blank'\nTARGET_SELF = '_self'\nTARGET_PARENT = '_parent'\nTARGET_TOP = '_top'\n\nTARGET_CHOICES = (\n (TARGET_BLANK, '_blank'),\n (TARGET_SELF, '_self'),\n (TARGET_PARENT, '_parent'),\n (TARGET_TOP, '_top'),\n)\n\nDEFAULT_LIMIT = getattr(settings, 'LINK_DEFAULT_LIMIT', 0)\nDEFAULT_EVERY = getattr(settings, 'LINK_DEFAULT_EVERY', 1)\nDEFAULT_TARGET = getattr(settings, 'LINK_DEFAULT_TARGET', '_blank')\nDEFAULT_NOFOLLOW = getattr(settings, 'LINK_DEFAULT_NOFOLLOW', False)\nDEFAULT_CSS_CLASS = getattr(settings, 'LINK_DEFAULT_CSS_CLASS', None)\n\n\n@python_2_unicode_compatible\nclass AutomaticLink(models.Model):\n keyword = models.CharField(_('keyword'), max_length=255, unique=True)\n link = models.CharField(_('link'), max_length=255)\n # options\n active = models.BooleanField(_('active'), default=True)\n limit = models.IntegerField(_('limit'), default=DEFAULT_LIMIT, help_text=_('zero - disabled'))\n every = models.IntegerField(\n _('every N'),\n default=DEFAULT_EVERY,\n help_text=_('Every \"3\" mean that this keyword will be replaced to link in every third content item')\n )\n target = models.CharField(_('target'), max_length=10, choices=TARGET_CHOICES, default=DEFAULT_TARGET)\n nofollow = models.BooleanField(_('rel=\"nofollow\"'), default=DEFAULT_NOFOLLOW)\n css_class = models.CharField(_('css class'), max_length=100, blank=True, null=True, default=DEFAULT_CSS_CLASS)\n\n class Meta:\n unique_together = (('keyword', 'link'),)\n verbose_name = _('automatic link')\n verbose_name_plural = _('automatic links')\n\n def __str__(self):\n return self.keyword\n","repo_name":"silentsokolov/django-automatic-links","sub_path":"automatic_links/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"72348277816","text":"\"\"\"Converts SPC convective outlook to nicer file format.\"\"\"\n\nimport pickle\nimport argparse\nimport shapefile\nimport numpy\nimport pandas\nfrom gewittergefahr.gg_utils import polygons\nfrom gewittergefahr.gg_utils import projections\nfrom gewittergefahr.gg_utils import file_system_utils\n\nRISK_TYPE_INDEX = 0\nMARGINAL_RISK_ENUM = 3\nSLIGHT_RISK_ENUM = 4\nENHANCED_RISK_ENUM = 5\nMODERATE_RISK_ENUM = 6\nHIGH_RISK_ENUM = 7\n\nMARGINAL_RISK_STRING = 'marginal'\nSLIGHT_RISK_STRING = 'slight'\nENHANCED_RISK_STRING = 'enhanced'\nMODERATE_RISK_STRING = 'moderate'\nHIGH_RISK_STRING = 'high'\n\nRISK_TYPE_ENUM_TO_STRING = {\n MARGINAL_RISK_ENUM: MARGINAL_RISK_STRING,\n SLIGHT_RISK_ENUM: SLIGHT_RISK_STRING,\n ENHANCED_RISK_ENUM: ENHANCED_RISK_STRING,\n MODERATE_RISK_ENUM: MODERATE_RISK_STRING,\n HIGH_RISK_ENUM: HIGH_RISK_STRING\n}\n\nSTANDARD_LATITUDES_DEG = numpy.array([33, 45], dtype=float)\nCENTRAL_LONGITUDE_DEG = 0.\nELLIPSOID_NAME = projections.WGS84_NAME\nFALSE_EASTING_METRES = 0.\nFALSE_NORTHING_METRES = 0.\n\nRISK_TYPE_COLUMN = 'risk_type_string'\nPOLYGON_COLUMN = 'polygon_object_latlng'\n\nINPUT_FILE_ARG_NAME = 'input_shapefile_name'\nOUTPUT_FILE_ARG_NAME = 'output_pickle_file_name'\n\nINPUT_FILE_HELP_STRING = (\n 'Path to input file. This should be a shapefile with one convective '\n 'outlook, such as those found here: '\n 'https://www.spc.noaa.gov/cgi-bin-spc/getacrange.pl?date0=20180403&'\n 'date1=20180403')\n\nOUTPUT_FILE_HELP_STRING = (\n 'Path to output file. Risk polygons (slight, enhanced, moderate, etc.) '\n 'will be saved here in a pandas DataFrame.')\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + INPUT_FILE_ARG_NAME, type=str, required=True,\n help=INPUT_FILE_HELP_STRING)\n\nINPUT_ARG_PARSER.add_argument(\n '--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,\n help=OUTPUT_FILE_HELP_STRING)\n\n\ndef _run(input_shapefile_name, output_pickle_file_name):\n \"\"\"Converts SPC convective outlook to nicer file format.\n\n This is effectively the main method.\n\n :param input_shapefile_name: See documentation at top of file.\n :param output_pickle_file_name: Same.\n \"\"\"\n\n projection_object = projections.init_lcc_projection(\n standard_latitudes_deg=STANDARD_LATITUDES_DEG,\n central_longitude_deg=CENTRAL_LONGITUDE_DEG,\n ellipsoid_name=ELLIPSOID_NAME)\n\n print('Reading data from: \"{0:s}\"...'.format(input_shapefile_name))\n shapefile_handle = shapefile.Reader(input_shapefile_name)\n\n list_of_polygon_objects_latlng = []\n risk_type_strings = []\n\n for this_record_object in shapefile_handle.iterShapeRecords():\n # print this_record_object.record\n this_risk_type_enum = this_record_object.record[RISK_TYPE_INDEX]\n\n try:\n this_risk_type_string = RISK_TYPE_ENUM_TO_STRING[\n this_risk_type_enum]\n except KeyError:\n continue\n\n these_xy_tuples = this_record_object.shape.points\n this_num_vertices = len(these_xy_tuples)\n\n these_x_coords_metres = numpy.array([\n these_xy_tuples[k][0] for k in range(this_num_vertices)\n ])\n\n these_y_coords_metres = numpy.array([\n these_xy_tuples[k][1] for k in range(this_num_vertices)\n ])\n\n these_latitudes_deg, these_longitudes_deg = (\n projections.project_xy_to_latlng(\n x_coords_metres=these_x_coords_metres,\n y_coords_metres=these_y_coords_metres,\n projection_object=projection_object,\n false_easting_metres=FALSE_EASTING_METRES,\n false_northing_metres=FALSE_NORTHING_METRES)\n )\n\n this_polygon_object_latlng = (\n polygons.vertex_arrays_to_polygon_object(\n exterior_x_coords=these_longitudes_deg,\n exterior_y_coords=these_latitudes_deg)\n )\n\n risk_type_strings.append(this_risk_type_string)\n list_of_polygon_objects_latlng.append(this_polygon_object_latlng)\n\n outlook_dict = {\n RISK_TYPE_COLUMN: risk_type_strings,\n POLYGON_COLUMN: list_of_polygon_objects_latlng\n }\n\n outlook_table = pandas.DataFrame.from_dict(outlook_dict)\n # print(outlook_table)\n\n print('Writing outlook polygons to file: \"{0:s}\"...'.format(\n output_pickle_file_name))\n\n file_system_utils.mkdir_recursive_if_necessary(\n file_name=output_pickle_file_name)\n\n pickle_file_handle = open(output_pickle_file_name, 'wb')\n pickle.dump(outlook_table, pickle_file_handle)\n pickle_file_handle.close()\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n input_shapefile_name=getattr(INPUT_ARG_OBJECT, INPUT_FILE_ARG_NAME),\n output_pickle_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)\n )\n","repo_name":"thunderhoser/GewitterGefahr","sub_path":"gewittergefahr/nature2019/convert_spc_outlook.py","file_name":"convert_spc_outlook.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"22"} +{"seq_id":"41072292305","text":"#\n# sMDT tube construction, fix duplicate database entries\n# \n# Author: Reinhard Schwienhorst, based on Monthly production example\n# 2021-08-25\n#\n\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\n\nfrom sMDT import db, tube\nfrom sMDT.data import status\nimport sys\n\n# get all of the tubes in the database\ndatabase=db.db()\ntubes = database.get_tubes()\n\n\n\nfor tube in tubes:\n #print(tube.get_ID())\n orig_id=tube.get_ID()\n td=tube.get_mfg_date()\n if td!=None:\n continue\n print(\"Need to fix date \",orig_id)\n # find the tube with the next higher ID\n found_id=0\n check_id='MSU'+'{:05d}'.format(int(orig_id[3:])+1)\n while(found_id==0):\n print(\"checking ID\",check_id)\n try:\n tube2=database.get_tube(check_id)\n except KeyError:\n check_id='MSU'+'{:05d}'.format(int(check_id[3:])+1)\n continue\n td2=tube2.get_mfg_date()\n if td2==None:\n check_id='MSU'+'{:05d}'.format(int(check_id[3:])+1)\n continue\n \n print(\"tube date found,\",td2,\", adding comment\")\n #tube.new_comment((\"Adding closest mfg date\",\"Reinhard\",td2,status.Status.PASS))\n #database.add_tube(tube)\n found_id=check_id\n# end of loop\n \nsys.exit(0)\n","repo_name":"dravinflores/smdt","sub_path":"utilities/FixNoDate.py","file_name":"FixNoDate.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"4501032231","text":"import math\nD, Rad, Orb = map(int, input().split())\nMBKS_cor = (-Orb, 0)\ncos_alpha = 1 - (D**2)/(Orb**2)/2\nalpha = math.acos(cos_alpha)\nsin_alpha = math.sin(alpha)\nLinal_1_cor = (-cos_alpha*Orb, sin_alpha*Orb)\n\ncentre = ((Linal_1_cor[0] + MBKS_cor[0])/2, (Linal_1_cor[1] + MBKS_cor[1])/2)\nif abs(centre[0]) < Rad and abs(centre[1]) < Rad:\n print(\"Trouble\")\nelse:\n print(\"Escape\")\n\n\n","repo_name":"TolimanStaR/Course-Work","sub_path":"media/code_files/87b09f25ff42a7c49ba80d7b71ef0b5220d8e4b6.py","file_name":"87b09f25ff42a7c49ba80d7b71ef0b5220d8e4b6.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"75490582455","text":"#\n# Example code to create and populate a NoSQL DynamoDB table \n# named UN_country_codes that will be used in Assignment 2\n# \n# The name in this code has been changed so that you cannot\n# accidently try to overwrite the table if you just execute \n# this code\n#\nimport boto3\nimport csv\n\n# Establish a connection to the AWS resource dynamodb\n# Replace xxxx with your name\nsession = boto3.Session(profile_name='default', region_name='ca-central-1')\ndynamodb = session.resource('dynamodb', region_name='ca-central-1')\n\n# Create a new table called UN_country_codes\n\ntry:\n table = dynamodb.create_table(\n TableName='ymei_country_codes', # replace myUN_country_codes with UN_country_codes\n KeySchema=[\n {\n 'AttributeName': 'iso3',\n 'KeyType': 'HASH' # Partition key\n },\n {\n 'AttributeName': 'Area',\n 'KeyType': 'RANGE' # sort key\n },\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'iso3',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'Area',\n 'AttributeType': 'N'\n },\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n print(\"Table status:\", table.table_status, table.table_name)\n table.wait_until_exists()\n print(\"Table \", table.table_name, \" created\")\nexcept Exception as e:\n print(e)\n\ntable = dynamodb.Table('ymei_country_codes') # replace myUN_country_codes with UN_country_codes\n\n###############################################################################################################\n#\n# These are the CRUD examples\n# Re: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html\n###############################################################################################################\n\n########## To Create ##########################################################################################\n# this program will assume the input csv always contain a header\nwith open('shortlist_area.csv', 'r') as file:\n reader = csv.DictReader(file)\n header = reader.fieldnames\n\n for row in reader:\n print(row)\n item1 = table.put_item(\n Item={\n 'iso3': row['ISO3'],\n 'name': row['Country Name'],\n 'Area': int(row[\"Area\"]),\n\n }\n )\n print(item1)\n\nprint(\"Database records loaded\")\n########### To Read ##########################################################################################\nresponse = table.get_item(\n Key={\n 'iso3': \"ALB\",\n 'Area': 1,\n }\n)\n\nitem = response['Item']\nprint(item['Area'])\n########### To Update ##########################################################################################\n################################################################################################################\n# The thing is, key can not be updated, in order to update a key, we need to delete and re-new a new element\n################################################################################################################\ntable.update_item(\n Key={\n 'iso3': \"ALB\",\n 'Area': 1,\n },\n UpdateExpression='SET #field1 = :val1, #field2 = :val2', # field name need a # in the front, values need a : in the front\n ExpressionAttributeNames={\n \"#field1\": \"age\",\n \"#field2\": \"name\",\n },\n ExpressionAttributeValues={\n ':val1': 26,\n ':val2': \"pig country\",\n },\n)\n\n# If we attach a new attribute, the other elements that does not have this attribute will not even return this attribute\nresponse = table.get_item(\n Key={\n 'iso3': \"DZA\",\n 'Area': 2,\n }\n)\nitem = response['Item']\nprint(item)\n\nresponse = table.get_item(\n Key={\n 'iso3': \"ALB\",\n 'Area': 1,\n },\n)\nitem = response['Item']\nprint(item)\n########### To delete ##########################################################################################\ntable.delete_item(\n Key = {\n 'iso3': \"DZA\",\n 'Area': 2,\n }\n)\n\n# attribute can be a list as well: L\n\n# https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html","repo_name":"y5mei/CIS4100","sub_path":"Week-2 Storage/Assignment-2/createATable.py","file_name":"createATable.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2729185638","text":"from datetime import datetime\n\nfrom api.apis.carriers.fedex.soap_objects.common.address import Address\nfrom api.apis.carriers.fedex.soap_objects.common.version_id import VersionId\nfrom api.apis.carriers.fedex.soap_objects.common.web_authentication_detail import (\n WebAuthenticationDetail,\n)\nfrom api.apis.carriers.fedex.soap_objects.pickup.client_detail import ClientDetail\nfrom api.apis.carriers.fedex.soap_objects.soap_object import FedExSoapObject\n\n\nclass PickupAvailabilityRequest(FedExSoapObject):\n _required_keys = {\"WebAuthenticationDetail\", \"ClientDetail\", \"Version\"}\n _optional_keys = {\n \"TransactionDetail\",\n \"PickupType\",\n \"AccountNumber\",\n \"PickupAddress\",\n \"PickupRequestType\",\n \"DispatchDate\",\n \"NumberOfBusinessDays\",\n \"PackageReadyTime\",\n \"CustomerCloseTime\",\n \"Carriers\",\n \"ShipmentAttributes\",\n \"PackageDetails\",\n }\n\n def __init__(self, pickup_request: dict):\n version = VersionId(\n version={\n \"ServiceId\": \"disp\",\n \"Major\": 17,\n \"Intermediate\": 0,\n \"Minor\": 0,\n }\n )\n\n client = ClientDetail(\n account_number=pickup_request[\"account_number\"],\n meter_number=pickup_request[\"meter_number\"],\n )\n\n auth = WebAuthenticationDetail(\n key=pickup_request[\"key\"],\n password=pickup_request[\"password\"],\n )\n\n super().__init__(\n {\n \"WebAuthenticationDetail\": auth.data,\n \"ClientDetail\": client.data,\n \"Version\": version.data,\n \"PickupAddress\": Address(address_details=pickup_request[\"origin\"]).data,\n \"PickupRequestType\": [\"SAME_DAY\"],\n \"DispatchDate\": datetime.strptime(\n pickup_request[\"pickup\"][\"date\"], \"%Y-%m-%d\"\n ),\n \"PackageReadyTime\": datetime.strptime(\n pickup_request[\"pickup\"][\"start_time\"], \"%H:%M\"\n ).time(),\n \"CustomerCloseTime\": datetime.strptime(\n pickup_request[\"pickup\"][\"end_time\"], \"%H:%M\"\n ).time(),\n \"Carriers\": [\"FDXG\", \"FDXE\"],\n },\n required_keys=self._required_keys,\n optional_keys=self._optional_keys,\n )\n","repo_name":"JimRh/ubbereview","sub_path":"api/apis/carriers/fedex/soap_objects/pickup/pickup_availability_request.py","file_name":"pickup_availability_request.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20758313543","text":"import numpy as np\n\nclass Markov:\n '''Class for the Markov chain.\n\n This class represents the transition martices.\n\n Attributes\n ----------\n chain : np.array(floats)\n The transition matrix as 3-d array. Square matrix in both the first two\n dimensions, where the row represents the current state, and the coulumn\n represents the following state. The z-axis represents the inhomogenous\n factor, which is the dependancy on the transition time.\n dim : int, optional\n The dimention at which the sum of the transition matrix add to oneself.\n (the default is 1)\n '''\n def __init__(self, chain, dim = 1):\n self.chain = chain\n assert (self.chain.shape[0] == self.chain.shape[1]), \"the transition \\\n matrix should be square in the first two dimensions\"\n self.cumulativeSum = np.cumsum(self.chain, axis = dim)\n\n\n def extract_transition_probability(self, currentState, time_step=None):\n '''Extracts the transition probability from the current state.\n\n Parameters\n ----------\n currentState : int\n The current state of the Markov chain.\n time_step : int, optional\n The time step of the inhomegenous Markov chain. Use only if you use\n an inhomegenous Markov chain. (the default is None)\n\n Returns\n -------\n np.array(float)\n The row of the tranistion probabilty matrix for the current state\n and time.\n\n '''\n if time_step is not None:\n return( self.cumulativeSum[currentState, :, time_step] )\n else:\n return( self.cumulativeSum[currentState, :] )\n\n\n def next_state(self, currentState, rnd, time_step = None):\n '''Estimates the future state of the Markov chain.\n\n Parameters\n ---------\n currentState : int\n The current state of the Markov chain.\n rnd : float\n A random number.\n time_step : int, optional\n The time step of the inhomegenous Markov chain. Use only if you use\n an inhomegenous Markov chain. (the default is None)\n\n Returns\n -------\n int\n The future state of the Markov chain.\n\n '''\n\n transitionProbCumSum = self.extract_transition_probability(currentState, time_step)\n nextState = np.where(transitionProbCumSum >= rnd)[0][0]\n return(nextState)\n","repo_name":"SheperoMah/EVSpatialChargingModel","sub_path":"spatialModelPkg/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"17285413075","text":"import sys\n\n# calculates gcd and its linear combo. according to the extended euclidean algorithm\ndef gcd(x, y, extended=False) -> int or tuple:\n if x == 0:\n return y, 0, 1\n\n # recursive call \n g, a, b = gcd(y % x, x, extended=True)\n\n # returns the gcd and its linear combo. wrt x and y\n return (g, b - (y//x) * a, a) if extended else g\n\n# computes solutions to the system of congurences according to crt in O(nlog min(a, m)) time\ndef chinese_remainder_theorem(a, b, m) -> int:\n\n # gets the number of equations in the system and stored them in a variable\n length = len(m)\n\n # guard clause satisfying the pre-requisites for the theorem\n for i in range(length):\n if m[i] <= 0:\n return \"N\"\n\n for j in range(length):\n if i != j and gcd(m[i], m[j]) != 1:\n return \"N\"\n\n M = 1\n for num in m:\n M *= num\n\n # converts to the standard form by inversing the coeff. of x\n a = [b[i] * gcd(a[i], m[i], extended=True)[1] for i in range(length)]\n\n # calcs. b[j], i.e. the inverse of M/m[j] under modulo m[j]\n b = [gcd(M/m[j], m[j], extended=True)[1] for j in range(length)]\n\n # gets the final value of x[0] under modulo M\n x = int(sum([M * b[j] * a[j] / m[j] for j in range(length)]) % M)\n\n print(\"Y\", end=\" \")\n return x\n\n# driver code\nif __name__ == \"__main__\":\n\n # parse the command line arguments and store them in variables\n args = [[], [], []]\n for i in range(2, 3 * int(sys.argv[1]) + 2):\n args[(i - 2) % 3].append(int(sys.argv[i]))\n\n # prints the solution to the system of congurences using the crt\n print(chinese_remainder_theorem(*args), end=\"\")","repo_name":"theShafted/system_and_network_security","sub_path":"prg8.py","file_name":"prg8.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36077258293","text":"from django.contrib import admin\nfrom django.urls import path\nfrom blog import views \n\nurlpatterns = [\n path(\"\",views.index,name=\"index\"),\n path(\"blog\",views.blog,name=\"blog\"),\n path('blogpost/',views.blogpost,name=\"blogpost\"),\n path('search',views.search,name=\"search\"),\n path('profile',views.profile,name=\"profile\"),\n \n \n]\n","repo_name":"csk-5652/Django_Blog_Application","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"12146195152","text":"import uuid\nfrom datetime import datetime\nfrom unittest.mock import patch\nfrom rest_api.rest_api_server.models.enums import ConstraintTypes\nfrom rest_api.rest_api_server.tests.unittests.test_api_base import TestApiBase\n\n\nclass TestResourceConstraints(TestApiBase):\n\n def setUp(self, version='v2'):\n super().setUp(version)\n _, self.organization = self.client.organization_create(\n {'name': 'test organization'})\n valid_aws_creds = {\n 'name': 'test_credentials',\n 'type': 'aws_cnr',\n 'config': {\n 'access_key_id': 'key',\n 'secret_access_key': 'secret',\n 'config_scheme': 'create_report'\n }\n }\n self.p_configure = patch(\n 'tools.cloud_adapter.clouds.aws.Aws.configure_report').start()\n self.user_id = self.gen_id()\n _, self.employee = self.client.employee_create(\n self.organization['id'],\n {'name': 'John Smith', 'auth_user_id': self.user_id})\n _, self.cloud_acc = self.create_cloud_account(\n self.organization['id'], valid_aws_creds, auth_user_id=self.user_id)\n self._mock_auth_user(self.user_id)\n self.valid_resource = {\n 'cloud_resource_id': self.gen_id(),\n 'name': 'test_resource',\n 'resource_type': 'test_type',\n 'employee_id': self.employee['id'],\n 'pool_id': self.organization['pool_id']\n }\n _, self.resource = self._create_cloud_resource(self.cloud_acc['id'],\n self.valid_resource)\n self.valid_constraint = {\n 'limit': int(datetime.utcnow().timestamp()) + 3600,\n 'type': 'ttl'\n }\n\n def _create_cloud_resource(self, cloud_acc_id, params, active=True,\n valid_until=None, request_id=None):\n code, resource = self.cloud_resource_create(\n cloud_acc_id, params)\n if active:\n self.resources_collection.update_one(\n filter={\n '_id': resource['id']\n },\n update={'$set': {\n 'last_seen': int(datetime.utcnow().timestamp() - 1),\n 'active': True\n }}\n )\n return code, resource\n\n def test_create_constraint(self):\n code, constraint = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 201)\n self.assertEqual(constraint['resource_id'], self.resource['id'])\n self.assertEqual(constraint['type'], ConstraintTypes.TTL.value)\n self.assertEqual(constraint['limit'],\n self.valid_constraint['limit'])\n\n def test_get_constraint(self):\n _, constraint = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n code, constraint = self.client.resource_constraint_get(\n constraint['id'])\n self.assertEqual(code, 200)\n self.assertEqual(constraint['resource_id'], self.resource['id'])\n\n def test_get_global_constraints(self):\n valid_azure_cloud_acc = {\n 'name': 'azure',\n 'type': 'azure_cnr',\n 'config': {\n 'client_id': 'client',\n 'secret': 'secret',\n 'tenant': 'tenant',\n 'subscription_id': 'subscription',\n }\n }\n patch('tools.cloud_adapter.clouds.azure.Azure.configure_report').start()\n _, azure_cloud_acc = self.create_cloud_account(\n self.organization['id'], valid_azure_cloud_acc,\n auth_user_id=self.user_id)\n azure_resource_template = self.valid_resource.copy()\n azure_resource_template['cloud_resource_id'] = self.gen_id()\n azure_resource_template['name'] = 'azure_test_resource'\n _, azure_resource = self._create_cloud_resource(\n azure_cloud_acc['id'], azure_resource_template)\n _, aws_res_constraint = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n _, azure_res_constraint = self.client.resource_constraint_create(\n azure_resource['id'], self.valid_constraint)\n code, global_constraints = self.client.resource_constraints_list(\n self.organization['id'])\n self.assertEqual(code, 200)\n resource_constraints = global_constraints['resource_constraints']\n self.assertEqual(len(resource_constraints), 2)\n code, global_constraints = self.client.resource_constraints_list(\n self.organization['id'], details=True)\n self.assertEqual(code, 200)\n resource_constraints = global_constraints['resource_constraints']\n self.assertEqual(len(resource_constraints), 2)\n for resource_constraint in resource_constraints:\n details = resource_constraint['details']\n owner = details['owner']\n self.assertIsNotNone(owner)\n pool = details['pool']\n self.assertIsNotNone(pool)\n self.assertEqual(pool['id'], self.organization['pool_id'])\n self.assertEqual(pool['name'], self.organization['name'])\n self.assertEqual(pool['purpose'], 'business_unit')\n self.assertEqual(owner['id'], self.employee['id'])\n self.assertEqual(owner['name'], self.employee['name'])\n code, resp = self.client.cloud_account_delete(azure_cloud_acc['id'])\n self.assertEqual(code, 204)\n code, global_constraints = self.client.resource_constraints_list(\n self.organization['id'])\n self.assertEqual(code, 200)\n resource_constraints = global_constraints['resource_constraints']\n self.assertEqual(len(resource_constraints), 1)\n\n def test_list_constraints(self):\n self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n code, resp = self.client.resource_constraint_list(self.resource['id'])\n self.assertEqual(code, 200)\n self.assertIsNotNone(resp['constraints'])\n self.assertEqual(len(resp['constraints']), 1)\n\n def test_list_constraints_nonexistent(self):\n code, response = self.client.resource_constraint_list(\n str(uuid.uuid4()))\n self.assertEqual(code, 404)\n self.assertEqual(response['error']['error_code'], 'OE0002')\n\n def test_create_duplicate(self):\n code, _ = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 201)\n code, response = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': int(datetime.utcnow().timestamp()) + 7200,\n 'type': 'ttl'\n }\n )\n self.assertEqual(code, 409)\n self.assertEqual(response['error']['error_code'], 'OE0441')\n\n code, response = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': 150,\n 'type': 'total_expense_limit'\n }\n )\n self.assertEqual(code, 201)\n\n def test_create_invalid_id(self):\n code, response = self.client.resource_constraint_create(\n str(uuid.uuid4()), self.valid_constraint)\n self.assertEqual(code, 404)\n self.assertEqual(response['error']['error_code'], 'OE0002')\n\n def test_get_invalid_id(self):\n code, response = self.client.resource_constraint_get(str(uuid.uuid4()))\n self.assertEqual(code, 404)\n self.assertEqual(response['error']['error_code'], 'OE0002')\n\n def test_create_invalid_params(self):\n invalid_params_map = {\n 'limit': ['string', -1, None, '', 2**31],\n 'type': ['value', 123, '', None],\n }\n for param, invalid_values in invalid_params_map.items():\n body = self.valid_constraint.copy()\n for invalid_value in invalid_values:\n body[param] = invalid_value\n code, response = self.client.resource_constraint_create(\n self.resource['id'], body)\n self.assertEqual(code, 400)\n\n def test_unexpected_and_immutable(self):\n extra = {'extra_param': 'extra_value'}\n self.valid_constraint.update(extra)\n code, response = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0212')\n\n code, constraint = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': 100,\n 'type': 'total_expense_limit'\n })\n self.assertEqual(code, 201)\n code, response = self.client.resource_constraint_update(\n constraint['id'], extra)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0212')\n\n self.valid_constraint.pop('extra_param')\n extra = {'deleted_at': 1}\n self.valid_constraint.update(extra)\n code, response = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0211')\n\n code, response = self.client.resource_constraint_update(\n constraint['id'], extra)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0211')\n\n def test_update(self):\n limit = int(datetime.utcnow().timestamp()) + 1800\n _, response = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n code, response = self.client.resource_constraint_update(\n response['id'], {'limit': limit})\n self.assertEqual(code, 200)\n self.assertEqual(response['limit'], limit)\n\n def test_delete(self):\n _, constraint = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n code, _ = self.client.resource_constraint_delete(constraint['id'])\n self.assertEqual(code, 204)\n code, _ = self.client.resource_constraint_get(constraint['id'])\n self.assertEqual(code, 404)\n code, _ = self.client.resource_constraint_update(\n constraint['id'], {})\n self.assertEqual(code, 404)\n\n def test_limit_values(self):\n constraint = {\n 'limit': 721,\n 'type': 'ttl'\n }\n code, response = self.client.resource_constraint_create(\n self.resource['id'], constraint)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0461')\n constraint['type'] = 'total_expense_limit'\n code, response = self.client.resource_constraint_create(\n self.resource['id'], constraint)\n self.assertEqual(code, 201)\n\n def test_create_constraint_invactive_resource(self):\n _, inactive_resource = self._create_cloud_resource(self.cloud_acc['id'], {\n 'cloud_resource_id': self.gen_id(),\n 'name': 'test_resource',\n 'resource_type': 'test_type'\n }, active=False)\n code, response = self.client.resource_constraint_create(\n inactive_resource['id'], {\n 'limit': 100,\n 'type': 'ttl'\n })\n self.assertEqual(code, 424)\n self.assertEqual(response['error']['error_code'], 'OE0443')\n\n def test_constraint_limit_min_max_values(self):\n out_of_limits_values = {\n 'ttl': [(-1, 'OE0224'), (720, 'OE0461'),\n (int(datetime.utcnow().timestamp()) - 1, 'OE0461')],\n 'total_expense_limit': [(-1, 'OE0224'), (2147483648, 'OE0224')]\n }\n for constr_type, values in out_of_limits_values.items():\n for value, error_code in values:\n code, response = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': value,\n 'type': constr_type\n })\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], error_code)\n code, constraint_ttl = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': int(datetime.utcnow().timestamp()) + 3600,\n 'type': 'ttl'\n })\n self.assertEqual(code, 201)\n code, constraint_exp_limit = self.client.resource_constraint_create(\n self.resource['id'], {\n 'limit': 100,\n 'type': 'total_expense_limit'\n })\n self.assertEqual(code, 201)\n constraint_map = {\n 'ttl': constraint_ttl,\n 'total_expense_limit': constraint_exp_limit\n }\n for constr_type, values in out_of_limits_values.items():\n constraint = constraint_map[constr_type]\n for value, error_code in values:\n code, response = self.client.resource_constraint_update(\n constraint['id'], {'limit': value})\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], error_code)\n\n def test_create_constraint_invalid_type(self):\n constraint = {\n 'limit': 721,\n }\n code, response = self.client.resource_constraint_create(\n self.resource['id'], constraint)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0216')\n\n constraint['type'] = 'invalid'\n code, response = self.client.resource_constraint_create(\n self.resource['id'], constraint)\n self.assertEqual(code, 400)\n self.assertEqual(response['error']['error_code'], 'OE0004')\n\n def test_create_infinity_constraint(self):\n self.valid_constraint['limit'] = 0\n code, constraint = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 201)\n self.assertEqual(constraint['resource_id'], self.resource['id'])\n self.assertEqual(constraint['type'], ConstraintTypes.TTL.value)\n self.assertEqual(constraint['limit'], 0)\n\n def test_resource_constraint_events(self):\n user_info = {\n 'display_name': 'John Doe', 'id': self._user_id,\n 'email': 'example@hystax.com'\n }\n self.p_get_user_info.return_value = user_info\n\n p_publish_activities = patch(\n 'rest_api.rest_api_server.controllers.base.BaseController.'\n 'publish_activities_task'\n ).start()\n limit = int(datetime.utcnow().timestamp()) + 3600\n code, constraint = self.client.resource_constraint_create(\n self.resource['id'], {'limit': limit, 'type': 'ttl'})\n self.assertEqual(code, 201)\n evt_args = dict(\n c_type='ttl',\n r_name=self.resource['name'],\n r_id=self.resource['id'],\n u_name=user_info['display_name'],\n u_email=user_info['email']\n )\n activity_param_tuples = self.get_publish_activity_tuple(\n self.organization['id'], self.resource['id'], 'resource',\n 'constraint_created', {\n 'object_name': self.resource['name'],\n 'constraint_type': 'ttl'\n })\n p_publish_activities.assert_called_once_with(\n *activity_param_tuples, add_token=True\n )\n\n p_publish_activities = patch(\n 'rest_api.rest_api_server.controllers.base.BaseController.'\n 'publish_activities_task'\n ).start()\n code, response = self.client.resource_constraint_update(\n constraint['id'], {'limit': limit})\n self.assertEqual(code, 200)\n p_publish_activities.assert_not_called()\n\n code, response = self.client.resource_constraint_update(\n constraint['id'], {'limit': limit - 100})\n self.assertEqual(code, 200)\n evt_args['params'] = 'limit: %s' % (limit - 100)\n activity_param_tuples = self.get_publish_activity_tuple(\n self.organization['id'], self.resource['id'], 'resource',\n 'constraint_updated', {\n 'object_name': self.resource['name'],\n 'constraint_type': 'ttl',\n 'params': 'limit: %s' % (limit - 100)\n })\n p_publish_activities.assert_called_once_with(\n *activity_param_tuples, add_token=True\n )\n p_publish_activities = patch(\n 'rest_api.rest_api_server.controllers.base.BaseController.'\n 'publish_activities_task'\n ).start()\n code, _ = self.client.resource_constraint_delete(constraint['id'])\n self.assertEqual(code, 204)\n activity_param_tuples = self.get_publish_activity_tuple(\n self.organization['id'], self.resource['id'], 'resource',\n 'constraint_deleted', {\n 'object_name': self.resource['name'],\n 'constraint_type': 'ttl'\n })\n p_publish_activities.assert_called_once_with(\n *activity_param_tuples, add_token=True\n )\n\n def test_create_constraint_dependent(self):\n cluster_id = str(uuid.uuid4())\n self.resources_collection.update_one(\n filter={\n '_id': self.resource['id']\n },\n update={'$set': {\n 'cluster_id': cluster_id\n }}\n )\n code, res = self.client.resource_constraint_create(\n self.resource['id'], self.valid_constraint)\n self.assertEqual(code, 424)\n self.assertEqual(res['error']['error_code'], 'OE0464')\n\n def test_get_resource_constraint_for_zero_pool_policy(self):\n code, response = self.client.rules_list(self.organization['id'])\n self.assertEqual(code, 200)\n rules = response['rules']\n self.assertEqual(len(rules), 1)\n created_cloud_rule = rules[0]\n self.set_allowed_pair(self.user_id, created_cloud_rule['pool_id'])\n code, created_cloud_pool = self.client.pool_get(\n created_cloud_rule['pool_id'])\n self.assertEqual(code, 200)\n bp = {\n 'limit': 0,\n 'type': 'ttl'\n }\n code, policy = self.client.pool_policy_create(\n created_cloud_pool['id'], bp)\n self.assertEqual(code, 201)\n code, resp = self.cloud_resource_create(\n self.cloud_acc['id'], {\n 'cloud_resource_id': self.gen_id(),\n 'name': 'res_name',\n 'resource_type': 'res_type',\n 'employee_id': self.employee['id'],\n 'pool_id': created_cloud_pool['id']\n })\n self.assertEqual(code, 201)\n code, cloud_resource = self.client.cloud_resource_get(\n resp['id'], details=True)\n self.assertEqual(code, 200)\n cloud_resource_details = cloud_resource['details']\n policies = cloud_resource_details['policies']\n self.assertEqual(len(policies), 1)\n ttl = policies['ttl']\n self.assertEqual(ttl['limit'], 0)\n","repo_name":"hystax/optscale","sub_path":"rest_api/rest_api_server/tests/unittests/test_resource_constraints.py","file_name":"test_resource_constraints.py","file_ext":"py","file_size_in_byte":19379,"program_lang":"python","lang":"en","doc_type":"code","stars":646,"dataset":"github-code","pt":"22"} +{"seq_id":"34742332166","text":"import json\nimport re\nimport requests\n\n\nif __name__ == \"__main__\":\n query_list = []\n non_idiom_list = []\n json_url_pattern = r\"\\\"(https?://.+\\.json)\\\"\"\n\n for page_num in range(298):\n with open(\"../data/pages/\" + str(page_num) + \".html\", \"r\") as serp_page:\n serp_data = serp_page.read()\n print(\"Page {}: \".format(page_num), end=\"\")\n json_urls = re.findall(json_url_pattern, serp_data)\n if len(json_urls) != 100:\n print(\"Warning: This page has {} JSON urls.\".format(len(json_urls)))\n count = 0\n for json_url in json_urls:\n r = requests.get(json_url)\n if r.ok:\n content = r.content.decode(\"utf-8\")\n json_data = json.loads(content)\n query = json_data[\"search_parameters\"][\"q\"].strip(\"\\\"\\'\")\n query = query + \"1\" if query in query_list else query\n query_list.append(query)\n json_filename = query + \".json\"\n with open(\"../data/search_results/\" + json_filename, \"w\") as json_out:\n json_out.write(content)\n if \"成语\" not in content and \"成語\" not in content:\n non_idiom_list.append(query)\n count += 1\n if count % 10 == 0:\n print(\".\", end=\"\")\n else:\n print(\"Error downloading json file: \" + json_url)\n break\n print(\"Done.\".format(page_num))\n\n print(len(non_idiom_list))\n with open(\"../data/non_idiom_candidates.txt\", \"a\") as non_idiom_file:\n for word in non_idiom_list:\n non_idiom_file.write(word + \"\\n\")\n","repo_name":"T0ny8576/handle-solver","sub_path":"src/download_results.py","file_name":"download_results.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23533400759","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@File: handle_json.py\n\"\"\"\n\nimport json\n\nimport yaml\n\nfrom utils.logger import log as logger\n\n\ndef get_json(path, field=''):\n \"\"\"\n get json file datas\n :param path:\n :param field:\n :return:\n \"\"\"\n with open(path, 'r', encoding='utf-8') as f:\n json_data = json.load(f)\n if field:\n data = json_data.get(field)\n return data\n else:\n return json_data\n\n\ndef write_data(res, json_path, formatter=\"json\"):\n \"\"\"\n handle params write data to json file\n :param res:\n :param json_path:\n :param formatter: default json format\n :return:\n \"\"\"\n if formatter == \"json\":\n with open(json_path + \".json\", 'w', encoding='utf-8') as f:\n json.dump(res, f, ensure_ascii=False, sort_keys=True, indent=4)\n logger.info('Interface Params Total:{} ,write to json file successfully!'.format(len(res)))\n elif formatter in [\"yaml\", \"yml\"]:\n with open(json_path + \".yml\", \"w\", encoding=\"utf-8\") as f:\n yaml.safe_dump(res, stream=f, allow_unicode=True, default_flow_style=False)\n logger.info('Interface Params Total:{} ,write to yaml file successfully!'.format(len(res)))\n\n\ndef json_to_yaml(json_file):\n \"\"\"\n supported json to yaml file\n \"\"\"\n if json_file.endswith(\".json\"):\n with open(json_file, \"r\") as pf:\n json_to_dict = json.loads(pf.read())\n yaml_file = json_file.replace(\".json\", \".yaml\")\n with open(yaml_file, \"w\") as fp:\n yaml.safe_dump(json_to_dict, stream=fp, default_flow_style=False)\n logger.info(\"json to yaml success!!!\")\n else:\n logger.info(\"The file does not end with a JSON suffix!!!\")\n\n\ndef yaml_to_json(yaml_file):\n \"\"\"\n yaml to json file\n \"\"\"\n if yaml_file.endswith(\".yml\"):\n with open(yaml_file, \"r\", encoding=\"utf8\") as pf:\n # First convert yaml to dict format\n yaml_to_dict = yaml.load(pf, Loader=yaml.FullLoader)\n dict_to_json = json.dumps(yaml_to_dict, sort_keys=False, indent=4, separators=(',', ': '))\n json_file = yaml_file.replace(\".yaml\", \".json\")\n with open(json_file, \"w\") as fp:\n fp.write(dict_to_json)\n logger.info(\"yaml to json success!!!\")\n else:\n logger.info(\"The file does not end with a YAML suffix!!!\")\n","repo_name":"xuping2012/httprunner_swagger","sub_path":"utils/handle_json.py","file_name":"handle_json.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"22"} +{"seq_id":"24916851620","text":"import sys\nfrom collections import namedtuple\nimport numpy as np\nfrom utils.bmodel_dis import opdef_1684x\nfrom utils.bmodel_dis import bmodel_fbs\nimport itertools\n\n# Example:\n\"\"\"\n# bmodel file\ncode = TPUCMD(file_name)\n\n\n# bdc binary file\nbdc_code = decode_bdc(file_name)\n\n# gdma binary file\ngama_code = decode_gdma(file_name)\n\"\"\"\n\n\ndef read_file(cmd_file):\n cmmand_buf = np.fromfile(cmd_file, dtype=np.uint8)\n return np.unpackbits(cmmand_buf, bitorder=\"little\")\n\n\ndef read_buf(cmd_buf):\n cmmand_buf = np.frombuffer(cmd_buf, dtype=np.uint8)\n return np.unpackbits(cmmand_buf, bitorder=\"little\")\n\n\nclass BmodelReader:\n header_t = np.dtype(\n [\n (\"magic\", np.uint32),\n (\"header_size\", np.uint32),\n (\"flatbuffers_size\", np.uint32),\n (\"binary_size\", np.uint32),\n (\"reserved\", np.uint32, 12),\n ]\n )\n\n class cmd_group_cls:\n def __init__(self, fbs: bmodel_fbs.CmdGroup, cmd_buf):\n self.bdc_num = fbs.BdcNum()\n self.gdma_num = fbs.GdmaNum()\n self.binary_bdc = (fbs.BinaryBdc().Start(), fbs.BinaryBdc().Size()) # type: ignore\n self.binary_gdma = (fbs.BinaryGdma().Start(), fbs.BinaryGdma().Size()) # type: ignore\n self.bdc_cmd = cmd_buf[self.binary_bdc[0] : sum(self.binary_bdc)]\n self.gdma_cmd = cmd_buf[self.binary_gdma[0] : sum(self.binary_gdma)]\n\n def __repr__(self):\n return f\"bdc_num: {self.bdc_num}\\ngdma_num: {self.gdma_num}\"\n\n def __init__(self, bmodel_file):\n self.head = None\n self.binary_desc = None\n self.binary = None\n with open(bmodel_file, \"rb\") as file_obj:\n file_obj.seek(0, 0)\n self.head = np.frombuffer(\n file_obj.read(self.header_t.itemsize), dtype=self.header_t\n )\n self.binary_desc = file_obj.read(self.head[\"flatbuffers_size\"][0])\n self.binary = file_obj.read(self.head[\"binary_size\"][0])\n bmodel = bmodel_fbs.Model.GetRootAsModel(self.binary_desc, 0)\n\n def get_cmd(param, _fields):\n field, *_fields = _fields\n mult = []\n for s in range(getattr(param, field + \"Length\")()):\n cmd = getattr(param, field)(s)\n # if cmd is None:\n # return mult\n if _fields == []:\n mult.append(self.cmd_group_cls(cmd, self.binary))\n else:\n mult.append(get_cmd(cmd, _fields))\n return mult\n\n # net{ stage{ subnet{ cmdgroup... }... }... }...\n fields = [\"Net\", \"Parameter\", \"SubNet\", \"CmdGroup\"]\n self.nets = get_cmd(bmodel, fields)\n\n\nclass TPUCMD:\n _cmd = namedtuple(\"cmd\", [\"bdc\", \"gdma\", \"all\"])\n\n def decode_cmd(self, cmd):\n bdc_cmd = read_buf(cmd.bdc_cmd)\n gdma_cmd = read_buf(cmd.gdma_cmd)\n bdc = itertools.islice(self.decode_bdc(bdc_cmd), cmd.bdc_num)\n gdma = itertools.islice(self.decode_gdma(gdma_cmd), cmd.gdma_num)\n bdc = list(bdc)\n gdma = list(gdma)\n return self._cmd(bdc, gdma, self.merge_cmd(gdma, bdc))\n\n def __init__(self, bmodel_file):\n self.bmodel = BmodelReader(bmodel_file)\n\n def get_cmd(cmd, id):\n for idx, v in enumerate(cmd):\n id[-1] = idx\n if isinstance(v, list):\n id.append(idx)\n yield from get_cmd(v, id)\n else:\n yield (id, self.decode_cmd(v))\n\n self.cmd = get_cmd(self.bmodel.nets, [0])\n\n @staticmethod\n def __decode(cmd_buf, cmd_bits, cmd_set, sys_end):\n code = None\n cur = 0\n l, h = cmd_bits\n while cmd_buf.size > 0:\n cmd_key = opdef_1684x.packbits(cmd_buf[l:h])\n if cmd_key in cmd_set:\n recognize = False\n for op in cmd_set[cmd_key]:\n if op.is_comp(cmd_buf):\n # check whether this command is recognized by the operation\n code = op.decode(cmd_buf)\n yield code\n # consume this command_code\n cmd_buf = cmd_buf[op.len :]\n cur += op.len\n recognize = True\n break\n is_sys = isinstance(code, sys_end)\n is_less_1024 = cmd_buf.size < 1025\n is_all_zeros = np.all(cmd_buf == 0)\n if is_sys and is_less_1024 and is_all_zeros:\n break # all the BDC have been processed\n if not recognize:\n raise ValueError(\n \"Can not decode cmd, with opcode: {}, at {}.\".format(\n cmd_key, cur\n )\n )\n else:\n raise ValueError(\n \"Can not decode cmd, with opcode: {}, at {}.\".format(cmd_key, cur)\n )\n\n @staticmethod\n def decode_bdc(cmd_buf):\n return TPUCMD.__decode(\n cmd_buf,\n opdef_1684x.bdc_base.cmd_bits,\n opdef_1684x.bdc_cmd,\n opdef_1684x.sysid_op,\n )\n\n @staticmethod\n def decode_gdma(cmd_buf):\n return TPUCMD.__decode(\n cmd_buf,\n opdef_1684x.dma_base.cmd_bits,\n opdef_1684x.dma_cmd,\n opdef_1684x.sdma_sys,\n )\n\n @staticmethod\n def merge_cmd(main_cmd, inserted_cmd):\n # remove the system command\n main_id = [(m.cmd_id, m) for m in main_cmd[:-1]]\n inserted_id = [(i.cmd_id_dep, i) for i in inserted_cmd[:-1]]\n # \"sorted\" is stable, which keeps the inserted commands\n # after the main instructions.\n cmd = main_id + inserted_id\n cmd_sorted = sorted(cmd, key=lambda x: x[0])\n return [x[1] for x in cmd_sorted]\n\n\ndef decode_bdc(file_name):\n a = read_file(file_name)\n return [x for x in TPUCMD.decode_bdc(a)]\n\n\ndef decode_gdma(file_name):\n a = read_file(file_name)\n return [x for x in TPUCMD.decode_gdma(a)]\n\n\ndef unified_diff(a, b, fromfile=\"\", tofile=\"\", n=3, format=\"mlir\"):\n r\"\"\"\n Compare the operations of two BModel; generate the delta as a unified diff.\n\n Unified diffs are a compact way of showing line changes and a few\n lines of context. The number of context lines is set by 'n' which\n defaults to three.\n \"\"\"\n import difflib\n\n fmt_op = {\n \"raw\": lambda op: str(op.attr),\n \"mlir\": lambda op: str(op),\n \"bits\": lambda op: \"\".join((str(x) for x in op.cmd)),\n }\n fmt = fmt_op[format]\n\n lineterm = \"\\n\"\n started = False\n for group in difflib.SequenceMatcher(None, a, b).get_grouped_opcodes(n):\n if not started:\n started = True\n yield f\"--- {fromfile}\"\n yield f\"+++ {tofile}\"\n\n first, last = group[0], group[-1]\n file1_range = difflib._format_range_unified(first[1], last[2])\n file2_range = difflib._format_range_unified(first[3], last[4])\n yield \"@@ -{} +{} @@{}\".format(file1_range, file2_range, lineterm)\n\n for tag, i1, i2, j1, j2 in group:\n if tag == \"equal\":\n for line in a[i1:i2]:\n yield \" \" + fmt(line)\n continue\n if tag in {\"replace\", \"delete\"}:\n for line in a[i1:i2]:\n yield \"- \" + fmt(line)\n if tag in {\"replace\", \"insert\"}:\n for line in b[j1:j2]:\n yield \"+ \" + fmt(line)\n yield \"\"\n\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"BModel disassembler.\")\nparser.add_argument(\n \"bmodels\",\n type=str,\n nargs=\"+\",\n help=\"The path of BModels. If one BModel is provided, the assemble code will be printed. Compare the Bmodels if two models provided.\",\n)\nparser.add_argument(\n \"--fmt\",\n dest=\"format\",\n choices=[\"mlir\", \"raw\", \"bits\"],\n default=\"mlir\",\n help=\"The format of format operations.\",\n)\nparser.add_argument(\n \"--N\",\n dest=\"N\",\n type=int,\n default=3,\n help=\"The number of context lines.\",\n)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n if len(args.bmodels) == 1:\n tpu_cmd = TPUCMD(args.bmodels[0])\n for idx, cmd in tpu_cmd.cmd:\n fmt_cmd = [\"\\n \" + str(x) for x in cmd.all]\n fmt_cmd = \"\".join(fmt_cmd) + \"\\n\"\n fun_name = \"graph\" + \"\".join((str(x) for x in idx))\n print(f\"func.func @{fun_name}() {{{fmt_cmd}}}\")\n\n if len(args.bmodels) == 2:\n tpu_cmd_a = TPUCMD(args.bmodels[0])\n tpu_cmd_b = TPUCMD(args.bmodels[1])\n is_same = True\n for (idx, cmd_a), (_, cmd_b) in zip(tpu_cmd_a.cmd, tpu_cmd_b.cmd):\n fmt_cmd = [\n \"\\n\" + x\n for x in unified_diff(\n cmd_a.all,\n cmd_b.all,\n args.bmodels[0],\n args.bmodels[1],\n n=args.N,\n format=args.format,\n )\n ]\n fun_name = \"graph\" + \"\".join((str(x) for x in idx))\n if fmt_cmd != []:\n is_same = False\n fmt_cmd = \"\".join(fmt_cmd[:-1]) + \"\\n\"\n print(f\"func.func @{fun_name}() {{{fmt_cmd}}}\")\n if is_same:\n print(f\"\"\"\"{args.bmodels[0]}\" and \"{args.bmodels[1]}\" are the same!\"\"\")\n exit(0)\n else:\n exit(1)\n parser.error(\"Too many BModels.\")\n","repo_name":"TBAALi/tpu-mlir","sub_path":"python/tools/bmodel_dis.py","file_name":"bmodel_dis.py","file_ext":"py","file_size_in_byte":9559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"73169310457","text":"import collections\nimport string\n\nSubMove = collections.namedtuple('SubMove', ['pos', 'take'])\n\nDIRS = {'w': 1, 'b': -1}\n\nclass Board:\n def __init__(self, size=8):\n self.size = size\n self.pieces = {}\n self.player = 'w'\n self._move_cache = {}\n\n self.valid_coords = {(x, y)\n for x in range(self.size)\n for y in range(self.size)\n if (x + y) % 2 == 0\n }\n\n for x in range(self.size):\n for y in range(3):\n if (x + y) % 2 == 0:\n self.pieces[x, y] = 'w'\n for y in range(self.size-3, self.size):\n if (x + y) % 2 == 0:\n self.pieces[x, y] = 'b'\n\n def __eq__(self, other):\n try:\n return (self.size == other.size\n and self.player == other.player\n and self.pieces == other.pieces)\n except AttributeError:\n return NotImplemented\n\n def __ne__(self, other):\n return not self == other\n\n def dump(self):\n rows = [[' ' if (x + y) % 2 == 0 else '.'\n for x in range(self.size)] for y in range(self.size)]\n for (x, y), c in self.pieces.items():\n rows[self.size-1-y][x] = c\n result = []\n result += [f'[{self.player}] {string.ascii_letters[:self.size]}']\n result += [f' +{\"-\" * self.size}+']\n result.extend(f'{self.size-i:2} |{\"\".join(row)}|{self.size-i:2}'\n for i, row in enumerate(rows))\n result += [f' +{\"-\" * self.size}+']\n result += [f' {string.ascii_letters[:self.size]}']\n result += ['']\n return '\\n'.join(result)\n\n @classmethod\n def load(cls, source):\n if source.startswith('[w]'):\n player = 'w'\n elif source.startswith('[b]'):\n player = 'b'\n else:\n raise ValueError('bad data')\n rows = [r for r in source.splitlines() if r.count('|') == 2]\n if not rows:\n raise ValueError('no data found')\n self = cls(size=len(rows))\n self.player = player\n self.pieces.clear()\n for ym, row in enumerate(rows):\n pre, row, post = row.split('|')\n y = self.size-1-ym\n if len(row) != self.size:\n raise ValueError('bad number of columns')\n for x, c in enumerate(row):\n if (x, y) in self.valid_coords:\n if c == '.':\n continue\n if c not in 'wbWB':\n raise ValueError('bad symbol: ' + c)\n self.pieces[x, y] = c\n else:\n if c != ' ':\n raise ValueError('symbol on bad tile: ' + c)\n return self\n\n def _get_submoves(self, prefix):\n result = {}\n if not prefix:\n result = {}\n max_priority = 0\n for (x, y), c in self.pieces.items():\n if c.lower() == self.player:\n moves = self.get_submoves([(x, y)])\n for submove in moves.values():\n if not submove.take:\n priority = 0\n elif c.islower():\n priority = 1\n else:\n priority = 2\n if priority < max_priority:\n continue\n elif priority > max_priority:\n max_priority = priority\n result.clear()\n result[x, y] = SubMove((x, y), None)\n else:\n prev = [prefix[0]]\n piece = self.pieces[prefix[0]]\n removed = set(self.get_jumped(prefix))\n if removed:\n jumping = True\n else:\n jumping = False\n if len(prefix) > 1:\n return {}\n removed.add(prefix[0])\n if piece.islower():\n y_dirs = [DIRS[self.player]]\n else:\n y_dirs = -1, 1\n for x_direction in -1, 1:\n for y_direction in y_dirs:\n x, y = prefix[-1]\n taken = None\n while True:\n x += x_direction\n y += y_direction\n if (x, y) not in self.valid_coords:\n break\n p = self.pieces.get((x, y))\n if not p or (x, y) == prefix[0]:\n if taken in removed:\n break\n if taken and not jumping:\n jumping = True\n result.clear()\n if not taken and jumping:\n if piece.islower():\n break\n continue\n result[x, y] = SubMove((x, y), taken)\n elif p and p.lower() == self.player:\n break\n elif not taken or p in removed:\n taken = x, y\n continue\n else:\n break\n if piece.islower():\n break\n return result\n\n def get_submoves(self, prefix):\n prefix = tuple(prefix)\n try:\n c = self._move_cache[prefix]\n except KeyError:\n c = self._move_cache[prefix] = self._get_submoves(prefix)\n return c\n\n def possible_moves(self, prefix):\n if prefix and prefix[0] not in self.get_submoves([]):\n raise ValueError('bad prefix')\n return set(self.get_submoves(prefix))\n\n def move_finished(self, prefix):\n return not self.possible_moves(prefix)\n\n def get_jumped(self, prefix):\n if not prefix:\n return []\n result = []\n prev = [prefix[0]]\n for coord in prefix[1:]:\n try:\n f = self.get_submoves(prev)[coord]\n except KeyError:\n raise ValueError('bad prefix')\n if f.take:\n result.append(f.take)\n prev += (coord,)\n return result\n\n def make_move(self, move):\n deleted = []\n if not self.move_finished(move):\n raise ValueError('bad move')\n piece = self.pieces.pop(move[0])\n prefix = [move[0]]\n for coord in move[1:]:\n f = self.get_submoves(prefix)[coord]\n if f.take:\n deleted.append(self.pieces.pop(f.take))\n prefix += (coord,)\n if self.player == 'w':\n self.player = 'b'\n if move[-1][1] == self.size-1:\n piece = piece.upper()\n else:\n self.player = 'w'\n if move[-1][1] == 0:\n piece = piece.upper()\n self.pieces[move[-1]] = piece\n self._move_cache.clear()\n","repo_name":"encukou/dama","sub_path":"dama/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"37253544005","text":"from Action import Action\nfrom Player import Player\nfrom random import randint\n\nclass Chance:\n def __init__(self):\n self.full_deck = []\n self.empty_deck = []\n\n self.building_loan = ('Your building loan matures. Collect $150', Action('make payment', {'from': 'BANK', 'to': 'current player', 'amount': 150}))\n self.board_chairman = ('You have been elected chairman of the board. Pay each player $50',\n Action('make payment', {'from': 'current player', 'to': 'ALL', 'amount': 50}))\n self.dividend = ('Bank pays you dividend of $50', Action('make payment', {'from': 'BANK', 'to': 'current player', 'amount': 50}))\n self.speeding_fine = ('Speeding fine $15', Action('make payment', {'from': 'current player', 'to': 'BANK', 'amount': 15}))\n self.gtj = ('Go to Jail', Action('go to jail', {'player': 'current player'}))\n self.advance_to_go = ('Advance to Go. (Collect $200)', Action('move to square', {'player': 'current player', 'name': 'GO'}))\n self.repairs = ('Make general repairs on all your properties: for each house pay $25, for each hotel pay $100', \n Action('repairs', {'player': 'current player', 'house cost': 25, 'hotel cost': 100}))\n self.get_out_of_jail_free = ('Get out of jail free. This card may be kept until needed or traded',\n Action('goojfc', {'player': 'current player', 'from': 'chance'}))\n self.advance_to_st_charles = ('Advance to St. Charles Place. If you pass go, collect $200',\n Action('move to square', {'player': 'current player', 'name': 'St. Charles Place'}))\n self.advance_to_illinois = ('Advance to Illinois Avenue. If you pass go, collect $200',\n Action('move to square', {'player': 'current player', 'name': 'Illinois Avenue'}))\n self.advance_to_boardwalk = ('Advance to Boardwalk', Action('move to square', {'player': 'current player', 'name': 'Boardwalk'}))\n self.advance_to_reading_rr = ('Take a trip to Reading Railroad. If you pass go, collect $200',\n Action('move to square', {'player': 'current player', 'name': 'Reading Railroad'}))\n self.back_three = ('Go back 3 spaces', Action('move to square', {'player': 'current player', 'name': 'BACK', 'spaces': 3}))\n self.nearest_utility = ('Advance to the nearest utility\\nIf UNOWNED, you may buy it from the bank\\n' + \n 'If OWNED, throw dice and pay the owner 10 times the amount thrown',\n Action('move to square', {'player': 'current player', 'name': 'NEXT UTILITY', 'payment multiplier': 10}))\n self.nearest_rr = ('Advance to the nearest railroad\\nIf UNOWNED, you may buy it from the bank\\n' + \n 'If OWNED, pay the owner twice the rental to which they are otherwise entitled',\n Action('move to square', {'player': 'current player', 'name': 'NEXT RAILROAD', 'payment multiplier': 2}))\n \n self.empty_deck.append(self.building_loan)\n self.empty_deck.append(self.board_chairman)\n self.empty_deck.append(self.dividend)\n self.empty_deck.append(self.speeding_fine)\n self.empty_deck.append(self.gtj)\n self.empty_deck.append(self.advance_to_go)\n self.empty_deck.append(self.repairs)\n self.empty_deck.append(self.get_out_of_jail_free)\n self.empty_deck.append(self.advance_to_st_charles)\n self.empty_deck.append(self.advance_to_illinois)\n self.empty_deck.append(self.advance_to_boardwalk)\n self.empty_deck.append(self.advance_to_reading_rr)\n self.empty_deck.append(self.back_three)\n self.empty_deck.append(self.nearest_utility)\n self.empty_deck.append(self.nearest_rr)\n self.empty_deck.append(self.nearest_rr)\n\n def shuffle(self):\n #print('Shuffling Chance cards...')\n while len(self.empty_deck) > 0:\n card_num = randint(0, len(self.empty_deck) - 1)\n card = self.empty_deck.pop(card_num)\n self.full_deck.append(card)\n\n def draw(self, current_player):\n card = self.full_deck.pop(0)\n if not(card == self.get_out_of_jail_free):\n self.empty_deck.append(card)\n if len(self.full_deck) == 0:\n self.shuffle()\n for i in card[1].information:\n if (card[1].information[i] == 'current player') or (type(card[1].information[i]) == Player):\n card[1].information[i] = current_player\n return card\n\n def return_goojf(self):\n self.empty_deck.append(self.get_out_of_jail_free)","repo_name":"cswizard11/monopoly","sub_path":"Chance.py","file_name":"Chance.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19317630241","text":"import requests, json, ast\n\nfrom mysite import settings\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom cert.forms import DeviceEnablementForm\n\nfrom schema.models import Module, KernelVersion\n\n# Create your views here.\ndef cert(request):\n # this sets the correct url root for making api requests\n if settings.ON_PAAS:\n api_url_root = 'http://vermanlab-jmitchel.rhcloud.com'\n else:\n api_url_root = 'http://localhost:8000'\n\n # this gets all the possible hardware aliases so the user can select from the form\n get_hardware_enablement_list_url = api_url_root + '/api/get_devices'\n aliases = json.loads(requests.get(get_hardware_enablement_list_url, params=request.GET).text)\n\n enabled_kernels = []\n nonenabled_kernels = []\n\n # render the form (and the enabled/non-enabled kernels if the user has selected an alias)\n if request.method == 'POST':\n form = DeviceEnablementForm(request.POST, alias_list = aliases)\n if form.is_valid():\n #selectedAlias is the selected alias\n selectedAlias = form.cleaned_data['selectedAlias']\n aliasDict = ast.literal_eval(selectedAlias)\n\n for mod in aliasDict['module']:\n realMod = Module.objects.get(pk=mod)\n kernelVersions = realMod.kernelVersions.all()\n for kv in kernelVersions:\n if kv not in enabled_kernels and kv.errata:\n enabled_kernels.append(kv)\n\n for kernel in KernelVersion.objects.all():\n if kernel not in enabled_kernels and kernel.errata:\n nonenabled_kernels.append(kernel)\n\n enabled_kernels.sort(key=lambda x: x.errata, reverse=False)\n nonenabled_kernels.sort(key=lambda x: x.errata, reverse=False)\n else:\n form = DeviceEnablementForm(alias_list = aliases)\n\n return render(request, 'cert/cert.html', {\n 'form': form,\n 'enabled_kernels': enabled_kernels,\n 'nonenabled_kernels': nonenabled_kernels,\n })","repo_name":"jlmitch5/vermanlab","sub_path":"cert/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20225975907","text":"import subprocess\nimport tempfile\nimport pandas as pd\nimport os\nimport formulaic\n\nimport qiime2\nfrom qiime2.metadata import NumericMetadataColumn\n\nfrom ._format import DataLoafPackageDirFmt\n\n\ndef run_commands(cmds, verbose=True):\n if verbose:\n print('Running external command line application(s). This may print'\n ' messages to stdout and/or stderr.')\n print('The command(s) being run are below. These commands cannot be'\n ' manually re-run as they will depend on temporary files that no'\n ' longer exist.')\n for cmd in cmds:\n if verbose:\n print('\\nCommand:', end=' ')\n print(' '.join(cmd), end='\\n\\n')\n subprocess.run(cmd, check=True)\n\n\ndef ancombc(table: pd.DataFrame, metadata: qiime2.Metadata, formula: str,\n p_adj_method: str = 'holm', prv_cut: float = 0.1, lib_cut: int = 0,\n reference_levels: str = None, neg_lb: bool = False,\n tol: float = 1e-05, max_iter: int = 100, conserve: bool = False,\n alpha: float = 0.05) -> DataLoafPackageDirFmt:\n\n return _ancombc(\n table=table,\n metadata=metadata,\n formula=formula,\n p_adj_method=p_adj_method,\n prv_cut=prv_cut,\n lib_cut=lib_cut,\n reference_levels=reference_levels,\n neg_lb=neg_lb,\n tol=tol,\n max_iter=max_iter,\n conserve=conserve,\n alpha=alpha,\n )\n\n\n# utility functions for formula parsing and column validation\ndef _parse_terms(formula):\n parse = formulaic.parser.parser.DefaultFormulaParser(\n include_intercept=False)\n terms = parse.get_ast(formula=formula).flatten()\n formula_terms = _leaf_collector(terms)\n return formula_terms\n\n\ndef _leaf_collector(term):\n if isinstance(term, formulaic.parser.types.Token):\n return [str(term)]\n\n if type(term) is not list:\n return []\n\n return _leaf_collector(term[1]) + _leaf_collector(term[2])\n\n\ndef _ancombc(table, metadata, formula, p_adj_method, prv_cut, lib_cut,\n reference_levels, neg_lb, tol, max_iter, conserve, alpha):\n\n meta = metadata.to_dataframe()\n\n # error on IDs found in table but not in metadata\n missing_ids = table.index.difference(meta.index).values\n\n if missing_ids.size > 0:\n raise KeyError('Not all samples present within the table were found in'\n ' the associated metadata file. Please make sure that'\n ' all samples in the FeatureTable are also present in'\n ' the metadata.'\n ' Sample IDs not found in the metadata: %s'\n % missing_ids)\n\n # column validation for the formula parameter\n formula_terms = _parse_terms(formula=formula)\n for term in formula_terms:\n metadata.get_column(term)\n\n # column & level validation for the reference_levels parameter\n if reference_levels is not None:\n for i in reference_levels:\n column = i.split('::')[0]\n level_value = i.split('::')[1]\n\n # check that reference_level columns are present in the metadata\n ref_column = metadata.get_column(column)\n\n # check that each chosen column contains discrete values\n if isinstance(ref_column, NumericMetadataColumn):\n raise TypeError('One of the `reference_levels` columns is not'\n ' a categorical Metadata column. Please make'\n ' sure that all chosen reference level columns'\n ' are categorical, and not numeric.'\n ' Non-categorical column selected:'\n ' %s' % column)\n\n if level_value not in pd.unique(meta[column].values):\n raise ValueError('Value provided in `reference_levels`'\n ' parameter not found in the associated'\n ' column within the metadata. Please make'\n ' sure each column::value pair is present'\n ' within the metadata file.'\n ' \\n\\n'\n ' column::value pair with a value that was'\n ' not found: \"%s\"' % i)\n\n # check that reference_level columns are also in the formula\n if column not in formula_terms:\n raise ValueError('`reference_levels` column \"%s\" was not found'\n ' within the `formula` terms.' % column)\n\n # check that IDs associated with chosen reference level(s) are\n # present within the input table\n level_value_idx = meta.index[meta[column] == level_value]\n table_idx = table.index\n\n if level_value_idx.intersection(table_idx).empty:\n raise ValueError('Value provided in `reference_levels`'\n ' parameter not associated with any IDs'\n ' in the feature table. Please make sure'\n ' the value(s) selected in each'\n ' column::value pair are associated with'\n ' IDs present in the feature table.'\n ' \\n\\n'\n ' Value not associated with any IDs in'\n ' the table: \"%s\"' % level_value,\n ' IDs not found in table:'\n ' \"%s\"' % level_value_idx)\n\n else:\n reference_levels = ''\n\n with tempfile.TemporaryDirectory() as temp_dir_name:\n biom_fp = os.path.join(temp_dir_name, 'input.biom.tsv')\n meta_fp = os.path.join(temp_dir_name, 'input.map.txt')\n\n table.to_csv(biom_fp, sep='\\t', header=True)\n meta.to_csv(meta_fp, sep='\\t', header=True)\n\n output_loaf = DataLoafPackageDirFmt()\n\n cmd = ['run_ancombc.R',\n '--inp_abundances_path', biom_fp,\n '--inp_metadata_path', meta_fp,\n '--formula', str(formula),\n '--p_adj_method', p_adj_method,\n '--prv_cut', str(prv_cut),\n '--lib_cut', str(lib_cut),\n '--reference_levels', str(reference_levels),\n '--neg_lb', str(neg_lb),\n '--tol', str(tol),\n '--max_iter', str(max_iter),\n '--conserve', str(conserve),\n '--alpha', str(alpha),\n '--output_loaf', str(output_loaf)\n ]\n\n try:\n run_commands([cmd])\n except subprocess.CalledProcessError as e:\n raise Exception('An error was encountered while running ANCOM-BC'\n ' in R (return code %d), please inspect stdout and'\n ' stderr to learn more.' % e.returncode)\n\n return output_loaf\n","repo_name":"colinbrislawn/q2-composition","sub_path":"q2_composition/_ancombc.py","file_name":"_ancombc.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"30540877658","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.utils.decorators import method_decorator\n\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import viewsets\nfrom users.serializers.users import GroupSerializer, UserSerializer\n\n__all__ = (\"UserViewSet\", \"GroupViewSet\")\n\nUser = get_user_model()\n\n\n@method_decorator(\n name=\"list\",\n decorator=swagger_auto_schema(\n operation_summary=\"User List\", operation_description=\"유저 목록\"\n ),\n)\n@method_decorator(\n name=\"retrieve\",\n decorator=swagger_auto_schema(\n operation_summary=\"Get User\", operation_description=\"유저 한명\"\n ),\n)\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n\n swagger_schema = None\n\n queryset = User.objects.all().order_by(\"-date_joined\")\n serializer_class = UserSerializer\n\n\n@method_decorator(\n name=\"list\",\n decorator=swagger_auto_schema(\n operation_summary=\"Group List\", operation_description=\"그룹 목록\"\n ),\n)\n@method_decorator(\n name=\"retrieve\",\n decorator=swagger_auto_schema(\n operation_summary=\"Get Group\", operation_description=\"그룹 하나\"\n ),\n)\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n swagger_schema = None\n\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n","repo_name":"lizardmon/Booken-Backend","sub_path":"sources/app/users/apis/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3468607545","text":"import struct\nfrom typing import Optional, Tuple\nfrom spinn_utilities.abstract_base import AbstractBase\nfrom spinn_utilities.overrides import overrides\nfrom spinnman.connections.udp_packet_connections import SCAMPConnection\nfrom spinnman.messages.sdp import SDPMessage, SDPFlag\nfrom spinnman.messages.scp.abstract_messages import AbstractSCPRequest\nfrom spinnman.messages.scp.enums import SCPResult\nfrom .spalloc_proxied_connection import SpallocProxiedConnection\n\n_TWO_SHORTS = struct.Struct(\"<2H\")\n_TWO_SKIP: bytes = b'\\0\\0'\n\n\nclass SpallocSCPConnection(\n SCAMPConnection, SpallocProxiedConnection,\n metaclass=AbstractBase):\n \"\"\"\n The socket interface supported by proxied sockets. The socket will always\n be talking to a specific board. This emulates a\n :py:class:`SCAMPConnection`.\n \"\"\"\n __slots__ = ()\n\n def __init__(self, x, y):\n super(SpallocSCPConnection, self).__init__(x, y)\n\n @overrides(SCAMPConnection.receive_sdp_message)\n def receive_sdp_message(\n self, timeout: Optional[float] = None) -> SDPMessage:\n data = self.receive(timeout)\n return SDPMessage.from_bytestring(data, 2)\n\n @overrides(SCAMPConnection.send_sdp_message)\n def send_sdp_message(self, sdp_message: SDPMessage):\n # If a reply is expected, the connection should\n if sdp_message.sdp_header.flags == SDPFlag.REPLY_EXPECTED:\n sdp_message.sdp_header.update_for_send(self.chip_x, self.chip_y)\n else:\n sdp_message.sdp_header.update_for_send(0, 0)\n self.send(_TWO_SKIP + sdp_message.bytestring)\n\n @overrides(SCAMPConnection.receive_scp_response)\n def receive_scp_response(\n self, timeout=1.0) -> Tuple[SCPResult, int, bytes, int]:\n data = self.receive(timeout)\n result, sequence = _TWO_SHORTS.unpack_from(data, 10)\n return SCPResult(result), sequence, data, 2\n\n @overrides(SCAMPConnection.send_scp_request)\n def send_scp_request(self, scp_request: AbstractSCPRequest):\n self.send(self.get_scp_data(scp_request))\n\n @overrides(SCAMPConnection.get_scp_data)\n def get_scp_data(\n self, scp_request: AbstractSCPRequest, x=None, y=None) -> bytes:\n if x is None:\n x = self.chip_x\n if y is None:\n y = self.chip_y\n scp_request.sdp_header.update_for_send(x, y)\n return _TWO_SKIP + scp_request.bytestring\n","repo_name":"SpiNNakerManchester/SpiNNMan","sub_path":"spinnman/spalloc/spalloc_scp_connection.py","file_name":"spalloc_scp_connection.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"32290617831","text":"#coding:utf8\n__author__ = 'flybird1971'\n\"\"\"\n 本包主要是商城相关业务逻辑处理\n register.py 用于用户注册逻辑\n login.py 用于用户登录\n resetpwd.py 用于密码重置逻辑\n pay.py 用于支付业务处理,调用银行第三方支付接口\n car.py 查看购物车\n shoping.py 用户购物业务逻辑处理\n\"\"\"","repo_name":"flybird1971/spiders","sub_path":"test/ecmall/shop/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4179558314","text":"# PA6, CS124, Stanford, Winter 2019\n# v.1.0.3\n# Original Python code by Ignacio Cases (@cases)\n######################################################################\nimport movielens\nimport numpy as np\nimport re\nimport collections\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport random\n\n\nclass Chatbot:\n \"\"\"Simple class to implement the chatbot for PA 6.\"\"\"\n\n def __init__(self, creative=False):\n # The chatbot's default name is `moviebot`. Give your chatbot a new name.\n self.name = 'MotherBot'\n\n self.creative = creative\n\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, ratings = movielens.ratings()\n self.sentiment = movielens.sentiment()\n\n #############################################################################\n # TODO: Binarize the movie ratings matrix. #\n #############################################################################\n\n # Binarize the movie ratings before storing the binarized matrix.\n self.ratings = self.binarize(ratings)\n assert(len(self.titles) == len(self.ratings))\n self.user_ratings = np.zeros((len(self.titles),))\n\n self.n_data_points = 0\n self.casual_titles = self.make_casual_titles() if self.creative else []\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n #############################################################################\n # 1. WARM UP REPL #\n #############################################################################\n\n\n def make_casual_titles(self):\n casual_titles = []\n\n for t in self.titles:\n t = t[0]\n title, year = self.titleAndYear(t)\n titles = self.allTitleOpts(title)\n\n for i,to in enumerate(titles):\n to = to.lower()\n words = to.split()\n if (len(words) >= 2 \n and words[-1] in ('a', 'an', 'the', 'la', 'le') \n and words[-2][-1] == ','):\n words = [words[-1]] + words[:-1]\n words[-1] = words[-1][:-1]\n to = ' '.join(words)\n titles[i] = to\n\n casual_titles.append((titles, year))\n\n return casual_titles\n\n def greeting(self):\n \"\"\"Return a message that the chatbot uses to greet the user.\"\"\"\n #############################################################################\n # TODO: Write a short greeting message #\n #############################################################################\n\n greeting_message = \"Hi, I am %s the movie recommender. Fire away!\" % self.name\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return greeting_message\n\n def goodbye(self):\n \"\"\"Return a message that the chatbot uses to bid farewell to the user.\"\"\"\n #############################################################################\n # TODO: Write a short farewell message #\n #############################################################################\n\n goodbye_message = \"Thanks for joining me! Have fun watching more movies!\"\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return goodbye_message\n\n\n ###############################################################################\n # 2. Modules 2 and 3: extraction and transformation #\n ###############################################################################\n\n\n def begins_with_article(self, text):\n return text.strip() != '' and text.split()[0].lower() in ('a','an','the')\n\n def article_to_back(self, text):\n if not text.strip():\n return text\n tok = text.split()[0]\n text = text[len(tok):].strip() + ', ' + tok\n return text\n\n def ask_for_another(self):\n return random.choice([\n 'Tell me about another movie.',\n 'I would like to hear about some more movies.',\n 'Please, tell me about some more titles you felt strongly about.',\n 'Any other movies you would like to tell me about?'\n ])\n\n def couldnt_find_title(self):\n return random.choice([\n \"I'm sorry, but I wasn't able to find a movie title there. Try again please.\",\n \"I don't know what movie you're talking about. It's easier for me when you use quotes.\",\n \"Oops, didn't find a title name! Try to tell me again.\",\n \"Let's talk about movies, those are my strong suit.\",\n \"I don't think your talking about movies anymore; can we talk about movies?\"\n ])\n\n def cant_handle_multiple_titles(self):\n return random.choice([\n \"Sorry, but it seems you specified multiple titles here. I can only handle one at a time!\",\n \"You confused me by including multiple titles. Please only specify one at a time in quotes.\"\n ])\n\n def bad_review_resp(self, title):\n return random.choice([\n \"Wow, seems like \" + title + \" was a bad movie. \",\n \"Okay, so you didn't like \" + title + \". Thanks! \",\n \"Alright, \" + title + \" was not a hit. Good to know. \"\n ])\n\n def good_review_resp(self, title):\n return random.choice([\n \"Wow, seems like \" + title + \" is a total madness. Gnarly! \",\n \"Okay, so you liked \" + title + \". Thanks! \",\n \"Alright, \" + title + \" was a hit for you. Good to know. \"\n ])\n\n def couldnt_find_movie_in_db(self, title):\n return random.choice([\n \"Sorry, I couldn't find \" + title + \" in my database.\",\n \"Oops, looks like \" + title + \" isn't in my database. Check your spelling and try again!\",\n \"I didn't seem to find the title \" + title + \". Try something else maybe.\"\n ])\n\n def found_multiple(self, title, results):\n results = ['\"' + r + '\"' for r in results]\n results = ' and '.join(results)\n\n return random.choice([\n \"I found all of these records: \" + results + \". Please specify.\",\n \"Your description fits any one of these titles: \" + results + \". Please repeat with the title you meant.\",\n \"The title you specified could be any one of \" + results + \"; I need you to be more precise please.\",\n \"I found more than one movie named \" + title + \" so I'll need you to be more specific.\"\n ])\n\n def cant_find_emotion(self, title):\n return random.choice([\n \"Sorry, but I can't tell if you liked or disliked \" + title + \" .\",\n \"Sorry, I'm not sure how you felt about \" + title + \". Try to make it more obvious.\",\n \"I can't seem to figure out how you felt about \" + title + \". Try again.\"\n ])\n\n def get_title(self, index):\n\n return self.titles[index][0]\n\n def check_emotion(self, line):\n \n angry = set(['angry', 'enraged', 'furious', 'heated', 'irate', 'outragd', 'annoyed', 'frustrated'])\n sad = set(['sad', 'heartbroken', 'melancholy', 'somber', 'blue', 'sorry', 'low'])\n flirty = set(['flirty', 'hot', 'cute', 'pretty', 'adorable', 'charming', 'smooth'])\n happy = set(['happy', 'cheerful', 'delighted', 'elated', 'ecstatic', 'glad', 'joyful', 'thrilled'])\n address = set(['I am', 'You are', 'you are', 'i am'])\n\n line = self.titles_removed(line)\n line = line.replace(',', '')\n line = line.replace(' ', ' ')\n \n subject = ''\n for phrase in address:\n if phrase in line:\n subject = phrase\n\n line = line.split()\n emots = {'a' : 0, 's' : 0, 'f' : 0, 'h' : 0}\n\n for word in line:\n if word in angry:\n emots['a'] += 1\n if word in sad:\n emots['s'] += 1\n if word in flirty:\n emots['f'] += 1\n if word in happy:\n emots['h'] += 1\n\n maxEmot = ''\n count = 0\n for key in emots.keys():\n if emots[key] > count:\n maxEmot = key\n count = emots[key]\n\n if maxEmot and subject:\n if maxEmot == 'a':\n return 'You seem angry! I am very sorry for frustating you.'\n if maxEmot == 's':\n return 'You seem to be a little sad. I hope your day gets better!'\n if maxEmot == 'f':\n return 'I feel like you are flirting. You are cute and all but I have a girlfriend. She lives far away and was homeschooled so you probably would\\'nt know her.'\n if maxEmot == 'h':\n return 'I am glad that I am making you happy. You are very amusing to talk to as well.'\n else:\n return None\n\n\n\n def process(self, line):\n \"\"\"Process a line of input from the REPL and generate a response.\n\n This is the method that is called by the REPL loop directly with user input.\n\n You should delegate most of the work of processing the user's input to\n the helper functions you write later in this class.\n\n Takes the input string from the REPL and call delegated functions that\n 1) extract the relevant information, and\n 2) transform the information into a response to the user.\n\n Example:\n resp = chatbot.process('I loved \"The Notebok\" so much!!')\n print(resp) // prints 'So you loved \"The Notebook\", huh?'\n\n :param line: a user-supplied line of text\n :returns: a string containing the chatbot's response to the user input\n \"\"\"\n #############################################################################\n # TODO: Implement the extraction and transformation in this method, #\n # possibly calling other functions. Although modular code is not graded, #\n # it is highly recommended. #\n #############################################################################\n if self.creative:\n\n emotion = self.check_emotion(line)\n if emotion:\n print(emotion)\n\n ogline = line\n line = line.replace('\"', '')\n line = ' '.join(line.lower().split())\n\n titles = self.extract_titles(line)\n for tit in titles:\n done = False\n ind = 0\n while not done:\n ind = line.find(tit, ind)\n if ind > 0 and line[ind-1].isalnum():\n ind += len(tit)\n continue\n elif ind + len(tit) < len(line) and line[ind+len(tit)].isalnum():\n ind += len(tit)\n continue\n else:\n line = line[:ind] + '\"' + line[ind:ind+len(tit)] + '\"' + line[ind+len(tit):]\n done = True\n\n if not titles:\n self.creative = False\n titles = self.extract_titles(ogline)\n self.creative = True\n\n if not titles:\n if line.strip() and line.rstrip()[-1] == '?':\n return self.question_reply(line)\n\n return self.couldnt_find_title()\n\n newtitles = []\n for tit in titles:\n potentials = self.find_movies_closest_to_title(tit)\n\n if not potentials:\n return self.couldnt_find_title()\n\n if potentials:\n print(\"You mentioned \" + tit + \", did you mean \" + self.get_title(potentials[0]) + \"?\")\n print('>', end = '')\n yesno = input().strip()\n if yesno and yesno[0].lower() == 'n' or ' not ' in yesno.lower():\n for pot in potentials[1:]:\n print(\"Then did you mean %s?\" % self.get_title(pot))\n print('>', end='')\n yesno = input().strip()\n if yesno and yesno[0].lower() == 'y':\n titles = [self.titles[pot][0]]\n break\n else:\n return \"Sorry, so I guess I can't find the title for you then.\"\n else:\n titles = [self.titles[potentials[0]][0]]\n\n if len(titles) == 1:\n title = titles[0]\n\n sent = self.extract_sentiment(line)\n if sent == 0:\n return self.cant_find_emotion(title)\n\n records = self.find_movies_by_title(title)\n\n if len(records) > 1:\n records = self.narrow_down(records)\n\n if not records:\n return self.couldnt_find_movie_in_db(title)\n elif len(records) > 1:\n found = [self.titles[t][0] for t in records]\n return self.found_multiple(title, found)\n\n\n self.n_data_points += 1\n\n if sent < 0:\n index = records[0]\n self.user_ratings[index] = -1\n ret = self.bad_review_resp(title)\n\n elif sent > 0:\n index = records[0]\n self.user_ratings[index] = 1\n ret = self.good_review_resp(title)\n\n\n\n else:\n sent = self.extract_sentiment_for_movies(line)\n\n possies = []\n neggies = []\n\n for title, review in sent:\n if review == 0:\n print(self.cant_find_emotion(title))\n continue\n\n records = self.find_movies_by_title(title)\n\n if not records:\n return self.couldnt_find_movie_in_db(title)\n elif len(records) > 1:\n records = self.narrow_down(records)\n\n index = records[0]\n\n self.user_ratings[index] = review\n self.n_data_points += 1\n\n if review > 0:\n possies.append(title)\n else:\n neggies.append(title)\n\n\n # MAKE STRINGS\n if len(possies) == 0:\n string = ', '.join(titles[:-1])\n string += ' and ' + titles[-1]\n ret = 'Okay, so you did not like ' + string + '. '\n\n elif len(neggies) == 0:\n string = ', '.join(titles[:-1])\n string += ' and ' + titles[-1]\n ret = 'Alright, so you liked ' + string + '. '\n\n else:\n if len(possies) > 1:\n positive = ', '.join(possies[:-1])\n positive += ' and ' + possies[-1]\n else:\n positive = possies[0]\n\n if len(neggies) > 1:\n negative = ', '.join(neggies[:-1])\n negative += ' and ' + neggies[-1]\n else:\n negative = neggies[0]\n\n ret = 'Okay, so you liked ' + positive + \" but did not like \" + negative + '. '\n\n if self.n_data_points < 5:\n return ret + self.ask_for_another()\n\n ret += '\\n\\n'\n rec = self.recommend(self.user_ratings, self.ratings)\n rec = [self.get_title(i) for i in rec]\n\n ret += self.rec_message(rec)\n ret += '\\n\\n'\n\n return ret\n \n\n\n else: # not creative mode\n titles = self.extract_titles(line)\n if not titles:\n return self.couldnt_find_title()\n elif len(titles) > 1:\n return self.cant_handle_multiple_titles()\n\n title = titles[0]\n\n records = self.find_movies_by_title(title)\n if not records:\n return self.couldnt_find_movie_in_db(title)\n elif len(records) > 1:\n found = [self.titles[t][0] for t in records]\n return self.found_multiple(title, found)\n\n sent = self.extract_sentiment(line)\n if sent == 0:\n return self.cant_find_emotion(title)\n\n self.n_data_points += 1\n\n if sent < 0:\n index = records[0]\n self.user_ratings[index] = -1\n ret = self.bad_review_resp(title)\n\n elif sent > 0:\n index = records[0]\n self.user_ratings[index] = 1\n ret = self.good_review_resp(title)\n\n if self.n_data_points < 5:\n return ret + self.ask_for_another()\n\n ret += '\\n\\n'\n rec = self.recommend(self.user_ratings, self.ratings)\n rec = [self.get_title(i) for i in rec]\n\n ret += self.rec_message(rec)\n ret += '\\n\\n'\n\n return ret\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return ''\n\n\n\n def rec_message(self, recs):\n s = random.choice([\n \"Given what you told me, I would recommend these movies: \",\n \"Great! I think you might enjoy these movies as well:\",\n \"Sweet! I'm ready to give you some recommendations. Check out:\"\n ])\n\n for rec in recs:\n s += '\\n\\t' + rec\n\n s += '\\n\\n' + random.choice([\n \"Keep telling me about your movie preferences for more accurate recommendations!\",\n \"If you want, you can keep telling me about movies for more refined recommendations.\",\n \"You can exit by typing :quit, or you can keep telling me about movies!\"\n ])\n\n return s\n\n def extract_titles(self, text):\n \"\"\"Extract potential movie titles from a line of text.\n\n Given an input text, this method should return a list of movie titles\n that are potentially in the text.\n\n - If there are no movie titles in the text, return an empty list.\n - If there is exactly one movie title in the text, return a list\n containing just that one movie title.\n - If there are multiple movie titles in the text, return a list\n of all movie titles you've extracted from the text.\n\n Example:\n potential_titles = chatbot.extract_titles('I liked \"The Notebook\" a lot.')\n print(potential_titles) // prints [\"The Notebook\"]\n\n :param text: a user-supplied line of text that may contain movie titles\n :returns: list of movie titles that are potentially in the text\n \"\"\"\n\n def isYear(txt):\n pattern = r'\\(\\d\\d\\d\\d\\)$'\n yrs = re.findall(pattern, txt)\n if yrs:\n return True\n else:\n pattern = r'\\(\\d\\d\\d\\d-\\)$'\n yrs = re.findall(pattern, txt)\n if yrs:\n return True\n else:\n pattern = r'\\(\\d\\d\\d\\d-\\d\\d\\d\\d\\)$'\n yrs = re.findall(pattern, txt)\n if yrs:\n return True\n else:\n return False\n\n if self.creative:\n text = ' '.join(text.lower().split())\n titles = set()\n for (titleOpts, year) in self.casual_titles:\n\n for t in titleOpts:\n if t in text:\n i_sofar = 0\n while i_sofar < len(text):\n i_sofar = text.find(t, i_sofar)\n if i_sofar == -1: \n break\n i_sofar = i_sofar + len(t)\n if i_sofar > len(t) and text[i_sofar - len(t) - 1].isalnum():\n continue\n elif i_sofar < len(text) and text[i_sofar].isalnum():\n continue\n\n rest = text[i_sofar:].split()\n if rest and isYear(rest[0]):\n if rest[0] == year:\n titles.add(t + ' ' + year)\n i_sofar += len(year)\n else:\n titles.add(t)\n\n removes = []\n rets = list(titles)\n for s1 in rets:\n for s2 in rets:\n if s1 != s2 and s1 in s2:\n removes.append(s1)\n for r in removes:\n rets.remove(r)\n\n return rets\n\n else:\n titles = []\n split_on_quotes = text.split('\"')\n if len(split_on_quotes) % 2 == 0: \n split_on_quotes = split_on_quotes[:-1]\n for i in range((len(split_on_quotes))):\n if i % 2:\n titles.append(split_on_quotes[i])\n\n return titles\n\n def titles_removed(self, text):\n notTitles = []\n\n split_on_quotes = text.split('\"')\n if len(split_on_quotes) % 2 == 0: \n split_on_quotes = split_on_quotes[:-1]\n for i in range((len(split_on_quotes))):\n if i % 2 == 0:\n notTitles.append(split_on_quotes[i])\n\n return ' '.join(notTitles)\n\n def titleAndYear(self, t):\n t = t.strip()\n pattern = r'\\(\\d\\d\\d\\d\\)$'\n yrs = re.findall(pattern, t)\n\n if yrs:\n yr = yrs[0]\n else:\n pattern = r'\\(\\d\\d\\d\\d-\\)$'\n yrs = re.findall(pattern, t)\n if yrs:\n yr = yrs[0]\n else:\n pattern = r'\\(\\d\\d\\d\\d-\\d\\d\\d\\d\\)$'\n yrs = re.findall(pattern, t)\n if yrs:\n yr = yrs[0]\n else:\n yr = ''\n\n t = t[:-len(yr)] if yr else t\n t = t.rstrip()\n return t, yr\n\n def titleWithMovedArticle(self, t):\n if t.strip() and t.split()[0].lower() in ('a', 'an', 'the'):\n tok = t.split()[0]\n t = t[len(tok):].strip() + ', ' + tok\n return t\n\n def allTitleOpts(self, t):\n # assumes date has been removed already\n t = t.strip()\n others = []\n while t:\n pattern = r'\\(..+\\)$'\n langs = re.findall(pattern, t)\n if not langs: \n return [t.strip()] + others\n lang = langs[0]\n found = lang\n lang = lang[1:-1]\n lang = lang.strip()\n if lang[:len(\"a.k.a.\")] == \"a.k.a.\":\n lang = lang[len(\"a.k.a.\"):].strip()\n others.append(lang)\n t = t[:-len(found)].rstrip()\n return others\n\n def titleOptionsAndYear(self, title):\n\n title, year = self.titleAndYear(title)\n titles = self.allTitleOpts(title)\n\n titleOptions = []\n\n for t in titles:\n if t.strip() and t.split()[0].lower() in ('a', 'an', 'the',):\n tok = t.split()[0]\n t = t[len(tok):].strip() + ', ' + tok\n titleOptions.append(t)\n\n return titleOptions, year\n\n def firstTitleAndYear(self, title):\n titles, year = self.titleOptionsAndYear(title)\n return titles[0], year\n\n def find_movies_by_title(self, title):\n \"\"\" Given a movie title, return a list of indices of matching movies.\n\n - If no movies are found that match the given title, return an empty list.\n - If multiple movies are found that match the given title, return a list\n containing all of the indices of these matching movies.\n - If exactly one movie is found that matches the given title, return a list\n that contains the index of that matching movie.\n\n Example:\n ids = chatbot.find_movies_by_title('Titanic')\n print(ids) // prints [1359, 1953]\n\n :param title: a string containing a movie title\n :returns: a list of indices of matching movies\n \"\"\"\n\n matches = []\n titleOptions, year = self.titleOptionsAndYear(title)\n titleSet = set(titleOptions)\n\n lowerSet = set([x.lower() for x in titleSet])\n langSet = set()\n if self.creative:\n for to in titleOptions:\n if to.strip() and to.split()[0].lower() in ('la', 'la', 'die', 'las', 'los'):\n tok = to.split()[0]\n to = to[len(tok):].strip() + ', ' + tok\n langSet.add(to.lower())\n\n\n for index, otherTitle in enumerate(self.titles):\n otherTitle = otherTitle[0]\n otherTitleOptions, otherYear = self.titleOptionsAndYear(otherTitle)\n\n if (year != '') and (year != otherYear):\n continue\n\n for opt in otherTitleOptions:\n if opt in titleSet:\n matches.append(index)\n elif self.creative and opt.lower() in lowerSet or opt.lower() in langSet:\n matches.append(index)\n return matches\n\n def extract_sentiment(self, text):\n \"\"\"Extract a sentiment rating from a line of text.\n\n You should return -1 if the sentiment of the text is negative, 0 if the\n sentiment of the text is neutral (no sentiment detected), or +1 if the\n sentiment of the text is positive.\n\n As an optional creative extension, return -2 if the sentiment of the text\n is super negative and +2 if the sentiment of the text is super positive.\n\n Example:\n sentiment = chatbot.extract_sentiment('I liked \"The Titanic\"')\n print(sentiment) // prints 1\n\n :param text: a user-supplied line of text\n :returns: a numerical value for the sentiment of the text\n \"\"\"\n sent = self.sentiment\n text = text.replace('.', '')\n result = 0\n negate = False\n posWords = set(['loved', 'adored', 'amazing', 'incredible', 'awesome', 'outstanding', 'marvelous', 'wonderful'])\n negWords = set(['terrible', 'hated', 'awful', 'dreadful', 'horrendous', 'disgusting', 'horrible'])\n amps = set(['really', 'super'])\n stemmer = PorterStemmer()\n\n if self.creative:\n posWords = [stemmer.stem(word) for word in posWords]\n negWords = [stemmer.stem(word) for word in negWords]\n amps = [stemmer.stem(word) for word in amps]\n pattern = r'\\\"[^\\\"]+\\\"'\n title = re.findall(pattern, text)\n for t in range(len(title)):\n text = text.replace(title[t], '')\n text = text.split()\n text = [stemmer.stem(word) for word in text]\n\n if self.creative:\n pos = [word in text for word in posWords]\n if any(pos):\n pos2 = True\n else: pos2 = False\n neg = [word in text for word in negWords]\n if any(neg):\n neg2 = True\n else: neg2 = False\n amp = [word in text for word in amps]\n if any(amp):\n times2 = True\n else: times2 = False\n \n words = dict()\n oldkeys = list(sent.keys())\n keys = [stemmer.stem(key) for key in oldkeys]\n for i in range(len(keys)):\n words[keys[i]] = sent[oldkeys[i]]\n\n for word in text:\n access = word.replace(',', '')\n if access in keys:\n if words[access] == 'pos':\n if negate:\n result -= 1\n else:\n result += 1\n else:\n if negate:\n result += 1\n else:\n result -= 1\n\n if access == \"didn\\'t\" or access == \"not\" or access == \"never\": \n negate = True\n if ',' in word or '.'in word:\n negate = False\n\n if result > 0:\n if self.creative and (times2 or pos2):\n return 2\n else:\n return 1\n elif result < 0:\n if self.creative and (times2 or neg2):\n return -2\n else:\n return -1\n else:\n return 0\n\n def extract_sentiment_for_movies(self, text):\n \"\"\"Creative Feature: Extracts the sentiments from a line of text\n that may contain multiple movies. Note that the sentiments toward\n the movies may be different.\n\n You should use the same sentiment values as extract_sentiment, described above.\n Hint: feel free to call previously defined functions to implement this.\n\n Example:\n sentiments = chatbot.extract_sentiment_for_text('I liked both \"Titanic (1997)\" and \"Ex Machina\".')\n print(sentiments) // prints [(\"Titanic (1997)\", 1), (\"Ex Machina\", 1)]\n\n :param text: a user-supplied line of text\n :returns: a list of tuples, where the first item in the tuple is a movie title,\n and the second is the sentiment in the text toward that movie\n \"\"\"\n pattern = r'\\\"[^\\\"]+\\\"'\n ranks = []\n\n if text.count('but') == 0:\n result = self.extract_sentiment(text)\n titles = re.findall(pattern, text)\n for t in titles:\n t = t.replace(\"\\\"\", '')\n ranks.append((t, result))\n else:\n text = text.split('but')\n titles1 = re.findall(pattern, text[0])\n titles2 = re.findall(pattern, text[1])\n first = self.extract_sentiment(text[0])\n second = self.extract_sentiment(text[1])\n for title in titles1:\n title = title.replace(\"\\\"\", '')\n ranks.append((title, first))\n for title in titles2:\n if second == 0:\n second = -first\n title = title.replace(\"\\\"\", '')\n ranks.append((title, second))\n return ranks\n\n def find_movies_closest_to_title(self, title, max_distance=3):\n \"\"\"Creative Feature: Given a potentially misspelled movie title,\n return a list of the movies in the dataset whose titles have the least edit distance\n from the provided title, and with edit distance at most max_distance.\n\n - If no movies have titles within max_distance of the provided title, return an empty list.\n - Otherwise, if there's a movie closer in edit distance to the given title \n than all other movies, return a 1-element list containing its index.\n - If there is a tie for closest movie, return a list with the indices of all movies\n tying for minimum edit distance to the given movie.\n\n Example:\n chatbot.find_movies_closest_to_title(\"Sleeping Beaty\") # should return [1656]\n\n :param title: a potentially misspelled title\n :param max_distance: the maximum edit distance to search for\n :returns: a list of movie indices with titles closest to the given title and within edit distance max_distance\n \"\"\"\n\n title, year = self.titleAndYear(title)\n\n if title.strip() and title.split()[0].lower() in ('a', 'an', 'the'):\n tok = title.split()[0]\n title = title[len(tok):].strip() + ', ' + tok\n\n title += year\n\n def editDistance(w1,w2):\n table = [[None for _ in range(len(w1) + 1)] for _ in range(len(w2) + 1)]\n for r in range(len(table)):\n table[r][0] = r\n for c in range(len(table[0])):\n table[0][c] = c\n for r in range(1, len(table)):\n for c in range(1, len(table[0])):\n if w1[c-1] == w2[r-1]:\n table[r][c] = table[r-1][c-1]\n else:\n table[r][c] = min(table[r-1][c-1] + 2,\n table[r][c-1] + 1,\n table[r-1][c] + 1\n )\n return table[-1][-1]\n\n dists = collections.defaultdict(list)\n\n for i, (otherTitle, _) in enumerate(self.titles):\n otherTitles, otherYear = self.titleOptionsAndYear(otherTitle)\n for ot in otherTitles:\n if year: ot += otherYear\n ed = editDistance(title, ot)\n if ed <= max_distance:\n dists[ed].append(i)\n break # assumes won't match in multiple variations\n \n if not dists:\n return []\n\n return dists[min(dists.keys())]\n\n def disambiguate(self, clarification, candidates):\n \"\"\"Creative Feature: Given a list of movies that the user could be talking about \n (represented as indices), and a string given by the user as clarification \n (eg. in response to your bot saying \"Which movie did you mean: Titanic (1953) \n or Titanic (1997)?\"), use the clarification to narrow down the list and return \n a smaller list of candidates (hopefully just 1!)\n\n - If the clarification uniquely identifies one of the movies, this should return a 1-element\n list with the index of that movie.\n - If it's unclear which movie the user means by the clarification, it should return a list\n with the indices it could be referring to (to continue the disambiguation dialogue).\n\n Example:\n chatbot.disambiguate(\"1997\", [1359, 2716]) should return [1359]\n \n :param clarification: user input intended to disambiguate between the given movies\n :param candidates: a list of movie indices\n :returns: a list of indices corresponding to the movies identified by the clarification\n \"\"\"\n\n options = []\n\n for index in candidates:\n title, year = self.titleAndYear(self.titles[index][0])\n \n if year == clarification:\n options.append(index)\n\n elif year.strip('()') == clarification:\n options.append(index)\n\n elif title + year == clarification:\n options.append(index)\n\n elif title + ' ' + year == clarification:\n options.append(index)\n\n elif clarification in title:\n options.append(index)\n\n elif self.titleWithMovedArticle(title) + year == clarification:\n options.append(index)\n\n elif self.titleWithMovedArticle(title) + ' ' + year == clarification:\n options.append(index)\n\n elif clarification in self.titleWithMovedArticle(title):\n options.append(index)\n \n if not options:\n return candidates\n\n return options\n\n def question_reply(self, question):\n question = question.lower()\n if question[-1] == '?':\n question = question[:-1] \n\n if question.split() == ['what','is','your','favorite','movie']:\n return \"Quentin Tarantino's Pulp Fiction, of course!\"\n\n if question[:len('can you')] == 'can you':\n juice = question[len('can you'):]\n juice = juice.replace('your', 'my')\n juice = juice.replace('you', 'I')\n return \"I don't know if I can \" + juice + \".\"\n\n if question[:len('what is')] == 'what is':\n juice = question[len('can you'):]\n juice = juice.replace('your', 'my')\n juice = juice.replace('you', 'I')\n return \"I don't know what \" + juice + \" is.\"\n\n if question[:len('who is ')] == 'who is':\n juice = question[len('who is'):]\n juice = juice.replace('your', 'my')\n juice = juice.replace('you', 'I')\n return \"I don't know who \" + juice + \" is.\"\n\n # doesnt know much\n return \"I don't know the answer to your question.\"\n\n def narrow_down(self, options):\n if len(options) < 2:\n return options\n prompt = \"I have figured out that the title you described is either \"\n for opt in options[:-1]:\n prompt += '\\n' + self.titles[opt][0] + ', '\n\n prompt += '\\n' + 'or ' + self.titles[options[-1]][0]\n prompt += '.\\nWhich one did you mean?\\n'\n\n print(prompt + '>', end='')\n\n options = self.disambiguate(input(), options)\n\n ntries = 0\n while len(options) > 1 and ntries < 5:\n prompt = \"Now I know that the title is either \"\n for opt in options[:-1]:\n prompt += '\\n' + self.titles[opt][0] + ', '\n\n prompt += '\\n' + 'or ' + self.titles[options[-1]][0]\n prompt += '.\\nWhich one did you mean?\\n'\n\n print(prompt, end='')\n options = self.disambiguate(input(), options)\n\n return options\n\n #############################################################################\n # 3. Movie Recommendation helper functions #\n #############################################################################\n\n def binarize(self, ratings, threshold=2.5):\n \"\"\"Return a binarized version of the given matrix.\n\n To binarize a matrix, replace all entries above the threshold with 1.\n and replace all entries at or below the threshold with a -1.\n\n Entries whose values are 0 represent null values and should remain at 0.\n\n :param x: a (num_movies x num_users) matrix of user ratings, from 0.5 to 5.0\n :param threshold: Numerical rating above which ratings are considered positive\n\n :returns: a binarized version of the movie-rating matrix\n \"\"\"\n #############################################################################\n # TODO: Binarize the supplied ratings matrix. #\n #############################################################################\n\n # The starter code returns a new matrix shaped like ratings but full of zeros.\n binarized_ratings = np.zeros_like(ratings)\n binarized_ratings = binarized_ratings + (ratings > 2.5)\n binarized_ratings = binarized_ratings - (ratings <= 2.5)\n binarized_ratings[ratings == 0] = 0\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return binarized_ratings\n\n\n def similarity(self, u, v):\n \"\"\"Calculate the cosine similarity between two vectors.\n\n You may assume that the two arguments have the same shape.\n\n :param u: one vector, as a 1D numpy array\n :param v: another vector, as a 1D numpy array\n\n :returns: the cosine similarity between the two vectors\n \"\"\"\n #############################################################################\n # TODO: Compute cosine similarity between the two vectors.\n #############################################################################\n denom = (np.linalg.norm(u) * np.linalg.norm(v))\n if denom == 0:\n return 0\n\n return np.dot(u, v) / denom\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n\n def recommend(self, user_ratings, ratings_matrix, k=10, creative=False):\n \"\"\"Generate a list of indices of movies to recommend using collaborative filtering.\n\n You should return a collection of `k` indices of movies recommendations.\n\n As a precondition, user_ratings and ratings_matrix are both binarized.\n\n Remember to exclude movies the user has already rated!\n\n :param user_ratings: a binarized 1D numpy array of the user's movie ratings\n :param ratings_matrix: a binarized 2D numpy matrix of all ratings, where\n `ratings_matrix[i, j]` is the rating for movie i by user j\n :param k: the number of recommendations to generate\n :param creative: whether the chatbot is in creative mode\n\n :returns: a list of k movie indices corresponding to movies in ratings_matrix,\n in descending order of recommendation\n \"\"\"\n\n #######################################################################################\n # TODO: Implement a recommendation function that takes a vector user_ratings #\n # and matrix ratings_matrix and outputs a list of movies recommended by the chatbot. #\n # #\n # For starter mode, you should use item-item collaborative filtering #\n # with cosine similarity, no mean-centering, and no normalization of scores. #\n #######################################################################################\n\n # Populate this list with k movie indices to recommend to the user.\n\n moviesRated = np.where(user_ratings)[0]\n moviesUnrated = np.where(user_ratings == 0)[0]\n\n guessedRatings = {movieIndex : 0 for movieIndex in moviesUnrated}\n\n for guessIndex in moviesUnrated:\n guessedRatings[guessIndex] = sum(self.similarity(ratings_matrix[i,:].reshape(-1),ratings_matrix[guessIndex,:].reshape(-1)) * user_ratings[i] for i in moviesRated)\n\n recommendations = sorted(guessedRatings.keys(), key=lambda x: guessedRatings[x], reverse = True)[:k]\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return recommendations\n\n\n #############################################################################\n # 4. Debug info #\n #############################################################################\n\n def debug(self, line):\n \"\"\"Return debug information as a string for the line string from the REPL\"\"\"\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info\n\n #############################################################################\n # 5. Write a description for your chatbot here! #\n #############################################################################\n def intro(self):\n \"\"\"Return a string to use as your chatbot's description for the user.\n\n Consider adding to this description any information about what your chatbot\n can do and how the user can interact with it.\n \"\"\"\n return \"\"\"\n I am motherbot, the movie recommending chatbot. \n Tell me which movies you liked as well as which you did't \n and I will recommend you some new ones. Have fun!\n\n In creative mode, I can do some pretty interesting things:\n I can sense titles even when they are not properly capitalized.\n I can sense titles even when they are not in quotation marks. \n I can sense titles and react to input even when input is not regularly\n spaced.\n I can pick up on, and react to, the potential emotions of my users, such\n as happieness, anger, sadness, and flirtatiousness.\n I can disambiguate when I find multiple titles that might fit the \n bill for the title information a user is giving me. \n I can detect language titles even when they are in other languages. \n In fact, I can detect them even if they have foreign articles!\n\n If you offer me a potential title in quotes and I don't find any\n titles in your sentence to me, I can look for nearby words and give\n you spelling corrections suggestions.\n\n I can process your sentiment for multiple movies in a sentence. \n\n I can respond to unrelated inputs and inputs without movie titles\n with generic prompts. (Ask me what my favorite movie is, or what\n my favorite book is - I only know one of them!)\n\n I can speak very fluently since I include a lot of repsonse outputs\n in my conversational database, some even including colloquial\n lingo.\n \"\"\"\n\n\nif __name__ == '__main__':\n print('To run your chatbot in an interactive loop from the command line, run:')\n print(' python3 repl.py')\n","repo_name":"tconsigny/Movie-Recommendation-Chatbot","sub_path":"Chatbot.py","file_name":"Chatbot.py","file_ext":"py","file_size_in_byte":42531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31402081239","text":"def esDecimal(numero):\n try:\n float(numero)\n return True\n except:\n return False\n \nstrBase = input(\"Intruduzca la base del triángulo: \")\nwhile not esDecimal(strBase):\n print(strBase, \"no es un valor numérico\")\n strBase = input(\"Intruduzca la base del triángulo: \")\n\nstrAltura = input(\"Introduzca la altura del triángulo: \")\nwhile not esDecimal(strAltura):\n print(strAltura, \"no es un valor numérico\") \n strAltura = input(\"Introduzca la altura del triángulo: \")\n \nb = float(strBase)\nh = float(strAltura)\n\narea = b * h / 2\n\nprint(\"Superficie del triángulo: \", round(area,2))\n \n ","repo_name":"inigodm/python01","sub_path":"AreaTriangulo04.py","file_name":"AreaTriangulo04.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75166869814","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\n\ndataset = pd.read_csv(\"train.csv\").drop(['PassengerId','Name','Ticket','Cabin'],axis=1)\ndataset = dataset.drop([61,829],axis=0)\nY = dataset['Survived']\nX = dataset.drop('Survived',axis=1)\n\nfrom sklearn.preprocessing import LabelEncoder\nencoder_x = LabelEncoder()\nX['Sex'] = encoder_x.fit_transform(X['Sex'])\nX['Embarked'] = encoder_x.fit_transform(X[\"Embarked\"])\nX['Age'].fillna(value=X['Age'].median(),inplace=True)\n\nonehotencoder_x=OneHotEncoder()\nX=onehotencoder_x.fit_transform(X).toarray()\n\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.3,random_state=0)\n\n\n#sc_X = StandardScaler()\n#X_train = sc_X.fit_transform(X_train)\n#X_test = sc_X.transform(X_test)\n\nKNN = KNeighborsClassifier(n_neighbors = 7)\nLR = LogisticRegression()\nNB = BernoulliNB()\n\nmodels = {'knn':KNN, 'lr': LR, 'nb':NB}\n\ndef train(model, X, Y):\n model.fit(X, Y)\n \ndef predict(model, X_test):\n return model.predict(X_test)\n\ndef accuracy(Y_pred, Y_test):\n return accuracy_score(Y_test, Y_pred)\n\npred = []\nfor k,v in models.items():\n train(v, X_train, Y_train)\n pred.append((k, accuracy(predict(v, X_test), Y_test)))\n \n\n\n","repo_name":"Ujjal-Baniya/Machine-learning","sub_path":"Titanic/Titanic.py","file_name":"Titanic.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"34781119281","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport sys\nimport traceback\nimport tempfile\n\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\nfrom django.utils.translation import ugettext as _\n\n\nALL_OK = 0\nUNAUTHENTICATED = 1\nCAN_NOT_REGISTER_COMPUTER = 2\nGET_METHOD_NOT_ALLOWED = 3\nCOMMAND_NOT_FOUND = 4\nINVALID_SIGNATURE = 5\nCOMPUTER_NOT_FOUND = 6\nDEVICE_NOT_FOUND = 7\nPROJECT_NOT_FOUND = 8\nUSER_DOES_NOT_HAVE_PERMISSION = 9\nUNSUBSCRIBED_COMPUTER = 10\nGENERIC = 100\n\nERROR_INFO = {\n ALL_OK: _(\"No errors\"),\n UNAUTHENTICATED: _(\"User unauthenticated\"),\n CAN_NOT_REGISTER_COMPUTER: _(\"User can not register computers\"),\n GET_METHOD_NOT_ALLOWED: _(\"Method GET not allowed\"),\n COMMAND_NOT_FOUND: _(\"Command not found\"),\n INVALID_SIGNATURE: _(\"Signature is not valid\"),\n COMPUTER_NOT_FOUND: _(\"Computer not found\"),\n DEVICE_NOT_FOUND: _(\"Device not found\"),\n PROJECT_NOT_FOUND: _(\"Project not found\"),\n USER_DOES_NOT_HAVE_PERMISSION: _(\"User does not have permission\"),\n UNSUBSCRIBED_COMPUTER: _(\"Unsubscribed computer\"),\n GENERIC: _(\"Generic error\")\n}\n\n\ndef error_info(number):\n \"\"\"\n string error_info(int number)\n \"\"\"\n return ERROR_INFO.get(number, '')\n\n\ndef error(number):\n ret = error_info(number)\n if settings.DEBUG and number == GENERIC:\n etype = sys.exc_info()[0]\n evalue = sys.exc_info()[1]\n\n dir_errors = os.path.join(settings.MIGASFREE_PUBLIC_DIR, 'errors')\n if not os.path.exists(dir_errors):\n os.makedirs(dir_errors)\n\n fp = tempfile.NamedTemporaryFile(\n mode='w+b',\n suffix='.html',\n prefix=str(evalue).replace(\" \", \"_\").replace(\"\\n\", \"_\"),\n dir=dir_errors,\n delete=False\n )\n\n fp.write(print_exc_plus(etype, evalue))\n fp.close()\n\n ret = '%s %s %s: %s' % (\n str(etype),\n str(evalue),\n _(\"Traceback\"),\n os.path.join(\n dir_errors,\n os.path.basename(fp.name)\n )\n )\n\n return {\"errmfs\": {\"code\": number, \"info\": ret}}\n\n\ndef ok():\n return error(ALL_OK)\n\n\ndef print_exc_plus(etype, evalue):\n \"\"\"\n Print the usual traceback information, followed by a listing of all the\n local variables in each frame.\n \"\"\"\n tb = sys.exc_info()[2]\n while 1:\n if not tb.tb_next:\n break\n tb = tb.tb_next\n\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n\n stack.reverse()\n traceback.print_exc()\n\n ret = []\n for frame in stack:\n fr = {\n 'filename': frame.f_code.co_filename,\n 'name': frame.f_code.co_name,\n 'line': frame.f_lineno\n }\n\n variables = []\n for key, value in frame.f_locals.items():\n try:\n variables.append({\"key\": key, \"value\": str(value)})\n except:\n pass\n\n fr[\"locals\"] = variables\n ret.append(fr)\n\n return render_to_string(\n 'error.html',\n {\n \"description\": '%s: %s %s' % (\n _(\"Generic error in server\"),\n str(etype),\n str(evalue)\n ),\n \"traceback\": ret\n }\n )\n","repo_name":"migasfree/migasfree","sub_path":"migasfree/server/errmfs.py","file_name":"errmfs.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"22"} +{"seq_id":"3172661813","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .voicecommand import ConfigurableVoiceCommand\nfrom .process_result import ProcessResult\n\nclass PlayVoiceCommand(ConfigurableVoiceCommand):\n \n SIGNAL_WORDS = [\"spiele\", \"spiel\"]\n ARTIST_ONLY = [\"etwas\", \"was\"]\n ARTIST = \"von\"\n TARGET = \"auf\"\n STRIP_CHARS = \";,. \"\n \n def _load_config(self, data):\n self.RENDERERS = data['renderers']\n self.SERVERS = data['servers']\n \n def _get_server_url(self, name):\n # ignore name as there is only one :)\n return next(iter(self.SERVERS.values()))\n \n def _get_renderer_url(self, name):\n if (name is not None) and (name in self.RENDERERS):\n return self.RENDERERS[name]\n return next(iter(self.RENDERERS.values()))\n \n def can_process(self, vc):\n for k in self.SIGNAL_WORDS:\n if vc.lower().startswith(k):\n return True\n return False\n \n def _parse_title_artist_target(self, vc):\n title = None\n artist = None\n target = None\n artist_contained = False\n\n rest = vc.strip(self.STRIP_CHARS)\n # cut away signal word: rest is like SINGAL_WORD...\n for k in self.SIGNAL_WORDS:\n if k in rest.lower():\n rest = rest[len(k):].strip()\n \n # cut away artist: rest is like ARTIST...\n if self.ARTIST in rest.lower():\n artist_keyword_idx = rest.lower().find(self.ARTIST)\n title = rest[:artist_keyword_idx].strip()\n rest = rest[artist_keyword_idx + len(self.ARTIST):]\n log(\"artist contained\")\n artist_contained = True\n \n # cut away target: rest is like: <artist>TARGET... or <title>TARGET...\n if self.TARGET in rest.lower():\n target_keyword_idx = rest.lower().find(self.TARGET)\n if (title is None):\n title = rest[:target_keyword_idx].strip()\n else:\n artist = rest[:target_keyword_idx].strip()\n target = rest[target_keyword_idx + len(self.ARTIST):].strip()\n log(\"target contained\")\n \n if (title is None):\n # parse artist anyway\n title = rest.strip()\n if (artist_contained and artist is None):\n artist = rest.strip()\n \n # case someone say's \"spiel(e) (et)was von ARTIST\"\n if title.lower() in self.ARTIST_ONLY:\n title = None\n\n log('title: \"{t}\", artist: \"{a}\", target: \"{tt}\"'.format(t=title, a=artist, tt=target))\n return (title, artist, target)\n \n def process(self, vc):\n ti, ar, tar = self._parse_title_artist_target(vc)\n from dlna.mediaserver import MediaServer\n from dlna.renderer import Renderer\n from dlna.player import Player\n \n ms_url = self._get_server_url(None)\n ms = MediaServer(ms_url)\n log('searching for title=\"{t}\" of artist=\"{a}\"'.format(t=ti, a=ar))\n search_res = ms.search(title=ti, artist=ar)\n log('Found {} items'.format(search_res.get_matches()))\n \n succ = False\n result_text = \"\"\n if (search_res.get_matches() > 0):\n item = search_res.random_item()\n log(item.get_url())\n renderer_url = self._get_renderer_url(tar)\n player = Player(Renderer(tar, renderer_url, True))\n log('Playername: \"{p}\", Player URL: \"{u}\"'.format(p=tar, u=renderer_url))\n player.play(item.get_url(), item=item)\n succ = True\n result_text = 'spielt \"{t}\" von \"{a}\"'.format(t=item.get_title(), a=item.get_actor())\n else:\n succ = False\n result_text = \"Kein passenden Titel gefunden\"\n log(result_text)\n return ProcessResult(\"Media Player\", succ, result_text)\n \ndef log(txt):\n pass\n ","repo_name":"derHeinz/voicecommand","sub_path":"commands/playvoicecommand.py","file_name":"playvoicecommand.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21298855593","text":"from pydantic import BaseModel, Field, field_validator\nfrom abc import ABC\nfrom typing import Optional, Literal, Union\nimport tzlocal\nimport locale\n\nfrom .stepdefaults import StepDefaults\n\n\nclass FileType(BaseModel, ABC, extra=\"forbid\"):\n \"\"\"Template abstract base class for parser classes.\"\"\"\n\n filetype: Optional[str] = None\n timezone: Optional[str] = None\n locale: Optional[str] = None\n encoding: Optional[str] = None\n\n @field_validator(\"timezone\")\n @classmethod\n def timezone_resolve_localtime(cls, v):\n if v == \"localtime\":\n v = tzlocal.get_localzone_name()\n return v\n\n @field_validator(\"locale\")\n @classmethod\n def locale_set_default(cls, v):\n if v == \"getlocale\":\n v = \".\".join(locale.getlocale())\n return v\n\n\nclass NoFileType(FileType):\n filetype: Literal[\"None\"] = \"None\"\n # filetype: Literal[\"none\"]\n\n\nclass Tomato_json(FileType):\n filetype: Literal[\"tomato.json\"]\n\n\nDummyFileTypes = Union[\n NoFileType,\n Tomato_json,\n]\n\n\nclass Drycal_csv(FileType):\n filetype: Literal[\"drycal.csv\"]\n\n\nclass Drycal_rtf(FileType):\n filetype: Literal[\"drycal.rtf\"]\n\n\nclass Drycal_txt(FileType):\n filetype: Literal[\"drycal.txt\"]\n\n\nFlowDataFileTypes = Union[\n Drycal_csv,\n Drycal_rtf,\n Drycal_txt,\n]\n\n\nclass EClab_mpr(FileType):\n filetype: Literal[\"eclab.mpr\", \"marda:biologic-mpr\"]\n\n\nclass EClab_mpt(FileType):\n filetype: Literal[\"eclab.mpt\", \"marda:biologic-mpt\"]\n encoding: str = \"windows-1252\"\n\n\nElectroChemFileTypes = Union[\n EClab_mpr,\n EClab_mpt,\n Tomato_json,\n]\n\n\nclass EZChrom_asc(FileType):\n filetype: Literal[\"ezchrom.asc\"]\n\n\nclass Fusion_json(FileType):\n filetype: Literal[\"fusion.json\"]\n\n\nclass Fusion_zip(FileType):\n filetype: Literal[\"fusion.zip\"]\n\n\nclass Fusion_csv(FileType):\n filetype: Literal[\"fusion.csv\"]\n\n\nclass Agilent_ch(FileType):\n filetype: Literal[\"agilent.ch\", \"marda:agilent-ch\"]\n\n\nclass Agilent_dx(FileType):\n filetype: Literal[\"agilent.dx\", \"marda:agilent-dx\"]\n\n\nclass Agilent_csv(FileType):\n filetype: Literal[\"agilent.csv\"]\n\n\nclass EmpaLC_csv(FileType):\n filetype: Literal[\"empalc.csv\"]\n\n\nclass EmpaLC_xlsx(FileType):\n filetype: Literal[\"empalc.xlsx\"]\n\n\nChromTraceFileTypes = Union[\n EZChrom_asc,\n Fusion_json,\n Fusion_zip,\n Agilent_ch,\n Agilent_dx,\n Agilent_csv,\n]\n\nChromDataFileTypes = Union[\n Fusion_json,\n Fusion_zip,\n Fusion_csv,\n EmpaLC_csv,\n EmpaLC_xlsx,\n]\n\n\nclass Quadstar_sac(FileType):\n filetype: Literal[\"quadstar.sac\"]\n\n\nMassTraceFileTypes = Quadstar_sac\n\n\nclass LabView_csv(FileType):\n filetype: Literal[\"labview.csv\"]\n\n\nQFTraceFileTypes = LabView_csv\n\n\nclass Phi_spe(FileType):\n filetype: Literal[\"phi.spe\", \"marda:phi-spe\"]\n\n\nXPSTraceFileTypes = Phi_spe\n\n\nclass Panalytical_xrdml(FileType):\n filetype: Literal[\"panalytical.xrdml\", \"marda:panalytical-xrdml\"]\n\n\nclass Panalytical_xy(FileType):\n filetype: Literal[\"panalytical.xy\"]\n\n\nclass Panalytical_csv(FileType):\n filetype: Literal[\"panalytical.csv\"]\n\n\nXRDTraceFileTypes = Union[\n Panalytical_xrdml,\n Panalytical_xy,\n Panalytical_csv,\n]\n\n\nclass ExtractorFactory(BaseModel):\n \"\"\"\n Extractor factory class.\n\n Given an ``extractor=dict(filetype=k, ...)`` argument, attempts to determine the\n correct :class:`FileType`, parses any additionally supplied parameters for that\n :class:`FileType`, and back-fills defaults such as ``timezone``, ``locale``, and\n ``encoding``.\n\n The following is the current usage pattern in :mod:`yadg`:\n\n .. code-block::\n\n ftype = ExtractorFactory(extractor={\"filetype\": k}).extractor\n\n\n \"\"\"\n\n extractor: Union[\n DummyFileTypes,\n FlowDataFileTypes,\n ElectroChemFileTypes,\n ChromDataFileTypes,\n ChromTraceFileTypes,\n MassTraceFileTypes,\n QFTraceFileTypes,\n XPSTraceFileTypes,\n XRDTraceFileTypes,\n ] = Field(..., discriminator=\"filetype\")\n\n @field_validator(\"extractor\")\n @classmethod\n def extractor_set_defaults(cls, v):\n defaults = StepDefaults()\n if v.timezone is None:\n v.timezone = defaults.timezone\n if v.locale is None:\n v.locale = defaults.locale\n if v.encoding is None:\n v.encoding = defaults.encoding\n return v\n","repo_name":"dgbowl/dgbowl-schemas","sub_path":"src/dgbowl_schemas/yadg/dataschema_5_0/filetype.py","file_name":"filetype.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"20547616511","text":"from numpy import *\nimport time\n\n# selection sort\ndef findMaxAndSwap(L, n):\n maxim = L[n]\n poz = n\n for i in range(n-1, 0, -1):\n if L[i] > maxim:\n maxim = L[i]\n poz = i\n L[poz], L[n] = L[n], L[poz]\n\ndef selectionSort(L):\n n = len(L)\n for i in range(n-1, 0, -1):\n findMaxAndSwap(L, i)\n\n# heapsort\n\ndef heapify(H, size, i):\n if (2*i > size):\n return\n largest = i\n if (2*i <= size and H[i] < H[2*i]):\n largest = 2*i\n else:\n largest = i\n if (2*i+1 <= size and H[largest] < H[2*i+1]):\n largest = 2*i+1\n if (largest != i):\n H[i], H[largest] = H[largest], H[i]\n heapify(H, size, largest)\n\ndef buildHeap(L):\n n = len(L)-1\n for i in range(n//2 + n % 2, 0, -1):\n heapify(L, n, i)\n\ndef findMaxAndSwapHeap(L, i):\n L[1], L[i] = L[i], L[1]\n\ndef heapSort(L):\n n = len(L)\n for i in range(n-1, 0, -1):\n findMaxAndSwapHeap(L, i)\n heapify(L, i - 1, 1)\n\n#L = [0] + [1, 3, 2, 5, 4]\nL = [0] + random.permutation(10000).tolist()\nL1 = L.copy()\nL2 = L.copy()\n\nt0 = time.time()\nselectionSort(L1)\nt1 = time.time()\nbuildHeap(L2)\nheapSort(L2)\nt2 = time.time()\n\nprint(t1 - t0)\nprint(t2 - t1)\n#print(L1 == L2)\n\n\n","repo_name":"vladc15/DataStructures","sub_path":"Heapsort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4023503259","text":"import asyncio\nimport inspect\nimport io\nimport os\nimport pycurl\nimport socket\nimport time\nfrom utils import log\nfrom tables import config, Url, Extracted, MegaGroup\nimport aiohttp\nfrom aioify import aioify\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\nimport utils\nimport re\nimport urllib.parse\nfrom selenium.common.exceptions import WebDriverException\nimport funcs\nimport aiotask_context\n\nclass Crawl(utils.Sem):\n def __init__(self, row):\n super().__init__(name=utils.Sem.names[3])\n self.url = row.url\n self.site = row.site\n self.browser = row.browser\n self.click = row.click\n self.times_max = row.times_max\n self.times_real = row.times_real\n self.loop = aiotask_context.get('loop_{}'.format(utils.Sem.names[3]))\n asyncio.set_event_loop(self.loop)\n super().__init__(name=utils.Sem.names[3])\n\n self.sem = asyncio.Semaphore(self.max, loop=self.loop)\n self.todo = set()\n self.busy = set()\n self.done = {}\n self.extracted = {}\n self.tasks = set()\n self.megagroups = None\n self.error = 0\n\n self.patterns = ['http://t.me/', 'https://t.me/', 'http://www.t.me/', 'https://www.t.me/',\n 'http://telegram.me/', 'https://telegram.me/', 'http://www.telegram.me/',\n 'https://www.telegram.me/']\n\n # async def start(self):\n # # t = asyncio.create_task(self.addurls([(self.url, '')]))\n #\n # async with utils.Aiohttp() as self.client:\n # t = asyncio.create_task(self.addurls([(self.url, '')]))\n #\n # await asyncio.sleep(1)\n # while self.busy:\n # await asyncio.sleep(1)\n #\n # await t\n # # await self.aiohttp.close()\n async def add(self, urls):\n # print('urls: ', urls)\n for url, parenturl in urls:\n # print('url: ', url)\n # print('parenturl: ', parenturl)\n url = urllib.parse.urljoin(parenturl, url)\n # print('url: ', url)\n url, frag = urllib.parse.urldefrag(url)\n # print('url: ', url)\n #\n # print('frag: ', frag)\n # print('url.startswith({}): {}'.format(self.url, url.startswith(self.url)))\n # if (url.startswith(self.rooturl) and url not in self.busy and url not in self.done and\n # url not in self.todo and 'DependencyHandler' not in url and url[-4:] != '.png' and\n # url[-4:] != '.svg'):\n if (url.startswith(self.url) and url not in self.busy and url not in self.done and\n url not in self.todo):\n self.todo.add(url)\n await self.run(self.process, url)\n\n async def process(self, url):\n l = log()\n self.todo.remove(url)\n # self.busy.add(url)\n try:\n if self.browser:\n async with utils.Browser.get(url, self.click, self.times_max, self.times_real) as source:\n await self.parse(url, source)\n else:\n async with utils.Url.get(url) as source:\n await self.parse(url, source)\n self.done[url] = True\n except WebDriverException as exception:\n l.e(msg='Error opening url={} - Exception: {}'.format(url, repr(exception)))\n self.error += 1\n except Exception as exc:\n print('{}: {} - {}'.format(url, 'has error', repr(str(exc))))\n self.done[url] = False\n self.error += 1\n raise\n finally:\n print('Tasks: {} - completed: {} - pending: {} - todo: {} - error: {} - extracted: {} - megagroups:{}'\n .format(self.url, len(self.done), len(self.tasks), len(self.todo), self.error,\n len(self.extracted), self.megagroups))\n\n async def parse(self, url, source):\n if source:\n urls = re.findall(r'(?i)href=[\"\\']?([^\\s\"\\'<>]+)', source)\n\n sem = utils.Sem(utils.Sem.names[3])\n results = await utils.gather(*[sem.run(utils.Curl.url, turl) for turl in {*urls}\n for pattern in self.patterns if pattern in turl])\n extracted = await Url.aiochild_get(url)\n await utils.gather(*[Url.aiochild_update(url, extracted_url=telegram)\n for telegram in {*results} if telegram not in extracted])\n\n self.extracted[url] = True\n self.megagroups = await Url.aiochild_count(url)\n\n if self.site or self.browser:\n if self.browser:\n self.browser = False\n await asyncio.create_task(self.add([(u, url) for u in urls]))\n else:\n self.error += 1\n\n\nasync def crawl():\n l = log()\n\n l.s(msg='START: crawl(Url) == {}'.format(await Extracted.aiocount()))\n await utils.gather(*[Crawl(row).add([(row.url, '')]) for row in await Url.aioall()])\n l.s(msg='END: crawl(Url) == {}'.format(await Extracted.aiocount()))\n\n l.s(msg='START: crawl(Megagroup) == {}'.format(await MegaGroup.aiocount()))\n megagroup = await MegaGroup.aioall()\n await utils.gather(*[MegaGroup.aioupdate(url=row.extracted_url) for row in await Extracted.aioall() if\n row.extracted_url not in megagroup])\n l.s(msg='END: crawl(Megagroup) == {})'.format(await MegaGroup.aiocount()))\n\nif __name__ == '__main__':\n utils.start(crawl)\n","repo_name":"mnopi/examples","sub_path":"Python/crawlers-y-sitemaps/crawl/casibuenocrawl.py","file_name":"casibuenocrawl.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24649027148","text":"import cv2\r\nimport numpy as np\r\n\r\ndef radiusfinder():\r\n\r\n img = cv2.imread('foregroundextrpart3blackandwhite.jpg',cv2.IMREAD_GRAYSCALE) #looks at image in grayscale\r\n ret,thresh = cv2.threshold(img,127,255,0) #converts image to black and white\r\n contours,hierarchy = cv2.findContours(thresh, 1, 2) #finds contours of image\r\n\r\n cnt = contours[0] #take the first contour\r\n (x,y),radius = cv2.minEnclosingCircle(cnt) # finds minimum enclosing circle on the contour\r\n center = (int(x),int(y))\r\n radius = int(radius)\r\n\r\n return radius\r\n\r\n\r\n##\r\n##img2 = cv2.imread('foregroundextrblackandwhite.jpg',cv2.IMREAD_COLOR) #reads image in color\r\n##cv2.circle(img2,center,radius,(0,255,0),3) #draws the min enclosing circle\r\n##cv2.circle(img2,center,3,(255,255,0),3) #draws the center of the circle\r\n##cv2.drawContours(img2, [cnt], 0, (0,0,255), 3) #draws all of the contour\r\n##print(center)\r\n##print(radius)\r\n##\r\n##cv2.imshow('image',img2)\r\n##cv2.waitKey(0)\r\n##cv2.destroyAllWindows()\r\n","repo_name":"Eeister/Jojoposes","sub_path":"jojo poses/compareradiusfinder.py","file_name":"compareradiusfinder.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6783047914","text":"import click\n\nfrom qiime.automation.preprocess.run_preprocess import PreProcess\n\n__author__ = \"jkkim\"\n\n\n@click.command()\n@click.option('-i', help='rawdata dir')\n@click.option('-s', help=\"sample type is either bac or its.\")\n@click.option('-t', type=int, help=\"threads.\")\n@click.option('-r', help=\"ref database is either gg or silva.\")\ndef main(i, t, s, r):\n \"\"\"do preprocess and otu clustering for community analysis.\"\"\"\n run = PreProcess(rawdata_dir=i, taxon=s, ref_db=r, threads=t, )\n run.preprocess_run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"joowkim/16s-data-analysis-using-Qiime1","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"35087701273","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/9/2 14:42\n# @Author : way\n# @Site : \n# @Describe: 将视频转换成代码视频\n\nimport os\nimport subprocess\nimport shutil\nimport cv2\nfrom PIL import Image, ImageFont, ImageDraw\n\nFFMPEG = r'D:\\ffmpeg\\bin\\ffmpeg.exe'\n\nclass CodeVideo:\n\n def __init__(self, **kwargs):\n \"\"\"\n :param kwargs:\n vediopath: 输入视频文件路径\n gray: 输出视频的颜色 True 灰色 False 彩色 默认 True\n style: 输出视频的代码风格 可选有 0,1,2,3 种 默认 0\n clean: 是否删除临时文件 True 删除 False 不删除 默认 True\n cut: 是否先对原视频做截取处理 True 截取 False 不截取 默认 False\n start: 视频截取开始时间点, 默认 00:00:00 仅当iscut=True时有效\n end: 视频截取结束时间点, 默认 00:00:14 仅当iscut=True时有效\n \"\"\"\n self.vediopath = kwargs.get('vediopath')\n self.code_color = (169, 169, 169) if kwargs.get('gray', True) else None\n self.clean = kwargs.get('clean', True)\n self.cut = kwargs.get('cut', False)\n self.cut_start = kwargs.get('start', '00:00:00')\n self.cut_end = kwargs.get('end', '00:00:14')\n self.ascii_char = (\n list(\"MNHQ$OC67)oa+>!:+. \"),\n list(\"MNHQ$OC67+>!:-. \"),\n list(\"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:oa+>!:+. \"),\n ['.', ',', ':', ';', '+', '*', '?', '%', 'S', '#', '@'],\n )[kwargs.get('style', 0)] # 像素对应ascii码\n\n def main(self):\n file_cut = self.vediopath.split('.')[0] + '_cut.mp4'\n file_mp3 = self.vediopath.split('.')[0] + '.mp3'\n file_temp_avi = self.vediopath.split('.')[0] + '_temp.avi'\n outfile_name = self.vediopath.split('.')[0] + '_code.mp4'\n print(\"开始生成...\")\n if self.cut:\n print(\"正在截取视频...\")\n self.vediocut(self.vediopath, file_cut, self.cut_start, self.cut_end)\n self.vediopath = file_cut\n print(\"正在转换代码图片...\")\n vc = self.video2txt_jpg(self.vediopath) # 视频转图片,图片转代码图片\n FPS = vc.get(cv2.CAP_PROP_FPS) # 获取帧率\n vc.release()\n print(\"正在分离音频...\")\n self.video2mp3(self.vediopath, file_mp3) # 从原视频分离出 音频mp3\n print(\"正在转换代码视频...\")\n self.jpg2video(file_temp_avi, FPS) # 代码图片转视频\n print(\"正在合成目标视频...\")\n self.video_add_mp3(file_temp_avi, file_mp3, outfile_name) # 将音频合成到代码视频\n if self.clean: # 移除临时文件\n print(\"正在移除临时文件...\")\n shutil.rmtree(\"Cache\")\n for file in [file_cut, file_mp3, file_temp_avi]:\n if os.path.exists(file):\n os.remove(file)\n print(\"生成成功:{0}\".format(outfile_name))\n\n # 将视频拆分成图片\n def video2txt_jpg(self, file_name):\n vc = cv2.VideoCapture(file_name)\n c = 1\n if vc.isOpened():\n r, frame = vc.read()\n if not os.path.exists('Cache'):\n os.mkdir('Cache')\n os.chdir('Cache')\n else:\n r = False\n while r:\n cv2.imwrite(str(c) + '.jpg', frame)\n self.txt2image(str(c) + '.jpg') # 同时转换为ascii图\n r, frame = vc.read()\n c += 1\n os.chdir('..')\n return vc\n\n # 将txt转换为图片\n def txt2image(self, file_name):\n im = Image.open(file_name).convert('RGB')\n # gif拆分后的图像,需要转换,否则报错,由于gif分割后保存的是索引颜色\n raw_width = im.width\n raw_height = im.height\n width = int(raw_width / 6)\n height = int(raw_height / 15)\n im = im.resize((width, height), Image.NEAREST)\n\n txt = \"\"\n colors = []\n for i in range(height):\n for j in range(width):\n pixel = im.getpixel((j, i))\n colors.append((pixel[0], pixel[1], pixel[2]))\n if (len(pixel) == 4):\n txt += self.get_char(pixel[0], pixel[1], pixel[2], pixel[3])\n else:\n txt += self.get_char(pixel[0], pixel[1], pixel[2])\n txt += '\\n'\n colors.append((255, 255, 255))\n\n im_txt = Image.new(\"RGB\", (raw_width, raw_height), (255, 255, 255))\n dr = ImageDraw.Draw(im_txt)\n # font = ImageFont.truetype(os.path.join(\"fonts\",\"汉仪楷体简.ttf\"),18)\n font = ImageFont.load_default().font\n x = y = 0\n # 获取字体的宽高\n font_w, font_h = font.getsize(txt[1])\n font_h *= 1.37 # 调整后更佳\n # ImageDraw为每个ascii码进行上色\n for i in range(len(txt)):\n if (txt[i] == '\\n'):\n x += font_h\n y = -font_w\n if self.code_color:\n dr.text((y, x), txt[i], fill=self.code_color) # fill=colors[i]彩色\n else:\n dr.text((y, x), txt[i], fill=colors[i]) # fill=colors[i]彩色\n y += font_w\n im_txt.save(file_name)\n\n # 将像素转换为ascii码\n def get_char(self, r, g, b, alpha=256):\n if alpha == 0:\n return ''\n gray = int(0.2126 * r + 0.7152 * g + 0.0722 * b)\n unit = (256.0 + 1) / len(self.ascii_char)\n return self.ascii_char[int(gray / unit)]\n\n # 代码图片转视频\n @staticmethod\n def jpg2video(outfile_name, fps):\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n images = os.listdir('Cache')\n im = Image.open('Cache/' + images[0])\n vw = cv2.VideoWriter(outfile_name, fourcc, fps, im.size)\n os.chdir('Cache')\n for image in range(len(images)):\n frame = cv2.imread(str(image + 1) + '.jpg')\n vw.write(frame)\n os.chdir('..')\n vw.release()\n\n # 调用 ffmpeg 分离音频\n @staticmethod\n def video2mp3(file_name, outfile_name):\n cmdstr = f'{FFMPEG} -i {file_name} -f mp3 {outfile_name} -y'\n subprocess.call(cmdstr, shell=True, creationflags=0x08000000)\n\n # 调用 ffmpeg 给视频添加音频\n @staticmethod\n def video_add_mp3(file_name, mp3_file, outfile_name):\n cmdstr = f'{FFMPEG} -i {file_name} -i {mp3_file} -strict -2 -f mp4 {outfile_name} -y'\n subprocess.call(cmdstr, shell=True, creationflags=0x08000000)\n\n # 调用 ffmpeg 截取视频\n @staticmethod\n def vediocut(file_name, outfile_name, start, end):\n cmdstr = f'{FFMPEG} -i {file_name} -vcodec copy -acodec copy -ss {start} -to {end} {outfile_name} -y'\n subprocess.call(cmdstr, shell=True, creationflags=0x08000000)\n\n\nif __name__ == '__main__':\n vediopath = r\"C:\\Users\\Administrator\\Desktop\\test.mp4\"\n CodeVideo(vediopath=vediopath).main()\n","repo_name":"TurboWay/pydemo","sub_path":"demo_code_video.py","file_name":"demo_code_video.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"39371050143","text":"from ProviderBase import ProviderBase\nfrom django.conf import settings\nfrom datetime import datetime\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom operator import itemgetter\nfrom pygithub3 import Github\nfrom pygithub3.exceptions import NotFound\nfrom repowatcher.main.models import RepositoryUser, Repository, LinkType\nfrom repowatcher.main.tasks import get_events\nimport json\nimport logging\nimport requests\nfrom django.http import Http404\nlogger = logging.getLogger(__name__)\n\nclass GithubProvider(ProviderBase):\n\n base_url = \"https://api.github.com/\"\n\n\n def __init__(self, user):\n self.user = user\n self.host = 'github'\n self.access_token = None\n if user.is_authenticated():\n try:\n self.access_token=user.social_auth.get(provider=self.host).extra_data['access_token']\n self.client = Github(token=self.access_token)\n except ObjectDoesNotExist:\n self.client = Github(client_id=settings.GITHUB_APP_ID, client_secret=settings.GITHUB_API_SECRET)\n else:\n self.client = Github(client_id=settings.GITHUB_APP_ID, client_secret=settings.GITHUB_API_SECRET)\n\n def get_user_details(self, username):\n try:\n github_user = self.client.users.get(username)\n return vars(github_user)\n except:\n raise Http404\n\n def create_or_update_user_details(self, user_dict, repository_user = None):\n if repository_user is None:\n repository_user = RepositoryUser()\n extra_data = {}\n for key, value in user_dict.iteritems():\n if key == \"_attrs\":\n continue\n if key in ['login', 'name', 'email', 'blog','following','followers','public_repos','created_at']:\n setattr(repository_user, key, value)\n else:\n if isinstance(value, datetime):\n extra_data[key] = value.__str__()\n else:\n extra_data[key] = value\n repository_user.extra_data = json.dumps(extra_data)\n repository_user.host = self.host\n return repository_user\n\n def retrieve_user_details(self, username):\n return RepositoryUser.objects.get(slug=self.host+'/'+username.lower())\n\n def get_user_events(self, username):\n try:\n r = requests.get(GithubProvider.base_url + 'users/'+ username + '/events', params = {\"client_id\": settings.GITHUB_APP_ID, \"client_secret\": settings.GITHUB_API_SECRET})\n user_events = json.loads(r.text)\n except Exception:\n user_events = []\n return user_events\n\n def get_repository_details(self, owner, repository):\n try:\n repo = self.client.repos.get(user=owner,repo=repository)\n return vars(repo)\n except NotFound:\n raise Http404\n\n def create_or_update_repository_details(self, repository_dict, repository = None):\n if repository is None:\n repository = Repository()\n extra_data = {}\n for key, value in repository_dict.iteritems():\n if key == \"_attrs\":\n continue\n if key == 'owner' or key == \"organization\":\n try:\n setattr(repository, key, value.login)\n continue\n except Exception:\n pass\n if key == 'source' or key == \"parent\":\n try:\n setattr(repository, key, value.full_name)\n continue\n except Exception:\n pass\n if key in ['owner', 'name', 'html_url', 'homepage', 'language','description','watchers','created_at','pushed_at','private']:\n setattr(repository, key, value)\n else:\n if isinstance(value, datetime):\n extra_data[key] = value.__str__()\n else:\n extra_data[key] = value\n repository.extra_data = json.dumps(extra_data)\n if repository.language == \"\" or repository.language == None:\n repository.language = \"other\"\n repository.scm = 'git'\n repository.host =self.host\n repository.language = repository.language.lower()\n return repository\n\n def retrieve_repository_details(self, owner, repository):\n host_slug = ('/'.join((self.host, owner, repository))).lower()\n return Repository.objects.get(host_slug=host_slug)\n\n def get_repository_events(self, owner, repository):\n slug = (owner + '/' + repository).lower()\n try:\n r = requests.get('https://api.github.com/repos/'+ slug + '/events', params = {\"client_id\": settings.GITHUB_APP_ID, \"client_secret\": settings.GITHUB_API_SECRET})\n repo_events = json.loads(r.text)\n except Exception:\n repo_events = []\n return repo_events\n\n def get_repositories_events(self, repository_list):\n repo_events = []\n request_urls = []\n url_requests = []\n for repository in repository_list:\n slug = '/'.join((repository.owner, repository.name))\n request_urls.append(GithubProvider.base_url + 'repos/' + slug + '/events' + '?client_id=' + settings.GITHUB_APP_ID + '&client_secret=' + settings.GITHUB_API_SECRET)\n for url in request_urls:\n url_requests.append(get_events.delay(url))\n for url_request in url_requests:\n get = url_request.get()\n if get is not None:\n repo_events.extend(get[:30])\n repo_events = sorted(repo_events, key=itemgetter('created_at'), reverse = True)[:30]\n return repo_events\n\n def get_repositories(self, username, link_type):\n if link_type == \"owned\":\n generator = self.client.repos.list(user=username)\n elif link_type == \"starred\":\n generator = self.client.repos.stargazers.list_repos(user=username)\n elif link_type == \"watched\":\n generator = self.client.repos.watchers.list_repos(user=username)\n for repository in generator.iterator():\n repository_dict = vars(repository)\n repository_dict['owner'] = repository.owner.login\n yield repository_dict\n\n def get_watched_status(self, owner, repository):\n watched = False\n if self.user.is_authenticated():\n try:\n watched = self.client.repos.watchers.is_watching(owner, repository)\n except Exception:\n pass\n return watched\n\n def get_starred_status(self, owner, repository):\n watched = False\n if self.user.is_authenticated():\n try:\n watched = self.client.repos.stargazers.is_starring(owner, repository)\n except Exception:\n pass\n return watched\n\n def watch(self, owner, repository):\n self.client.repos.watchers.watch(owner, repository)\n\n def star(self, owner, repository):\n self.client.repos.stargazers.star(owner, repository)\n\n def unwatch(self, owner, repository):\n self.client.repos.watchers.unwatch(owner, repository)\n\n def unstar(self, owner, repository):\n self.client.repos.stargazers.unstar(owner, repository)\n\n def search_user(self, username):\n if self.access_token is None:\n params = {\"client_id\": settings.GITHUB_APP_ID, \"client_secret\": settings.GITHUB_API_SECRET}\n else:\n params = {'access_token': self.access_token}\n try:\n r = requests.get(GithubProvider.base_url + 'legacy/user/search/'+ username,params = params)\n user_results = json.loads(r.text)['users']\n except Exception:\n user_results = []\n return user_results\n\n def search_repository(self, repository):\n if self.access_token is None:\n params = {\"client_id\": settings.GITHUB_APP_ID, \"client_secret\": settings.GITHUB_API_SECRET}\n else:\n params = {'access_token': self.access_token}\n try:\n r = requests.get(GithubProvider.base_url + 'legacy/repos/search/'+ repository, params = params)\n repository_results = json.loads(r.text)['repositories']\n except Exception:\n repository_results = []\n return repository_results\n","repo_name":"oracal/repowatcher","sub_path":"repowatcher/main/views/GithubProvider.py","file_name":"GithubProvider.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"22"} +{"seq_id":"42470080862","text":"# run.py\n\nfrom flask import Flask\nfrom config import DATABASE_URI\nfrom models import db, login_manager,Role,User\nfrom views.auth import auth_bp\nfrom views.home import home_bp\nfrom views.ocpcluster import cluster_bp\nfrom views.admin import admin_bp\nfrom flask_login import LoginManager\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Set a secret key for the session management\napp.config['SECRET_KEY'] = '11002355'\n\ndb.init_app(app)\n\n# Set up flask_login\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"auth.login\" # The route for the login page\n\n\n# Create all the database tables\nwith app.app_context():\n db.create_all()\n Role.create_default_roles()\n # Check if there are any users in the database\n if not User.query.first():\n # If no users exist, create the default admin role\n admin_role = Role.query.filter_by(name='Admin').first()\n if not admin_role:\n admin_role = Role(name='Admin')\n db.session.add(admin_role)\n db.session.commit()\n\n # Create the default admin user\n default_admin_user = User(\n first_name='Admin',\n last_name='User',\n email='admin@example.com',\n password='password', # Replace with the desired default password\n role=admin_role,\n force_password_change=True # Set this flag to force password change on first login\n )\n db.session.add(default_admin_user)\n db.session.commit()\n\napp.register_blueprint(auth_bp)\napp.register_blueprint(home_bp)\napp.register_blueprint(cluster_bp)\napp.register_blueprint(admin_bp)\n\nif __name__ == '__main__':\n app.run(debug=True)\n ","repo_name":"niteshsharma1106/flask_backend1","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22809554121","text":"'''\r\nCreated on Mar 16, 2018\r\n\r\n@author: Mmk\r\n'''\r\nimport pandas as pd\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.linear_model import LinearRegression \r\nfrom flask import Flask\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef pridict_sal( ):\r\n #username = request.args.get('exp')\r\n #print(username)\r\n dataset = pd.read_csv(\"Salary_Data.csv\")\r\n X = dataset.iloc[:,:-1].values\r\n y = dataset.iloc[:,1].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\r\n \r\n regressor = LinearRegression()\r\n regressor.fit(X_train, y_train)\r\n y_pred = regressor.predict(1.5)\r\n \r\n return str(y_pred)\r\n\r\n@app.route('/my_endpoint/<int:id>', methods=['GET'])\r\ndef my_endpoint_handler(id):\r\n print(id)\r\n dataset = pd.read_csv(\"Salary_Data.csv\")\r\n X = dataset.iloc[:,:-1].values\r\n y = dataset.iloc[:,1].values\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\r\n \r\n regressor = LinearRegression()\r\n regressor.fit(X_train, y_train)\r\n y_pred = regressor.predict(int(id))\r\n \r\n return str(y_pred*15)\r\n\r\nif __name__ == '__main__':\r\n print(pridict_sal())\r\n app.run(debug=True)\r\n","repo_name":"nirajkmr007/SalaryPredictor","sub_path":"salary_predictor.py","file_name":"salary_predictor.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14231480461","text":"#!/usr/bin/env python3\nfrom utils import *\nfrom pico_process import PicoContext, process_code, CartSource, CustomPreprocessor, ErrorFormat\nfrom pico_compress import write_code_size, write_compressed_size, CompressionTracer\nfrom pico_cart import Cart, CartFormat, read_cart, write_cart, get_bbs_cart_url\nfrom pico_export import read_cart_export, read_pod_file, ListOp\nfrom pico_tokenize import k_hint_split_re\nimport argparse\n\nk_version = 'v1.1e'\n\ndef SplitBySeps(val):\n return k_hint_split_re.split(val)\n\ndef EnumFromStr(enum_type):\n def cvt(name):\n return enum_type(name.replace(\"-\", \"_\").replace(\" \", \"_\"))\n cvt.__name__ = enum_type.__name__\n return cvt\n\ndef EnumList(list):\n return \", \".join(str.replace(\"_\", \"-\") for str in list)\n\ndef ParsableCountHandler(prefix, name, size, limit):\n print(f\"count:{prefix}:{name}:{size}:{limit}\")\n\nextend_arg = \"extend\" if sys.version_info >= (3,8) else None\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\", help=\"input file, can be in any format. ('-' for stdin)\", nargs='?')\nparser.add_argument(\"output\", help=\"output file. ('-' for stdout)\", nargs='?')\nparser.add_argument(\"-f\", \"--format\", type=EnumFromStr(CartFormat), help=\"output cart format {%s}\" % EnumList(CartFormat.output_names))\nparser.add_argument(\"-F\", \"--input-format\", type=EnumFromStr(CartFormat), help=\"input cart format {%s}\" % EnumList(CartFormat.input_names))\nparser.add_argument(\"-u\", \"--unicode-caps\", action=\"store_true\", help=\"write capitals as italicized unicode characters (better for copy/paste)\")\n\npgroup = parser.add_argument_group(\"minify options\")\npgroup.add_argument(\"-m\", \"--minify\", action=\"store_true\", help=\"enable minification of the cart\")\npgroup.add_argument(\"-M\", \"--minify-safe-only\", action=\"store_true\", help=\"only do minifaction that's always safe to do\")\npgroup.add_argument(\"-ot\", \"--focus-tokens\", action=\"store_true\", help=\"when minifying, focus on reducing the amount of tokens\")\npgroup.add_argument(\"-oc\", \"--focus-chars\", action=\"store_true\", help=\"when minifying, focus on reducing the amount of characters\")\npgroup.add_argument(\"-ob\", \"--focus-compressed\", action=\"store_true\", help=\"when minifying, focus on reducing the code's compressed size\")\npgroup.add_argument(\"--no-minify-rename\", action=\"store_true\", help=\"disable variable renaming in minification\")\npgroup.add_argument(\"--no-minify-spaces\", action=\"store_true\", help=\"disable space removal in minification\")\npgroup.add_argument(\"--no-minify-lines\", action=\"store_true\", help=\"disable line removal in minification\")\npgroup.add_argument(\"--no-minify-comments\", action=\"store_true\", help=\"disable comment removal in minification (requires --no-minify-spaces)\")\npgroup.add_argument(\"--no-minify-tokens\", action=\"store_true\", help=\"disable token removal/changes in minification\")\npgroup.add_argument(\"--no-minify-reorder\", action=\"store_true\", help=\"disable statement reordering in minification\")\npgroup.add_argument(\"-p\", \"--preserve\", type=SplitBySeps, action=extend_arg, help='preserve specific identifiers in minification, e.g. \"global1, global2, *.member2, table3.*\"')\npgroup.add_argument(\"--no-preserve\", type=SplitBySeps, action=extend_arg, help='do not preserve specific built-in identifiers in minification, e.g. \"circfill, rectfill\"')\npgroup.add_argument(\"--rename-safe-only\", action=\"store_true\", help=\"only do renaming that's always safe to do (subset of --minify-safe-only)\")\npgroup.add_argument(\"--rename-members-as-globals\", action=\"store_true\", help='rename globals and members the same way (same as --preserve \"*=*.*\")')\npgroup.add_argument(\"--reorder-safe-only\", action=\"store_true\", help=\"only do statement reordering that's always safe to do (subset of --minify-safe-only)\")\npgroup.add_argument(\"--rename-map\", help=\"log renaming of identifiers (from minify step) to this file\")\n\npgroup = parser.add_argument_group(\"lint options\")\npgroup.add_argument(\"-l\", \"--lint\", action=\"store_true\", help=\"enable checking the cart for common issues\")\npgroup.add_argument(\"--no-lint-unused\", action=\"store_true\", help=\"don't print lint warnings on unused variables\")\npgroup.add_argument(\"--no-lint-duplicate\", action=\"store_true\", help=\"don't print lint warnings on duplicate variables\")\npgroup.add_argument(\"--no-lint-undefined\", action=\"store_true\", help=\"don't print lint warnings on undefined variables\")\npgroup.add_argument(\"--no-lint-fail\", action=\"store_true\", help=\"create output cart even if there were lint warnings\")\npgroup.add_argument(\"--lint-global\", type=SplitBySeps, action=extend_arg, help=\"don't print lint warnings for these globals (same as '--lint:' comment)\")\npgroup.add_argument(\"--error-format\", type=EnumFromStr(ErrorFormat), help=\"how to format lint warnings & compilation errors {%s}\" % EnumList(ErrorFormat._values))\n\npgroup = parser.add_argument_group(\"count options\")\npgroup.add_argument(\"-c\", \"--count\", action=\"store_true\", help=\"enable printing token count, character count & compressed size\")\npgroup.add_argument(\"--input-count\", action=\"store_true\", help=\"enable printing input token count, character count & compressed size\")\npgroup.add_argument(\"--parsable-count\", action=\"store_true\", help=\"output counts in a stable, parsable format\")\npgroup.add_argument(\"--no-count-compress\", action=\"store_true\", help=\"do not compress the cart just to print the compressed size\")\npgroup.add_argument(\"--no-count-tokenize\", action=\"store_true\", help=\"do not tokenize the cart just to print the token count\")\n\npgroup = parser.add_argument_group(\"unminify options\")\npgroup.add_argument(\"--unminify\", action=\"store_true\", help=\"enable unminification of the cart\")\npgroup.add_argument(\"--unminify-indent\", type=int, help=\"indentation size when unminifying\", default=2)\n\npgroup = parser.add_argument_group(\"misc. options\")\npgroup.add_argument(\"-s\", \"--script\", help=\"manipulate the cart via a custom python script - see README for api details\")\npgroup.add_argument(\"--script-args\", nargs=argparse.REMAINDER, help=\"send arguments directly to --script\", default=())\npgroup.add_argument(\"--label\", help=\"path to image to use as the label when creating png carts (default: taken from __label__ like pico8 does)\")\npgroup.add_argument(\"--title\", help=\"title to use when creating png carts (default: taken from first two comments like pico8 does)\")\npgroup.add_argument(\"--extra-output\", nargs='+', action=\"append\", metavar=(\"OUTPUT\", \"FORMAT\"), help=\"Additional output file to produce (and optionally, the format to use)\")\n\npgroup = parser.add_argument_group(\"cart export options (for use with the formats: %s)\" % EnumList(CartFormat.export_names))\npgroup.add_argument(\"--list\", action=\"store_true\", help=\"list all cart names inside the export\")\npgroup.add_argument(\"--dump\", help=\"dump all carts inside the export to the specified folder. -f can be used to specify the output format\")\npgroup.add_argument(\"--cart\", help=\"name of cart to extract from the export\")\npgroup.add_argument(\"--pico8-dat\", help=\"path to the pico8.dat file in the pico8 directory. needed to create new exports\")\npgroup.add_argument(\"--insert-cart\", nargs=\"*\", metavar=(\"NAME\", \"BEFORE\"), help=\"add the cart to an existing export. (The default name is the input cart's name)\")\npgroup.add_argument(\"--replace-cart\", nargs=\"*\", metavar=(\"NAME\"), help=\"replace the cart with the given name (Default: main cart) in the export\")\npgroup.add_argument(\"--delete-cart\", nargs=1, help=\"delete the cart with the given name from the export\")\npgroup.add_argument(\"--rename-cart\", nargs=2, metavar=(\"OLD\", \"NEW\"), help=\"rename the cart with the given name in the export\")\n\npgroup = parser.add_argument_group(\"compression options (semi-undocumented)\")\npgroup.add_argument(\"--keep-compression\", action=\"store_true\", help=\"keep existing compression, instead of re-compressing\")\npgroup.add_argument(\"--fast-compression\", action=\"store_true\", help=\"force fast but poor compression (when creating png carts)\")\npgroup.add_argument(\"--force-compression\", action=\"store_true\", help=\"force code compression even if code fits (when creating png carts)\")\npgroup.add_argument(\"--old-compression\", action=\"store_true\", help=\"compress with the old pre-v0.2.0 compression scheme\")\npgroup.add_argument(\"--trace-compression\", help=\"trace the compressed symbols and their cost into this file\")\npgroup.add_argument(\"--trace-input-compression\", help=\"trace the input's compressed symbols and their cost into this file\")\n\npgroup = parser.add_argument_group(\"other semi-undocumented options\")\npgroup.add_argument(\"--builtin\", type=SplitBySeps, action=extend_arg, help=\"treat identifier(s) as a pico-8 builtin (for minify, lint, etc.)\")\npgroup.add_argument(\"--not-builtin\", type=SplitBySeps, action=extend_arg, help=\"do not treat identifier(s) as a pico-8 builtin (for minify, lint, etc.)\")\npgroup.add_argument(\"--global-builtins-only\", action=\"store_true\", help=\"assume all builtins are global, equivalent to pico8's -global_api option\")\npgroup.add_argument(\"--version\", action=\"store_true\", help=\"print version of cart. (if no cart is provided - print shrinko8 version and exit)\")\npgroup.add_argument(\"--bbs\", action=\"store_true\", help=\"interpret input as a bbs cart id, e.g. '#...' and download it from the bbs\")\npgroup.add_argument(\"--url\", action=\"store_true\", help=\"interpret input as a URL, and download it from the internet\")\npgroup.add_argument(\"--ignore-hints\", action=\"store_true\", help=\"ignore shrinko8 hint comments\")\npgroup.add_argument(\"--custom-preprocessor\", action=\"store_true\", help=\"enable a custom preprocessor (#define X 123, #ifdef X, #[X], #[X[[print('X enabled')]]])\")\npgroup.add_argument(\"--dump-misc-too\", action=\"store_true\", help=\"causes --dump to also dump misc. files inside the export\")\n\ndef default_output_format(output):\n ext = path_extension(output)[1:].lower()\n if ext in CartFormat.ext_names:\n return CartFormat(ext)\n else:\n return CartFormat.p8\n\ndef main_inner(raw_args):\n if not raw_args: # help is better than usage\n parser.print_help(sys.stderr)\n return 1\n\n args = parser.parse_args(raw_args)\n\n if args.version and not args.input:\n print(k_version)\n return\n \n if not args.input:\n throw(\"No input file specified\")\n \n if args.delete_cart or args.rename_cart:\n if args.output:\n throw(\"Only need to specify a single cart when using --delete-cart or --rename-cart\")\n args.input, args.output = None, args.input\n\n if args.input == \"-\":\n args.input = StdPath(\"-\")\n if args.output == \"-\":\n args.output = StdPath(\"-\")\n\n if args.url:\n args.input = URLPath(args.input)\n elif args.bbs:\n args.input = URLPath(get_bbs_cart_url(args.input))\n args.input_format = CartFormat.png\n\n if not args.lint and not args.count and not args.output and not args.input_count and not args.version and not args.list and not args.dump and not args.script:\n throw(\"No operation (--lint/--count/--script) or output file specified\")\n if args.format and not args.output and not args.dump:\n throw(\"Output should be specified under --format\")\n if args.minify and not args.output and not args.count:\n throw(\"Output (or --count) should be specified under --minify\")\n if args.minify and args.keep_compression:\n throw(\"Can't modify code and keep compression\")\n if (args.list or args.dump) and (args.output or args.lint or args.count):\n throw(\"--list & --dump can't be combined with most other options\")\n if (e(args.delete_cart) + e(args.rename_cart) + e(args.insert_cart) + e(args.replace_cart)) > 1:\n throw(\"Can only specify one of --insert/replace/delete/rename-cart\")\n \n if not args.format and args.output:\n args.format = default_output_format(args.output)\n\n if not args.input_format and args.input:\n ext = path_extension(args.input)[1:].lower()\n if ext in CartFormat.ext_names:\n args.input_format = CartFormat(ext)\n\n if args.lint:\n args.lint = {\n \"unused\": not args.no_lint_unused,\n \"duplicate\": not args.no_lint_duplicate,\n \"undefined\": not args.no_lint_undefined,\n \"globals\": args.lint_global or (),\n }\n \n args.focus = []\n if args.focus_chars:\n args.focus.append(\"chars\")\n if args.focus_compressed:\n args.focus.append(\"compressed\")\n if args.focus_tokens:\n args.focus.append(\"tokens\")\n\n if args.minify or args.minify_safe_only:\n args.minify = {\n \"safe-reorder\": args.minify_safe_only or args.reorder_safe_only,\n \"lines\": not args.no_minify_lines,\n \"wspace\": not args.no_minify_spaces,\n \"comments\": not args.no_minify_comments,\n \"tokens\": not args.no_minify_tokens,\n \"reorder\": not args.no_minify_reorder,\n \"focus\": args.focus,\n }\n\n args.rename = bool(args.minify) and not args.no_minify_rename\n if args.rename:\n if args.no_preserve:\n args.preserve = (args.preserve or []) + [f\"!{item}\" for item in args.no_preserve]\n if args.rename_members_as_globals:\n args.preserve = (args.preserve or []) + [\"*=*.*\"]\n args.rename = {\n \"safe-only\": args.minify_safe_only or args.rename_safe_only,\n \"focus\": args.focus,\n \"rules\": args.preserve or (),\n }\n\n if args.unminify:\n args.unminify = {\n \"indent\": args.unminify_indent\n }\n\n preproc_cb, postproc_cb, sublang_cb = None, None, None\n if args.script:\n preproc_cb, postproc_cb, sublang_cb = import_from_script_by_path(args.script, \"preprocess_main\", \"postprocess_main\", \"sublanguage_main\")\n\n base_count_handler = ParsableCountHandler if args.parsable_count else True\n if args.input_count:\n args.input_count = base_count_handler\n if args.count:\n args.count = base_count_handler\n\n if args.trace_input_compression:\n args.trace_input_compression = CompressionTracer(args.trace_input_compression)\n if args.trace_compression:\n args.trace_compression = CompressionTracer(args.trace_compression)\n\n if args.input:\n try:\n if args.list or args.dump:\n export = read_cart_export(args.input, args.input_format)\n if args.list:\n for entry in export.list_carts():\n print(entry)\n else:\n dir_ensure_exists(args.dump)\n export.dump_contents(args.dump, default(args.format, CartFormat.p8), misc=args.dump_misc_too)\n return 0\n\n preprocessor = CustomPreprocessor() if args.custom_preprocessor else None\n cart = read_cart(args.input, args.input_format, size_handler=args.input_count, \n debug_handler=args.trace_input_compression, cart_name=args.cart,\n keep_compression=args.keep_compression, preprocessor=preprocessor)\n src = CartSource(cart)\n except OSError as err:\n throw(f\"cannot read cart: {err}\")\n\n if args.input_count:\n write_code_size(cart, handler=args.input_count, input=True)\n \n ctxt = PicoContext(extra_builtins=args.builtin, not_builtins=args.not_builtin, \n local_builtins=not args.global_builtins_only,\n srcmap=args.rename_map, sublang_getter=sublang_cb, version=cart.version_id,\n hint_comments=not args.ignore_hints)\n if preproc_cb:\n preproc_cb(cart=cart, src=src, ctxt=ctxt, args=args)\n\n ok, errors = process_code(ctxt, src, input_count=args.input_count, count=args.count,\n lint=args.lint, minify=args.minify, rename=args.rename,\n unminify=args.unminify, stop_on_lint=not args.no_lint_fail,\n fail=False, want_count=not args.no_count_tokenize)\n if errors:\n print(\"Lint warnings:\" if ok else \"Compilation errors:\")\n for error in sorted(errors):\n print(error.format(args.error_format))\n if not ok or not args.no_lint_fail:\n return 2 if ok else 1\n\n if args.rename_map:\n file_write_text(args.rename_map, \"\\n\".join(ctxt.srcmap))\n \n if postproc_cb:\n postproc_cb(cart=cart, args=args)\n \n if args.count:\n write_code_size(cart, handler=args.count)\n if not (args.output and str(args.format) not in CartFormat.src_names) and not args.no_count_compress: # else, will be done in write_cart\n write_compressed_size(cart, handler=args.count, fast_compress=args.fast_compression, debug_handler=args.trace_compression)\n \n if args.version:\n print(\"version: %d, v%d.%d.%d:%d, %c\" % (cart.version_id, *cart.version_tuple, cart.platform))\n\n else:\n # output-only operations\n cart = Cart() # just to avoid exceptions\n errors = ()\n\n if args.output:\n output_cart_args = default(args.insert_cart, default(args.replace_cart, default(args.delete_cart, args.rename_cart)))\n output_cart_op = ListOp.insert if e(args.insert_cart) else ListOp.delete if e(args.delete_cart) else \\\n ListOp.replace if e(args.replace_cart) else ListOp.rename if e(args.rename_cart) else None\n\n all_outputs = [(args.output, args.format)]\n if args.extra_output:\n for extra_output in args.extra_output:\n if len(extra_output) == 1:\n all_outputs.append((extra_output[0], default_output_format(extra_output[0])))\n elif len(extra_output) == 2:\n all_outputs.append((extra_output[0], CartFormat(extra_output[1])))\n else:\n throw(\"too many arguments to --extra-output\")\n \n for output, format in all_outputs:\n target, pico8_dat = None, None\n if str(format) in CartFormat.export_names:\n if e(output_cart_op):\n try:\n target = read_cart_export(output, format)\n except OSError as err:\n throw(f\"cannot read export for editing: {err}\")\n else:\n if not args.pico8_dat:\n throw(\"Creating a new export requires passing --pico8-dat <path to pico8 dat>\")\n try:\n pico8_dat = read_pod_file(args.pico8_dat)\n except OSError as err:\n throw(f\"cannot read pico8 dat: {err}\")\n\n try:\n write_cart(output, cart, format, size_handler=args.count,\n debug_handler=args.trace_compression,\n unicode_caps=args.unicode_caps, old_compress=args.old_compression,\n force_compress=args.count or args.force_compression,\n fast_compress=args.fast_compression, keep_compression=args.keep_compression,\n screenshot_path=args.label, title=args.title,\n cart_args=output_cart_args, cart_op=output_cart_op, target=target, pico8_dat=pico8_dat)\n except OSError as err:\n throw(f\"cannot write cart: {err}\")\n\n if errors:\n return 2\n\ndef main(raw_args):\n try:\n return main_inner(raw_args)\n except CheckError as e:\n sys.stdout.flush()\n eprint(\"ERROR: \" + str(e))\n return 1\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n","repo_name":"thisismypassport/shrinko8","sub_path":"shrinko8.py","file_name":"shrinko8.py","file_ext":"py","file_size_in_byte":19661,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"22"} +{"seq_id":"2036976003","text":"import bpy\n\n\ndef console_namespace():\n import console_python\n for window in bpy.context.window_manager.windows:\n for area in window.screen.areas:\n if area.type == 'CONSOLE':\n for region in area.regions:\n if region.type == 'WINDOW':\n console = console_python.get_console(hash(region))\n if console:\n return console[0].locals\n return {}\n\n\ndef is_display_list(listvar):\n from mathutils import Vector\n\n for var in listvar:\n if not isinstance(var, Vector):\n return False\n return True\n\n\nclass VarStates:\n\n @staticmethod\n def store_states():\n # Store the display states, called upon unregister the Add-on\n # This is useful when you press F8 to reload the Addons.\n # Then this function preserves the display states of the\n # console variables.\n state_props = bpy.context.window_manager.MathVisStatePropList\n variables = get_math_data()\n\n for index, state_prop in reversed(list(enumerate(state_props))):\n if state_prop.name not in variables:\n # Variable has been removed from console\n state_props.remove(index)\n\n for key, ktype in variables.items():\n if key and key not in state_props:\n prop = state_props.add()\n prop.name = key\n prop.ktype = ktype.__name__\n prop.state = [True, False]\n\n @staticmethod\n def get_index(key):\n index = bpy.context.window_manager.MathVisStatePropList.find(key)\n return index\n\n @staticmethod\n def delete(key):\n state_props = bpy.context.window_manager.MathVisStatePropList\n index = state_props.find(key)\n if index != -1:\n state_props.remove(index)\n\n @staticmethod\n def toggle_display_state(key):\n state_props = bpy.context.window_manager.MathVisStatePropList\n if key in state_props:\n state_props[key].state[0] = not state_props[key].state[0]\n else:\n print(\"Odd: Can not find key %s in MathVisStateProps\" % (key))\n\n @staticmethod\n def toggle_lock_state(key):\n state_props = bpy.context.window_manager.MathVisStatePropList\n if key in state_props:\n state_props[key].state[1] = not state_props[key].state[1]\n else:\n print(\"Odd: Can not find key %s in MathVisStateProps\" % (key))\n\n\ndef get_math_data():\n from mathutils import Matrix, Vector, Quaternion, Euler\n\n locals = console_namespace()\n if not locals:\n return {}\n\n variables = {}\n for key, var in locals.items():\n if len(key) == 0 or key[0] == \"_\":\n continue\n\n type_var = type(var)\n\n # Rules out sets/dicts.\n # It's also possible the length check below is slow\n # for data with underlying linked-list structure.\n if not hasattr(type_var, \"__getitem__\"):\n continue\n\n # Don't do a truth test on the data because this causes an error with some\n # array types, see T66107.\n len_fn = getattr(type_var, \"__len__\", None)\n if len_fn is None:\n continue\n if len_fn(var) == 0:\n continue\n\n if isinstance(var, (Matrix, Vector, Quaternion, Euler)) or \\\n isinstance(var, (tuple, list)) and is_display_list(var):\n\n variables[key] = type_var\n\n return variables\n\n\ndef cleanup_math_data():\n\n locals = console_namespace()\n if not locals:\n return\n\n variables = get_math_data()\n\n for key in variables.keys():\n index = VarStates.get_index(key)\n if index == -1:\n continue\n\n state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)\n if state_prop.state[1]:\n continue\n\n del locals[key]\n bpy.context.window_manager.MathVisStatePropList.remove(index)\n\n\ndef console_math_data():\n from mathutils import Matrix, Vector, Quaternion, Euler\n\n data_matrix = {}\n data_quat = {}\n data_euler = {}\n data_vector = {}\n data_vector_array = {}\n\n for key, var in console_namespace().items():\n if key[0] == \"_\":\n continue\n\n state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)\n if state_prop:\n disp, lock = state_prop.state\n if not disp:\n continue\n\n if isinstance(var, Matrix):\n if len(var.col) != 4 or len(var.row) != 4:\n if len(var.col) == len(var.row):\n var = var.to_4x4()\n else: # todo, support 4x3 matrix\n continue\n data_matrix[key] = var\n elif isinstance(var, Vector):\n if len(var) < 3:\n var = var.to_3d()\n data_vector[key] = var\n elif isinstance(var, Quaternion):\n data_quat[key] = var\n elif isinstance(var, Euler):\n data_euler[key] = var\n elif type(var) in {list, tuple} and is_display_list(var):\n data_vector_array[key] = var\n\n return data_matrix, data_quat, data_euler, data_vector, data_vector_array\n","repo_name":"Bforartists/Bforartists","sub_path":"scripts/addons/space_view3d_math_vis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","stars":492,"dataset":"github-code","pt":"22"} +{"seq_id":"19989220933","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nclass YouTubeScraper:\n base_url = 'https://www.youtube.com'\n search_base_url = base_url + '/results?search_query={}'\n page_url = None\n\n def __init__(self, title_id: str, title: str, show_type: str = None):\n self.title_id = title_id\n self.title = title\n self.show_type = show_type\n self.search_url = self.build_search_url()\n self.data = {'name': self.title, 'title_id': self.title_id}\n\n def build_search_url(self):\n search_term = self.title.replace(' ', '+').replace('#', '%23') + '+trailer'\n return self.search_base_url.format(search_term)\n\n def query(self):\n text = requests.get(self.search_url).text\n html = BeautifulSoup(text, 'html.parser')\n result = str(html).split('\"videoId\":')\n\n if len(result) > 0:\n video_id = result[1].split(',')[0].replace('\"', '')\n self.page_url = self.base_url + '/watch?v=' + video_id\n self.data['video_id'] = video_id\n self.data['url'] = self.page_url\n","repo_name":"jcatankard/netflix_movie_picker","sub_path":"collect_data/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69846708858","text":"__author__ = \"Kristoffer Selim andAnders Logg\"\n__copyright__ = \"Copyright (C) 2010 Simula Research Laboratory and %s\" % __author__\n__license__ = \"GNU GPL Version 3 or any later version\"\n\n# Last changed: 2012-04-09\n\nfrom dolfin import *\nfrom numpy import array, append\nfrom cbc.common import CBCProblem\n\nfrom fsisolver import FSISolver\nfrom parameters import default_parameters, read_parameters\n\nclass FSI(CBCProblem):\n \"Base class for all FSI problems\"\n\n def __init__(self, mesh):\n \"Create FSI problem\"\n\n # Initialize base class\n CBCProblem.__init__(self)\n\n # Store original mesh\n self._original_mesh = mesh\n self.Omega = None\n\n def solve(self, parameters=default_parameters()):\n \"Solve and return computed solution (u_F, p_F, U_S, P_S, U_M, P_M)\"\n\n # Create submeshes and mappings (only first time)\n if self.Omega is None:\n\n # Refine original mesh\n mesh = self._original_mesh\n for i in range(parameters[\"num_initial_refinements\"]):\n mesh = refine(mesh)\n\n # Initialize meshes\n self.init_meshes(mesh, parameters)\n\n # Create solver\n solver = FSISolver(self)\n\n # Solve\n return solver.solve(parameters)\n\n def init_meshes(self, Omega, parameters):\n \"Create mappings between submeshes\"\n\n info(\"Extracting fluid and structure submeshes\")\n\n # Set global mesh\n self.Omega = Omega\n\n # Create cell markers (0 = fluid, 1 = structure)\n D = Omega.topology().dim()\n cell_domains = MeshFunction(\"uint\", self.Omega, D)\n cell_domains.set_all(0)\n structure = self.structure()\n structure.mark(cell_domains, 1)\n\n # Extract submeshes for fluid and structure\n Omega_F = SubMesh(self.Omega, cell_domains, 0)\n Omega_S = SubMesh(self.Omega, cell_domains, 1)\n\n info(\"Computing mappings between submeshes\")\n\n # Extract matching indices for fluid and structure\n fluid_to_structure_v = compute_vertex_map(Omega_F, Omega_S)\n fluid_to_structure_e = compute_edge_map(Omega_F, Omega_S)\n\n # Extract matching vertex indices for fluid and structure\n v_F = array([i for i in fluid_to_structure_v.iterkeys()])\n v_S = array([i for i in fluid_to_structure_v.itervalues()])\n\n # Extract matching edge indices for fluid and structure\n e_F = array([i for i in fluid_to_structure_e.iterkeys()])\n e_S = array([i for i in fluid_to_structure_e.itervalues()])\n\n # Extract matching dofs for fluid and structure\n structure_element_degree = parameters[\"structure_element_degree\"]\n Nv_F = Omega_F.num_vertices()\n Nv_S = Omega_S.num_vertices()\n Ne_F = Omega_F.num_edges()\n Ne_S = Omega_S.num_edges()\n if structure_element_degree == 1:\n fdofs = append(v_F, v_F + Nv_F)\n sdofs = append(v_S, v_S + Nv_S)\n elif structure_element_degree == 2:\n fdofs = append(append(v_F, Nv_F + e_F), append((Nv_F + Ne_F) + v_F, (Nv_F + Ne_F + Nv_F) + e_F))\n sdofs = append(append(v_S, Nv_S + e_S), append((Nv_S + Ne_S) + v_S, (Nv_S + Ne_S + Nv_S) + e_S))\n else:\n error(\"Only know how to map dofs for P1 and P2 elements.\")\n\n # Extract map from vertices in Omega to vertices in Omega_F\n vertex_map_to_fluid = {}\n vertex_map_from_fluid = Omega_F.data().mesh_function(\"parent_vertex_indices\")\n for i in range(vertex_map_from_fluid.size()):\n vertex_map_to_fluid[vertex_map_from_fluid[i]] = i\n\n # Extract map from vertices in Omega to vertices in Omega_S\n vertex_map_to_structure = {}\n vertex_map_from_structure = Omega_S.data().mesh_function(\"parent_vertex_indices\")\n for i in range(vertex_map_from_structure.size()):\n vertex_map_to_structure[vertex_map_from_structure[i]] = i\n\n info(\"Computing FSI boundary and orientation markers\")\n\n # Initialize FSI boundary and orientation markers on Omega\n Omega.init(D - 1, D)\n fsi_boundary = FacetFunction(\"uint\", Omega, D - 1)\n fsi_boundary.set_all(0)\n fsi_orientation = Omega.data().create_mesh_function(\"facet_orientation\", D - 1)\n fsi_orientation.set_all(0)\n\n # Initialize FSI boundary on Omega_F\n Omega_F.init(D - 1, D)\n Omega_F.init(0, 1)\n fsi_boundary_F = MeshFunction(\"uint\", Omega_F, D - 1)\n fsi_boundary_F.set_all(0)\n\n # Initialize FSI boundary on Omega_S\n Omega_S.init(D - 1, D)\n Omega_S.init(0, 1)\n fsi_boundary_S = MeshFunction(\"uint\", Omega_S, D - 1)\n fsi_boundary_S.set_all(0)\n\n # Compute FSI boundary and orientation markers on Omega\n for facet in facets(Omega):\n\n # Handle facets on the boundary\n cells = facet.entities(D)\n if len(cells) == 1:\n\n # Create cell and midpoint\n c = cells[0]\n cell = Cell(Omega, c)\n p = cell.midpoint()\n\n # Check whether point is inside structure domain\n facet_index = facet.index()\n if structure.inside(p0, True):\n\n # On structure boundary\n fsi_boundary[facet_index] = 1\n fsi_orientation[facet_index] = c\n\n else:\n\n # On fluid boundary\n fsi_boundary[facet_index] = 0\n fsi_orientation[facet_index] = c\n\n continue\n\n # Sanity check\n if len(cells) != 2:\n error(\"Strange, expecting one or two facets!\")\n\n # Create the two cells\n c0, c1 = cells\n cell0 = Cell(Omega, c0)\n cell1 = Cell(Omega, c1)\n\n # Get the two midpoints\n p0 = cell0.midpoint()\n p1 = cell1.midpoint()\n\n # Check if the points are inside\n p0_inside = structure.inside(p0, False)\n p1_inside = structure.inside(p1, False)\n\n # Just set c0, will be set only for FSI facets below\n fsi_orientation[facet.index()] = c0\n\n # Markers:\n #\n # 0 = fluid\n # 1 = structure\n # 2 = FSI boundary\n\n # Look for points where exactly one is inside the structure\n facet_index = facet.index()\n if p0_inside and not p1_inside:\n\n # On FSI boundary\n fsi_boundary[facet_index] = 2\n fsi_orientation[facet_index] = c1\n fsi_boundary_F[_map_to_facet(facet_index, Omega, Omega_F, vertex_map_to_fluid)] = 2\n fsi_boundary_S[_map_to_facet(facet_index, Omega, Omega_S, vertex_map_to_structure)] = 2\n elif p1_inside and not p0_inside:\n\n # On FSI boundary\n fsi_boundary[facet_index] = 2\n fsi_orientation[facet_index] = c0\n fsi_boundary_F[_map_to_facet(facet_index, Omega, Omega_F, vertex_map_to_fluid)] = 2\n fsi_boundary_S[_map_to_facet(facet_index, Omega, Omega_S, vertex_map_to_structure)] = 2\n elif p0_inside and p1_inside:\n\n # Inside structure domain\n fsi_boundary[facet_index] = 1\n else:\n\n # Inside fluid domain\n fsi_boundary[facet_index] = 0\n\n # Initialize global edge indices (used in read_primal_data)\n init_parent_edge_indices(Omega_F, Omega)\n init_parent_edge_indices(Omega_S, Omega)\n\n # Store data\n self.Omega_F = Omega_F\n self.Omega_S = Omega_S\n self.cell_domains = cell_domains\n self.fdofs = fdofs\n self.sdofs = sdofs\n self.fsi_boundary = fsi_boundary\n self.fsi_orientation = fsi_orientation\n self.fsi_boundary_F = fsi_boundary_F\n self.fsi_boundary_S = fsi_boundary_S\n\n def mesh(self):\n \"Return mesh for full domain\"\n return self.Omega\n\n def fluid_mesh(self):\n \"Return mesh for fluid domain\"\n return self.Omega_F\n\n def structure_mesh(self):\n \"Return mesh for structure domain\"\n return self.Omega_S\n\n def add_f2s(self, xs, xf):\n \"Compute xs += xf for corresponding indices\"\n xs_array = xs.array()\n xf_array = xf.array()\n xs_array[self.sdofs] += xf_array[self.fdofs]\n xs[:] = xs_array\n\n def add_s2f(self, xf, xs):\n \"Compute xf += xs for corresponding indices\"\n xf_array = xf.array()\n xs_array = xs.array()\n xf_array[self.fdofs] += xs_array[self.sdofs]\n xf[:] = xf_array\n\n #--- Functions\n\n def update(self, t0, t1, dt):\n pass\n\n def fluid_body_force(self):\n return []\n\n def structure_body_force(self):\n return []\n\n def structure_boundary_traction_extra(self):\n return Constant((0, 0))\n\n def mesh_right_hand_side(self):\n return Constant((0, 0))\n\ndef _map_to_facet(facet_index, Omega, Omega_X, vertex_map):\n \"Map facet index in Omega to facet index in Omega_X\"\n\n # Get the two vertices in Omega\n facet = Facet(Omega, facet_index)\n v0 = facet.entities(0)[0]\n v1 = facet.entities(0)[1]\n\n # Get the two vertices in Omega_X\n v0 = Vertex(Omega_X, vertex_map[v0])\n v1 = Vertex(Omega_X, vertex_map[v1])\n\n # Get the facets of the two vertices in Omega_X\n f0 = v0.entities(1)\n f1 = v1.entities(1)\n\n # Get the common facet index\n common_facets = set(f0).intersection(set(f1))\n\n # Check that we get exactly one facet\n if not len(common_facets) == 1:\n error(\"Unable to find facet in fluid mesh.\")\n\n return int(list(common_facets)[0])\n","repo_name":"astrojuanlu/CBC.Solve","sub_path":"sandbox/fsi-old/fsiproblem.py","file_name":"fsiproblem.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"34522193326","text":"import random\n\nif __name__ == \"__main__\":\n\tn = int(input())\n\tm = int(input())\n\tC = []\n\tfor i in range(m):\n\t\ttemp = input().split(\" \")\n\t\ttemp[0] = int(temp[0])\n\t\ttemp[1] = int(temp[1])\n\t\ttemp[2] = int(temp[2])\n\t\tC.append(temp)\n\twhile True:\n\t\tassignment = [False]\n\t\tfor i in range(n):\n\t\t\tassignment.append(bool(random.getrandbits(1)))\n\t\tcnt = 0\n\t\tfor i in range(m):\n\t\t\ta = False\n\t\t\tb = False\n\t\t\tc = False\n\t\t\tcnt = cnt + 1\n\t\t\tif C[i][0] > 0:\n\t\t\t\ta = assignment[C[i][0]]\n\t\t\telse:\n\t\t\t\ta = not assignment[-C[i][0]]\n\t\t\tif C[i][1] > 0:\n\t\t\t\tb = assignment[C[i][1]]\n\t\t\telse:\n\t\t\t\tb = not assignment[-C[i][1]]\n\t\t\tif C[i][2] > 0:\n\t\t\t\tc = assignment[C[i][2]]\n\t\t\telse:\n\t\t\t\tc = not assignment[-C[i][2]]\n\t\t\tif not (a or b or c):\n\t\t\t\tcnt = cnt - 1\n\t\tif cnt > (m * 7 / 8):\n\t\t\trst = \"\"\n\t\t\tfor i in range (1, n):\n\t\t\t\tif assignment[i]:\n\t\t\t\t\trst = rst + \"1 \"\n\t\t\t\telse:\n\t\t\t\t\trst = rst + \"-1 \"\n\t\t\tif assignment[n]:\n\t\t\t\trst = rst + \"1\"\n\t\t\telse:\n\t\t\t\trst = rst + \"-1\"\n\t\t\tprint(rst)\n\t\t\tbreak\n\t\t\t","repo_name":"ShichenQiao/CS577_SP22_Algorithms","sub_path":"HW13_Randomization/random3SAT.py","file_name":"random3SAT.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19395086105","text":"import tkinter\nfrom tkinter import *\nfrom tkinter import ttk \nimport pymysql\nbaglanti = pymysql.connect(\n\thost = 'host_adress',\n unix_socket = 'mysqld.sock_patch', \n user = 'your_user_name',\n passwd = 'your_user_pwd', \n db = 'your_db_name'\n ,autocommit=True,\n charset='utf8')\ncursor=baglanti.cursor()\ndef Ekle():\n\tsorgu=\"INSERT INTO calısan (name, address) VALUES (%s,%s)\"\n\tdeger=(adi.get(),combo.get())\n\tcursor.execute(sorgu,deger)\n\tListe()\ndef Liste():\n\tliste.delete(*liste.get_children())\n\tsorgu=\"SELECT *FROM calısan ORDER BY name DESC\"\n\tcursor.execute(sorgu)\n\tsonuc=cursor.fetchall()\n\tfor item in sonuc:\n\t\tliste.insert('',0,text=item[0],values=(item[1]))\n\nnesne=Tk()\nLabel(nesne,text=\"Adı: \").grid(row=1,column=1)\nadi=Entry(nesne)\nadi.grid(row=1,column=2)\n\nLabel(nesne,text=\"Şehir:\").grid(row=2,column=1)\ncombo=ttk.Combobox(nesne,width=\"15\")\ncombo['values']=(\"Manisa\",\"İzmir\",\"Hatay\")\ncombo.current(0)\ncombo.grid(row=2,column=2)\n\nbtn=Button(nesne,text=\"Ekle\",command=Ekle)\nbtn.grid(row=3,column=2)\n\n\nnesne1=Tk()\nliste=ttk.Treeview(nesne1,height=10,column=0)\nliste['columns']=(\"sut1\")\nliste.grid(row=5,column=1,columnspan=3)\nliste.heading(\"#0\",text=\"Adı\")\nliste.heading(\"sut1\",text=\"Şehir\")\nliste.bind('<ButtonRelease-1>',\"\")\nListe()\nnesne.mainloop()","repo_name":"gokhangumul/python_examples","sub_path":"tkinter_example2.py","file_name":"tkinter_example2.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13026618533","text":"from unittest.mock import MagicMock\n\nimport pytest\nfrom eth_utils import to_checksum_address\nfrom requests.exceptions import ReadTimeout\nfrom synapse.config import ConfigError\n\nfrom raiden_synapse_modules.presence_router.pfs import PFSPresenceRouter\n\n\ndef test_parse_config() -> None:\n with pytest.raises(ConfigError):\n PFSPresenceRouter.parse_config({\"ethereum_rpc\": \"bar\"})\n with pytest.raises(ConfigError):\n PFSPresenceRouter.parse_config({\"service_registry_address\": \"bar\"})\n with pytest.raises(ConfigError):\n PFSPresenceRouter.parse_config({\"service_registry_address\": \"bar\", \"ethereum_rpc\": \"foo\"})\n config = PFSPresenceRouter.parse_config(\n {\n \"service_registry_address\": \"0x1234567890123456789012345678901234567890\",\n \"ethereum_rpc\": \"http://foo.bar\",\n }\n )\n assert config.service_registry_address is not None\n assert (\n to_checksum_address(config.service_registry_address)\n == \"0x1234567890123456789012345678901234567890\"\n )\n assert config.ethereum_rpc == \"http://foo.bar\"\n with pytest.raises(ConfigError):\n PFSPresenceRouter.parse_config(\n {\n \"service_registry_address\": \"0x1234567890123456789012345678901234567890\",\n \"ethereum_rpc\": \"http://foo.bar\",\n \"blockchain_sync_seconds\": \"foo\",\n }\n )\n assert config.blockchain_sync == 15\n\n\ndef test_handle_eth_connection_timeout(presence_router: PFSPresenceRouter) -> None:\n \"\"\"Regression test for https://github.com/raiden-network/raiden-synapse-modules/issues/9\"\"\"\n presence_router.block_filter = MagicMock()\n presence_router.block_filter.get_new_entries = MagicMock(side_effect=ReadTimeout)\n try:\n presence_router._check_filters_once()\n except ReadTimeout:\n pytest.fail(\"Unexpected ReadTimeout\")\n","repo_name":"raiden-network/raiden-synapse-modules","sub_path":"tests/test_pfs_prensence_router.py","file_name":"test_pfs_prensence_router.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13041938163","text":"import pandas as pd \r\nimport numpy as np \r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt \r\ncorona_dataset_csv = pd.read_csv('Datasets/Covid19_Confirmed_dataset.csv')\r\ncorona_dataset_csv.head(10)\r\ncorona_dataset_csv.drop(['Lat','Long'],axis=1,inplace=True)\r\ncorona_dataset_aggregated = corona_dataset_csv.groupby('Country/Region').sum()\r\ncountries = list(corona_dataset_aggregated.index)\r\nmax_infection_rates = []\r\nfor country in countries:\r\n max_infection_rates.append(corona_dataset_aggregated.loc[country].diff().max())\r\ncorona_dataset_aggregated['max_infection_rate'] = max_infection_rates\r\ncorona_data = pd.DataFrame(corona_dataset_aggregated['max_infection_rate'])\r\nhappiness_report_csv = pd.read_csv('Datasets/worldwide_happiness_report.csv') \r\nuseless_cols = ['Overall rank','Score','Generosity','Perceptions of corruption']\r\nhappiness_report_csv.drop(useless_cols,axis=1,inplace=True)\r\nhappiness_report_csv.set_index('Country or region',inplace=True)\r\ndata = corona_data.join(happiness_report_csv,how='inner')\r\nx = data['GDP per capita']\r\ny = data['max_infection_rate']\r\nsns.regplot(x=x,y=np.log(y))\r\nx = data['Social support']\r\ny = data['max_infection_rate']\r\nsns.regplot(x=x,y=np.log(y))\r\nx = data['Healthy life expectancy']\r\ny= data['max_infection_rate']\r\nsns.regplot(x=x,y=np.log(y))\r\nx = data['Freedom to make life choices']\r\ny=data['max_infection_rate']\r\nsns.regplot(x=x,y=np.log(y))\r\n\r\n","repo_name":"Jatinchhabra21/Covid19_Data_Analysis_using_Python","sub_path":"covid19.py","file_name":"covid19.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69953033656","text":"import numpy as np\r\nimport sympy\r\nimport math\r\nimport numpy as np\r\n\r\ndef df(x):\r\n return np.array([2 * x[0], 2 * x[1]])\r\n\r\ndef max1(g, x):\r\n return (g(x) + abs(g(x))) / 2\r\ndef H(g,x):\r\n return max1(g,x)\r\n\r\ndef MPF(fun):\r\n eps = 0.0001\r\n a = -5\r\n b = 5\r\n \"\"\"Реализация метода пассивного поиска\"\"\"\r\n alpha = sympy.symbols('alpha')\r\n func = sympy.lambdify(alpha, fun, 'numpy')\r\n k = int((b - a) / eps)\r\n # Ставим в соответствие каждому х значение функции\r\n dic = {}\r\n for i in range(k + 1):\r\n x_i = a + eps * i\r\n dic[x_i] = func(x_i)\r\n # Находим минимальное значение среди значений\r\n keys = dic.values()\r\n min_fx = min(keys)\r\n # Находим чему равен соответствующий х\r\n dic = {value: key for key, value in dic.items()} # Меняем ключи и значения местами\r\n print('Пассивный поиск возвращает', round(dic[min_fx], 6))\r\n return round(dic[min_fx], 6)\r\n\r\n\r\ndef golden_serch(fun, a=-10, b=10, delta=0.0001):\r\n alpha = sympy.symbols('alpha')\r\n f = sympy.lambdify(alpha, fun, 'numpy')\r\n a0 = a\r\n b0 = b\r\n k1 = (3 - math.sqrt(5)) / 2\r\n k2 = (math.sqrt(5) - 1) / 2\r\n # единственная итерация, на которой высчитываются значения функции в двух точках\r\n c = k1 * (b - a) + a\r\n d = k2 * (b - a) + a\r\n f_c = f(c)\r\n f_d = f(d)\r\n while delta < (b - a) / 2:\r\n if f(c) <= f(d):\r\n b = d\r\n d = c\r\n c = k1 * (b - a) + a\r\n xmin = c\r\n fmin = f_c\r\n f_d = f_c\r\n # единственное вычисление функции на данной итерации\r\n f_c = f(c)\r\n else:\r\n a = c\r\n c = d\r\n d = k2 * (b - a) + a\r\n xmin = d\r\n fmin = f_d\r\n f_c = f_d\r\n # единственное вычисление функции на данной итерации\r\n f_d = f(d)\r\n xmin = (a + b) / 2\r\n print('Золотое сечение возвращает', round(xmin,6))\r\n return xmin\r\n\r\n\r\ndef external_penalty_method(x0=np.array([1, 1]), r0=1, e=0.01):\r\n def coord_descent(x0, r0):\r\n alpha = sympy.symbols('alpha')\r\n x10 = x0\r\n while True:\r\n x11 = x10 - alpha * d_phi(x10, r0)\r\n a = MPF(phi(x11, r0))\r\n x11 = x10 - a * d_phi(x10, r0)\r\n if np.linalg.norm(d_phi(x11, r0)) <= 0.001:\r\n return (x11)\r\n else:\r\n x10 = x11\r\n\r\n x1 = sympy.symbols('x1')\r\n x2 = sympy.symbols('x2')\r\n alpha = sympy.symbols('alpha')\r\n f = lambda x: x[0] ** 2 + x[1] ** 2\r\n g = lambda x: 2 * x[0] + x[1] + 4\r\n phi = lambda x0, r0: f(x0) + r0 * H(g,x0) ** 2\r\n d_phi = lambda x0, r0: df(x0) + 2 * r0 * H(g,x0) * np.array([2, 1])\r\n i=1\r\n\r\n while True:\r\n x_new = coord_descent(x0, r0)\r\n # if i == 1:\r\n # x_new = np.array([-1.3333, -0.6666])\r\n # elif i==2:\r\n # x_new = np.array([-1.568629, -0.784310])\r\n # elif i==3:\r\n # print('Error')\r\n # break\r\n\r\n if H(g,x_new)**2>0.01:\r\n x0 = x_new\r\n r0 = 10 * r0\r\n\r\n else:\r\n print('Точка минимума', x_new)\r\n break\r\n\r\n\r\n\r\nexternal_penalty_method()\r\n","repo_name":"EgoInc/Optimization-methods","sub_path":"Условная минимизация/Метод внешних штрафов_ пример из учебника.py","file_name":"Метод внешних штрафов_ пример из учебника.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5148687395","text":"\nkeywords = [\n \"new\",\n \"if\",\n \"else\",\n \"elseif\",\n \"return\",\n \"familyof\",\n \"inherit\",\n \"panic\",\n \"listen\",\n \"routine\",\n \"when\",\n \"break\",\n \"do\",\n \"within\" ]\n\nclass TrieNode :\n def __init__(self,data:str) -> None:\n self.next = {}\n self.end = False\n self.data = data\n \n \nclass Trie:\n\n def __init__(self):\n self.head = TrieNode('')\n self.keywords = []\n self.addAll()\n\n def add(self,item:str):\n if item in self.keywords:\n return \n pointer = self.head\n size = len(item) - 1\n item = item.lower()\n for index,char in enumerate(item):\n if char not in pointer.next:\n node = TrieNode(char)\n pointer.next[char] = node\n\n pointer = pointer.next[char]\n \n pointer.end = True\n self.keywords.append(item)\n \n def autoComplete(self,word:str):\n node = self.head\n res = []\n word = word.lower()\n for char in word:\n if char in node.next:\n node = node.next[char]\n else:\n return []\n self.DFS(res,node,word[:-1])\n return res\n\n def DFS(self,res:list,node,prefix:str):\n if node.end:\n res.append(prefix+node.data)\n for child in node.next.values():\n self.DFS(res,child,prefix+node.data)\n\n def addAll(self):\n for keyword in keywords:\n #print(keyword)\n self.add(keyword)\n\nif __name__ == \"__main__\":\n x = \"I\"\n \n trie = Trie()\n print(trie.head.next)\n print(trie.autoComplete('List'))\n print(trie.autoComplete('R'))\n print(trie.autoComplete('els'))\n print(trie.autoComplete('whe'))\n\n \n ","repo_name":"omarmasoud/APE-Programming-Language-Compiler","sub_path":"Trie/Trie.py","file_name":"Trie.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73651129015","text":"import logging\nimport math\nfrom matplotlib import font_manager\nfrom tkinter import font\nfrom PIL import Image, ImageColor, ImageDraw, ImageFont\n\n# Classes to represent genes, exons, links, ...\n\nclass Gene:\n \"\"\"\n Class to represent a gene for drawing\n\n ...\n \n Attributes\n ----------\n id : str\n gene identifier\n species : str\n identifier of the species the gene belongs to\n len : int\n gene length in bp\n strand : str\n gene orientation, either '+' or '-'\n elements : dict\n gene elements like exons, has the structure {\"type1\": [(start0, end0), (start1, end1), ...], \n \"type2\": [...], \n ...}\n\n Methods\n -------\n addElement(elemtype: str, start: int, end: int):\n adds a gene element (e.g. an exon)\n \"\"\"\n def __init__(self, geneID: str, speciesID: str, length: int, strand: str):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n geneID : str\n gene identifier\n geneID : str\n species identifier\n length : int\n gene length in bp\n strand : str\n gene orientation, either '+' or '-'\n \"\"\"\n assert strand in ['+', '-'], \"[ERROR] >>> Strand must be '+' or '-', not \"+str(strand)\n self.id = geneID\n self.species = speciesID\n self.len = length\n self.strand = strand\n self.elements = dict()\n\n def __str__(self):\n return str({'id': self.id, 'species': self.species, 'len': self.len, 'strand': self.strand, \n 'elements': self.elements})\n\n def addElement(self, elemtype: str, start: int, end: int):\n \"\"\"\n Adds a gene element (e.g. an exon)\n\n Parameters\n ----------\n elemtype : str\n element type\n start : int\n relative element start position inside the gene, top strand, zero-based.\n I.e. start = 0 refers to the leftmost gene base when looking at the top strand of the chromosome,\n start = gene.len-1 refers to the rightmost gene base\n end : int\n relative element end position inside the gene, top strand, zero-based.\n \"\"\"\n if elemtype not in self.elements:\n self.elements[elemtype] = []\n\n self.elements[elemtype].append((start, end))\n\n\n\nclass Link:\n \"\"\"\n Class to represent a link for drawing. Can be in compressed mode to represent many links instead of individual ones.\n \n Attributes\n ----------\n genes : list\n list of gene identifiers for genes connected by the link\n pos : list\n list of positions for each respective gene where the link connects them (uncompressed mode, default). If\n compressed mode is activated, this is a list of lists of positions, giving one or more positions for each\n respective gene\n strand : list\n optional list of strands on which the link connects the genes. Note that currently storing of differing strands\n per gene in compressed mode is not implemented.\n connect : bool\n True by default. If False, do not connect occurrences when drawing and only draw markers at occurrence sites\n compressed : bool\n False by default. If True, the object is storing many links among the same genes\n color : None or str or tuple of RGB values\n optional color of drawn link, defaults to None and color is determined automatically\n \"\"\"\n def __init__(self, genes: list[Gene], pos: list[int], strands: list[str] = None, connect: bool = True, \n compressed: bool = False, color = None):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n genes : list\n list of gene identifiers for genes connected by the link\n pos : list\n list of positions for each respective gene where the link connects them. If compressed is True, this must be\n a list of lists of positions, giving one or more positions for each respective gene\n strand : list\n optional list of strands on which the link connects the genes\n connect : bool\n True by default. Set to False to not connect occurrences when drawing and only draw markers at occurrence \n sites\n compressed : bool\n False by default. Set to True to enable storing many links among the same genes\n color : None or str or tuple of RGB values\n optional color of drawn link, defaults to None and color is determined automatically\n \"\"\"\n assert len(genes) == len(pos), \"[ERROR] >>> genes and pos must be of equal length\"\n assert len(genes) >= 2, \"[ERROR] >>> A link must connect at least two genes\"\n if compressed:\n assert all([len(pl) > 0 for pl in pos]), \"[ERROR] >>> Need lists of positions for each gene in compressed\"+\\\n \" mode, and each list must contain at least one position. Got: \"+\\\n str(pos)\n assert len(set(genes)) == len(genes), \"[ERROR] >>> All genes in a link must be unique\"\n self.genes = genes\n self.pos = pos\n if strands is not None:\n assert len(strands) == len(genes), \"[ERROR] >>> genes and strands must be of equal length\"\n assert all([s in ['+', '-'] for s in strands]), \"[ERROR] >>> Strands must be '+' or '-'\"\n \n self.strands = strands\n self.connect = connect\n self.compressed = compressed\n self.color = color\n\n def __str__(self):\n return str({'genes': self.genes, 'pos': self.pos, 'strands': self.strands})\n\n\n\nclass GeneCoordinates:\n \"\"\"\n Class to store drawing coordinates for a gene\n\n ...\n Attributes\n ----------\n color : str or tuple of RGB values\n color value (string or rgb) of drawn gene\n gene : Gene\n reference to the corresponding gene\n id : str\n id of the corresponding gene\n len : int\n gene length in pixels\n res : float\n drawing resolution in pixel / basepair\n x0 : int\n pixel coordinate of the first base\n xn : int\n pixel coordinate of the last (rightmost) base\n y : int\n vertical pixel coordinate\n ylab : int\n vertical pixel coordinate of the label\n \"\"\"\n def __init__(self, gene: Gene, x0: int, y: int, ylab: int, res: float, col = None):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n gene : Gene\n corresponding gene object\n x0 : int\n pixel coordinate of the first (leftmost) base\n y : int\n vertical pixel coordinate\n ylab : int\n vertical pixel coordinate of the label\n res : float\n drawing resolution in pixel / basepair\n col : str or tuple of RGB values\n optional color of drawn gene, defaults to blue for fwd strand genes and red otherwise\n \"\"\"\n self.gene = gene\n self.id = gene.id\n assert x0 >= 0, '[ERROR] >>> pixel coordinate cannot be negative'\n assert y >= 0, '[ERROR] >>> pixel coordinate cannot be negative'\n assert ylab >= 0, '[ERROR] >>> pixel coordinate cannot be negative'\n assert res > 0, '[ERROR] >>> resolution must be positive and greater than zero'\n self.x0 = x0\n self.y = y\n self.ylab = ylab\n self.updateResolution(res) # sets self.res, self.len and self.xn\n if col is None:\n self.color = 'darkblue' if self.gene.strand == \"+\" else 'darkorange'\n else:\n self.color = col\n\n def __str__(self):\n return str({'id': self.id, 'color': self.color, 'len': self.len, \n 'res': self.res, 'x0': self.x0, 'y': self.y, 'ylab': self.ylab})\n\n def updateResolution(self, res : float):\n \"\"\"\n Set new drawing resolution, update pixel length accordingly\n\n Parameters\n ----------\n res : float\n new drawing resolution in pixel / basepair\n \"\"\"\n self.res = res\n self.len = math.ceil(self.gene.len * self.res)\n self.xn = self.x0 + self.len - 1\n\n\n\n# helper class containing array of handpicked color names for automatic coloring\nclass Palette:\n \"\"\"\n Helper class containing an array of handpicked color names for automatic coloring\n\n ...\n Attributes\n ----------\n colors : list\n list of color names\n i : int\n current index\n\n Methods\n -------\n color():\n returns the current color\n inc():\n sets index to next color\n \"\"\"\n def __init__(self, idx: int = 0):\n \"\"\"\n Constructor\n\n Parameters\n ----------\n idx : int\n initial color index, defaults to 0, set to -1 if given value is too big\n \"\"\"\n self.colors = ['darkblue', 'darkorange', 'blueviolet', 'burlywood', 'darkgreen', 'darkmagenta', 'cadetblue',\n 'chocolate', 'cyan', 'darkgoldenrod', 'darkcyan', 'darkkhaki', 'darkred', 'darkolivegreen', \n 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategrey', 'darkturquoise', 'deeppink',\n 'dodgerblue', 'gold', 'greenyellow', 'indigo', 'khaki', 'lightblue', 'maroon', 'plum', 'teal', \n 'bisque', 'aquamarine']\n self.i = idx if idx < len(self.colors) else -1 # default to last color if idx too large\n\n def color(self):\n \"\"\"\n Returns the current color name\n \"\"\"\n return self.colors[self.i]\n\n def inc(self):\n \"\"\"\n Sets the index to next color, starting over if color list has endet\n \"\"\"\n self.i += 1\n if self.i >= len(self.colors):\n self.i = 0\n\n\n\n# drawing functions\n\ndef draw(genes: list[Gene], links: list[Link], font = None,\n width: int = 1920, height: int = 1080, dpi: int = 100,\n outerMargin: int = None, genewidth: int = 5, linkwidth: int = 2, fontsize: int = 12,\n genecols: list = None, elementcols: dict = None, linkcol = None, show: bool = True):\n \"\"\"\n Draws an image with genes as horizontal bars, possibly with gene elements like exons inside, \n and links connecting them\n\n Returns a PIL.Image object\n\n Parameters:\n genes: list of Gene objects\n links: list of Link objects\n font: path to a truetype font (if omitted, default font will be used with fixed fontsize)\n width: image width in pixels\n height: image heigth in pixels\n dpi: image resolution in dpi\n outerMargin: distance from image edge to drawn content in pixels, automatically set if None\n genewidth: line width of drawn genes in pixels\n linkwidth: line witdh of drawn links in pixels\n fontsize: fontsize of text\n genecols: optional list of colors for each single gene, same length as genes\n elementcols: optional dict with element type names as keys and color values as values\n linkcol: optional color value (name or RGB tuple) or 2-tuple of color values to use for all links. If single\n value, all links are colored this way, otherwise links between opposing strands are colored in the \n second color value. If None, links are colored automatically. Is overwritten by Link.col values if set.\n show: set to False to suppress image drawing\n \"\"\"\n\n # some sanity checks\n assert len(genes) >= 2, \"[ERROR] >>> Single genes cannot be linked\"\n geneIDs = [gene.id for gene in genes]\n assert sorted(list(set(geneIDs))) == sorted(geneIDs), \"[ERROR] >>> genes contain duplicate IDs\"\n \n if outerMargin is not None:\n assert outerMargin >= 0, \"[ERROR] >>> margin must be positive\"\n assert outerMargin < min(width/2, height/2), \"[ERROR] >>> margin too big\"\n else:\n outerMargin = int(height*0.012)\n\n assert fontsize > 0, \"[ERROR] >>> Fontsize must be bigger than 0\"\n if font is not None:\n font = ImageFont.truetype(font, fontsize)\n\n if genecols is not None:\n assert len(genecols) == len(genes), \"[ERROR] >>> genecols must have same length as genes\"\n \n if linkcol is not None and type(linkcol).__name__ == 'tuple':\n assert len(linkcol) >= 2, \"[ERROR] >>> linkcol must be a single color name or value or a 2-tuple of colors\"\n\n # determine number of gene rows based on species\n ylabOffset = int(1.5*genewidth)\n speciesToRow = {} # {\"species\": row_num, ...}\n geneGrid = [] # [[gene_coord, gene_coord, ...], [...], ...]\n geneToCoord = {} # {\"geneID\": gene_coord, ...}\n for i in range(len(genes)):\n gene = genes[i]\n if genecols is not None:\n gc = GeneCoordinates(gene, 0, 0, ylabOffset, 1, col = genecols[i]) # set coords and res later\n else:\n gc = GeneCoordinates(gene, 0, 0, ylabOffset, 1) # set coords and res later\n\n if gene.species not in speciesToRow:\n speciesToRow[gene.species] = len(geneGrid)\n geneGrid.append([])\n\n geneGrid[speciesToRow[gene.species]].append(gc)\n geneToCoord[gene.id] = gc\n\n # special case: single species, draw all genes stacked\n if len(geneGrid) == 1:\n geneGrid = [[g] for g in geneGrid[0]]\n \n # assert that all genes have a grid position\n checkGeneGridSum = sum([len(row) for row in geneGrid])\n assert checkGeneGridSum == len(genes), \"[ERROR] >>> Not all \"+str(len(genes))+\" genes in grid (\"\\\n +str(checkGeneGridSum)+\")\"\n\n nrows = len(geneGrid)\n\n # assign y-coordinate (top) of each gene row\n rowheight = genewidth + int(genewidth/2) + fontsize + genewidth # last genewith as minimal space between rows\n rowpix = height - (2 * outerMargin)\n if nrows * rowheight > rowpix:\n height = (2 * outerMargin) + (nrows * rowheight)\n logging.warning(\"[geneLinkDraw.draw] >>> Too many gene rows for image height, increasing image height to \" + \\\n f\"{height} pixels. Adjust outerMargin, genewidth and fontsize if you want to keep the lower \" +\\\n \"image height\")\n #print(\"[WARNING] >>> Too many gene rows for image height, increasing image height to\",\n # height, \"pixels. Adjust outerMargin, genewidth and fontsize if you want to keep the lower image height\")\n\n #vspace = int((height - (nrows*rowheight) - (2 * outerMargin)) / (nrows-1)) if nrows > 1 else 0\n vspace = int((height - (nrows*rowheight) - (2 * outerMargin)) / nrows) if nrows > 1 else 0 # keep vspace below last\n # row for labels\n #rowToY = {}\n y = outerMargin\n for row in geneGrid:\n #rowToY[r] = y\n for gc in row:\n gc.y = y\n gc.ylab = y + ylabOffset\n\n y += (rowheight + vspace)\n \n # determine lowest needed pixel/bp resolution\n genesep = int(width * 0.006) # number of pixels between two genes in a row\n def determineRowRes(row):\n rowLen = sum([gc.gene.len for gc in row])\n ngenes = len(row)\n genepix = width - ((ngenes-1) * genesep) - (2 * outerMargin) # number of pixels available for gene drawing\n genepix -= ngenes # genes are drawn math.ceil(len * res), so reserve an additional hypothetical pixel per gene\n pxpbp = genepix / rowLen # pixel per basepair (horizontal)\n return pxpbp\n \n pxpbp = None\n for row in geneGrid:\n rowRes = determineRowRes(row)\n if pxpbp is None or rowRes < pxpbp:\n pxpbp = rowRes\n\n # set for each gene the appropriate x0 and resolution\n for row in geneGrid:\n x0 = outerMargin\n for gc in row:\n gc.x0 = x0\n gc.updateResolution(pxpbp) # sets correct res, pixellen, xn\n x0 += (genesep + gc.len)\n \n # try to stack gene labels to avoid overlap as much as possible\n for ri in range(len(geneGrid)):\n row = geneGrid[ri]\n maxY = (geneGrid[ri+1][0].y - fontsize) if (ri+1) < len(geneGrid) else (height-outerMargin-fontsize)\n spaceAvail = (row[0].ylab < maxY)\n ystack = [row[0].ylab] # track stack y coordinates\n xn = [-1] # track when each lane is available again\n for gc in row:\n laneAssigned = False\n textw, texth = font.getsize(gc.gene.id)\n for si in range(len(ystack)):\n if xn[si] < gc.x0:\n gc.ylab = ystack[si] # take first free lane\n xn[si] = gc.x0 + textw\n laneAssigned = True\n break\n \n if not laneAssigned and spaceAvail:\n newY = ystack[-1]+(texth)\n gc.ylab = newY\n spaceAvail = (newY < maxY)\n ystack.append(newY)\n xn.append(gc.x0 + textw)\n \n # start drawing\n img = Image.new(mode = \"RGB\", size = (width, height), color = \"white\")\n drw = ImageDraw.Draw(img) # drawing context \n palette = Palette(2) # default gene coloring are darkblue and darkorange, start at blueviolet for additional colors\n if elementcols is None:\n typeToPalette = {} # fill with default colors later\n else:\n typeToPalette = elementcols # use user-provided coloring\n\n # draw genes and elements\n for gid in geneToCoord:\n gc = geneToCoord[gid]\n drw.line(xy = ((gc.x0, gc.y), (gc.xn, gc.y)),\n fill = gc.color,\n width = genewidth)\n\n # write gene name below\n drw.text(xy = (gc.x0, gc.ylab), #(gc.x0, gc.y+int(1.5*genewidth)), \n text = gc.gene.id,\n font = font, fill = \"black\")\n\n for elemtype in gc.gene.elements:\n if elemtype not in typeToPalette:\n typeToPalette[elemtype] = palette.color()\n palette.inc()\n\n for elem in gc.gene.elements[elemtype]:\n a = gc.x0 + math.floor((gc.res * elem[0]))\n b = gc.x0 + math.ceil((gc.res * elem[1]))\n drw.line(xy = ((a, gc.y), (b, gc.y)),\n fill = typeToPalette[elemtype],\n width = genewidth)\n\n # draw links\n if links is not None:\n if linkcol is None:\n sscol = palette.color()\n palette.inc()\n oscol = palette.color()\n else:\n if type(linkcol).__name__ == 'tuple' and len(linkcol) == 2:\n sscol = linkcol[0]\n oscol = linkcol[1]\n else: # assume 3-tuple of RGB values\n sscol = linkcol\n oscol = linkcol\n \n radius = math.ceil(linkwidth/2) #+ 1\n #diam = radius*2\n\n for link in links:\n for i in range(1, len(link.genes)):\n gc1 = geneToCoord[link.genes[i-1]]\n gc2 = geneToCoord[link.genes[i]]\n if link.strands is not None and link.strands[i-1] != link.strands[i]:\n lcol = oscol if link.color is None else link.color\n else:\n lcol = sscol if link.color is None else link.color\n\n ellcol = sscol if link.color is None else link.color\n\n if not link.compressed:\n # draw markers where the links hit\n x1 = gc1.x0 + (gc1.res * link.pos[i-1])\n x2 = gc2.x0 + (gc2.res * link.pos[i])\n drw.ellipse(xy = ((x1-radius, gc1.y-radius), (x1+radius, gc1.y+radius)),\n fill = ellcol,\n outline = ellcol,\n width = 1)\n drw.ellipse(xy = ((x2-radius, gc2.y-radius), (x2+radius, gc2.y+radius)),\n fill = ellcol,\n outline = ellcol,\n width = 1)\n # draw the link\n if link.connect:\n drw.line(xy = ((x1, gc1.y), (x2, gc2.y)),\n fill = lcol,\n width = linkwidth)\n else:\n # draw markers where the links hit\n x1ls = []\n x2ls = []\n for p1 in link.pos[i-1]:\n x1 = gc1.x0 + (gc1.res * p1)\n x1ls.append(x1)\n drw.ellipse(xy = ((x1-radius, gc1.y-radius), (x1+radius, gc1.y+radius)),\n fill = ellcol,\n outline = ellcol,\n width = 1)\n for p2 in link.pos[i]:\n x2 = gc2.x0 + (gc2.res * p2)\n x2ls.append(x2)\n drw.ellipse(xy = ((x2-radius, gc2.y-radius), (x2+radius, gc2.y+radius)),\n fill = ellcol,\n outline = ellcol,\n width = 1)\n # draw links\n if link.connect:\n for x1 in x1ls:\n for x2 in x2ls:\n drw.line(xy = ((x1, gc1.y), (x2, gc2.y)),\n fill = lcol,\n width = linkwidth)\n \n\n if show:\n img.show()\n\n return img, geneToCoord\n\n\n\n# user helper functions\n\ndef getAvailableFonts():\n \"\"\"\n Returns and prints a list of font paths found on your system\n that you can use with the draw() function\n \"\"\"\n \n system_fonts = font_manager.findSystemFonts(fontpaths=None, fontext='ttf')\n print(\"\\n\".join(system_fonts))\n return system_fonts\n\n\n\ndef getColorSheet(font = None):\n \"\"\"\n Draws a big image with all named colors available\n\n Parameters:\n font: path to a truetype font (if omitted, default font will be used with fixed fontsize)\n \"\"\"\n\n fakegenes = []\n colors = []\n i = 0\n for name, code in ImageColor.colormap.items():\n fakegenes.append(Gene(geneID = str(i)+\" - \"+name+\" // \"+str(code),\n speciesID = str(i // 4), # four colors per row\n length = 100, strand = \"+\"))\n colors.append(name)\n i += 1\n\n # calculate image height\n genewidth = 10\n fontsize = 12\n outerMargin = 10\n rowheight = genewidth + int(genewidth/2) + fontsize + genewidth # last genewith as minimal space between rows\n height = (2 * outerMargin) + (((i // 4)+1) * rowheight)\n \n draw(fakegenes, None, genecols = colors, \n genewidth = 10, outerMargin = outerMargin, fontsize = fontsize,\n height = height, font = font)","repo_name":"mabl3/GeneLinkDraw","sub_path":"geneLinkDraw.py","file_name":"geneLinkDraw.py","file_ext":"py","file_size_in_byte":22973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39178117957","text":"#coding:utf-8\n\nimport builtwith#查看网站使用的技术,安装:pip install builtwith\nimport whois# 查看网站所有者信息 ,安装:pip install python-whois\nimport urllib\n\n# builtTechology = builtwith.parse(urlAddress)\n# print(builtTechology)\n# print(whois.whois(\"www.baidu.com\"))\n\n'''\n请求网页地址,读取响应数据,并作出异常判断\n'''\ndef download(url,numRetries=2):\n\tprint (\"Downloading url:\",url)\n\ttry:\n\t \thtml = urllib.request.urlopen(url).read()\n\texcept urllib.error.URLError as e:\n\t \tprint (\"Downloading error:\",e.reason)\n\t \thtml = None\n\t \tif numRetries > 0:\n\t \t\t# 遇到500错误时,重试2次如果还是500则放弃\n\t \t\tif hasattr(e, 'code') and 500 <= e.code < 600:\n\t \t\t\treturn download(url,numRetries-1)\n\treturn html\n\n# 请求http://httpstat.us/500 返回500\n# urlAddress = \"http://httpstat.us/500\"\nurlAddress = \"http://www.meetup.com\"\n\nprint (\"Downloading url is:\",download(urlAddress))","repo_name":"wudibbs-gcl/crawling","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25856635488","text":"from utils.inference import generator\nfrom utils.datasets import getData\nfrom models.cnn import simple_CNN\nimport numpy as np\n\n\nX_train, y_train = getData('../datasets/gender_datasets/Training')\nX_test, y_test = getData('../datasets/gender_datasets/Validation')\n\n\nX_train_scaled = np.array([x.ravel()/255. for x in X_train])\nX_test_scaled = np.array([x.ravel()/255. for x in X_test])\n\nmodel, simple_CNN = simple_CNN((50, 50, 1), 2)\n\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\nmodel.fit(generator(X_train, y_train),steps_per_epoch=5,\n epochs=500,validation_data=generator(X_test, y_test),\n validation_steps=5)\n\nsimple_CNN.save(r'E:\\AI\\face\\trained_models\\gender_models\\gender_simple_CNN.hdf5')","repo_name":"TranMinh11/ProjectAI-","sub_path":"models/trainingGender.py","file_name":"trainingGender.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73916868855","text":"from rest_framework.decorators import detail_route\nfrom core.daos.datastreams import DataStreamDBDAO, DatastreamHitsDAO\nfrom core.v8.serializers import EngineSerializer\nfrom core.rest.views import ResourceViewSet\nfrom core.v8.forms import DatastreamRequestForm, UpdateGridRequestForm\nfrom rest_framework import renderers\nfrom core.v8.renderers import (CSVEngineRenderer, XLSEngineRenderer, \n HTMLEngineRenderer, GridEngineRenderer)\nfrom core.rest.mixins import ResourceHitsMixin\nfrom core.rest.renderers import UTF8JSONRenderer\n\nclass RestDataStreamViewSet(ResourceHitsMixin, ResourceViewSet):\n queryset = DataStreamDBDAO() \n serializer_class = EngineSerializer\n lookup_field = 'id'\n data_types = ['ds']\n dao_get_param = 'datastream_revision_id'\n dao_pk = 'datastream_revision_id'\n app = 'microsites'\n hits_dao_class = DatastreamHitsDAO\n \n @detail_route(methods=['get'], renderer_classes=[\n UTF8JSONRenderer,\n renderers.BrowsableAPIRenderer,\n CSVEngineRenderer,\n XLSEngineRenderer,\n HTMLEngineRenderer,\n GridEngineRenderer])\n def data(self, request, format=None, *args, **kwargs):\n if format == 'grid':\n return self.engine_call( request, 'invoke', 'json',\n form_class=UpdateGridRequestForm,\n serialize=False) \n return self.engine_call( request, 'invoke', format,\n form_class=DatastreamRequestForm,\n serialize=False)\n","repo_name":"anukat2015/datal","sub_path":"microsites/rest/datastreams.py","file_name":"datastreams.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27220562324","text":"import os\nimport uuid\nimport pytest\n\nfrom baram.poetry_manager import PoetryManager\n\n\n@pytest.fixture()\ndef pm():\n return PoetryManager()\n\n\ndef test_get_version(pm):\n version = pm.get_version()\n print(version)\n assert version\n\n\ndef test_save_load_toml(pm):\n tml = pm.load_toml()\n\n filename = str(uuid.uuid4())\n pm.save_toml(tml, filename)\n\n new_tml = pm.load_toml(filename)\n print(new_tml)\n assert new_tml\n\n os.remove(os.path.join(pm.path, filename))\n","repo_name":"lks21c/baram","sub_path":"tests/test_poetry_manager.py","file_name":"test_poetry_manager.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14912047684","text":"# Author: Jherez Taylor <jherez.taylor@gmail.com>\n# License: MIT\n# Python 3.5\n\n\"\"\"\nThis module houses various io functions for use throughout the project\n\"\"\"\nfrom time import time\nimport cProfile\nimport pstats\nimport requests\nimport ujson\nfrom . import settings\n\n\ndef timing(func):\n \"\"\"Decorator for timing run time of a function\n \"\"\"\n\n def wrap(*args):\n \"\"\"Wrapper\n \"\"\"\n time1 = time()\n ret = func(*args)\n time2 = time()\n settings.logger.debug(\"%s function took %0.3f ms\",\n func.__name__, (time2 - time1) * 1000.0)\n return ret\n\n return wrap\n\n\ndef do_cprofile(func):\n \"\"\"Decorator for profiling a function\n \"\"\"\n\n def profiled_func(*args, **kwargs):\n \"\"\"Wrapper\n \"\"\"\n profile = cProfile.Profile()\n try:\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n return result\n finally:\n stats = pstats.Stats(profile)\n stats.sort_stats(\"time\").print_stats(20)\n\n return profiled_func\n\n\n@timing\ndef send_job_notification(title, body):\n \"\"\" Send a notification via Pushbullet.\n\n Args:\n json_obj (json_obj).\n\n Indicates whether a job has completed or whether an error occured.\n \"\"\"\n headers = {\"Access-Token\": settings.PUSHBULLET_API_KEY,\n \"Content-Type\": \"application/json\"}\n payload = {\"type\": \"note\", \"title\": title, \"body\": ujson.dumps(body)}\n url = \"https://api.pushbullet.com/v2/pushes\"\n return requests.post(url, headers=headers, data=ujson.dumps(payload))\n\n\ndef send_job_completion(run_time, args):\n \"\"\"Format and print the details of a completed job\n\n Args:\n run_time (list): Start and end times.\n args (list): Contains the following:\n 0: function_name (str): Name of the function that was run.\n 1: message_text (str): Text to be sent in notification.\n \"\"\"\n\n time_diff = round((run_time[1] - run_time[0]), 2)\n settings.logger.debug(\"%s function took %0.3f seconds\", args[0], time_diff)\n send_notification = send_job_notification(\n settings.MONGO_SOURCE + \": \" + args[1] + \" took \" + str(time_diff) + \" seconds\", \"Complete\")\n settings.logger.debug(send_notification.content)\n","repo_name":"JherezTaylor/hatespeech_codewords","sub_path":"hatespeech_core/modules/utils/notifiers.py","file_name":"notifiers.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"20180108718","text":"from ece import ece\nfrom poly2 import poly\nfrom zzn import zzn\n\n\nclass ec:\n def __init__(self, field, a, b, G, n=None):\n if not isinstance(field, zzn):\n raise Exception\n\n self.field = field\n self.a = a\n self.b = b\n\n self.Z = ece(self)\n\n if G is not None:\n self.G = ece(self, G[0], G[1])\n self.n = n\n self.checkGeneratorOrder()\n\n def of(self, x, y):\n return ece(self, x, y)\n\n def checkGeneratorOrder(self):\n if (self.n * self.G != self.Z):\n raise Exception('Wrong generator order')\n\n def allG(self):\n yield self.Z\n\n t = self.G\n\n while not t == self.Z:\n yield t\n t = t + self.G\n\n def all(self, field=None):\n yield self.Z\n if not field:\n field = self.field\n for x in field.generator():\n y_2 = x ** 3 + self.a * x + self.b\n for y in y_2.sqrt():\n yield ece(self, x, y)\n\n def poly(self):\n p = poly(self.field)\n return p.of([self.field.one, self.field.zero, self.a, self.b])\n\n","repo_name":"airgordon/ec","sub_path":"ec.py","file_name":"ec.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36123970144","text":"class Solution:\n DP = {0: 1, 1: 1, 2: 2}\n \n def numTrees(self, n: int) -> int:\n if n in self.DP: return self.DP[n]\n else:\n total = 0\n for left in range(n):\n right = n - 1 - left\n total += self.numTrees(left) * self.numTrees(right)\n \n self.DP[n] = total\n return self.DP[n]\n\n\nclass Solution2:\n def numTrees(self, n: int) -> int:\n F = [0] * (n + 1)\n F[0], F[1] = 1, 1\n \n for i in range(2, n + 1):\n for j in range(i):\n F[i] += F[j] * F[i - 1 - j]\n \n return F[n]","repo_name":"mcao516/LeetCode_Practice","sub_path":"96_Unique_Binary_Search_Trees.py","file_name":"96_Unique_Binary_Search_Trees.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13429968063","text":"from django.contrib.auth.models import User\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom consumer.models import Consumer, Application\r\nfrom hr.models import Employee\r\n\r\nclass Meter(models.Model):\r\n METER_STATUS=(\r\n (1, \"Working\"),\r\n (2, \"Damaged\")\r\n )\r\n serial_no = models.CharField(max_length=30, unique=True)\r\n meter_category = models.ForeignKey(\"MeterCategory\", verbose_name='Meter Category(in mm)')\r\n meter_status = models.IntegerField(choices=METER_STATUS)\r\n date_purchased = models.DateField()\r\n reset_value = models.IntegerField(default=10000)\r\n \r\n class Meta:\r\n db_table = \"Meter\"\r\n \r\n def __unicode__(self):\r\n return \"%s\" % self.serial_no\r\n\r\nclass MeterCategory(models.Model):\r\n measure = models.IntegerField(unique=True, verbose_name='Measure(in mm)')\r\n thickness = models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Thickness(in inches)')\r\n rent_amount = models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Rent (Ksh)')\r\n \r\n class Meta:\r\n db_table = \"MeterCategory\"\r\n verbose_name_plural = \"Meter Categories\"\r\n \r\n def __unicode__(self):\r\n return \"%d mm\" % self.measure\r\n\r\nclass Account(models.Model):\r\n meter_no = models.ForeignKey(Meter, blank=True, null=True)\r\n application = models.ForeignKey(Application)\r\n sewer_connected = models.BooleanField(default=False)\r\n refuse = models.BooleanField(default=False)\r\n is_active = models.BooleanField(default=False)\r\n closed = models.BooleanField(default=False)\r\n date_activated = models.DateTimeField(\"Last Activated On\", editable=False, blank=True, null=True)\r\n date_closed = models.DateTimeField(\"Closed On\", editable=False, blank=True, null=True)\r\n \r\n class Meta:\r\n db_table = \"Account\"\r\n \r\n def __unicode__(self):\r\n return \"%s\" % (self.account_no)\r\n \r\n @property\r\n def account_no(self):\r\n return \"%d-%d%d\" % (self.application.consumer.pk, self.id, self.application.plot_no.sub_zone.pk)\r\n \r\n def account_balance(self):\r\n invoices = self.invoice_set.all()\r\n payments = self.payment_set.all()\r\n invoice_total = 0\r\n for invoice in invoices:\r\n invoice_total += invoice.total\r\n payment_total = 0\r\n for payment in payments:\r\n payment_total += payment.amount_paid\r\n return invoice_total-payment_total\r\n \r\nclass MeterReading(models.Model):\r\n account = models.ForeignKey(Account)\r\n reading = models.IntegerField(max_length=50, null=True)\r\n employee = models.ForeignKey(User, verbose_name=\"Submitted by\", null=True)\r\n reason = models.CharField(max_length=200, blank=True, null=True)\r\n date_recorded = models.DateTimeField(auto_now_add=True)\r\n\r\n class Meta:\r\n db_table = \"MeterReading\"\r\n def __unicode__(self):\r\n return \"%s\" % (self.reading)\r\n\r\nclass SubZone(models.Model):\r\n name = models.CharField(max_length=50, unique=True)\r\n zone = models.ForeignKey(\"Zone\")\r\n description = models.TextField(max_length=500)\r\n \r\n class Meta:\r\n db_table = \"SubZone\"\r\n \r\n def __unicode__(self):\r\n return \"%s\" % (self.name)\r\n \r\n def no_of_accounts(self):\r\n plots = self.plot_set.all()\r\n accounts_total = 0\r\n for plot in plots:\r\n for application in plot.application_set.all():\r\n accounts_total += application.account_set.all().__len__()\r\n return accounts_total\r\n \r\nclass Zone(models.Model):\r\n name = models.CharField(max_length=50, unique=True)\r\n region = models.ForeignKey(\"Region\")\r\n \r\n class Meta:\r\n db_table = \"Zone\"\r\n \r\n def no_of_accounts(self):\r\n subzones = self.subzone_set.all()\r\n accounts_total = 0\r\n for subzone in subzones:\r\n accounts_total += subzone.no_of_accounts\r\n return accounts_total\r\n \r\nclass Region(models.Model):\r\n name = models.CharField(max_length=50, unique=True)\r\n \r\n class Meta:\r\n db_table = \"Region\"\r\n \r\n def __unicode__(self):\r\n return \"%s\" % (self.name)\r\n \r\n def no_of_accounts(self):\r\n zones = self.zone_set.all()\r\n accounts_total = 0\r\n for zone in zones:\r\n accounts_total += zone.no_of_accounts\r\n return accounts_total\r\n \r\n ","repo_name":"KipkoTheGuru/majisoft","sub_path":"meter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"13947034394","text":"\"\"\"\nCore implementation of the state (i.e. \"memory\") system\nused by the agent.\n\nThis agent does not use recurrence or any learned\nconnectivity pattern to decide what to remember or attend\nto in the past. Rather, every frame generates a state\nvector, and the previous STATE_STACK state vectors are fed\ninto the agent at every timestep. The state vectors do\ninclude output from a neural network classifier, but this\nnetwork is not trained as part of the agent--rather, it is\npre-trained on hand-labeled images.\n\nSince the agent does not learn the state representation,\nwe can consider the states to be part of the environment.\nInstead of returning plain observations, the augmented\nenvironments return tuples of (state_stack, observation).\n\"\"\"\n\nimport gym\nimport numpy as np\nimport torch\n\nfrom .batched_env import BatchedWrapper\nfrom .constants import STATE_SIZE, STATE_STACK, NUM_ACTIONS\nfrom .model import StateClassifier\n\n\nclass StateEnv(gym.Wrapper):\n \"\"\"\n StateEnv is a single environment wrapper that adds\n state features as part of its observations.\n \"\"\"\n\n def __init__(self, env, state_features=None):\n super().__init__(env)\n self.state_features = state_features or StateFeatures()\n self.prev_states = np.zeros([STATE_STACK, STATE_SIZE], dtype=np.float32)\n\n def reset(self, **kwargs):\n obs = self.env.reset(**kwargs)\n self.prev_states.fill(0)\n feats = self.state_features.features(np.array([obs])[..., -3:])\n self.prev_states[-1, NUM_ACTIONS + 1:] = feats\n return (self.prev_states.copy(), obs)\n\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n self.prev_states[:-1] = self.prev_states[1:]\n self.prev_states[-1] = np.zeros_like(self.prev_states[-1])\n self.prev_states[-1, action] = 1\n self.prev_states[-1, NUM_ACTIONS] = rew\n feats = self.state_features.features(np.array([obs])[..., -3:])\n self.prev_states[-1, NUM_ACTIONS + 1:] = feats\n if 'extra_reward' in info:\n rew += info['extra_reward']\n return (self.prev_states.copy(), obs), rew, done, info\n\n\nclass BatchedStateEnv(BatchedWrapper):\n \"\"\"\n BatchedStateEnv wraps a BatchedEnv and adds state\n features to all the observations.\n\n This is more efficient than using a batch of StateEnv\n instances, since the classifier can be run on all of\n the observations in a single batch.\n \"\"\"\n\n def __init__(self, env, state_features=None):\n super().__init__(env)\n self.state_features = state_features or StateFeatures()\n self.prev_states = np.zeros([env.num_envs, STATE_STACK, STATE_SIZE], dtype=np.float32)\n\n def reset(self):\n obses = self.env.reset()\n feats = self.state_features.features(np.array(obses)[..., -3:])\n self.prev_states.fill(0)\n self.prev_states[:, -1, NUM_ACTIONS + 1:] = feats\n return (self.prev_states.copy(), obses)\n\n def step(self, actions):\n obses, rews, dones, infos = self.env.step(actions)\n self.prev_states[:, :-1] = self.prev_states[:, 1:]\n features = self.state_features.features(np.array(obses)[..., -3:])\n for i, done in enumerate(dones):\n if done:\n self.prev_states[i].fill(0.0)\n else:\n self.prev_states[i, -1].fill(0.0)\n self.prev_states[i, -1, actions[i]] = 1.0\n self.prev_states[i, -1, NUM_ACTIONS] = rews[i]\n self.prev_states[:, -1, NUM_ACTIONS + 1:] = features\n for i, info in enumerate(infos):\n if 'extra_reward' in info:\n rews[i] += info['extra_reward']\n return (self.prev_states.copy(), obses), rews, dones, infos\n\n\nclass StateFeatures:\n \"\"\"\n Generate the part of state vectors that reflect the\n observation. This does not include rewards or actions.\n \"\"\"\n\n def __init__(self, path='save_classifier.pkl'):\n self.classifier = StateClassifier()\n self.classifier.load_state_dict(torch.load(path))\n self.classifier.to(torch.device('cuda'))\n\n def features(self, obses):\n res = []\n for obs in obses:\n # Check if we have a key.\n if (obs[3] != 0).any():\n res.append([1.0])\n else:\n res.append([0.0])\n device = next(self.classifier.parameters()).device\n obs_tensor = torch.from_numpy(obses).to(device)\n class_out = torch.sigmoid(self.classifier(obs_tensor)).detach().cpu().numpy()\n return np.concatenate([np.array(res), class_out], axis=-1)\n","repo_name":"unixpickle/obs-tower2","sub_path":"obs_tower2/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"22"} +{"seq_id":"26988705929","text":"import math\nimport time\n\nfrom lib.utilities import get_digits\n\nif __name__ == \"__main__\":\n print(\"Starting....\")\n start = time.time()\n f = [0] * 10\n for n in range(10):\n f[n] = math.factorial(n)\n print(f)\n N = 1000000\n count = 0\n number_sums = {}\n while n > 1:\n n = N\n chain = {}\n E = True\n while E:\n s = 0\n if n in number_sums:\n s = number_sums[n]\n else:\n digits = get_digits(n)\n for d in digits:\n s += f[d]\n number_sums[n] = s\n if n not in chain:\n chain[n] = s\n n = s\n else:\n E = False\n N -= 1\n print(\"for {} length of chain is {}\".format(N + 1, len(chain)))\n if len(chain) == 60:\n count += 1\n print(count)\n end = time.time()\n print(\"Ending......\")\n print(\"Completed in {:.2}s\".format(end - start))\n","repo_name":"chanchs/euler","sub_path":"problems/problem-74.py","file_name":"problem-74.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40488941591","text":"import autograd.numpy as np\nfrom autograd import grad\nfrom scipy.optimize import fmin_l_bfgs_b\nimport traceback\nimport sys\nfrom .GP import GP\nfrom .activations import *\nimport random\n\nclass BO:\n def __init__(self, dataset, scale, bounds, bfgs_iter, debug=True):\n self.train_x = np.copy(dataset['train_x'])\n self.train_y = np.copy(dataset['train_y'])\n self.scale = scale\n self.bfgs_iter = bfgs_iter\n self.debug = debug\n self.dim = self.train_x.shape[0]\n self.outdim = self.train_y.shape[0]\n self.num_train = self.train_y.shape[1]\n self.construct_model()\n\n self.best_constr = np.inf\n self.best_y = np.zeros((self.outdim))\n self.best_y[0] = np.inf\n self.best_x = np.zeros((self.dim))\n self.get_best_y(self.train_x, self.train_y)\n\n\n def construct_model(self):\n dataset = {}\n dataset['train_x'] = self.train_x\n self.models = []\n for i in range(self.outdim):\n dataset['train_y'] = self.train_y[i:i+1]\n self.models.append(GP(dataset, bfgs_iter=self.bfgs_iter[i], debug=self.debug))\n self.models[i].train(scale=self.scale[i])\n print('BO. Finish constructing model.')\n\n def get_best_y(self, x, y):\n for i in range(y.shape[1]):\n constr = np.maximum(y[1:,i],0).sum()\n if constr < self.best_constr and self.best_constr > 0:\n self.best_constr = constr\n self.best_y = np.copy(y[:,i])\n self.best_x = np.copy(x[:,i])\n elif constr <= 0 and self.best_constr <= 0 and y[0,i] < self.best_y[0]:\n self.best_constr = constr\n self.best_y = np.copy(y[:,i])\n self.best_x = np.copy(x[:,i])\n\n def rand_x(self,n=1):\n tmp = np.random.uniform(0,1,(n))\n idx = (tmp < 0.4)\n x = np.random.uniform(-0.5, 0.5, (self.dim,n))\n x[:,idx] = (0.1*np.random.uniform(-0.5,0.5,(self.dim,idx.sum())).T + self.best_x).T\n x[:,idx] = np.maximum(-0.5, np.minimum(0.5, x[:,idx]))\n return x\n\n def wEI(self, x):\n x = x.reshape(self.dim, int(x.size/self.dim))\n EI = np.zeros((x.shape[1]))\n if self.best_constr <= 0:\n py, ps2 = self.models[0].predict(x)\n ps = np.sqrt(ps2) + 0.000001\n tmp = -(py - self.best_y[0])/ps\n # tmp > -40\n # tmp1 = np.maximum(-40, tmp)\n EI1 = ps*(tmp*cdf(tmp)+pdf(tmp))\n EI1 = np.log(np.maximum(0.000001, EI1))\n # tmp <= -40\n tmp2 = np.minimum(-40, tmp)**2\n EI2 = np.log(ps) - tmp2/2 - np.log(tmp2-1)\n # EI\n EI = EI1*(tmp > -40) + EI2*(tmp <= -40)\n PI = np.zeros((x.shape[1]))\n for i in range(1, self.outdim):\n py, ps2 = self.models[i].predict(x)\n ps = np.sqrt(ps2) + 0.000001\n PI = PI + logphi_vector(-py/ps)\n return EI+PI\n\n def predict(self, test_x):\n num_test = test_x.shape[1]\n py = np.zeros((self.outdim, num_test))\n ps2 = np.zeros((self.outdim, num_test))\n for i in range(self.outdim):\n py[i], ps2[i] = self.models[i].predict(test_x)\n return py, ps2\n \n \n\n\n\n\n","repo_name":"LilyEvansHogwarts/PlayGround","sub_path":"Multi-fidelity/GP/src/BO.py","file_name":"BO.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33080033215","text":"import chainer\nfrom chainer import functions as F\nfrom chainer import initializers as I\nfrom chainer import links as L\nfrom chainer import Parameter\n\nclass AttentionVLADpooling(chainer.Chain):\n def __init__(self, D, K):\n super(AttentionVLADpooling, self).__init__()\n self.D = D\n self.K = K\n initializer = I._get_initializer(None)\n\n with self.init_scope():\n self.wb = L.Convolution2D(D, K, ksize=1, stride=1, pad=0)\n self.c = Parameter(initializer, shape=(D, K))\n self.attn = L.Convolution2D(D, 1, ksize=1, stride=1, pad=0)\n\n def __call__(self, x):\n bs, channel, width, height = x.shape\n assert channel == self.D\n N = width * height\n\n # assignment\n a = self.wb(x)\n a = F.softmax(a)\n a = F.reshape(a, (bs, self.K, N))\n a = F.stack([a] * self.D, axis=1)\n\n # attention\n w = F.relu(self.attn(x))\n w = F.reshape(w, (bs, 1, N))\n w = F.stack([w] * self.D * self.K)\n w = F.reshape(w, (bs, self.D, self.K, N))\n\n x = F.reshape(x, (bs, self.D, N))\n x = F.stack([x] * self.K, axis=2)\n\n _c = F.broadcast_to(\n F.stack([self.c] * N, axis=2), (bs, self.D, self.K, N))\n\n v = F.sum(w * a * (x - _c), axis=3)\n\n v = F.normalize(v, axis=2)\n v = F.reshape(v, (bs, self.D * self.K))\n v = F.normalize(v, axis=1)\n return v\n","repo_name":"tn1031/NetVLAD","sub_path":"attention_vlad_pooling.py","file_name":"attention_vlad_pooling.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16965190268","text":"class Event:\r\n title = \"\"\r\n start_UTC = \"\"\r\n end_UTC = \"\"\r\n times = \"\"\r\n announcements = [10]\r\n description = None\r\n\r\n def __init__(self, title: str, start_UTC: str, end_UTC: str, times: str):\r\n self.title = title\r\n self.start_UTC = start_UTC\r\n self.end_UTC = end_UTC\r\n self.times = times\r\n","repo_name":"0xF76/QuantumBot","sub_path":"src/Event.py","file_name":"Event.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"36746245153","text":"import numpy as np\nimport itertools\n\n\ndef kakuro_combinations(target_sum, n_digits, include=set(), exclude=set()):\n output=set()\n# check if valid\n min_set = tuple(x+1 for x in range(n_digits))\n max_set = tuple(9-x for x in range(n_digits))\n min_sum = sum(min_set)\n max_sum = sum(max_set)\n if (target_sum > max_sum) or (target_sum < min_sum):\n print(f'{target_sum} is invalid for {n_digits}')\n elif target_sum == min_sum:\n print(f'minimum set for {target_sum}')\n output.add(min_set)\n elif target_sum == max_sum:\n print(f'maximum set for {target_sum}')\n output.add(max_set)\n else:\n digits = np.array([i for i in range(1,10)])\n choose = [1 if i<=n_digits else 0 for i in range(1,10)]\n guesses = list(set(itertools.permutations(choose)))\n for guess in guesses:\n if np.matmul(guess,digits) == target_sum:\n combo = guess*digits\n combo = combo[combo != 0]\n output.add(tuple(combo))\n# filter combos for additional information if provided\n blacklist = set()\n for combo in output:\n if any([known_not in combo for known_not in exclude]):\n blacklist.add(combo)\n elif any([known not in combo for known in include]):\n blacklist.add(combo)\n output.difference_update(blacklist)\n if len(output) == 0:\n print(f'After eliminating {len(blacklist)} combinations, none remained.')\n return output\n","repo_name":"avlam/Kakuro","sub_path":"kakuro_tools.py","file_name":"kakuro_tools.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37068334816","text":"import allure\nimport logging\nfrom pages.base_page_object import BasePageObject\nfrom pages.review_page import ReviewPage\nfrom webium import BasePage\nfrom allure import attachment_type\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ReviewActions(BasePage, BasePageObject):\n\n def __init__(self, app):\n self.app = app\n self.driver = app.driver\n self.review_actions = ReviewPage(driver=self.driver)\n\n @allure.step(\"Open review form to write a review\")\n def open_review_form(self):\n LOGGER.info(\"Open review form to write a review\")\n self.review_actions.click_create_review_button()\n\n @allure.step(\"Fill in review info\")\n def fill_in_review(self, title, content, username, email):\n LOGGER.info(\"Fill in review info\")\n self.review_actions.click_score_option()\n self.review_actions.type_review_title(title)\n self.review_actions.type_review_content(content)\n self.review_actions.type_review_username(username)\n self.review_actions.type_review_email(email)\n\n @allure.step(\"Post review\")\n def post_review(self):\n LOGGER.info(\"Post review\")\n self.review_actions.click_post_review_button()\n\n @allure.step(\"Verify created review\")\n def verify_review(self, title, content):\n LOGGER.info(\"Verify created review\")\n review_title = self.review_actions.get_review_title()\n review_content = self.review_actions.get_review_content()\n assert review_title == title, f\"Test create review failed. \" \\\n f\"Expected review title: {title}, \" \\\n f\"Actual review title: {review_title}\"\n assert review_content == content, f\"Test create review failed. \" \\\n f\"Expected review content: {content}, \" \\\n f\"Actual review content: {review_content}\"\n allure.attach(self.driver.get_screenshot_as_png(), \"screenshot\", attachment_type.PNG)\n","repo_name":"AlexeiMo/test_UI_python","sub_path":"actions/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10580042008","text":"from PIL import ImageFile\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\nimport random\r\nimport os\r\nfrom utils import *\r\nfrom vgg16_classifier import *\r\nfrom inceptionv3_classifier import *\r\nfrom resnet50_classifier import *\r\n\r\ninput_image = (264, 198)\r\n\r\ntrain_data_dir = 'data/264x198'\r\nvalidation_data_dir = 'data/264x198-val'\r\n\r\nnb_class = 23\r\n\r\n#######################################\r\n### VGG16\r\n#######################################\r\n\r\nepochs = 5\r\nbatch_size = 32\r\n\r\nmodel = CustomVGG16('vgg16', input_image, nb_class, weights='vgg16.h5')\r\nmodel.fit(train_data_dir, validation_data_dir, epochs, batch_size=batch_size)\r\n#vgg16.save('vgg16_weights.h5')\r\n#vgg16.evaluate(validation_data_dir, batch_size)\r\n\r\n#######################################\r\n\r\n\r\n#######################################\r\n### InceptionV3\r\n#######################################\r\n\r\nepochs = 5\r\nbatch_size = 32\r\n\r\nmodel = CustomInceptionV3('inceptionv3', input_image, nb_class, weights='inceptionv3.h5')\r\nmodel.fit(train_data_dir, validation_data_dir, epochs, batch_size=batch_size)\r\n#model.save('inceptionv3_weights.h5')\r\n#model.evaluate(validation_data_dir, batch_size)\r\n\r\n#######################################\r\n\r\n\r\n#######################################\r\n### Resnet50\r\n#######################################\r\n\r\nepochs = 5\r\nbatch_size = 32\r\n\r\nmodel = CustomRestNet50('resnet50', input_image, nb_class, weights='resnet50.h5')\r\nmodel.fit(train_data_dir, validation_data_dir, epochs, batch_size=batch_size)\r\n#model.save('resnet50_weights.h5')\r\n#model.evaluate(validation_data_dir, batch_size)\r\n\r\n#######################################\r\n","repo_name":"PoornaPragnaMS/ImageClassification","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70561103417","text":"from django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('transaction', '0019_transaction_ref_code'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='transaction',\n name='sender_name',\n field=models.CharField(max_length=250, blank=True),\n ),\n ]\n","repo_name":"ministryofjustice/money-to-prisoners-api","sub_path":"mtp_api/apps/transaction/migrations/0020_auto_20151203_0955.py","file_name":"0020_auto_20151203_0955.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"7445576171","text":"### GLOBAL_DEFINITIONS\n\n\n# STATIC VALUES\nPASS_MOVE = None\n\nBLACK = 1\nEMPTY = 0\nWHITE = -1\n\nMAX_VALUE = 65536.0\nMIN_VALUE = -65536.0\n\n\n# VARIABLES\nBOARD_SIZE = 9\nSQUARE_BOARD_SIZE = 81\nKOMI = 2.5\n\nNUM_STONES_TO_END_GAME = 72\n\nMAX_TURN = 60\nNUM_MOVES_TO_END_GAME = MAX_TURN * 2\n\nPARTIAL_OBSERVABILITY = 8\n\nADDITIONAL_SEARCH_COUNT_DURING_MCTS = 6\n\nSAVE_PERIOD = 10\n\nRANDOM_MOVE_PROB = 0.04\n\nTRAIN_COUNT = 1\n\nMAX_SAMPLE_COUNT = 6000\n\nMCTS_N_ITERS = 160\n\nNUM_SAMPLE_POOLS = 2 # Default : Black / White\n\nTRAIN_COUNTS_TO_SKIP_NONE_MOVE = 0\n\n# Play game counts (Must Be odd)\nDEFAULT_PLAY_GAME_COUNTS = 3\n\n# For pagame\nWINDOW_WIDTH = 500\nWINDOW_HEIGHT = 300\nBOARD_WIDTH = 300\nBOARD_HEIGHT = 300\n\nGRID_SIZE = 30\n\nBG_COLOR = (128, 128, 128)","repo_name":"JustAManPassingBy/ProjectBetaGoZero","sub_path":"definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5005216155","text":"from aiohttp import web\nimport logging\nimport aiohttp\n\nroutes = web.RouteTableDef()\n\n@routes.get('/')\nasync def hello(request):\n request.app['counter'] += 1\n n = request.app['counter']\n return web.Response(text=\"Hello, world {}\".format(n))\n\n\n@routes.get('/f')\nasync def f(request):\n status, response = 0, ''\n async with aiohttp.ClientSession() as session:\n async with session.get('http://httpbin.org/get') as resp:\n status = resp.status\n response = await resp.text()\n return web.Response(text=\"{},\\n{}\".format(status, response))\n\n\napp = web.Application()\napp.add_routes(routes)\napp['counter'] = 0\nlogging.basicConfig(level=logging.DEBUG)\nweb.run_app(app)\n","repo_name":"msehnout/openshift-deployment-example","sub_path":"central-node-python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72040307896","text":"# Airflow DAG Definition: AI Training Run\n#\n# Steps:\n# 1. Data prep job\n# 2. Dataset snapshot (for traceability)\n# 3. Training job\n# 4. Model snapshot (for versioning/baselining)\n# 5. Inference validation job\n\n\nfrom airflow import DAG\nfrom airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom kubernetes.client import models as k8s\nimport uuid\n\n\n##### DEFINE PARAMETERS: Modify parameter values in this section to match your environment #####\n\n## Define default args for DAG\nai_training_run_dag_default_args = {\n 'owner': 'NetApp'\n}\n\n## Define DAG details\nai_training_run_dag = DAG(\n dag_id='ai_training_run',\n default_args=ai_training_run_dag_default_args,\n schedule_interval=None,\n start_date=days_ago(2),\n tags=['training']\n)\n\n# Define Kubernetes namespace to execute DAG in\nnamespace = 'airflow'\n\n## Define volume details (change values as necessary to match your environment)\n\n# Dataset volume\ndataset_volume_pvc_existing = 'dataset-vol'\ndataset_volume = k8s.V1Volume(\n name=dataset_volume_pvc_existing,\n persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name=dataset_volume_pvc_existing),\n)\ndataset_volume_mount = k8s.V1VolumeMount(\n name=dataset_volume_pvc_existing, \n mount_path='/mnt/dataset', \n sub_path=None, \n read_only=False\n)\n\n# Model volume\nmodel_volume_pvc_existing = 'airflow-model-vol'\nmodel_volume = k8s.V1Volume(\n name=model_volume_pvc_existing,\n persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name=model_volume_pvc_existing),\n)\nmodel_volume_mount = k8s.V1VolumeMount(\n name=model_volume_pvc_existing, \n mount_path='/mnt/model', \n sub_path=None, \n read_only=False\n)\n\n## Define job details (change values as needed)\n\n# Data prep step\ndata_prep_step_container_image = \"nvcr.io/nvidia/tensorflow:21.03-tf1-py3\"\ndata_prep_step_command = [\"echo\", \"'No data prep command entered'\"] # Replace this echo command with the data prep command that you wish to execute\ndata_prep_step_resources = {} # Hint: To request that 1 GPU be allocated to job pod, change to: {'limit_gpu': 1}\n\n# Training step\ntrain_step_container_image = \"nvcr.io/nvidia/tensorflow:21.03-tf1-py3\"\ntrain_step_command = [\"echo\", \"'No training command entered'\"] # Replace this echo command with the training command that you wish to execute\ntrain_step_resources = {} # Hint: To request that 1 GPU be allocated to job pod, change to: {'limit_gpu': 1}\n\n# Inference validation step\nvalidate_step_container_image = \"nvcr.io/nvidia/tensorflow:21.03-tf1-py3\"\nvalidate_step_command = [\"echo\", \"'No inference validation command entered'\"] # Replace this echo command with the inference validation command that you wish to execute\nvalidate_step_resources = {} # Hint: To request that 1 GPU be allocated to job pod, change to: {'limit_gpu': 1}\n\n################################################################################################\n\n\n# Define DAG steps/workflow\nwith ai_training_run_dag as dag :\n\n # Define step to generate uuid for run\n generate_uuid = PythonOperator(\n task_id='generate-uuid',\n python_callable=lambda: str(uuid.uuid4())\n )\n\n # Define data prep step using Kubernetes Pod operator (https://airflow.apache.org/docs/stable/kubernetes.html#kubernetespodoperator)\n data_prep = KubernetesPodOperator(\n namespace=namespace,\n image=data_prep_step_container_image,\n cmds=data_prep_step_command,\n resources = data_prep_step_resources,\n volumes=[dataset_volume, model_volume],\n volume_mounts=[dataset_volume_mount, model_volume_mount],\n name=\"ai-training-run-data-prep\",\n task_id=\"data-prep\",\n is_delete_operator_pod=True,\n hostnetwork=False\n )\n\n # Define step to take a snapshot of the dataset volume for traceability\n dataset_snapshot = KubernetesPodOperator(\n namespace=namespace,\n image=\"python:3\",\n cmds=[\"/bin/bash\", \"-c\"],\n arguments=[\"\\\n python3 -m pip install netapp-dataops-k8s && \\\n netapp_dataops_k8s_cli.py create volume-snapshot --pvc-name=\" + str(dataset_volume_pvc_existing) + \" --snapshot-name=dataset-{{ task_instance.xcom_pull(task_ids='generate-uuid', dag_id='ai_training_run', key='return_value') }} --namespace=\" + namespace],\n name=\"ai-training-run-dataset-snapshot\",\n task_id=\"dataset-snapshot\",\n is_delete_operator_pod=True,\n hostnetwork=False\n )\n\n # State that the dataset snapshot should be created after the data prep job completes and the uuid job completes\n data_prep >> dataset_snapshot\n generate_uuid >> dataset_snapshot\n\n # Define training step using Kubernetes Pod operator (https://airflow.apache.org/docs/stable/kubernetes.html#kubernetespodoperator)\n train = KubernetesPodOperator(\n namespace=namespace,\n image=train_step_container_image,\n cmds=train_step_command,\n resources = train_step_resources,\n volumes=[dataset_volume, model_volume],\n volume_mounts=[dataset_volume_mount, model_volume_mount],\n name=\"ai-training-run-train\",\n task_id=\"train\",\n is_delete_operator_pod=True,\n hostnetwork=False\n )\n\n # State that training job should be executed after dataset volume snapshot is taken\n dataset_snapshot >> train\n\n # Define step to take a snapshot of the model volume for versioning/baselining\n model_snapshot = KubernetesPodOperator(\n namespace=namespace,\n image=\"python:3\",\n cmds=[\"/bin/bash\", \"-c\"],\n arguments=[\"\\\n python3 -m pip install netapp-dataops-k8s && \\\n netapp_dataops_k8s_cli.py create volume-snapshot --pvc-name=\" + str(model_volume_pvc_existing) + \" --snapshot-name=model-{{ task_instance.xcom_pull(task_ids='generate-uuid', dag_id='ai_training_run', key='return_value') }} --namespace=\" + namespace],\n name=\"ai-training-run-model-snapshot\",\n task_id=\"model-snapshot\",\n is_delete_operator_pod=True,\n hostnetwork=False\n )\n\n # State that the model snapshot should be created after the training job completes\n train >> model_snapshot\n\n # Define inference validation step using Kubernetes Pod operator (https://airflow.apache.org/docs/stable/kubernetes.html#kubernetespodoperator)\n validate = KubernetesPodOperator(\n namespace=namespace,\n image=validate_step_container_image,\n cmds=validate_step_command,\n resources = validate_step_resources,\n volumes=[dataset_volume, model_volume],\n volume_mounts=[dataset_volume_mount, model_volume_mount],\n name=\"ai-training-run-validate\",\n task_id=\"validate\",\n is_delete_operator_pod=True,\n hostnetwork=False\n )\n\n # State that inference validation job should be executed after model volume snapshot is taken\n model_snapshot >> validate\n","repo_name":"NetApp/netapp-dataops-toolkit","sub_path":"netapp_dataops_k8s/Examples/Airflow/ai-training-run.py","file_name":"ai-training-run.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"22"} +{"seq_id":"22282141962","text":"# ドイチのアルゴリズム(一定な関数と均等な関数の比較)\r\n# 量子回路をふたつつくって比較をおこなう\r\nfrom qiskit import *\r\nfrom qiskit.tools.visualization import plot_histogram, circuit_drawer\r\n\r\n\r\ndef c_oracle(qci, x, y_fx):\r\n qci.x(y_fx)\r\n\r\n\r\ndef b_oracle(qci, x, y_fx):\r\n qci.cx(x, y_fx)\r\n\r\n\r\nbn = 2 # 量子ビット数\r\n\r\nq1 = QuantumRegister(bn) # bn個の 量子 レジスタq1の 生成\r\nq2 = QuantumRegister(bn) # bn個の 量子 レジスタq2の 生成\r\nc = ClassicalRegister(bn) # cn個の 古典 的 レジスタcの 生成\r\n\r\nqc1 = QuantumCircuit(q1, c) # 量子 回路 qc1の 生成\r\nqc2 = QuantumCircuit(q2, c) # 量子 回路 qc2の 生成\r\n\r\nqc = qc1+qc2 # ふたつの回路を合成する\r\n\r\nqc.x(q1[1])\r\nqc.x(q2[1])\r\n\r\n\r\n# 量子ゲート部分 ****************\r\n\r\n# アダマールゲートでそれぞれのビットに対して、重ね合わせ状態を生成する\r\nfor i in range(bn):\r\n qc.h(q1[i])\r\nfor i in range(bn):\r\n qc.h(q2[i])\r\n\r\n# 量子オラクル部分\r\nc_oracle(qc, q1[0], q1[1])\r\nb_oracle(qc, q2[0], q2[1])\r\n\r\nfor i in range(bn):\r\n qc.h(q1[i])\r\nfor i in range(bn):\r\n qc.h(q2[i])\r\n\r\nqc.measure(q2[0], c[0])\r\nqc.measure(q1[0], c[1])\r\n\r\n# 量子ゲート部分 ****************\r\n\r\n\r\n#量子 回路 を 実 行 し、 結 果 rに 代入 する\r\n# Use Aer's qasm_simulator\r\nbackend_sim = BasicAer.get_backend('qasm_simulator')\r\nr = execute(qc, backend_sim, shots=8192).result()\r\nrc = r.get_counts()\r\nprint(rc)\r\n# circuit_drawer(qc)\r\nplot_histogram(rc)\r\n\r\n#実行結果として、|01>が観測される。\r\n# 前半の量子オラクルは「一定な関数」と判断でき、後半の量子オラクルは均等な関数と判断できる\r\n","repo_name":"shunak/QuantumProgramming","sub_path":"Deutsch_alg_comp_const_variable.py","file_name":"Deutsch_alg_comp_const_variable.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69947795898","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ncities = pd.read_csv(\"../input/cities.csv\")\ncities.head()\ndef is_prime(n):\n \"\"\"Determines if a positive integer is prime.\"\"\"\n\n if n > 2:\n i = 2\n while i ** 2 <= n:\n if n % i:\n i += 1\n else:\n return False\n elif n != 2:\n return False\n return True\n#Create a column within the cities dataframe to flag prime cities\ncities['is_prime'] = cities.CityId.apply(is_prime)\nfig = plt.figure(figsize=(18,18))\nplt.scatter(cities.X, cities.Y, c=cities['is_prime'], marker=\".\", alpha=.5);\n","repo_name":"aorursy/new-nb-5","sub_path":"mtodisco10_visualizing-the-prime-path.py","file_name":"mtodisco10_visualizing-the-prime-path.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9065200965","text":"\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndf = pd.read_csv('StressLevelDataset.csv')\ndf.head()\n\n\nprint(df.describe())\n\n\n# Calcula la matriz de correlación\ncorrelation_matrix = round(df.corr(),2)\n\n# Visualiza la matriz de correlación con seaborn\nplt.figure(figsize=(10, 8))\nsns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)\nplt.title('Matriz de Correlación')\nplt.show()\n\n\n","repo_name":"Haroldgio28/docker_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"15704107339","text":"import os\nimport wandb\nimport pickle\nimport random\nimport numpy as np\nimport pandas as pd\nfrom copy import copy\nfrom pymgrid import MicrogridGenerator as m_gen\nfrom pymgrid.Environments.ScenarioEnvironment import CSPLAScenarioEnvironment\nfrom pymgrid.Environments.rule_based import RuleBaseControl, MacroEnvironment\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef get_microgrid(id=1, export_price_factor=0.0):\n # Create 25 defaults microgrids\n env = m_gen.MicrogridGenerator(nb_microgrid=25)\n pymgrid25 = env.load(\"pymgrid25\")\n microgrids_25 = pymgrid25.microgrids\n\n # Select the 1 microgrid\n mg = microgrids_25[1]\n\n # Modify export prices to not be 0\n mg._grid_price_export[0] = mg._grid_price_import[0] * export_price_factor\n mg._grid_status_ts[0] = pd.Series(np.ones(len(mg._grid_status_ts)))\n\n return mg\n\n\ndir = os.path.dirname(__file__)\nwith open(os.path.join(dir, \"data/fakeYears_archId1_15042022.pkl\"), \"rb\") as f:\n fake_data = pickle.load(f)\n\n\ndef get_train_env(year=0, export_price_factor=0):\n starts = list(range(0, 6759, 2000))\n mg = get_microgrid(id=1, export_price_factor=export_price_factor)\n mg_train = copy(mg)\n return CSPLAScenarioEnvironment(\n starts,\n 2000,\n {\"microgrid\": mg_train},\n fake_data[\"tsSample\"][year][0][:, None],\n fake_data[\"tsSample\"][year][1][:, None],\n )\n\n\ndef get_macro_environments(micropolicies, export_price_factor=0, pv_factor=1):\n mg = get_microgrid(id=1, export_price_factor=export_price_factor)\n mg_train = copy(mg)\n mg_test = copy(mg)\n\n mg_env_train = MacroEnvironment(\n {\"microgrid\": mg_train}, micropolicies, pv_factor=pv_factor\n )\n mg_env_eval = MacroEnvironment(\n {\"microgrid\": mg_test}, micropolicies, pv_factor=pv_factor\n )\n return mg_env_train, mg_env_eval\n\n\ndef get_opposite_environments(\n pv_factor=1.0, action_design=\"original\", export_price_factor=0\n):\n mg_plus = get_microgrid(id=1, export_price_factor=export_price_factor)\n mg_min = get_microgrid(id=1, export_price_factor=-export_price_factor)\n microgrids = []\n for mg in (mg_plus, mg_min):\n mg._data_set_to_use = \"all\"\n mg.dataset_to_use_default = \"all\"\n mg.TRAIN = False\n mg_train = copy(mg)\n mg_test = copy(mg)\n microgrids.append({\"train\": mg_train, \"test\": mg_test})\n LEN = 1000\n # starts = list(range(0, 6759, 2000))\n starts = [np.random.randint(8739 - LEN) for i in range(5000)]\n # starts = [0]\n mg_env_train = RuleBaseControl(\n starts,\n LEN,\n {\"microgrid\": (microgrids[0][\"train\"], microgrids[1][\"train\"])},\n customPVTs=fake_data[\"tsSample\"][0][1][:, None],\n customLoadTs=fake_data[\"tsSample\"][0][0][:, None],\n action_design=action_design,\n )\n\n mg_env_eval = RuleBaseControl(\n [0],\n 8760,\n {\"microgrid\": (microgrids[0][\"test\"], microgrids[1][\"test\"])},\n action_design=action_design,\n )\n return mg_env_train, mg_env_eval\n\n\ndef get_environments(pv_factor=1.0, action_design=\"original\", export_price_factor=0):\n mg = get_microgrid(id=1, export_price_factor=export_price_factor)\n mg._data_set_to_use = \"all\"\n mg.dataset_to_use_default = \"all\"\n mg.TRAIN = False\n mg_train = copy(mg)\n mg_test = copy(mg)\n LEN = 1000\n # starts = list(range(0, 6759, 2000))\n starts = [np.random.randint(8739 - LEN) for i in range(5000)]\n # starts = [0]\n mg_env_train = RuleBaseControl(\n starts,\n LEN,\n {\"microgrid\": mg_train},\n customPVTs=fake_data[\"tsSample\"][0][1][:, None],\n customLoadTs=fake_data[\"tsSample\"][0][0][:, None],\n action_design=action_design,\n )\n\n mg_env_eval = RuleBaseControl(\n [0],\n 8760,\n {\"microgrid\": mg_test},\n # customPVTs=fake_data[\"tsSample\"][0][1][:, None],\n # customLoadTs=fake_data[\"tsSample\"][0][0][:, None],\n action_design=action_design,\n pv_factor=pv_factor,\n )\n return mg_env_train, mg_env_eval\n\n\ndef get_environments_for_cluster(\n cluster,\n pv_factor=1.0,\n starts_file=\"clusteringResultPymgrid25_configcfgN10k200_fakeYearsAssignmentNN.pkl\",\n action_design=\"original\",\n export_price_factor=0,\n seed=42,\n):\n mg = get_microgrid(id=1, export_price_factor=export_price_factor)\n mg_train = copy(mg)\n mg_test = copy(mg)\n object = read_pickle(f\"data/{starts_file}\")[0]\n cluster_ids = object[\"clusterAssignments\"][0]\n starts = object[\"pieceStarts\"][0]\n max_cluster = max(cluster_ids)\n if cluster > max_cluster:\n raise ValueError(\n f\"Cluster {cluster} does not exist, max cluster is {max_cluster}\"\n )\n\n # Get the whole starts list for the current cluster\n starts_cluster = starts[cluster_ids == cluster]\n # Split the starts list between train and test\n train_starts, test_starts = train_test_split(starts_cluster, seed=42)\n mg_env_train = CSPLAScenarioEnvironment(\n starts_cluster,\n 1000,\n {\"microgrid\": mg_train},\n customPVTs=fake_data[\"tsSample\"][0][1][:, None],\n customLoadTs=fake_data[\"tsSample\"][0][0][:, None],\n action_design=action_design,\n pv_factor=pv_factor,\n )\n mg_env_eval = CSPLAScenarioEnvironment(\n [0],\n 8760,\n {\"microgrid\": mg_test},\n # customPVTs=fake_data[\"tsSample\"][0][1][:, None],\n # customLoadTs=fake_data[\"tsSample\"][0][0][:, None],\n action_design=action_design,\n )\n\n return mg_env_train, mg_env_eval\n\n\ndef read_pickle(file):\n objects = []\n with (open(file, \"rb\")) as openfile:\n while True:\n try:\n objects.append(pickle.load(openfile))\n except EOFError:\n break\n return objects\n\n\ndef train_test_split(list_in, ratio=0.7, seed=None):\n if seed is not None:\n random.seed(seed)\n random.shuffle(list_in)\n split_idx = int(ratio * len(list_in))\n train = list_in[:split_idx]\n test = list_in[split_idx:]\n return train, test\n\n\nLOG_DIR = \"./logs\"\nos.makedirs(LOG_DIR, exist_ok=True)\nwriter = SummaryWriter(log_dir=LOG_DIR)\n\n\ndef log_timestep(reward, timestep, out: str):\n if out.lower() == \"wandb\":\n wandb.log({\"Test/reward\": reward}, step=timestep)\n elif out.lower() == \"tensorboard\":\n writer.add_scalar(\"Test/reward\", reward, timestep)\n else:\n raise ValueError(f\"Unrecognized output for logging : {out}\")\n\n\ndef log_episode(reward_sum, out: str):\n if out.lower() == \"wandb\":\n wandb.log({\"Test/reward_sum\": reward_sum}, step=1)\n elif out.lower() == \"tensorboard\":\n print(\"LOGGING SUM\")\n writer.add_scalar(\"Test/reward_sum\", reward_sum, 1)\n else:\n raise ValueError(f\"Unrecognized output for logging : {out}\")\n","repo_name":"YannBerthelot/PymgridExperiments","sub_path":"src/pymgridexperiments/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"}